Dataset schema (column name, dtype, and the min/max lengths or values reported by the dataset viewer):

| column | dtype | min | max |
|---|---|---|---|
| instance_id | string (length) | 10 | 57 |
| base_commit | string (length) | 40 | 40 |
| created_at | string (date) | 2014-04-30 14:58:36 | 2025-04-30 20:14:11 |
| environment_setup_commit | string (length) | 40 | 40 |
| hints_text | string (length) | 0 | 273k |
| patch | string (length) | 251 | 7.06M |
| problem_statement | string (length) | 11 | 52.5k |
| repo | string (length) | 7 | 53 |
| test_patch | string (length) | 231 | 997k |
| meta | dict | | |
| version | string (851 classes) | | |
| install_config | dict | | |
| requirements | string (length) | 93 | 34.2k |
| environment | string (length) | 760 | 20.5k |
| FAIL_TO_PASS | list (length) | 1 | 9.39k |
| FAIL_TO_FAIL | list (length) | 0 | 2.69k |
| PASS_TO_PASS | list (length) | 0 | 7.87k |
| PASS_TO_FAIL | list (length) | 0 | 192 |
| license_name | string (55 classes) | | |
| __index_level_0__ | int64 | 0 | 21.4k |
| before_filepaths | list (length) | 1 | 105 |
| after_filepaths | list (length) | 1 | 105 |
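As a hedged illustration only, the sketch below shows how a dataset with this schema could be loaded and inspected with the Hugging Face `datasets` library. The dataset ID is a placeholder, since this dump does not name the dataset.

```python
# Hedged sketch: load a dataset with the schema above and look at one record.
# "<org>/<swe-style-dataset>" is a placeholder, not a real dataset ID.
from datasets import load_dataset

ds = load_dataset("<org>/<swe-style-dataset>", split="train")

print(ds.column_names)              # instance_id, base_commit, created_at, ...
row = ds[0]
print(row["instance_id"])           # e.g. "mozilla__bleach-389"
print(row["repo"], row["version"])  # repository and package version under test
print(row["FAIL_TO_PASS"][:3])      # tests expected to flip from fail to pass
```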
instance_id: mozilla__bleach-389
base_commit: a06cd773694721f7cace21d09958afdf301f338d
created_at: 2018-08-28 19:16:17
environment_setup_commit: c27512d20b48b7901687b62d15c91be1de856f89
hints_text: (empty)
patch:
diff --git a/CHANGES b/CHANGES index 423c1ec..427363e 100644 --- a/CHANGES +++ b/CHANGES @@ -1,7 +1,7 @@ Bleach changes ============== -Version 2.1.5 (in development) +Version 3.0.0 (in development) ------------------------------ **Security fixes** @@ -10,12 +10,32 @@ None **Backwards incompatible changes** -None +* A bunch of functions were moved from one module to another: + + ``bleach.sanitizer`` -> ``bleach.html5lib_shim``: + + * ``convert_entity`` + * ``convert_entities`` + * ``match_entity`` + * ``next_possible_entity`` + * ``BleachHTMLSerializer`` + * ``BleachHTMLTokenizer`` + * ``BleachHTMLParser`` + + These weren't documented and aren't part of the public API, but people + read code and might be using them so we're considering it an incompatible + API change. + + If you're using them, you'll need to update your code. **Features** -* No longer depends on html5lib. html5lib==1.0.1 was vendored into Bleach. - (#386) +* Bleach no longer depends on html5lib. html5lib==1.0.1 is now vendored into + Bleach. You can remove it from your requirements file if none of your other + requirements require html5lib. + + This means Bleach will now work fine with other libraries that depend on + html5lib regardless of what version of html5lib they require. (#386) **Bug fixes** diff --git a/bleach/__init__.py b/bleach/__init__.py index f953fc5..b4a927a 100644 --- a/bleach/__init__.py +++ b/bleach/__init__.py @@ -20,7 +20,7 @@ from bleach.sanitizer import ( # yyyymmdd __releasedate__ = '' # x.y.z or x.y.z.dev0 -- semver -__version__ = '2.1.5.dev0' +__version__ = '3.0.0.dev0' VERSION = parse_version(__version__) diff --git a/bleach/html5lib_shim.py b/bleach/html5lib_shim.py new file mode 100644 index 0000000..e84011d --- /dev/null +++ b/bleach/html5lib_shim.py @@ -0,0 +1,266 @@ +# flake8: noqa +""" +Shim module between Bleach and html5lib. This makes it easier to upgrade the +html5lib library without having to change a lot of code. +""" + +from __future__ import unicode_literals + +import re +import string + +import six + +from bleach._vendor.html5lib import ( + HTMLParser, + getTreeWalker, +) +from bleach._vendor.html5lib.constants import ( + entities, + namespaces, + prefixes, + tokenTypes, +) +from bleach._vendor.html5lib.constants import _ReparseException as ReparseException +from bleach._vendor.html5lib.filters.base import Filter +from bleach._vendor.html5lib.filters.sanitizer import allowed_protocols +from bleach._vendor.html5lib.filters.sanitizer import Filter as SanitizerFilter +from bleach._vendor.html5lib.serializer import HTMLSerializer +from bleach._vendor.html5lib._tokenizer import HTMLTokenizer +from bleach._vendor.html5lib._trie import Trie + + +#: Map of entity name to expanded entity +ENTITIES = entities + +#: Trie of html entity string -> character representation +ENTITIES_TRIE = Trie(ENTITIES) + + +class BleachHTMLTokenizer(HTMLTokenizer): + """Tokenizer that doesn't consume character entities""" + def consumeEntity(self, allowedChar=None, fromAttribute=False): + # We don't want to consume and convert entities, so this overrides the + # html5lib tokenizer's consumeEntity so that it's now a no-op. + # + # However, when that gets called, it's consumed an &, so we put that in + # the stream. 
+ if fromAttribute: + self.currentToken['data'][-1][1] += '&' + + else: + self.tokenQueue.append({"type": tokenTypes['Characters'], "data": '&'}) + + +class BleachHTMLParser(HTMLParser): + """Parser that uses BleachHTMLTokenizer""" + def _parse(self, stream, innerHTML=False, container='div', scripting=False, **kwargs): + # Override HTMLParser so we can swap out the tokenizer for our own. + self.innerHTMLMode = innerHTML + self.container = container + self.scripting = scripting + self.tokenizer = BleachHTMLTokenizer(stream, parser=self, **kwargs) + self.reset() + + try: + self.mainLoop() + except ReparseException: + self.reset() + self.mainLoop() + + +def convert_entity(value): + """Convert an entity (minus the & and ; part) into what it represents + + This handles numeric, hex, and text entities. + + :arg value: the string (minus the ``&`` and ``;`` part) to convert + + :returns: unicode character or None if it's an ambiguous ampersand that + doesn't match a character entity + + """ + if value[0] == '#': + if value[1] in ('x', 'X'): + return six.unichr(int(value[2:], 16)) + return six.unichr(int(value[1:], 10)) + + return ENTITIES.get(value, None) + + +def convert_entities(text): + """Converts all found entities in the text + + :arg text: the text to convert entities in + + :returns: unicode text with converted entities + + """ + if '&' not in text: + return text + + new_text = [] + for part in next_possible_entity(text): + if not part: + continue + + if part.startswith('&'): + entity = match_entity(part) + if entity is not None: + converted = convert_entity(entity) + + # If it's not an ambiguous ampersand, then replace with the + # unicode character. Otherwise, we leave the entity in. + if converted is not None: + new_text.append(converted) + remainder = part[len(entity) + 2:] + if part: + new_text.append(remainder) + continue + + new_text.append(part) + + return u''.join(new_text) + + +def match_entity(stream): + """Returns first entity in stream or None if no entity exists + + Note: For Bleach purposes, entities must start with a "&" and end with + a ";". This ignoresambiguous character entities that have no ";" at the + end. + + :arg stream: the character stream + + :returns: ``None`` or the entity string without "&" or ";" + + """ + # Nix the & at the beginning + if stream[0] != '&': + raise ValueError('Stream should begin with "&"') + + stream = stream[1:] + + stream = list(stream) + possible_entity = '' + end_characters = '<&=;' + string.whitespace + + # Handle number entities + if stream and stream[0] == '#': + possible_entity = '#' + stream.pop(0) + + if stream and stream[0] in ('x', 'X'): + allowed = '0123456789abcdefABCDEF' + possible_entity += stream.pop(0) + else: + allowed = '0123456789' + + # FIXME(willkg): Do we want to make sure these are valid number + # entities? This doesn't do that currently. 
+ while stream and stream[0] not in end_characters: + c = stream.pop(0) + if c not in allowed: + break + possible_entity += c + + if possible_entity and stream and stream[0] == ';': + return possible_entity + return None + + # Handle character entities + while stream and stream[0] not in end_characters: + c = stream.pop(0) + if not ENTITIES_TRIE.has_keys_with_prefix(possible_entity): + break + possible_entity += c + + if possible_entity and stream and stream[0] == ';': + return possible_entity + + return None + + +AMP_SPLIT_RE = re.compile('(&)') + + +def next_possible_entity(text): + """Takes a text and generates a list of possible entities + + :arg text: the text to look at + + :returns: generator where each part (except the first) starts with an + "&" + + """ + for i, part in enumerate(AMP_SPLIT_RE.split(text)): + if i == 0: + yield part + elif i % 2 == 0: + yield '&' + part + + +class BleachHTMLSerializer(HTMLSerializer): + """HTMLSerializer that undoes & -> &amp; in attributes""" + def escape_base_amp(self, stoken): + """Escapes just bare & in HTML attribute values""" + # First, undo escaping of &. We need to do this because html5lib's + # HTMLSerializer expected the tokenizer to consume all the character + # entities and convert them to their respective characters, but the + # BleachHTMLTokenizer doesn't do that. For example, this fixes + # &amp;entity; back to &entity; . + stoken = stoken.replace('&amp;', '&') + + # However, we do want all bare & that are not marking character + # entities to be changed to &amp;, so let's do that carefully here. + for part in next_possible_entity(stoken): + if not part: + continue + + if part.startswith('&'): + entity = match_entity(part) + # Only leave entities in that are not ambiguous. If they're + # ambiguous, then we escape the ampersand. + if entity is not None and convert_entity(entity) is not None: + yield '&' + entity + ';' + + # Length of the entity plus 2--one for & at the beginning + # and and one for ; at the end + part = part[len(entity) + 2:] + if part: + yield part + continue + + yield part.replace('&', '&amp;') + + def serialize(self, treewalker, encoding=None): + """Wrap HTMLSerializer.serialize and conver & to &amp; in attribute values + + Note that this converts & to &amp; in attribute values where the & isn't + already part of an unambiguous character entity. 
+ + """ + in_tag = False + after_equals = False + + for stoken in super(BleachHTMLSerializer, self).serialize(treewalker, encoding): + if in_tag: + if stoken == '>': + in_tag = False + + elif after_equals: + if stoken != '"': + for part in self.escape_base_amp(stoken): + yield part + + after_equals = False + continue + + elif stoken == '=': + after_equals = True + + yield stoken + else: + if stoken.startswith('<'): + in_tag = True + yield stoken diff --git a/bleach/linkifier.py b/bleach/linkifier.py index ef8afc6..3c8c3ee 100644 --- a/bleach/linkifier.py +++ b/bleach/linkifier.py @@ -2,12 +2,8 @@ from __future__ import unicode_literals import re import six -from bleach._vendor import html5lib -from bleach._vendor.html5lib.filters.base import Filter -from bleach._vendor.html5lib.filters.sanitizer import allowed_protocols -from bleach._vendor.html5lib.serializer import HTMLSerializer - from bleach import callbacks as linkify_callbacks +from bleach import html5lib_shim from bleach.utils import alphabetize_attributes, force_unicode @@ -33,7 +29,7 @@ TLDS = """ac ad ae aero af ag ai al am an ao aq ar arpa as asia at au aw ax az TLDS.reverse() -def build_url_re(tlds=TLDS, protocols=allowed_protocols): +def build_url_re(tlds=TLDS, protocols=html5lib_shim.allowed_protocols): """Builds the url regex used by linkifier If you want a different set of tlds or allowed protocols, pass those in @@ -114,9 +110,9 @@ class Linker(object): self.url_re = url_re self.email_re = email_re - self.parser = html5lib.HTMLParser(namespaceHTMLElements=False) - self.walker = html5lib.getTreeWalker('etree') - self.serializer = HTMLSerializer( + self.parser = html5lib_shim.HTMLParser(namespaceHTMLElements=False) + self.walker = html5lib_shim.getTreeWalker('etree') + self.serializer = html5lib_shim.HTMLSerializer( quote_attr_values='always', omit_optional_tags=False, @@ -157,7 +153,7 @@ class Linker(object): return self.serializer.render(filtered) -class LinkifyFilter(Filter): +class LinkifyFilter(html5lib_shim.Filter): """html5lib filter that linkifies text This will do the following: diff --git a/bleach/sanitizer.py b/bleach/sanitizer.py index 7cbb9e8..05e2675 100644 --- a/bleach/sanitizer.py +++ b/bleach/sanitizer.py @@ -1,35 +1,16 @@ from __future__ import unicode_literals + from itertools import chain import re -import string import six from six.moves.urllib.parse import urlparse from xml.sax.saxutils import unescape -from bleach._vendor import html5lib -from bleach._vendor.html5lib.constants import ( - entities, - namespaces, - prefixes, - tokenTypes, -) -from bleach._vendor.html5lib.constants import _ReparseException as ReparseException -from bleach._vendor.html5lib.filters.base import Filter -from bleach._vendor.html5lib.filters import sanitizer -from bleach._vendor.html5lib.serializer import HTMLSerializer -from bleach._vendor.html5lib._tokenizer import HTMLTokenizer -from bleach._vendor.html5lib._trie import Trie - +from bleach import html5lib_shim from bleach.utils import alphabetize_attributes, force_unicode -#: Map of entity name to expanded entity -ENTITIES = entities - -#: Trie of html entity string -> character representation -ENTITIES_TRIE = Trie(ENTITIES) - #: List of allowed tags ALLOWED_TAGS = [ 'a', @@ -54,17 +35,12 @@ ALLOWED_ATTRIBUTES = { 'acronym': ['title'], } - #: List of allowed styles ALLOWED_STYLES = [] - #: List of allowed protocols ALLOWED_PROTOCOLS = ['http', 'https', 'mailto'] - -AMP_SPLIT_RE = re.compile('(&)') - #: Invisible characters--0 to and including 31 except 9 (tab), 10 (lf), and 
13 (cr) INVISIBLE_CHARACTERS = ''.join([chr(c) for c in chain(range(0, 9), range(11, 13), range(14, 32))]) @@ -79,90 +55,6 @@ INVISIBLE_CHARACTERS_RE = re.compile( INVISIBLE_REPLACEMENT_CHAR = '?' -def convert_entity(value): - """Convert an entity (minus the & and ; part) into what it represents - - This handles numeric, hex, and text entities. - - :arg value: the string (minus the ``&`` and ``;`` part) to convert - - :returns: unicode character or None if it's an ambiguous ampersand that - doesn't match a character entity - - """ - if value[0] == '#': - if value[1] in ('x', 'X'): - return six.unichr(int(value[2:], 16)) - return six.unichr(int(value[1:], 10)) - - return ENTITIES.get(value, None) - - -def convert_entities(text): - """Converts all found entities in the text - - :arg text: the text to convert entities in - - :returns: unicode text with converted entities - - """ - if '&' not in text: - return text - - new_text = [] - for part in next_possible_entity(text): - if not part: - continue - - if part.startswith('&'): - entity = match_entity(part) - if entity is not None: - converted = convert_entity(entity) - - # If it's not an ambiguous ampersand, then replace with the - # unicode character. Otherwise, we leave the entity in. - if converted is not None: - new_text.append(converted) - remainder = part[len(entity) + 2:] - if part: - new_text.append(remainder) - continue - - new_text.append(part) - - return u''.join(new_text) - - -class BleachHTMLTokenizer(HTMLTokenizer): - def consumeEntity(self, allowedChar=None, fromAttribute=False): - # We don't want to consume and convert entities, so this overrides the - # html5lib tokenizer's consumeEntity so that it's now a no-op. - # - # However, when that gets called, it's consumed an &, so we put that in - # the stream. - if fromAttribute: - self.currentToken['data'][-1][1] += '&' - - else: - self.tokenQueue.append({"type": tokenTypes['Characters'], "data": '&'}) - - -class BleachHTMLParser(html5lib.HTMLParser): - def _parse(self, stream, innerHTML=False, container="div", scripting=False, **kwargs): - # Override HTMLParser so we can swap out the tokenizer for our own. - self.innerHTMLMode = innerHTML - self.container = container - self.scripting = scripting - self.tokenizer = BleachHTMLTokenizer(stream, parser=self, **kwargs) - self.reset() - - try: - self.mainLoop() - except ReparseException: - self.reset() - self.mainLoop() - - class Cleaner(object): """Cleaner for cleaning HTML fragments of malicious content @@ -223,9 +115,9 @@ class Cleaner(object): self.strip_comments = strip_comments self.filters = filters or [] - self.parser = BleachHTMLParser(namespaceHTMLElements=False) - self.walker = html5lib.getTreeWalker('etree') - self.serializer = BleachHTMLSerializer( + self.parser = html5lib_shim.BleachHTMLParser(namespaceHTMLElements=False) + self.walker = html5lib_shim.getTreeWalker('etree') + self.serializer = html5lib_shim.BleachHTMLSerializer( quote_attr_values='always', omit_optional_tags=False, escape_lt_in_attrs=True, @@ -325,80 +217,7 @@ def attribute_filter_factory(attributes): raise ValueError('attributes needs to be a callable, a list or a dict') -def match_entity(stream): - """Returns first entity in stream or None if no entity exists - - Note: For Bleach purposes, entities must start with a "&" and end with - a ";". 
- - :arg stream: the character stream - - :returns: ``None`` or the entity string without "&" or ";" - - """ - # Nix the & at the beginning - if stream[0] != '&': - raise ValueError('Stream should begin with "&"') - - stream = stream[1:] - - stream = list(stream) - possible_entity = '' - end_characters = '<&=;' + string.whitespace - - # Handle number entities - if stream and stream[0] == '#': - possible_entity = '#' - stream.pop(0) - - if stream and stream[0] in ('x', 'X'): - allowed = '0123456789abcdefABCDEF' - possible_entity += stream.pop(0) - else: - allowed = '0123456789' - - # FIXME(willkg): Do we want to make sure these are valid number - # entities? This doesn't do that currently. - while stream and stream[0] not in end_characters: - c = stream.pop(0) - if c not in allowed: - break - possible_entity += c - - if possible_entity and stream and stream[0] == ';': - return possible_entity - return None - - # Handle character entities - while stream and stream[0] not in end_characters: - c = stream.pop(0) - if not ENTITIES_TRIE.has_keys_with_prefix(possible_entity): - break - possible_entity += c - - if possible_entity and stream and stream[0] == ';': - return possible_entity - - return None - - -def next_possible_entity(text): - """Takes a text and generates a list of possible entities - - :arg text: the text to look at - - :returns: generator where each part (except the first) starts with an - "&" - - """ - for i, part in enumerate(AMP_SPLIT_RE.split(text)): - if i == 0: - yield part - elif i % 2 == 0: - yield '&' + part - - -class BleachSanitizerFilter(sanitizer.Filter): +class BleachSanitizerFilter(html5lib_shim.SanitizerFilter): """html5lib Filter that sanitizes text This filter can be used anywhere html5lib filters can be used. @@ -430,14 +249,13 @@ class BleachSanitizerFilter(sanitizer.Filter): """ self.attr_filter = attribute_filter_factory(attributes) - self.strip_disallowed_elements = strip_disallowed_elements self.strip_html_comments = strip_html_comments return super(BleachSanitizerFilter, self).__init__(source, **kwargs) def __iter__(self): - for token in Filter.__iter__(self): + for token in html5lib_shim.Filter.__iter__(self): ret = self.sanitize_token(token) if not ret: @@ -523,12 +341,12 @@ class BleachSanitizerFilter(sanitizer.Filter): # For each possible entity that starts with a "&", we try to extract an # actual entity and re-tokenize accordingly - for part in next_possible_entity(data): + for part in html5lib_shim.next_possible_entity(data): if not part: continue if part.startswith('&'): - entity = match_entity(part) + entity = html5lib_shim.match_entity(part) if entity is not None: new_tokens.append({'type': 'Entity', 'name': entity}) # Length of the entity plus 2--one for & at the beginning @@ -556,7 +374,7 @@ class BleachSanitizerFilter(sanitizer.Filter): # different than the original value. 
# Convert all character entities in the value - new_value = convert_entities(value) + new_value = html5lib_shim.convert_entities(value) # Nix backtick, space characters, and control characters new_value = re.sub( @@ -645,7 +463,9 @@ class BleachSanitizerFilter(sanitizer.Filter): # Drop href and xlink:href attr for svg elements with non-local IRIs if (None, token['name']) in self.svg_allow_local_href: - if namespaced_name in [(None, 'href'), (namespaces['xlink'], 'href')]: + if namespaced_name in [ + (None, 'href'), (html5lib_shim.namespaces['xlink'], 'href') + ]: if re.search(r'^\s*[^#\s]', val): continue @@ -676,10 +496,10 @@ class BleachSanitizerFilter(sanitizer.Filter): # Figure out namespaced name if the namespace is appropriate # and exists; if the ns isn't in prefixes, then drop it. - if ns is None or ns not in prefixes: + if ns is None or ns not in html5lib_shim.prefixes: namespaced_name = name else: - namespaced_name = '%s:%s' % (prefixes[ns], name) + namespaced_name = '%s:%s' % (html5lib_shim.prefixes[ns], name) attrs.append(' %s="%s"' % ( namespaced_name, @@ -704,7 +524,7 @@ class BleachSanitizerFilter(sanitizer.Filter): def sanitize_css(self, style): """Sanitizes css in style tags""" # Convert entities in the style so that it can be parsed as CSS - style = convert_entities(style) + style = html5lib_shim.convert_entities(style) # Drop any url values before we do anything else style = re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style) @@ -737,59 +557,3 @@ class BleachSanitizerFilter(sanitizer.Filter): clean.append(prop + ': ' + value + ';') return ' '.join(clean) - - -class BleachHTMLSerializer(HTMLSerializer): - """Wraps the HTMLSerializer and undoes & -> &amp; in attributes""" - def escape_base_amp(self, stoken): - """Escapes bare & in HTML attribute values""" - # First, undo what the HTMLSerializer did - stoken = stoken.replace('&amp;', '&') - - # Then, escape any bare & - for part in next_possible_entity(stoken): - if not part: - continue - - if part.startswith('&'): - entity = match_entity(part) - # Only leave entities in that are not ambiguous. If they're - # ambiguous, then we escape the ampersand. - if entity is not None and convert_entity(entity) is not None: - yield '&' + entity + ';' - - # Length of the entity plus 2--one for & at the beginning - # and and one for ; at the end - part = part[len(entity) + 2:] - if part: - yield part - continue - - yield part.replace('&', '&amp;') - - def serialize(self, treewalker, encoding=None): - """Wrap HTMLSerializer.serialize and escape bare & in attributes""" - in_tag = False - after_equals = False - - for stoken in super(BleachHTMLSerializer, self).serialize(treewalker, encoding): - if in_tag: - if stoken == '>': - in_tag = False - - elif after_equals: - if stoken != '"': - for part in self.escape_base_amp(stoken): - yield part - - after_equals = False - continue - - elif stoken == '=': - after_equals = True - - yield stoken - else: - if stoken.startswith('<'): - in_tag = True - yield stoken
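The patch above moves the entity-handling helpers into a new `bleach.html5lib_shim` module. The sketch below is a usage illustration only: the `convert_entities` expectations are taken from the new `tests/test_html5lib_shim.py` added in this record's test patch, the `match_entity` ones are derived from the implementation shown in the patch, and it assumes a Bleach checkout that includes this change.

```python
# Usage sketch of the helpers the patch moves into bleach.html5lib_shim.
from bleach import html5lib_shim

# Named, decimal, and hex character entities are converted to characters;
# ambiguous ampersands (no matching entity) are left as-is.
# (Expected values mirror the new tests/test_html5lib_shim.py.)
assert html5lib_shim.convert_entities('&nbsp;') == u'\xa0'
assert html5lib_shim.convert_entities('&#32;') == ' '
assert html5lib_shim.convert_entities('&#x20;') == ' '
assert html5lib_shim.convert_entities('&xx;') == '&xx;'
assert html5lib_shim.convert_entities('this &amp; that &amp; that') == 'this & that & that'

# match_entity() returns the first entity name (without '&' and ';') or None.
# (Behavior derived from the match_entity() implementation in the patch.)
assert html5lib_shim.match_entity('&amp; rest') == 'amp'
assert html5lib_shim.match_entity('&xx rest') is None
```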
problem_statement: move html5lib override code into separate "html5lib shim" module

We're increasingly subclassing html5lib classes and overriding behavior and that code is getting tangled with Bleach's behavior code. I'm concerned this is getting worse over time and it'll become increasingly more difficult to upgrade it to support new versions of html5lib. I think it behooves us to centralize and move the html5lib shim code into a separate module with a "defined API" and then change the Bleach code to sit on top of that.

repo: mozilla/bleach

test_patch:
diff --git a/tests/test_clean.py b/tests/test_clean.py index 96f0916..10d9c89 100644 --- a/tests/test_clean.py +++ b/tests/test_clean.py @@ -1,10 +1,10 @@ import os -from bleach._vendor.html5lib.filters.base import Filter import pytest from bleach import clean -from bleach.sanitizer import convert_entities, Cleaner +from bleach.html5lib_shim import Filter +from bleach.sanitizer import Cleaner def test_clean_idempotent(): @@ -744,24 +744,6 @@ def test_invisible_characters(data, expected): assert clean(data) == expected [email protected]('data, expected', [ - # Strings without character entities pass through as is - ('', ''), - ('abc', 'abc'), - - # Handles character entities--both named and numeric - ('&nbsp;', u'\xa0'), - ('&#32;', ' '), - ('&#x20;', ' '), - - # Handles ambiguous ampersand - ('&xx;', '&xx;'), -]) -def test_convert_entities(data, expected): - print(repr(convert_entities(data))) - assert convert_entities(data) == expected - - def test_nonexistent_namespace(): """Verify if the namespace doesn't exist, it doesn't fail with a KeyError diff --git a/tests/test_html5lib_shim.py b/tests/test_html5lib_shim.py new file mode 100644 index 0000000..3bd859a --- /dev/null +++ b/tests/test_html5lib_shim.py @@ -0,0 +1,77 @@ +import pytest + +from bleach import html5lib_shim + + [email protected]('data, expected', [ + # Strings without character entities pass through as is + ('', ''), + ('abc', 'abc'), + + # Handles character entities--both named and numeric + ('&nbsp;', u'\xa0'), + ('&#32;', ' '), + ('&#x20;', ' '), + + # Handles ambiguous ampersand + ('&xx;', '&xx;'), + + # Handles multiple entities in the same string + ('this &amp; that &amp; that', 'this & that & that'), +]) +def test_convert_entities(data, expected): + assert html5lib_shim.convert_entities(data) == expected + + [email protected]('data, expected', [ + ('', ''), + ('text', 'text'), + + # & in Characters is escaped + ('&', '&amp;'), + + # FIXME(willkg): This happens because the BleachHTMLTokenizer is ignoring + # character entities. What it should be doing is creating Entity tokens + # for character entities. + # + # That was too hard at the time I was fixing it, so I fixed it in + # BleachSanitizerFilter. When that gest fixed correctly in the tokenizer, + # then this test cases will get fixed. 
+ ('a &amp; b', 'a &amp;amp; b'), # should be 'a &amp; b' + + # & in HTML attribute values are escaped + ( + '<a href="http://example.com?key=value&key2=value">tag</a>', + '<a href="http://example.com?key=value&amp;key2=value">tag</a>' + ), + # & marking character entities in HTML attribute values aren't escaped + ( + '<a href="http://example.com?key=value&amp;key2=value">tag</a>', + '<a href="http://example.com?key=value&amp;key2=value">tag</a>' + ), + # & marking ambiguous character entities in attribute values are escaped + # (&curren; is a character entity) + ( + '<a href="http://example.com?key=value&current=value">tag</a>', + '<a href="http://example.com?key=value&amp;current=value">tag</a>' + ), + +]) +def test_serializer(data, expected): + # Build a parser, walker, and serializer just like we do in clean() + parser = html5lib_shim.BleachHTMLParser(namespaceHTMLElements=False) + walker = html5lib_shim.getTreeWalker('etree') + serializer = html5lib_shim.BleachHTMLSerializer( + quote_attr_values='always', + omit_optional_tags=False, + escape_lt_in_attrs=True, + resolve_entities=False, + sanitize=False, + alphabetical_attributes=False, + ) + + # Parse, walk, and then serialize the output + dom = parser.parseFragment(data) + serialized = serializer.render(walker(dom)) + + assert serialized == expected
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 4 }
version: 2.1
install_config:
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "pip install -U pip setuptools>=18.5" ], "python": "3.9", "reqs_path": [ "requirements-dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.16 babel==2.17.0 backports.tarfile==1.2.0 -e git+https://github.com/mozilla/bleach.git@a06cd773694721f7cace21d09958afdf301f338d#egg=bleach cachetools==5.5.2 certifi==2025.1.31 cffi==1.17.1 chardet==5.2.0 charset-normalizer==3.4.1 colorama==0.4.6 cryptography==44.0.2 distlib==0.3.9 docutils==0.21.2 exceptiongroup==1.2.2 filelock==3.18.0 flake8==7.2.0 id==1.5.0 idna==3.10 imagesize==1.4.1 importlib_metadata==8.6.1 iniconfig==2.1.0 jaraco.classes==3.4.0 jaraco.context==6.0.1 jaraco.functools==4.1.0 jeepney==0.9.0 Jinja2==3.1.6 keyring==25.6.0 markdown-it-py==3.0.0 MarkupSafe==3.0.2 mccabe==0.7.0 mdurl==0.1.2 more-itertools==10.6.0 nh3==0.2.21 packaging==24.2 platformdirs==4.3.7 pluggy==1.5.0 pycodestyle==2.13.0 pycparser==2.22 pyflakes==3.3.1 Pygments==2.19.1 pyproject-api==1.9.0 pytest==8.3.5 pytest-wholenodeid==0.2 readme_renderer==44.0 requests==2.32.3 requests-toolbelt==1.0.0 rfc3986==2.0.0 rich==14.0.0 SecretStorage==3.3.3 six==1.17.0 snowballstemmer==2.2.0 Sphinx==7.4.7 sphinxcontrib-applehelp==2.0.0 sphinxcontrib-devhelp==2.0.0 sphinxcontrib-htmlhelp==2.1.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==2.0.0 sphinxcontrib-serializinghtml==2.0.0 tomli==2.2.1 tox==4.25.0 twine==6.1.0 typing_extensions==4.13.0 urllib3==2.3.0 virtualenv==20.29.3 webencodings==0.5.1 zipp==3.21.0
name: bleach channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.16 - babel==2.17.0 - backports-tarfile==1.2.0 - cachetools==5.5.2 - certifi==2025.1.31 - cffi==1.17.1 - chardet==5.2.0 - charset-normalizer==3.4.1 - colorama==0.4.6 - cryptography==44.0.2 - distlib==0.3.9 - docutils==0.21.2 - exceptiongroup==1.2.2 - filelock==3.18.0 - flake8==7.2.0 - id==1.5.0 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - jaraco-classes==3.4.0 - jaraco-context==6.0.1 - jaraco-functools==4.1.0 - jeepney==0.9.0 - jinja2==3.1.6 - keyring==25.6.0 - markdown-it-py==3.0.0 - markupsafe==3.0.2 - mccabe==0.7.0 - mdurl==0.1.2 - more-itertools==10.6.0 - nh3==0.2.21 - packaging==24.2 - pip==25.0.1 - platformdirs==4.3.7 - pluggy==1.5.0 - pycodestyle==2.13.0 - pycparser==2.22 - pyflakes==3.3.1 - pygments==2.19.1 - pyproject-api==1.9.0 - pytest==8.3.5 - pytest-wholenodeid==0.2 - readme-renderer==44.0 - requests==2.32.3 - requests-toolbelt==1.0.0 - rfc3986==2.0.0 - rich==14.0.0 - secretstorage==3.3.3 - setuptools==78.1.0 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==7.4.7 - sphinxcontrib-applehelp==2.0.0 - sphinxcontrib-devhelp==2.0.0 - sphinxcontrib-htmlhelp==2.1.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==2.0.0 - sphinxcontrib-serializinghtml==2.0.0 - tomli==2.2.1 - tox==4.25.0 - twine==6.1.0 - typing-extensions==4.13.0 - urllib3==2.3.0 - virtualenv==20.29.3 - webencodings==0.5.1 - zipp==3.21.0 prefix: /opt/conda/envs/bleach
[ "tests/test_clean.py::test_clean_idempotent", "tests/test_clean.py::test_only_text_is_cleaned", "tests/test_clean.py::test_empty", "tests/test_clean.py::test_content_has_no_html", "tests/test_clean.py::test_content_has_allowed_html[an", "tests/test_clean.py::test_content_has_allowed_html[another", "tests/test_clean.py::test_html_is_lowercased", "tests/test_clean.py::test_invalid_uri_does_not_raise_error", "tests/test_clean.py::test_comments[<!--", "tests/test_clean.py::test_comments[<!--open", "tests/test_clean.py::test_comments[<!--comment-->text-True-text]", "tests/test_clean.py::test_comments[<!--comment-->text-False-<!--comment-->text]", "tests/test_clean.py::test_comments[text<!--", "tests/test_clean.py::test_comments[text<!--comment-->-True-text]", "tests/test_clean.py::test_comments[text<!--comment-->-False-text<!--comment-->]", "tests/test_clean.py::test_disallowed_tags[<img", "tests/test_clean.py::test_disallowed_tags[a", "tests/test_clean.py::test_invalid_char_in_tag", "tests/test_clean.py::test_unclosed_tag", "tests/test_clean.py::test_nested_script_tag", "tests/test_clean.py::test_bare_entities_get_escaped_correctly[an", "tests/test_clean.py::test_bare_entities_get_escaped_correctly[tag", "tests/test_clean.py::test_character_entities_handling[&amp;-&amp;]", "tests/test_clean.py::test_character_entities_handling[&nbsp;-&nbsp;]", "tests/test_clean.py::test_character_entities_handling[&nbsp;", "tests/test_clean.py::test_character_entities_handling[&lt;em&gt;strong&lt;/em&gt;-&lt;em&gt;strong&lt;/em&gt;]", "tests/test_clean.py::test_character_entities_handling[&amp;is", "tests/test_clean.py::test_character_entities_handling[cool", "tests/test_clean.py::test_character_entities_handling[&&amp;", "tests/test_clean.py::test_character_entities_handling[&amp;", "tests/test_clean.py::test_character_entities_handling[this", "tests/test_clean.py::test_character_entities_handling[http://example.com?active=true&current=true-http://example.com?active=true&amp;current=true]", "tests/test_clean.py::test_character_entities_handling[<a", "tests/test_clean.py::test_character_entities_handling[&xx;-&xx;]", "tests/test_clean.py::test_character_entities_handling[&#39;-&#39;]", "tests/test_clean.py::test_character_entities_handling[&#34;-&#34;]", "tests/test_clean.py::test_character_entities_handling[&#123;-&#123;]", "tests/test_clean.py::test_character_entities_handling[&#x0007b;-&#x0007b;]", "tests/test_clean.py::test_character_entities_handling[&#x0007B;-&#x0007B;]", "tests/test_clean.py::test_character_entities_handling[&#-&amp;#]", "tests/test_clean.py::test_character_entities_handling[&#<-&amp;#&lt;]", "tests/test_clean.py::test_character_entities_handling[&#39;&#34;-&#39;&#34;]", "tests/test_clean.py::test_stripping_tags[a", "tests/test_clean.py::test_stripping_tags[<p><a", "tests/test_clean.py::test_stripping_tags[<p><span>multiply", "tests/test_clean.py::test_stripping_tags_is_safe[<scri<script>pt>alert(1)</scr</script>ipt>-pt&gt;alert(1)ipt&gt;]", "tests/test_clean.py::test_stripping_tags_is_safe[<scri<scri<script>pt>pt>alert(1)</script>-pt&gt;pt&gt;alert(1)]", "tests/test_clean.py::test_allowed_styles", "tests/test_clean.py::test_href_with_wrong_tag", "tests/test_clean.py::test_disallowed_attr", "tests/test_clean.py::test_unquoted_attr_values_are_quoted", "tests/test_clean.py::test_unquoted_event_handler_attr_value", "tests/test_clean.py::test_invalid_filter_attr", "tests/test_clean.py::test_poster_attribute", "tests/test_clean.py::test_attributes_callable", 
"tests/test_clean.py::test_attributes_wildcard", "tests/test_clean.py::test_attributes_wildcard_callable", "tests/test_clean.py::test_attributes_tag_callable", "tests/test_clean.py::test_attributes_tag_list", "tests/test_clean.py::test_attributes_list", "tests/test_clean.py::test_svg_attr_val_allows_ref", "tests/test_clean.py::test_svg_allow_local_href[<svg><pattern", "tests/test_clean.py::test_svg_allow_local_href_nonlocal[<svg><pattern", "tests/test_clean.py::test_weird_strings", "tests/test_clean.py::test_invisible_characters[1\\x0723-1?23]", "tests/test_clean.py::test_invisible_characters[1\\x0823-1?23]", "tests/test_clean.py::test_invisible_characters[1\\x0b23-1?23]", "tests/test_clean.py::test_invisible_characters[1\\x0c23-1?23]", "tests/test_clean.py::test_invisible_characters[import", "tests/test_clean.py::test_nonexistent_namespace", "tests/test_clean.py::test_regressions[/bleach/tests/data/1.test->\"><script>alert(\"XSS\")</script>&\\n--\\n&gt;\"&gt;&lt;script&gt;alert(\"XSS\")&lt;/script&gt;&amp;\\n]", "tests/test_clean.py::test_regressions[/bleach/tests/data/2.test-\"><STYLE>@import\"javascript:alert('XSS')\";</STYLE>\\n--\\n\"&gt;&lt;style&gt;@import\"javascript:alert('XSS')\";&lt;/style&gt;\\n]", "tests/test_clean.py::test_regressions[/bleach/tests/data/3.test->\"'><img%20src%3D%26%23x6a;%26%23x61;%26%23x76;%26%23x61;%26%23x73;%26%23x63;%26%23x72;%26%23x69;%26%23x70;%26%23x74;%26%23x3a;alert(%26quot;%26%23x20;XSS%26%23x20;Test%26%23x20;Successful%26quot;)>\\n--\\n&gt;\"'&gt;&lt;img%20src%3d%26%23x6a;%26%23x61;%26%23x76;%26%23x61;%26%23x73;%26%23x63;%26%23x72;%26%23x69;%26%23x70;%26%23x74;%26%23x3a;alert(%26quot;%26%23x20;xss%26%23x20;test%26%23x20;successful%26quot;)&gt;&lt;/img%20src%3d%26%23x6a;%26%23x61;%26%23x76;%26%23x61;%26%23x73;%26%23x63;%26%23x72;%26%23x69;%26%23x70;%26%23x74;%26%23x3a;alert(%26quot;%26%23x20;xss%26%23x20;test%26%23x20;successful%26quot;)&gt;\\n]", "tests/test_clean.py::test_regressions[/bleach/tests/data/4.test-<scr<script></script>ipt", "tests/test_clean.py::test_regressions[/bleach/tests/data/5.test->%22%27><img%20src%3d%22javascript:alert(%27%20XSS%27)%22>\\n--\\n&gt;%22%27&gt;&lt;img%20src%3d%22javascript:alert(%27%20xss%27)%22&gt;&lt;/img%20src%3d%22javascript:alert(%27%20xss%27)%22&gt;\\n]", "tests/test_clean.py::test_regressions[/bleach/tests/data/6.test-<a", "tests/test_clean.py::test_regressions[/bleach/tests/data/7.test-\">\\n--\\n\"&gt;\\n]", "tests/test_clean.py::test_regressions[/bleach/tests/data/8.test->\"\\n--\\n&gt;\"\\n]", "tests/test_clean.py::test_regressions[/bleach/tests/data/9.test-'';!--\"<XSS>=&{()}\\n--\\n'';!--\"&lt;xss&gt;=&amp;{()}&lt;/xss&gt;\\n]", "tests/test_clean.py::test_regressions[/bleach/tests/data/10.test-<IMG", "tests/test_clean.py::test_regressions[/bleach/tests/data/11.test-<IMG", "tests/test_clean.py::test_regressions[/bleach/tests/data/12.test-<IMG", "tests/test_clean.py::test_regressions[/bleach/tests/data/13.test-<IMG", "tests/test_clean.py::test_regressions[/bleach/tests/data/14.test-<IMGSRC=&#106;&#97;&#118;&#97;&<WBR>#115;&#99;&#114;&#105;&#112;&<WBR>#116;&#58;&#97;\\n--\\n&lt;imgsrc=&amp;#106;&amp;#97;&amp;#118;&amp;#97;&amp;&lt;wbr&gt;#115;&#99;&#114;&#105;&#112;&amp;&lt;wbr&gt;&lt;/wbr&gt;#116;&#58;&#97;&lt;/imgsrc=&amp;#106;&amp;#97;&amp;#118;&amp;#97;&amp;&lt;wbr&gt;\\n]", 
"tests/test_clean.py::test_regressions[/bleach/tests/data/15.test-&#108;&#101;&<WBR>#114;&#116;&#40;&#39;&#88;&#83<WBR>;&#83;&#39;&#41>\\n--\\n&#108;&#101;&amp;&lt;wbr&gt;&lt;/wbr&gt;#114;&#116;&#40;&#39;&#88;&amp;#83&lt;wbr&gt;&lt;/wbr&gt;;&#83;&#39;&amp;#41&gt;\\n]", "tests/test_clean.py::test_regressions[/bleach/tests/data/16.test-<IMGSRC=&#0000106&#0000097&<WBR>#0000118&#0000097&#0000115&<WBR>#0000099&#0000114&#0000105&<WBR>#0000112&#0000116&#0000058&<WBR>#0000097&#0000108&#0000101&<WBR>#0000114&#0000116&#0000040&<WBR>#0000039&#0000088&#0000083&<WBR>#0000083&#0000039&#0000041>\\n--\\n&lt;imgsrc=&amp;#0000106&amp;#0000097&amp;&lt;wbr&gt;#0000118&amp;#0000097&amp;#0000115&amp;&lt;wbr&gt;&lt;/wbr&gt;#0000099&amp;#0000114&amp;#0000105&amp;&lt;wbr&gt;&lt;/wbr&gt;#0000112&amp;#0000116&amp;#0000058&amp;&lt;wbr&gt;&lt;/wbr&gt;#0000097&amp;#0000108&amp;#0000101&amp;&lt;wbr&gt;&lt;/wbr&gt;#0000114&amp;#0000116&amp;#0000040&amp;&lt;wbr&gt;&lt;/wbr&gt;#0000039&amp;#0000088&amp;#0000083&amp;&lt;wbr&gt;&lt;/wbr&gt;#0000083&amp;#0000039&amp;#0000041&gt;&lt;/imgsrc=&amp;#0000106&amp;#0000097&amp;&lt;wbr&gt;\\n]", "tests/test_clean.py::test_regressions[/bleach/tests/data/17.test-<IMGSRC=&#x6A&#x61&#x76&#x61&#x73&<WBR>#x63&#x72&#x69&#x70&#x74&#x3A&<WBR>#x61&#x6C&#x65&#x72&#x74&#x28&<WBR>#x27&#x58&#x53&#x53&#x27&#x29>\\n--\\n&lt;imgsrc=&amp;#x6a&amp;#x61&amp;#x76&amp;#x61&amp;#x73&amp;&lt;wbr&gt;#x63&amp;#x72&amp;#x69&amp;#x70&amp;#x74&amp;#x3A&amp;&lt;wbr&gt;&lt;/wbr&gt;#x61&amp;#x6C&amp;#x65&amp;#x72&amp;#x74&amp;#x28&amp;&lt;wbr&gt;&lt;/wbr&gt;#x27&amp;#x58&amp;#x53&amp;#x53&amp;#x27&amp;#x29&gt;&lt;/imgsrc=&amp;#x6a&amp;#x61&amp;#x76&amp;#x61&amp;#x73&amp;&lt;wbr&gt;\\n]", "tests/test_clean.py::test_regressions[/bleach/tests/data/18.test-<IMG", "tests/test_clean.py::test_regressions[/bleach/tests/data/19.test-<IMG", "tests/test_clean.py::test_regressions[/bleach/tests/data/20.test-<IMG", "tests/test_clean.py::TestCleaner::test_basics", "tests/test_clean.py::TestCleaner::test_filters", "tests/test_html5lib_shim.py::test_convert_entities[-]", "tests/test_html5lib_shim.py::test_convert_entities[abc-abc]", "tests/test_html5lib_shim.py::test_convert_entities[&nbsp;-\\xa0]", "tests/test_html5lib_shim.py::test_convert_entities[&#32;-", "tests/test_html5lib_shim.py::test_convert_entities[&#x20;-", "tests/test_html5lib_shim.py::test_convert_entities[&xx;-&xx;]", "tests/test_html5lib_shim.py::test_convert_entities[this", "tests/test_html5lib_shim.py::test_serializer[-]", "tests/test_html5lib_shim.py::test_serializer[text-text]", "tests/test_html5lib_shim.py::test_serializer[&-&amp;]", "tests/test_html5lib_shim.py::test_serializer[a", "tests/test_html5lib_shim.py::test_serializer[<a" ]
[ "tests/test_clean.py::test_uri_value_allowed_protocols[<a" ]
[]
[]
Apache License 2.0
2,985
[ "bleach/html5lib_shim.py", "bleach/__init__.py", "bleach/sanitizer.py", "CHANGES", "bleach/linkifier.py" ]
[ "bleach/html5lib_shim.py", "bleach/__init__.py", "bleach/sanitizer.py", "CHANGES", "bleach/linkifier.py" ]
instance_id: pypa__wheel-250
base_commit: f3855494f20724f1ae844631d08e34367e977661
created_at: 2018-08-28 20:35:56
environment_setup_commit: e774538e0be3a5ca79ca31b1ae01c6672480bef6
hints_text:
codecov[bot]: # [Codecov](https://codecov.io/gh/pypa/wheel/pull/250?src=pr&el=h1) Report > Merging [#250](https://codecov.io/gh/pypa/wheel/pull/250?src=pr&el=desc) into [master](https://codecov.io/gh/pypa/wheel/commit/f3855494f20724f1ae844631d08e34367e977661?src=pr&el=desc) will **decrease** coverage by `4.17%`. > The diff coverage is `100%`. [![Impacted file tree graph](https://codecov.io/gh/pypa/wheel/pull/250/graphs/tree.svg?width=650&token=ey5B5hA7sW&height=150&src=pr)](https://codecov.io/gh/pypa/wheel/pull/250?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #250 +/- ## ========================================== - Coverage 59.31% 55.13% -4.18% ========================================== Files 12 12 Lines 816 818 +2 ========================================== - Hits 484 451 -33 - Misses 332 367 +35 ``` | [Impacted Files](https://codecov.io/gh/pypa/wheel/pull/250?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [wheel/metadata.py](https://codecov.io/gh/pypa/wheel/pull/250/diff?src=pr&el=tree#diff-d2hlZWwvbWV0YWRhdGEucHk=) | `81.81% <100%> (-15.52%)` | :arrow_down: | | [wheel/pkginfo.py](https://codecov.io/gh/pypa/wheel/pull/250/diff?src=pr&el=tree#diff-d2hlZWwvcGtnaW5mby5weQ==) | `48.27% <0%> (-37.94%)` | :arrow_down: | | [wheel/util.py](https://codecov.io/gh/pypa/wheel/pull/250/diff?src=pr&el=tree#diff-d2hlZWwvdXRpbC5weQ==) | `76.92% <0%> (-23.08%)` | :arrow_down: | | [wheel/pep425tags.py](https://codecov.io/gh/pypa/wheel/pull/250/diff?src=pr&el=tree#diff-d2hlZWwvcGVwNDI1dGFncy5weQ==) | `28.44% <0%> (-3.67%)` | :arrow_down: | | [wheel/wheelfile.py](https://codecov.io/gh/pypa/wheel/pull/250/diff?src=pr&el=tree#diff-d2hlZWwvd2hlZWxmaWxlLnB5) | `98.09% <0%> (-1.91%)` | :arrow_down: | ------ [Continue to review full report at Codecov](https://codecov.io/gh/pypa/wheel/pull/250?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/pypa/wheel/pull/250?src=pr&el=footer). Last update [f385549...5b576de](https://codecov.io/gh/pypa/wheel/pull/250?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments). cam72cam: I can confirm that this works in relation to https://github.com/pypa/pip/issues/5768 pradyunsg: The PEP changes are accepted, so merging this should be acceptable now IMO. /cc @pypa/wheel-committers
diff --git a/wheel/metadata.py b/wheel/metadata.py index 4fa17cd..c6a3736 100644 --- a/wheel/metadata.py +++ b/wheel/metadata.py @@ -16,7 +16,10 @@ EXTRA_RE = re.compile("""^(?P<package>.*?)(;\s*(?P<condition>.*?)(extra == '(?P< def requires_to_requires_dist(requirement): - """Compose the version predicates for requirement in PEP 345 fashion.""" + """Return the version specifier for a requirement in PEP 345/566 fashion.""" + if requirement.url: + return " @ " + requirement.url + requires_dist = [] for op, ver in requirement.specs: requires_dist.append(op + ver)
How should a PEP 508 requirement with an URL specifier be handled? Starting with pip 10.0, requirements with [URL specifiers](https://www.python.org/dev/peps/pep-0508/#examples) are now supported (as replacement for dependency links). With the following sample project' `setup.py`: ```python from setuptools import setup setup(name='projecta', version='42', install_requires=''' lazyImport@git+https://gitlab.com/KOLANICH1/lazyImport.py.git#egg=lazyImport-dev ''') ``` Installing from source work, but looking at the generated wheel metadata: ```shell > python setup.py -q bdist_wheel && unzip -p dist/projecta-42-py3-none-any.whl '*/METADATA' Metadata-Version: 2.1 Name: projecta Version: 42 Summary: Description! Home-page: UNKNOWN Author: UNKNOWN Author-email: UNKNOWN License: UNKNOWN Platform: UNKNOWN Requires-Dist: lazyImport UNKNOWN ``` It works with the following patch: ```diff tests/test_metadata.py | 3 +++ wheel/metadata.py | 2 ++ 2 files changed, 5 insertions(+) diff --git i/tests/test_metadata.py w/tests/test_metadata.py index 3421430..2390e98 100644 --- i/tests/test_metadata.py +++ w/tests/test_metadata.py @@ -9,6 +9,7 @@ def test_pkginfo_to_metadata(tmpdir): ('Provides-Extra', 'test'), ('Provides-Extra', 'signatures'), ('Provides-Extra', 'faster-signatures'), + ('Requires-Dist', "pip @ https://github.com/pypa/pip/archive/1.3.1.zip"), ('Requires-Dist', "ed25519ll; extra == 'faster-signatures'"), ('Requires-Dist', "keyring; extra == 'signatures'"), ('Requires-Dist', "keyrings.alt; extra == 'signatures'"), @@ -28,6 +29,8 @@ def test_pkginfo_to_metadata(tmpdir): egg_info_dir = tmpdir.ensure_dir('test.egg-info') egg_info_dir.join('requires.txt').write("""\ +pip@ https://github.com/pypa/pip/archive/1.3.1.zip + [faster-signatures] ed25519ll diff --git i/wheel/metadata.py w/wheel/metadata.py index 4fa17cd..5b03e3e 100644 --- i/wheel/metadata.py +++ w/wheel/metadata.py @@ -17,6 +17,8 @@ def requires_to_requires_dist(requirement): """Compose the version predicates for requirement in PEP 345 fashion.""" + if requirement.url: + return " @ " + requirement.url requires_dist = [] for op, ver in requirement.specs: requires_dist.append(op + ver) ``` ```shell > python setup.py -q bdist_wheel && unzip -p dist/projecta-42-py3-none-any.whl '*/METADATA' Metadata-Version: 2.1 Name: projecta Version: 42 Summary: Description! Home-page: UNKNOWN Author: UNKNOWN Author-email: UNKNOWN License: UNKNOWN Platform: UNKNOWN Requires-Dist: lazyImport @ git+https://gitlab.com/KOLANICH1/lazyImport.py.git UNKNOWN ``` Unfortunately, this is technically not valid [PEP 345](https://www.python.org/dev/peps/pep-0345/#requires-dist-multiple-use) metadata.
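As a hedged sketch of the rendering the patch above produces, the snippet below uses the third-party `packaging` library (not wheel's own pkg_resources-based helper) to show how a PEP 508 requirement with a URL specifier maps to a PEP 566 `Requires-Dist` value; extras and environment markers are omitted for brevity.

```python
# Hedged sketch: render a Requires-Dist value in "name @ url" form for URL
# requirements, and "name<specifier>" otherwise (extras/markers omitted).
from packaging.requirements import Requirement


def to_requires_dist(req_string):
    req = Requirement(req_string)
    if req.url:
        # URL requirements carry no version specifier; emit "name @ url".
        return "{} @ {}".format(req.name, req.url)
    return "{}{}".format(req.name, req.specifier)


print("Requires-Dist:", to_requires_dist("pip @ https://github.com/pypa/pip/archive/1.3.1.zip"))
# Requires-Dist: pip @ https://github.com/pypa/pip/archive/1.3.1.zip
print("Requires-Dist:", to_requires_dist("keyring>=8.0"))
# Requires-Dist: keyring>=8.0
```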
repo: pypa/wheel

test_patch:
diff --git a/tests/test_metadata.py b/tests/test_metadata.py index 3421430..78fe40d 100644 --- a/tests/test_metadata.py +++ b/tests/test_metadata.py @@ -9,6 +9,7 @@ def test_pkginfo_to_metadata(tmpdir): ('Provides-Extra', 'test'), ('Provides-Extra', 'signatures'), ('Provides-Extra', 'faster-signatures'), + ('Requires-Dist', "pip @ https://github.com/pypa/pip/archive/1.3.1.zip"), ('Requires-Dist', "ed25519ll; extra == 'faster-signatures'"), ('Requires-Dist', "keyring; extra == 'signatures'"), ('Requires-Dist', "keyrings.alt; extra == 'signatures'"), @@ -28,6 +29,8 @@ Provides-Extra: faster-signatures""") egg_info_dir = tmpdir.ensure_dir('test.egg-info') egg_info_dir.join('requires.txt').write("""\ +pip@https://github.com/pypa/pip/archive/1.3.1.zip + [faster-signatures] ed25519ll
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 1 }
version: 0.31
install_config:
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[test]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
coverage==7.8.0 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work packaging @ file:///croot/packaging_1734472117206/work pluggy @ file:///croot/pluggy_1733169602837/work pytest @ file:///croot/pytest_1738938843180/work pytest-cov==6.0.0 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
name: wheel channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.8.0 - pytest-cov==6.0.0 prefix: /opt/conda/envs/wheel
[ "tests/test_metadata.py::test_pkginfo_to_metadata" ]
[]
[]
[]
MIT License
2,986
[ "wheel/metadata.py" ]
[ "wheel/metadata.py" ]
instance_id: fatiando__pooch-23
base_commit: ea607a2f9ff4c8e3dc03d7eb350555dd544709d4
created_at: 2018-08-29 03:45:40
environment_setup_commit: 1ff4910849be2db6dec33c5d3c3828f2e9addb74
hints_text: leouieda: Waiting on conda-forge/pytest-feedstock#55 to fix the pytest issues on Mac.
patch:
diff --git a/pooch/core.py b/pooch/core.py index 7093b7c..1967913 100644 --- a/pooch/core.py +++ b/pooch/core.py @@ -2,9 +2,10 @@ Functions to download, verify, and update a sample dataset. """ import os +import sys from pathlib import Path import shutil -from tempfile import NamedTemporaryFile +import tempfile from warnings import warn import requests @@ -12,6 +13,11 @@ import requests from .utils import file_hash, check_version +# PermissionError was introduced in Python 3.3. This can be deleted when dropping 2.7 +if sys.version_info[0] < 3: + PermissionError = OSError # pylint: disable=redefined-builtin,invalid-name + + def create(path, base_url, version, version_dev, env=None, registry=None): """ Create a new :class:`~pooch.Pooch` with sensible defaults to fetch data files. @@ -27,8 +33,6 @@ def create(path, base_url, version, version_dev, env=None, registry=None): ``https://github.com/fatiando/pooch/raw/v0.1/data``). If the version string contains ``+XX.XXXXX``, it will be interpreted as a development version. - If the local storage path doesn't exit, it will be created. - Parameters ---------- path : str, PathLike, list or tuple @@ -73,9 +77,6 @@ def create(path, base_url, version, version_dev, env=None, registry=None): ... registry={"data.txt": "9081wo2eb2gc0u..."}) >>> print(pup.path.parts) # The path is a pathlib.Path ('myproject', 'v0.1') - >>> # We'll create the directory if it doesn't exist yet. - >>> pup.path.exists() - True >>> print(pup.base_url) http://some.link.com/v0.1/ >>> print(pup.registry) @@ -89,8 +90,6 @@ def create(path, base_url, version, version_dev, env=None, registry=None): ... version_dev="master") >>> print(pup.path.parts) ('myproject', 'master') - >>> pup.path.exists() - True >>> print(pup.base_url) http://some.link.com/master/ @@ -103,8 +102,6 @@ def create(path, base_url, version, version_dev, env=None, registry=None): ... version_dev="master") >>> print(pup.path.parts) ('myproject', 'cache', 'data', 'v0.1') - >>> pup.path.exists() - True The user can overwrite the storage path by setting an environment variable: @@ -127,10 +124,6 @@ def create(path, base_url, version, version_dev, env=None, registry=None): >>> print(pup.path.parts) ('myproject', 'from_env', 'v0.1') - Clean up the files we created: - - >>> import shutil; shutil.rmtree("myproject") - """ version = check_version(version, fallback=version_dev) if isinstance(path, (list, tuple)): @@ -138,9 +131,25 @@ def create(path, base_url, version, version_dev, env=None, registry=None): if env is not None and env in os.environ and os.environ[env]: path = os.environ[env] versioned_path = os.path.join(os.path.expanduser(str(path)), version) - # Create the directory if it doesn't already exist - if not os.path.exists(versioned_path): - os.makedirs(versioned_path) + # Check that the data directory is writable + try: + if not os.path.exists(versioned_path): + os.makedirs(versioned_path) + else: + tempfile.NamedTemporaryFile(dir=versioned_path) + except PermissionError: + message = ( + "Cannot write to data cache '{}'. " + "Will not be able to download remote data files.".format(versioned_path) + ) + if env is not None: + message = ( + message + + "Use environment variable '{}' to specify another directory.".format( + env + ) + ) + warn(message) if registry is None: registry = dict() pup = Pooch( @@ -185,10 +194,10 @@ class Pooch: """ Get the absolute path to a file in the local storage. - If it's not in the local storage, it will be downloaded. 
If the hash of file in - local storage doesn't match the one in the registry, will download a new copy of - the file. This is considered a sign that the file was updated in the remote - storage. If the hash of the downloaded file doesn't match the one in the + If it's not in the local storage, it will be downloaded. If the hash of the file + in local storage doesn't match the one in the registry, will download a new copy + of the file. This is considered a sign that the file was updated in the remote + storage. If the hash of the downloaded file still doesn't match the one in the registry, will raise an exception to warn of possible file corruption. Parameters @@ -206,15 +215,27 @@ class Pooch: """ if fname not in self.registry: raise ValueError("File '{}' is not in the registry.".format(fname)) + # Create the local data directory if it doesn't already exist + if not self.abspath.exists(): + os.makedirs(str(self.abspath)) full_path = self.abspath / fname in_storage = full_path.exists() - update = in_storage and file_hash(str(full_path)) != self.registry[fname] - download = not in_storage - if update or download: - self._download_file(fname, update) + if not in_storage: + action = "Downloading" + elif in_storage and file_hash(str(full_path)) != self.registry[fname]: + action = "Updating" + else: + action = "Nothing" + if action in ("Updating", "Downloading"): + warn( + "{} data file '{}' from remote data store '{}' to '{}'.".format( + action, fname, self.base_url, str(self.path) + ) + ) + self._download_file(fname) return str(full_path) - def _download_file(self, fname, update): + def _download_file(self, fname): """ Download a file from the remote data storage to the local storage. @@ -223,8 +244,6 @@ class Pooch: fname : str The file name (relative to the *base_url* of the remote data storage) to fetch from the local storage. - update : bool - True if the file already exists in the storage but needs an update. Raises ------ @@ -232,22 +251,13 @@ class Pooch: If the hash of the downloaded file doesn't match the hash in the registry. """ - destination = Path(self.abspath, fname) + destination = self.abspath / fname source = "".join([self.base_url, fname]) - if update: - action = "Updating" - else: - action = "Downloading" - warn( - "{} data file '{}' from remote data store '{}' to '{}'.".format( - action, fname, self.base_url, str(self.path) - ) - ) - response = requests.get(source, stream=True) - response.raise_for_status() # Stream the file to a temporary so that we can safely check its hash before # overwriting the original - with NamedTemporaryFile(delete=False) as fout: + with tempfile.NamedTemporaryFile(delete=False) as fout: + response = requests.get(source, stream=True) + response.raise_for_status() for chunk in response.iter_content(chunk_size=1024): if chunk: fout.write(chunk)
pooch.create should test for write access to directories and have a failback for failures See realworld implementation problem with Unidata/MetPy#933 `pooch.create` can fail to write to whatever cache directory it is presented with. A failback mechanism should exist, like what does with `matplotlib` when it attempts to find writable locations. There is also a [XDG Base Directory Specification](https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html) for this. ```python POOCH = pooch.create( path=pooch.os_cache('metpy'), base_url='https://github.com/Unidata/MetPy/raw/{version}/staticdata/', version='v' + __version__, version_dev='master', env='TEST_DATA_DIR') ``` ## Full error message ``` File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/pooch/core.py", line 143, in create os.makedirs(versioned_path) File "/opt/miniconda3/envs/prod/lib/python3.6/os.py", line 210, in makedirs makedirs(head, mode, exist_ok) File "/opt/miniconda3/envs/prod/lib/python3.6/os.py", line 210, in makedirs makedirs(head, mode, exist_ok) File "/opt/miniconda3/envs/prod/lib/python3.6/os.py", line 220, in makedirs mkdir(name, mode) PermissionError: [Errno 13] Permission denied: '/usr/share/httpd/.cache' ``` thank you
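The snippet below is a minimal standalone sketch (Python 3, not pooch's actual implementation) of the pattern the patch in this record adopts: probe the cache directory for write access and downgrade a `PermissionError` to a warning so that creating the data fetcher does not crash on read-only systems.

```python
# Hedged sketch: warn instead of raising when the data cache is not writable.
import os
import tempfile
import warnings


def warn_if_unwritable(cache_dir, env_var=None):
    """Warn, rather than raise, when cache_dir cannot be created or written to."""
    try:
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)
        else:
            # Creating a temporary file is a cheap write-permission probe.
            with tempfile.NamedTemporaryFile(dir=cache_dir):
                pass
    except PermissionError:
        message = (
            "Cannot write to data cache '{}'. "
            "Will not be able to download remote data files.".format(cache_dir)
        )
        if env_var is not None:
            message += " Use environment variable '{}' to specify another directory.".format(env_var)
        warnings.warn(message)


# Mirrors the failing MetPy setup from the issue: emits a warning, not a traceback.
warn_if_unwritable("/usr/share/httpd/.cache", env_var="TEST_DATA_DIR")
```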
repo: fatiando/pooch

test_patch:
diff --git a/pooch/tests/test_core.py b/pooch/tests/test_core.py index 55a8f42..28d6bb0 100644 --- a/pooch/tests/test_core.py +++ b/pooch/tests/test_core.py @@ -2,7 +2,9 @@ Test the core class and factory function. """ import os +import sys from pathlib import Path +import tempfile try: from tempfile import TemporaryDirectory @@ -12,11 +14,16 @@ import warnings import pytest -from .. import Pooch +from .. import Pooch, create from ..utils import file_hash from .utils import pooch_test_url, pooch_test_registry, check_tiny_data +# PermissionError was introduced in Python 3.3. This can be deleted when dropping 2.7 +if sys.version_info[0] < 3: + PermissionError = OSError # pylint: disable=redefined-builtin,invalid-name + + DATA_DIR = str(Path(__file__).parent / "data") REGISTRY = pooch_test_registry() BASEURL = pooch_test_url() @@ -104,3 +111,66 @@ def test_pooch_load_registry_invalid_line(): pup = Pooch(path="", base_url="", registry={}) with pytest.raises(ValueError): pup.load_registry(os.path.join(DATA_DIR, "registry-invalid.txt")) + + +def test_create_makedirs_permissionerror(monkeypatch): + "Should warn the user when can't create the local data dir" + + def mockmakedirs(path): # pylint: disable=unused-argument + "Raise an exception to mimic permission issues" + raise PermissionError("Fake error") + + data_cache = os.path.join(os.curdir, "test_permission") + assert not os.path.exists(data_cache) + + monkeypatch.setattr(os, "makedirs", mockmakedirs) + + with warnings.catch_warnings(record=True) as warn: + pup = create( + path=data_cache, + base_url="", + version="1.0", + version_dev="master", + env="SOME_VARIABLE", + registry={"afile.txt": "ahash"}, + ) + assert len(warn) == 1 + assert issubclass(warn[-1].category, UserWarning) + assert str(warn[-1].message).startswith("Cannot write to data cache") + assert "'SOME_VARIABLE'" in str(warn[-1].message) + + with pytest.raises(PermissionError): + pup.fetch("afile.txt") + + +def test_create_newfile_permissionerror(monkeypatch): + "Should warn the user when can't write to the local data dir" + # This is a separate function because there should be a warning if the data dir + # already exists but we can't write to it. 
+ + def mocktempfile(**kwargs): # pylint: disable=unused-argument + "Raise an exception to mimic permission issues" + raise PermissionError("Fake error") + + with TemporaryDirectory() as data_cache: + os.makedirs(os.path.join(data_cache, "1.0")) + assert os.path.exists(data_cache) + + monkeypatch.setattr(tempfile, "NamedTemporaryFile", mocktempfile) + + with warnings.catch_warnings(record=True) as warn: + pup = create( + path=data_cache, + base_url="", + version="1.0", + version_dev="master", + env="SOME_VARIABLE", + registry={"afile.txt": "ahash"}, + ) + assert len(warn) == 1 + assert issubclass(warn[-1].category, UserWarning) + assert str(warn[-1].message).startswith("Cannot write to data cache") + assert "'SOME_VARIABLE'" in str(warn[-1].message) + + with pytest.raises(PermissionError): + pup.fetch("afile.txt") diff --git a/pooch/tests/test_integration.py b/pooch/tests/test_integration.py index aeb4ada..d7dd16f 100644 --- a/pooch/tests/test_integration.py +++ b/pooch/tests/test_integration.py @@ -26,15 +26,16 @@ def pup(): ) # The str conversion is needed in Python 3.5 doggo.load_registry(str(Path(os.path.dirname(__file__), "data", "registry.txt"))) + if os.path.exists(str(doggo.abspath)): + shutil.rmtree(str(doggo.abspath)) yield doggo shutil.rmtree(str(doggo.abspath)) def test_fetch(pup): "Fetch a data file from the local storage" - # Make sure the storage exists and is empty to begin - assert pup.abspath.exists() - assert not list(pup.abspath.iterdir()) + # Make sure the storage has been cleaned up before running the tests + assert not pup.abspath.exists() for target in ["tiny-data.txt", "subdir/tiny-data.txt"]: with warnings.catch_warnings(record=True) as warn: fname = pup.fetch(target)
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 1 }, "num_modified_files": 1 }
0.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 astroid==2.11.7 attrs==22.2.0 Babel==2.11.0 bleach==4.1.0 certifi==2021.5.30 cffi==1.15.1 charset-normalizer==2.0.12 colorama==0.4.5 coverage==6.2 cryptography==40.0.2 dill==0.3.4 docutils==0.18.1 idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 isort==5.10.1 jeepney==0.7.1 Jinja2==3.0.3 keyring==23.4.1 lazy-object-proxy==1.7.1 MarkupSafe==2.0.1 mccabe==0.7.0 numpydoc==1.1.0 packaging==21.3 pkginfo==1.10.0 platformdirs==2.4.0 pluggy==1.0.0 -e git+https://github.com/fatiando/pooch.git@ea607a2f9ff4c8e3dc03d7eb350555dd544709d4#egg=pooch py==1.11.0 pycparser==2.21 Pygments==2.14.0 pylint==2.13.9 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 pytz==2025.2 readme-renderer==34.0 requests==2.27.1 requests-toolbelt==1.0.0 rfc3986==1.5.0 SecretStorage==3.3.3 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinx-rtd-theme==2.0.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 tomli==1.2.3 tqdm==4.64.1 twine==3.8.0 typed-ast==1.5.5 typing_extensions==4.1.1 urllib3==1.26.20 webencodings==0.5.1 wrapt==1.16.0 zipp==3.6.0
name: pooch channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - astroid==2.11.7 - attrs==22.2.0 - babel==2.11.0 - bleach==4.1.0 - cffi==1.15.1 - charset-normalizer==2.0.12 - colorama==0.4.5 - coverage==6.2 - cryptography==40.0.2 - dill==0.3.4 - docutils==0.18.1 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - isort==5.10.1 - jeepney==0.7.1 - jinja2==3.0.3 - keyring==23.4.1 - lazy-object-proxy==1.7.1 - markupsafe==2.0.1 - mccabe==0.7.0 - numpydoc==1.1.0 - packaging==21.3 - pkginfo==1.10.0 - platformdirs==2.4.0 - pluggy==1.0.0 - py==1.11.0 - pycparser==2.21 - pygments==2.14.0 - pylint==2.13.9 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - pytz==2025.2 - readme-renderer==34.0 - requests==2.27.1 - requests-toolbelt==1.0.0 - rfc3986==1.5.0 - secretstorage==3.3.3 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinx-rtd-theme==2.0.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jquery==4.1 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - tomli==1.2.3 - tqdm==4.64.1 - twine==3.8.0 - typed-ast==1.5.5 - typing-extensions==4.1.1 - urllib3==1.26.20 - webencodings==0.5.1 - wrapt==1.16.0 - zipp==3.6.0 prefix: /opt/conda/envs/pooch
[ "pooch/tests/test_core.py::test_create_makedirs_permissionerror", "pooch/tests/test_core.py::test_create_newfile_permissionerror" ]
[ "pooch/tests/test_core.py::test_pooch_update", "pooch/tests/test_core.py::test_pooch_corrupted", "pooch/tests/test_integration.py::test_fetch" ]
[ "pooch/tests/test_core.py::test_pooch_local", "pooch/tests/test_core.py::test_pooch_file_not_in_registry", "pooch/tests/test_core.py::test_pooch_load_registry", "pooch/tests/test_core.py::test_pooch_load_registry_invalid_line" ]
[]
BSD License
2,987
[ "pooch/core.py" ]
[ "pooch/core.py" ]
pypr__automan-2
3043553f4e39b4def791e353b015d815bc35357c
2018-08-29 04:57:25
3043553f4e39b4def791e353b015d815bc35357c
diff --git a/.gitignore b/.gitignore index 28016a7..9e684bc 100644 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,8 @@ build/ dist/ *.egg-info/ +*.pytest_cache/ +examples/tutorial/config.json +examples/tutorial/outputs +examples/tutorial/manuscript +examples/tutorial/.automan diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..5ce40e1 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,5 @@ +include MANIFEST.in *.py *.rst *.txt *.yml +recursive-include docs *.* +recursive-include examples *.py +recursive-exclude examples/tutorial/.automan *.* +recursive-exclude docs/build *.* \ No newline at end of file diff --git a/automan/automation.py b/automan/automation.py index 8374ddc..4ed9cf4 100644 --- a/automan/automation.py +++ b/automan/automation.py @@ -59,8 +59,7 @@ class TaskRunner(object): def __init__(self, tasks, scheduler): """Constructor. - Parameters - ---------- + **Parameters** tasks: iterable of `Task` instances. scheduler: `automan.jobs.Scheduler` instance @@ -176,8 +175,7 @@ class CommandTask(Task): def __init__(self, command, output_dir, job_info=None): """Constructor - Parameters - ---------- + **Parameters** command: str or list: command to run $output_dir is substituted. output_dir: str : path of output directory. @@ -215,6 +213,9 @@ class CommandTask(Task): return self._copy_output_and_check_status() def run(self, scheduler): + # Remove the error status file if it exists and we are going to run. + if os.path.exists(self._error_status_file): + os.remove(self._error_status_file) self.job_proxy = scheduler.submit(self.job) def clean(self): @@ -297,8 +298,7 @@ class PySPHTask(CommandTask): def __init__(self, command, output_dir, job_info=None): """Constructor - Parameters - ---------- + **Parameters** command: str or list: command to run $output_dir is substituted. output_dir: str : path of output directory. @@ -363,8 +363,7 @@ class Problem(object): def __init__(self, simulation_dir, output_dir): """Constructor. - Parameters - ---------- + **Parameters** simulation_dir : str : directory where simulation output goes. output_dir : str : directory where outputs from `run` go. @@ -482,8 +481,7 @@ def kwargs_to_command_line(kwargs): """Convert a dictionary of keyword arguments to a list of command-line options. If the value of the key is None, no value is passed. - Examples - -------- + **Examples** >>> sorted(kwargs_to_command_line(dict(some_arg=1, something_else=None))) ['--some-arg=1', '--something-else'] @@ -549,8 +547,7 @@ class Simulation(object): def __init__(self, root, base_command, job_info=None, **kw): """Constructor - Parameters - ---------- + **Parameters** root: str Path to simulation output directory. @@ -617,8 +614,7 @@ def compare_runs(sims, method, labels, exact=None): compare and an optional method name for an exact solution, this calls the methods with the appropriate parameters for each simulation. - Parameters - ---------- + **Parameters** sims: sequence Sequence of `Simulation` objects. @@ -645,11 +641,22 @@ def compare_runs(sims, method, labels, exact=None): method(s, label=s.get_labels(labels), **next(ls)) -def filter_cases(runs, **params): +def filter_cases(runs, predicate=None, **params): """Given a sequence of simulations and any additional parameters, filter out all the cases having exactly those parameters and return a list of them. + + One may also pass a callable to filter the cases using the `predicate` + keyword argument. If this is not a callable, it is treated as a parameter. 
+ If `predicate` is passed though, the other keyword arguments are ignored. + """ + if predicate is not None: + if callable(predicate): + return list(filter(predicate, runs)) + else: + params['predicate'] = predicate + def _check_match(run): for param, expected in params.items(): if param not in run.params or run.params[param] != expected: @@ -757,8 +764,8 @@ class Automator(object): cluster_manager_factory=None): """Constructor. - Parameters - ---------- + **Parameters** + simulation_dir : str Root directory to generate simulation results in. output_dir: str @@ -788,7 +795,8 @@ class Automator(object): self._check_positional_arguments(args.problem) self.cluster_manager = self.cluster_manager_factory( - config_fname=args.config + config_fname=args.config, + exclude_paths=self._get_exclude_paths() ) if len(args.host) > 0: @@ -821,6 +829,16 @@ class Automator(object): print("Valid names are %s" % ', '.join(names)) self.parser.exit(1) + def _get_exclude_paths(self): + """Returns a list of exclude paths suitable for passing on to rsync to exclude + syncing some directories on remote machines. + """ + paths = [] + for path in [self.simulation_dir, self.output_dir]: + if not path.endswith('/'): + paths.append(path + '/') + return paths + def _select_problem_classes(self, problems): if problems == 'all': return self.all_problems diff --git a/automan/cluster_manager.py b/automan/cluster_manager.py index d15877e..ba21b31 100644 --- a/automan/cluster_manager.py +++ b/automan/cluster_manager.py @@ -4,6 +4,7 @@ worker to help with the automation of tasks. This requires ssh/scp and rsync to work on all machines. This is currently only tested on Linux machines. + """ import json @@ -35,22 +36,22 @@ class ClusterManager(object): The general directory structure of a remote worker machine is as follows:: - remote_home/ # Could be ~ - automan/ # Root of automation directory (configurable) - envs/ # python virtual environments for use. - pysph/ # the pysph sources. - project/ # Current directory for specific project. - other_repos/ # other source repos. + remote_home/ # Could be ~ + automan/ # Root of automation directory (configurable) + envs/ # python virtual environments for use. + my_project/ # Current directory for specific projects. + + The project directories are synced from this machine to the remote worker. - The respective directories are synced from this machine to the remote - worker. + The "my_project" is the root of the directory with the automation script + and this should contain the required sources that need to be executed. One + can use a list of source directories which will be copied over but it is + probably most convenient to put it all in the root of the project directory + to keep everything self-contained. - The idea is that this remote directory contains a full installation of - PySPH, the PySPH sources, the current project sources and any other - optional directories. The `ClusterManager` class manages these remote - workers by helping setup the directories, bootstrapping the Python - virtualenv and also keeping these up-to-date as the respective directories - are changed on the local machine. + The `ClusterManager` class manages these remote workers by helping setup + the directories, bootstrapping the Python virtualenv and also keeping these + up-to-date as project directory is changed on the local machine. 
The class therefore has two primary public methods, @@ -80,34 +81,61 @@ class ClusterManager(object): set -e if hash virtualenv 2>/dev/null; then - virtualenv --system-site-packages envs/pysph + virtualenv --system-site-packages envs/{project_name} else - python virtualenv.py --system-site-packages envs/pysph + python virtualenv.py --system-site-packages envs/{project_name} + fi + source envs/{project_name}/bin/activate + + pip install automan + + # Run any requirements.txt from the user + cd {project_name} + if [ -f "requirements.txt" ] ; then + pip install -r requirements.txt fi - source envs/pysph/bin/activate - cd pysph - pip install -r requirements.txt - pip install automan h5py matplotlib - python setup.py develop - cd .. """) UPDATE = dedent("""\ #!/bin/bash set -e - source envs/pysph/bin/activate - cd pysph - python setup.py develop + source envs/{project_name}/bin/activate + # Run any requirements.txt from the user + cd {project_name} + if [ -f "requirements.txt" ] ; then + pip install -r requirements.txt + fi """) ####################################################### def __init__(self, root='automan', sources=None, - config_fname='config.json'): + config_fname='config.json', exclude_paths=None): + """Create a cluster manager instance. + + **Parameters** + + root: str + The name of the root directory where all the files on the remote + will be created. + sources: list + A list of source directories to sync. + config_fname: str + The name of the config file to create. + exclude_paths: list + A list of paths to exclude while syncing. This is in a form suitable + to pass to rsync. + """ self.root = root self.workers = [] self.sources = sources self.scripts_dir = os.path.abspath('.' + self.root) + self.exclude_paths = exclude_paths if exclude_paths else [] + + # This is setup by the config and is the name of + # the project directory. + self.project_name = None + # The config file will always trump any direct settings # unless there is no config file. self.config_fname = config_fname @@ -120,9 +148,11 @@ class ClusterManager(object): def _bootstrap(self, host, home): venv_script = self._get_virtualenv() - cmd = "ssh {host} 'cd {home}; mkdir -p {root}/envs'".format( - home=home, host=host, root=self.root - ) + cmd = ("ssh {host} 'cd {home}; mkdir -p {root}/envs'; " + + "mkdir -p {root}/{project_name}/.{root}").format( + home=home, host=host, root=self.root, + project_name=self.project_name + ) self._run_command(cmd) root = os.path.join(home, self.root) @@ -133,9 +163,8 @@ class ClusterManager(object): self._update_sources(host, home) - cmd = "ssh {host} 'cd {root}; ./bootstrap.sh'".format( - host=host, root=root - ) + cmd = "ssh {host} 'cd {root}; ./{project_name}/.{root}/bootstrap.sh'" + cmd = cmd.format(host=host, root=root, project_name=self.project_name) try: self._run_command(cmd) except subprocess.CalledProcessError: @@ -144,7 +173,13 @@ class ClusterManager(object): Bootstrapping of remote host {host} failed. All files have been copied to the host. - Please take a look at {root}/bootstrap.sh and try to fix it. + Please take a look at + {root}/{project_name}/.{root}/bootstrap.sh + and try to fix it. + + You should run it from within the {root} directory as: + + ./{project_name}/.{root}/bootstrap.sh Once the bootstrap.sh script runs successfully, the worker can be used without any further steps. @@ -154,7 +189,8 @@ class ClusterManager(object): and can be edited by you. These will be used for any new hosts you add. 
****************************************************************** - """.format(root=root, host=host, scripts_dir=self.scripts_dir) + """.format(root=root, host=host, scripts_dir=self.scripts_dir, + project_name=self.project_name) ) print(msg) else: @@ -175,30 +211,23 @@ class ClusterManager(object): with open(self.config_fname) as f: data = json.load(f) self.root = data['root'] + self.project_name = data['project_name'] self.sources = data['sources'] self.workers = data['workers'] else: if self.sources is None or len(self.sources) == 0: project_dir = os.path.abspath(os.getcwd()) - sources = [project_dir] - pysph_dir = os.path.expanduser( - prompt("Enter PySPH source directory (empty for none): ") - ) - if len(pysph_dir) > 0 and os.path.exists(pysph_dir): - sources.append(os.path.abspath(pysph_dir)) - else: - print("Invalid pysph directory, please edit " - "%s." % self.config_fname) - self.sources = sources + self.project_name = os.path.basename(project_dir) + self.sources = [project_dir] self.workers = [dict(host='localhost', home='', nfs=False)] self._write_config() self.scripts_dir = os.path.abspath('.' + self.root) def _rebuild(self, host, home): root = os.path.join(home, self.root) - command = "ssh {host} 'cd {root}; ./update.sh'".format( - host=host, root=root - ) + command = "ssh {host} 'cd {root}; ./{project_name}/.{root}/update.sh'" + command = command.format(host=host, root=root, + project_name=self.project_name) self._run_command(command) def _run_command(self, cmd, **kw): @@ -217,6 +246,9 @@ class ClusterManager(object): stdout=subprocess.PIPE ) kwargs['stdin'] = proc.stdout + if self.exclude_paths: + for path in self.exclude_paths: + options += ' --exclude="%s"' % path command = "rsync -a {options} {src} {host}:{dest} ".format( options=options, src=src, host=host, dest=dest @@ -229,7 +261,10 @@ class ClusterManager(object): self._sync_dir(host, local_dir, remote_dir) scripts_dir = self.scripts_dir - scripts = {'bootstrap.sh': self.BOOTSTRAP, 'update.sh': self.UPDATE} + bootstrap_code = self.BOOTSTRAP.format(project_name=self.project_name) + update_code = self.UPDATE.format(project_name=self.project_name) + scripts = {'bootstrap.sh': bootstrap_code, + 'update.sh': update_code} for script, code in scripts.items(): fname = os.path.join(scripts_dir, script) if not os.path.exists(fname): @@ -242,7 +277,8 @@ class ClusterManager(object): mode = os.stat(fname).st_mode os.chmod(fname, mode | stat.S_IXUSR | stat.S_IXGRP) - path = os.path.join(home, self.root) + path = os.path.join(home, self.root, self.project_name, + '.' 
+ self.root) cmd = "scp {script_files} {host}:{path}".format( host=host, path=path, script_files=' '.join(script_files) ) @@ -251,7 +287,10 @@ class ClusterManager(object): def _write_config(self): print("Writing %s" % self.config_fname) data = dict( - root=self.root, sources=self.sources, workers=self.workers + project_name=self.project_name, + root=self.root, + sources=self.sources, + workers=self.workers ) with open(self.config_fname, 'w') as f: json.dump(data, f, indent=2) @@ -259,7 +298,26 @@ class ClusterManager(object): # ### Public Protocol ######################################## def add_worker(self, host, home, nfs): - self.workers.append(dict(host=host, home=home, nfs=nfs)) + if host == 'localhost': + self.workers.append(dict(host=host, home=home, nfs=nfs)) + else: + root = self.root + curdir = os.path.basename(os.getcwd()) + if nfs: + python = sys.executable + chdir = curdir + else: + python = os.path.join( + home, root, + 'envs/{project_name}/bin/python'.format( + project_name=self.project_name + ) + ) + chdir = os.path.join(home, root, curdir) + self.workers.append( + dict(host=host, home=home, nfs=nfs, python=python, chdir=chdir) + ) + self._write_config() if host != 'localhost' and not nfs: self._bootstrap(host, home) @@ -280,22 +338,14 @@ class ClusterManager(object): from .jobs import Scheduler scheduler = Scheduler(root='.') - - root = self.root for worker in self.workers: host = worker.get('host') - home = worker.get('home') nfs = worker.get('nfs', False) if host == 'localhost': scheduler.add_worker(dict(host='localhost')) else: - curdir = os.path.basename(os.getcwd()) - if nfs: - python = sys.executable - chdir = curdir - else: - python = os.path.join(home, root, 'envs/pysph/bin/python') - chdir = os.path.join(home, root, curdir) + python = worker.get('python') + chdir = worker.get('chdir') scheduler.add_worker( dict(host=host, python=python, chdir=chdir, nfs=nfs) ) diff --git a/automan/jobs.py b/automan/jobs.py index 2b1588f..cce0997 100644 --- a/automan/jobs.py +++ b/automan/jobs.py @@ -15,6 +15,13 @@ import time import psutil +def _make_command_list(command): + if not isinstance(command, (list, tuple)): + return shlex.split(command) + else: + return command + + class Job(object): def __init__(self, command, output_dir, n_core=1, n_thread=1, env=None): """Constructor @@ -23,15 +30,10 @@ class Job(object): that many free cores. `n_thread` is used to set the `OMP_NUM_THREADS`. """ - if not isinstance(command, (list, tuple)): - command = shlex.split(command) - args = [] - for x in command: - if os.path.basename(x) == 'python': - args.append(sys.executable) - else: - args.append(x) - self.command = args + self.command = _make_command_list(command) + self.orig_command = self.command + self.substitute_in_command('python', sys.executable) + self._given_env = env self.env = dict(os.environ) if env is not None: @@ -46,6 +48,23 @@ class Job(object): self._info_file = os.path.join(self.output_dir, 'job_info.json') self.proc = None + def substitute_in_command(self, basename, substitute): + """Replace occurrence of given basename with the substitute. + + This is useful where the user asks to run ['python', 'script.py']. + Here, we need to make sure the right python is used. Typically a remote + machine will need to use a particular Python and not just the vanilla + Python. 
+ + """ + args = [] + for arg in self.command: + if os.path.basename(arg) == basename: + args.append(substitute) + else: + args.append(arg) + self.commands = args + def to_dict(self): state = dict() for key in ('command', 'output_dir', 'n_core', 'n_thread'): @@ -54,7 +73,7 @@ class Job(object): return state def pretty_command(self): - return ' '.join(self.command) + return ' '.join(self.orig_command) def get_stderr(self): return open(self.stderr).read() diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 0000000..1fd87fe --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +SPHINXPROJ = automan +SOURCEDIR = source +BUILDDIR = build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) \ No newline at end of file diff --git a/docs/make.bat b/docs/make.bat new file mode 100644 index 0000000..b61f1d2 --- /dev/null +++ b/docs/make.bat @@ -0,0 +1,36 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=source +set BUILDDIR=build +set SPHINXPROJ=automan + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% + +:end +popd diff --git a/docs/source/conf.py b/docs/source/conf.py new file mode 100644 index 0000000..7391981 --- /dev/null +++ b/docs/source/conf.py @@ -0,0 +1,162 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# +# automan documentation build configuration file, created by +# sphinx-quickstart on Thu Aug 23 22:12:56 2018. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +# import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) +import os + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. 
+extensions = ['sphinx.ext.autodoc', + 'sphinx.ext.mathjax', + 'sphinx.ext.viewcode'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = 'automan' +copyright = '2018, Prabhu Ramachandran' +author = 'Prabhu Ramachandran' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +_d = {} +fname = os.path.join(os.pardir, os.pardir, 'automan', '__init__.py') +exec(compile(open(fname).read(), fname, 'exec'), _d) + +# The full version, including alpha/beta/rc tags. +release = _d['__version__'] + +# The short X.Y version. +version = release[:3] + + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This patterns also effect to html_static_path and html_extra_path +exclude_patterns = [] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = False + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'sphinx_rtd_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +# html_theme_options = {} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + + +# -- Options for HTMLHelp output ------------------------------------------ + +# Output file base name for HTML help builder. +htmlhelp_basename = 'automandoc' + + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'automan.tex', 'automan Documentation', + 'Prabhu Ramachandran', 'manual'), +] + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). 
+man_pages = [ + (master_doc, 'automan', 'automan Documentation', + [author], 1) +] + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'automan', 'automan Documentation', + author, 'automan', 'One line description of project.', + 'Miscellaneous'), +] diff --git a/docs/source/index.rst b/docs/source/index.rst new file mode 100644 index 0000000..84db66c --- /dev/null +++ b/docs/source/index.rst @@ -0,0 +1,31 @@ +.. automan documentation master file, created by + sphinx-quickstart on Thu Aug 23 22:12:56 2018. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to automan's documentation! +=================================== + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + + overview.rst + tutorial.rst + +************************ +Reference documentation +************************ + + +.. toctree:: + :maxdepth: 2 + + reference/index.rst + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/source/overview.rst b/docs/source/overview.rst new file mode 100644 index 0000000..1c7efe6 --- /dev/null +++ b/docs/source/overview.rst @@ -0,0 +1,61 @@ +Overview +========== + +``automan`` is an open source, Python-based automation framework for numerical +computing. + +It is designed to automate the drudge work of managing many numerical +simulations. As an automation framework it does the following: + +- helps you organize your simulations. +- helps you orchestrate running simulations and then post-processing the + results from these. +- helps you reuse code for the post processing of your simulation data. +- execute all your simulations and post-processing with one command. +- optionally distribute your simulations among other computers on your + network. + +This greatly facilitates reproducibility. Automan is written in pure Python +and is easy to install. + +This document should help you use automan to improve your productivity. If you +are interested in a more detailed article about automan see here: +https://arxiv.org/abs/1712.04786 + + +Installation +------------- + +The easiest way to install ``automan`` is with pip_:: + + $ pip install automan + +If you wish to run the latest version that has not been relesed you may clone +the git repository:: + + $ git clone https://github.com/pypr/automan + + $ cd automan + +And then run:: + + $ python setup.py develop + +.. _pip: https://pip.pypa.io/en/stable/ + + +Once this is done, move on to the next section that provides a gentle tutorial +introduction to using automan. + + +Citing ``automan`` +------------------- + +If you find automan useful and wish to cite it you may use the following +article: + +- Prabhu Ramachandran, "automan: a Python-based Automation Framework for + Numerical Computing", *IEEE Computing in Science and Engineering*, + Accepted, 2018. + +You can find a draft of the article here: https://arxiv.org/abs/1712.04786 diff --git a/docs/source/reference/automan.rst b/docs/source/reference/automan.rst new file mode 100644 index 0000000..9ea7cdf --- /dev/null +++ b/docs/source/reference/automan.rst @@ -0,0 +1,21 @@ +Main automation module +======================= + +.. 
automodule:: automan.automation + :members: + :undoc-members: + + +Low-level job management module +================================= + +.. automodule:: automan.jobs + :members: + :undoc-members: + +Cluster management module +========================= + +.. automodule:: automan.cluster_manager + :members: + :undoc-members: diff --git a/docs/source/reference/index.rst b/docs/source/reference/index.rst new file mode 100644 index 0000000..b5be858 --- /dev/null +++ b/docs/source/reference/index.rst @@ -0,0 +1,9 @@ +Reference Documentation +========================= + +Autogenerated from doc strings using sphinx’s autodoc feature. + +.. toctree:: + :maxdepth: 3 + + automan.rst diff --git a/docs/source/tutorial.rst b/docs/source/tutorial.rst new file mode 100644 index 0000000..dce89aa --- /dev/null +++ b/docs/source/tutorial.rst @@ -0,0 +1,629 @@ +A tutorial on using ``automan`` +================================ + +Automan is best suited for numerical computations that take a lot of time to +run. It is most useful when you have to manage the execution of many of these +numerical computations. Very often one needs to process the output of these +simulations to either compare the results and then assemble a variety of plots +or tables for a report or manuscript. + +The numerical simulations can be in the form of any executable code, either in +the form of a binary or a script. In this tutorial we will assume that the +programs being executed are in the form of Python scripts. + +There are three general recommendations we make for your numerical programs to +be able to use automan effectively. + +1. They should be configurable using command line arguments. +2. They should generate their output files into a directory specified on the + command line. +3. Any post-processed data should be saved into an easy to load datafile to + facilitate comparisons with other simulations. + +None of these is strictly mandatory but we strongly recommend doing it this +way. + +A simple example +----------------- + +In this tutorial we take a rather trivial program to execute just to +illustrate the basic concepts. Our basic program is going to simply calculate +the square of a number passed to it on the command line. Here is how the code +looks: + +.. literalinclude:: ../../examples/tutorial/square.py + + +You can execute this script like so:: + + $ python square.py 2.0 + 2.0 4.0 + +Yay, it works! + +.. note:: + + If you want to run these examples, they are included along with the + example files in the automan source code. The files should be in + ``examples/tutorial`` (`Browse online + <https://github.com/pypr/automan/tree/master/examples/tutorial>`_). + +This example does not produce any output files and doesn't really take any +configuration arguments. So we don't really need to do much about this. + +Now let us say we want to automate the execution of this script with many +different arguments, let us say 2 different values for now (we can increase it later). + +We can now do this with automan. First create a simple script for this, we +could call it ``automate1.py`` (this is just a convention, you could call it +anything you want). The code is as shown below: + +.. literalinclude:: ../../examples/tutorial/automate1.py + :linenos: + + +Note the following: + +- The ``Squares`` class derives from :py:class:`automan.automation.Problem`. + This encapsulates all the simulations where we wish to find the square of + some number. +- The ``get_name`` method returns a subdirectory for all the outputs of this + problem. 
+- The ``get_commands`` returns a list of tuples of the following form, + ``(directory_name, command, job_info)``. In this case we don't pass any job + information and we'll get to this later. Notice that the two commands + specified are essentially what we'd have typed on the terminal. +- The ``run`` command does nothing much except create a directory. For now let + us leave this alone. + + +Let us execute this to see what it does:: + + $ python automate1.py + + Writing config.json + 4 tasks pending and 0 tasks running + + Running task <automan.automation.CommandTask object at 0x10628d978>... + Starting worker on localhost. + Job run by localhost + Running python square.py 2 + + Running task <automan.automation.CommandTask object at 0x10628d9b0>... + Job run by localhost + Running python square.py 1 + 2 tasks pending and 2 tasks running + Running task <automan.automation.SolveProblem object at 0x10628d940>... + + Running task <automan.automation.RunAll object at 0x10628d908>... + Finished! + +So the script executes and seems to have run the requested computations. Let +us see the output directories:: + + $ tree + . + ├── automate1.py + ├── config.json + ├── manuscript + │   └── figures + │   └── squares + ├── outputs + │   └── squares + │   ├── 1 + │   │   ├── job_info.json + │   │   ├── stderr.txt + │   │   └── stdout.txt + │   └── 2 + │   ├── job_info.json + │   ├── stderr.txt + │   └── stdout.txt + └── square.py + + 7 directories, 9 files + +Let us summarize what just happened: + +- The two commands we asked for were executed and the respective outputs of + each invocation were placed into ``outputs/squares/1`` and + ``outputs/squares/2``. Notice that there are ``stdout.txt, stderr.txt`` and + a ``job_info.json`` file here too. +- A manuscript directory called ``manuscript/figures/squares`` is created. +- There is also a new ``config.json`` that we can safely ignore for now. + +Let us see the contents of the files in the outputs directory:: + + $ cat outputs/squares/1/stdout.txt + 1.0 1.0 + + $ cat outputs/squares/1/job_info.json + {"start": "Fri Aug 24 01:11:46 2018", "end": "Fri Aug 24 01:11:46 2018", "status": "done", "exitcode": 0, "pid": 20381} + +As you can see, the standard output has the output of the command. The +``job_info.json`` has information about the actual execution of the code. This +is very useful in general. + +Thus automan has executed the code, organized the output directories and +collected the standard output and information about the execution of the +tasks. + +Now, let us run the automation again:: + + $ python automate1.py + 0 tasks pending and 0 tasks running + Finished! + +It does not re-run the code as it detects that everything is complete. + +Adding some post-processing +---------------------------- + +Let us say we want to create a plot that either compares the individual runs +or assembles the runs into a single plot or collects the data into a single +file. We can easily do this by adding more code into the ``run`` method of our +``Squares`` class. Let us also add a couple of more computations. + +.. literalinclude:: ../../examples/tutorial/automate2.py + :linenos: + +Let us examine this code a little carefully: + +- In ``get_commands``, we have simply added two more cases. + +- In ``run``, we have added some simple code to just iterate over the 4 + directories we should have and then read the standard output into a list and + finally we write that out into an ``output.txt`` file. 
+ +- We also moved the ``automator`` object creation and execution so we can + import our ``automate2.py`` script if we wish to. + +The two new methods you see here are ``self.input_path(...)`` which makes it +easy to access any paths inside the simulation directories and the +``self.output_path(...)`` which does the same but inside the output path. Let +us see what these do, inside the directory containing the ``automate2.py`` if +you start up a Python interpreter (IPython_ would be much nicer), you can do +the following:: + + >>> import automate2 + >>> squares = automate2.Squares( + ... simulation_dir='outputs', + ... output_dir='manuscript/figures/' + ... ) + + >>> squares.input_path('1') + 'outputs/squares/1' + + >>> squares.input_path('1', 'stdout.txt') + 'outputs/squares/1/stdout.txt' + + >>> squares.output_path('output.txt') + 'manuscript/figures/squares/output.txt' + +.. _IPython: ://ipython.org/ + +As you can see, these are just conveniences for finding the input file paths +and the output file paths. Now let us run this new script:: + + $ python automate2.py + 0 tasks pending and 0 tasks running + Finished! + +Whoa! That doesn't seem right? What happens is that since the last time we ran +the automate script, it created the output, it assumes that there is nothing +more to do as the final result (the ``manuscript/figures/squares``) was +successfully created so it does not run anything new. If you do this:: + + $ python automate2.py -h + +You'll see an option ``-f`` which basically redoes the post-processing by +removing any old plots, so let us try that:: + + $ python automate2.py -f + 4 tasks pending and 0 tasks running + ... + Finished! + +Now it actually ran just the new simulations (you can see the commands in the +output it prints), it will not re-run the already executed cases. Now let us +see if the output is collected:: + + $ cat manuscript/figures/squares/output.txt + [['1.0', '1.0'], ['2.0', '4.0'], ['3.0', '9.0'], ['4.0', '16.0']] + +So what automan did was to execute the newly added cases and then executed our +post-processing code in the ``run`` method to produce the output. + +Building on this we have a slightly improved script, called ``automate3.py``, +which makes a plot: + +.. literalinclude:: ../../examples/tutorial/automate3.py + :linenos: + +This version simplifies the command generation by using a list-comprehension, +so reduces several lines of code. It then makes a matplotlib plot with the +collected data. Let us run this:: + + $ python automate3.py -f + 5 tasks pending and 0 tasks running + ... + Finished! + + $ ls manuscript/figures/squares/ + squares.pdf + + +As you can see, the old ``output.txt`` is gone and our plot is available. + +.. note:: + This example requires that you have matplotlib_ and NumPy_ installed. + + +.. _NumPy: https://www.numpy.org/ +.. _matplotlib: https://matplotlib.org/ + + +If you wanted to change the plotting style in any way, you can do so and +simply re-run ``python automate3.py -f`` and it will only regenerate the final +plot without re-executing the actual simulations. + +So what if you wish to re-run any of these cases? In this case you will need +to manually remove the particular simulation (or even all of them). Let us try +this:: + + $ rm -rf outputs/squares/3 + + $ python automate3.py -f + 3 tasks pending and 0 tasks running + ... + Finished! + +It will just run the missing case and re-generate the plots. + +While this may not seem like much, we've fully automated our simulation and +analysis. 
+ + +Doing things a bit better +---------------------------- + +The previous section demonstrated the basic ideas so you can get started +quickly. Our example problem was very simple and only produced command line +output. Our next example is a simple problem in the same directory called +``powers.py``. This problem is also simple but supports a few command line +arguments and is as follows: + +.. literalinclude:: ../../examples/tutorial/powers.py + :linenos: + +Again, the example is very simple, bulk of the code is parsing command line +arguments. There are three arguments the code can take: + +- ``--power power`` specifies the power to be computed. +- ``--max`` specifies the largest integer in sequence whose power is to be + computed +- ``--output-dir`` is the directory where the output should be generated. + +When executed, the script will create a ``results.npz`` file which contains +the results stored as NumPy_ arrays. This example also requires that +matplotlib_ and NumPy_ be installed. Let us run the code:: + + $ python powers.py + + $ ls results.npz + results.npz + + $ python powers.py --power 3.5 + +On a Python interpreter we can quickly look at the results:: + + $ python + + >>> import numpy as np + >>> data = np.load('results.npz') + >>> data['x'] + >>> data['y'] + +This looks about right, so let us move on to see how we can automate running +several cases of this script in a better way than what we had before. We will +continue to automate the previous ``squares.py`` script. This shows you how +you can use automan incrementally as you add more cases. We only show the +lines that are changed and do not reproduce the ``Squares`` problem class in +the listing below. + + +.. literalinclude:: ../../examples/tutorial/automate4.py + :lines: 31- + :lineno-match: + +To see the complete file see `automate4.py +<https://github.com/pypr/automan/tree/master/examples/tutorial>`_. The key +points to note in the code are the following: + +- As before ``get_name()`` simply returns a convenient name where the outputs + will be stored. +- A new ``setup()`` method is used and this creates an instance attribute + called ``self.cases`` which is a list of cases we wish to simulate. Instead + of using strings in the ``get_commands`` we simply setup the ``cases`` and + no longer need to create a ``get_commands``. We discuss ``Simulation`` + instances in greater detail below. +- The ``run()`` method is similar except it uses the ``cases`` attribute and + some conveniences of the simulation objects for convenience. + + +The :py:class:`automan.automation.Simulation` instances we create are more +general purpose and are very handy. A simulation instance's first argument is +the the output directory and the second is a basic command to execute. It +takes a third optional argument called ``job_info`` which specifies the number +of cores and threads to use and we discuss this later. For now let us ignore +it. In addition any keyword arguments one passes to this are automatically +converted to command line arguments. Let us try to create one of these on an +interpreter to see what is going on:: + + >>> from automan.api import Simulation + >>> s = Simulation(root='some_output/dir/blah', + ... base_command='python powers.py', power=3.5) + >>> s.name + 'blah' + >>> s.command + 'python powers.py --power=3.5' + >>> s.params + {'power': 3.5} + +Notice that the name is simply the basename of the root path. You will see +that additional keyword argument ``power=3.5`` is converted to a suitable +command line argument. 
This is done by the +``Simulation.get_command_line_args`` method and can be overridden if you wish +to do something different. The ``Simulation.params`` attribute simply stores +all the keyword arguments so you could use it later while post-processing. + +Now we want that each execution of this command produces output into the +correct directory. We could either roll this into the ``base_command`` +argument by passing the correct output directory or there is a nicer way to do +this using the magic ``$output_dir`` argument that is automatically set the +output directory when the command is executed, for example:: + + >>> from automan.api import Simulation + >>> s = Simulation(root='some_output/dir/blah', + ... base_command='python powers.py --output-dir $output_dir', power=3.5) + >>> s.command + 'python powers.py --output-dir $output_dir --power=3.5' + +Note that the magic variable is not substituted at this point but later when +the program is about to be executed. + +Given these details, the code in the ``run`` method should be fairly +straightforward to understand. Note that this organization of our code has +made us maximize reuse of our plotting code. The ``case.params`` attribute is +convenient when post-prprocessing. One can also filter the cases using the +``filter_cases`` function that is provided by ``automan``. We discuss this +later. + +The last change to note is that we add the ``Powers`` class to the +``Automator``'s ``all_problems`` and we are done.Let us now run this:: + + $ python automate4.py + 6 tasks pending and 0 tasks running + ... + Finished! + +This only executes the new cases from the ``Powers`` class and makes the plot +in ``manuscript/figures/powers/powers.pdf``. + +Using :py:class:`automan.automation.Simulation` instances allows us to +parametrize simulations with the keyword arguments. In addition, it is handy +while post-processing. We can also subclass the ``Simulation`` instance to +customize various things or share code. + +There are a few more conveniences that automan provides that are useful while +post-processing and these are discussed below. + + +Filtering and comparing cases +------------------------------ + +.. py:currentmodule:: automan.automation + +``automan`` provides a couple of handy functions to help filter the different +cases based on the parameters or the name of the cases. One can also make +plots for a collection of cases and compare them easily. + +- The :py:func:`filter_cases` function takes a sequence of cases + and any additional keyword arguments with parameter values and filters out + the cases having those parameter values. For example from our previous + example in the ``Powers`` class, if we do the following:: + + filtered_cases = filter_cases(cases, power=2) + + will return a list with a single case which uses ``power=2``. This is very + handy. This function can also be passed a callable which returns ``True`` + for any acceptable case. For example:: + + filter_cases(cases, lambda x: x.params['power'] % 2) + + will return all the cases with odd powers. + +- The :py:func:`filter_by_name` function filters the cases whose + names match the list of names passed. For example:: + + filter_by_name(cases, ['1', '4']) + + will return the two simulations whose names are equal to ``'1'`` or ``'4'``. + +- The :py:func:`compare_runs` function calls a method or callable + with the given cases. This is very handy to make comparison plots. 
+ +With this information you should be in a position to automate your +computational simulations and analysis. + +Next we look at setting up additional remote computers on which we can execute +our computations. + + +Using additional computational resources +---------------------------------------- + +Wouldn't it be nice if we could easily run part of the simulations on one or +more remote computers? ``automan`` makes this possible. Let us see how with +our last example. + +Let us first remove all the generated outputs and files so we can try this:: + + $ rm -rf outputs/ manuscript/figures config.json + +Running the simulations on a remote machine requires a few things: + +- the computer should be running either Mac OS or Linux/Unix. +- you should have an account on the computer, and be able to ``ssh`` into it + without a password (see `article on password-less ssh + <http://askubuntu.com/questions/46930/how-can-i-set-up-password-less-ssh-login>`_. +- the computer should have a working basic Python interpreter. + +For more complex dependencies, you need to make sure the remote machine has +the necessary software. + + +Assuming you have these requirements on a computer accessible on your network +you can do the following:: + + $ python automate4.py -a host_name + [...] + +Where ``host_name`` is either the computer's name or IP address. This will +print a lot of output and attempt to setup a virtual environment on the remote +machine. If it fails, it will print out some instructions for you to fix. + +If this succeeds, you can now simply use the automation script just as before +and it will now run some of the code on the remote machine depending on its +availability. For example:: + + $ python automate4.py + 14 tasks pending and 0 tasks running + + Running task <automan.automation.CommandTask object at 0x1141da748>... + Starting worker on localhost. + Job run by localhost + Running python powers.py --output-dir outputs/powers/4 --power=4.0 + + Running task <automan.automation.CommandTask object at 0x1141da6d8>... + Starting worker on 10.1.10.242. + Job run by 10.1.10.242 + Running python powers.py --output-dir outputs/powers/3 --power=3.0 + ... + + +Note that you can add new machines at any point. For example you may have +finished running a few simulations already and are simulating a new problem +that you wish to distribute, you can add a new machine and fire the automation +script and it will use it for the new simulations. + +When you add a new remote host ``automan`` does the following: + +- Creates an ``automan`` directory in the remote machine home directory (you + can set a different home using ``python automate4.py -a host --home + other_home``.) +- Inside this directory it copies the current project directory, ``tutorial`` + in the present case. +- It then copies over a ``bootstrap.sh`` and ``update.sh`` and runs the + ``bootstrap.sh`` script. These scripts are inside a ``.automan/`` directory + on your localhost and you may edit these if you need to. + +The bootstrap code does the following: + +- It creates a virtualenv_ called ``tutorial`` on this computer using the + system Python and puts this in ``automan/envs/tutorial``. +- It then activates this environment, installs ``automan`` and also runs any + ``requirements.txt`` if they exist in the tutorial directory. + +If for some reason this script fails, you may edit it on the remote host and +re-run it. 
+ +When executing the code, automan copies over the files from the remote host to +your computer once the simulation is completed and also deletes the output +files on the remote machine. + +If your remote computer shares your file-system via nfs or so, you can specify +this when you add the host as follows:: + + $ python automate4.py -a host_sharing_nfs_files --nfs + +In this case, files will not be copied back and forth from the remote host. + + +.. _virtualenv: https://virtualenv.pypa.io/ + +Now lets say you update files inside your project you can update the remote +hosts using:: + + $ python automate4.py -u + +This will update all remote workers and also run the ``update.sh`` script on +all of them. It will also copy your local modifications to the scripts in +``.automan``. It will then run any simulations. + +Lets say you do not want to use a particular host, you can remove the entry +for this in the ``config.json`` file. + +When ``automan`` distributes tasks to machines, local and remote, it needs +some information about the task and the remote machines. Recall that when we +created the ``Simulation`` instances we could pass in a ``job_info`` keyword +argument. The ``job_info`` is an optional dictionary with the following +optional keys: + +- ``'n_core'``: the number of cores that this simulation requires. This is + used for scheduling tasks. For example if you set ``n_core=4`` and have a + computer with only 2 cores, automan will not be able to run this job on this + machine at all. On the other hand if the task does indeed consume more than + one core and you set the value to one, then the scheduler will run the job + on a computer with only one core available. +- ``'n_thread'``: the number of threads to use. This is used to set the + environment variable ``OMP_NUM_THREADS`` for OpenMP executions. + + +As an example, here is how one would use this:: + + Simulation(root=self.input_path('3.5'), + base_command='python powers.py', + job_info=dict(n_core=1, n_thread=1), + power=3.5 + ) + +This job requires only a single core. So when automan tries to execute the job +on a computer it looks at the load on the computer and if one core is free, it +will execute the job. + +If for some reason you are not happy with how the remote computer is managed +and wish to customize it, you can feel free to subclass the +:py:class:`automan.cluster_manager.ClusterManager` class. You may pass this in +to the :py:class:`automan.automation.Automator` class as the +``cluster_manager_factory`` and it will use it. + + + +Using docker +------------ + +It should be possible to use automan from within a Docker_ container. This can +be done either by specifying commands to be run within suitable ``docker run`` +invocations. Alternatively, one can install automan and run scripts within the +docker container and this will work correctly. + +Using docker for remote computers is not fully supported at this time. + +.. _Docker: https://www.docker.com/ + +Learning more +------------- + +If you wish to learn more about automan you may find the following useful: + +- Read the draft of the paper on ``automan`` here: https://arxiv.org/abs/1712.04786 + +- The paper mentions another manuscript which was fully automated using + automan, the sources for this are at https://gitlab.com/prabhu/edac_sph/ and + this demonstrates a complete real-world example of using automan to automate + an entire research paper. 
+ +- Olivier Mesnard has created a nice example as part of the review of this + paper that can be seen here: https://github.com/mesnardo/automan-example the + example also nicely shows how automan can be used from within a docker + container for a completely reproducible workflow. diff --git a/examples/tutorial/automate1.py b/examples/tutorial/automate1.py new file mode 100644 index 0000000..b55c8c5 --- /dev/null +++ b/examples/tutorial/automate1.py @@ -0,0 +1,23 @@ +from automan.api import Problem, Automator + + +class Squares(Problem): + def get_name(self): + return 'squares' + + def get_commands(self): + return [ + ('1', 'python square.py 1', None), + ('2', 'python square.py 2', None), + ] + + def run(self): + self.make_output_dir() + + +automator = Automator( + simulation_dir='outputs', + output_dir='manuscript/figures', + all_problems=[Squares] +) +automator.run() diff --git a/examples/tutorial/automate2.py b/examples/tutorial/automate2.py new file mode 100644 index 0000000..bc0d39d --- /dev/null +++ b/examples/tutorial/automate2.py @@ -0,0 +1,36 @@ +from automan.api import Problem, Automator + + +class Squares(Problem): + def get_name(self): + return 'squares' + + def get_commands(self): + return [ + ('1', 'python square.py 1', None), + ('2', 'python square.py 2', None), + ('3', 'python square.py 3', None), + ('4', 'python square.py 4', None), + ] + + def run(self): + self.make_output_dir() + data = [] + for i in ('1', '2', '3', '4'): + stdout = self.input_path(i, 'stdout.txt') + with open(stdout) as f: + data.append(f.read().split()) + + output = self.output_path('output.txt') + with open(output, 'w') as o: + o.write(str(data)) + + +if __name__ == '__main__': + automator = Automator( + simulation_dir='outputs', + output_dir='manuscript/figures', + all_problems=[Squares] + ) + + automator.run() diff --git a/examples/tutorial/automate3.py b/examples/tutorial/automate3.py new file mode 100644 index 0000000..4907c6b --- /dev/null +++ b/examples/tutorial/automate3.py @@ -0,0 +1,36 @@ +from automan.api import Problem, Automator +from matplotlib import pyplot as plt +import numpy as np + + +class Squares(Problem): + def get_name(self): + return 'squares' + + def get_commands(self): + commands = [(str(i), 'python square.py %d' % i, None) + for i in range(1, 8)] + return commands + + def run(self): + self.make_output_dir() + data = [] + for i in range(1, 8): + stdout = self.input_path(str(i), 'stdout.txt') + with open(stdout) as f: + values = [float(x) for x in f.read().split()] + data.append(values) + + data = np.asarray(data) + plt.plot(data[:, 0], data[:, 1], 'o-') + plt.xlabel('x') + plt.ylabel('y') + plt.savefig(self.output_path('squares.pdf')) + + +automator = Automator( + simulation_dir='outputs', + output_dir='manuscript/figures', + all_problems=[Squares] +) +automator.run() diff --git a/examples/tutorial/automate4.py b/examples/tutorial/automate4.py new file mode 100644 index 0000000..43ba751 --- /dev/null +++ b/examples/tutorial/automate4.py @@ -0,0 +1,70 @@ +from automan.api import Problem, Automator +from matplotlib import pyplot as plt +import numpy as np + + +class Squares(Problem): + def get_name(self): + return 'squares' + + def get_commands(self): + commands = [(str(i), 'python square.py %d' % i, None) + for i in range(1, 8)] + return commands + + def run(self): + self.make_output_dir() + data = [] + for i in range(1, 8): + stdout = self.input_path(str(i), 'stdout.txt') + with open(stdout) as f: + values = [float(x) for x in f.read().split()] + data.append(values) + + data = 
np.asarray(data) + plt.plot(data[:, 0], data[:, 1], 'o-') + plt.xlabel('x') + plt.ylabel('y') + plt.savefig(self.output_path('squares.pdf')) + + +from automan.api import Simulation + + +class Powers(Problem): + def get_name(self): + return 'powers' + + def setup(self): + base_cmd = 'python powers.py --output-dir $output_dir' + self.cases = [ + Simulation( + root=self.input_path(str(i)), + base_command=base_cmd, + power=float(i) + ) + for i in range(1, 5) + ] + + def run(self): + self.make_output_dir() + for case in self.cases: + data = np.load(case.input_path('results.npz')) + plt.plot( + data['x'], data['y'], + label=r'$x^{{%.2f}}$' % case.params['power'] + ) + plt.grid() + plt.xlabel('x') + plt.ylabel('y') + plt.legend() + plt.savefig(self.output_path('powers.pdf')) + + +if __name__ == '__main__': + automator = Automator( + simulation_dir='outputs', + output_dir='manuscript/figures', + all_problems=[Squares, Powers] + ) + automator.run() diff --git a/examples/tutorial/powers.py b/examples/tutorial/powers.py new file mode 100644 index 0000000..033e034 --- /dev/null +++ b/examples/tutorial/powers.py @@ -0,0 +1,41 @@ +import argparse +import os + +import numpy as np + + +def compute_powers(r_max, power): + """Compute the powers of the integers upto r_max and return the result. + """ + result = [] + for i in range(0, r_max + 1): + result.append((i, i**power)) + x = np.arange(0, r_max + 1) + y = np.power(x, power) + return x, y + + +def main(): + p = argparse.ArgumentParser() + p.add_argument( + '--power', type=float, default=2.0, + help='Power to calculate' + ) + p.add_argument( + '--max', type=int, default=10, + help='Maximum integer that we must raise to the given power' + ) + p.add_argument( + '--output-dir', type=str, default='.', + help='Output directory to generate file.' + ) + opts = p.parse_args() + + x, y = compute_powers(opts.max, opts.power) + + fname = os.path.join(opts.output_dir, 'results.npz') + np.savez(fname, x=x, y=y) + + +if __name__ == '__main__': + main() diff --git a/examples/tutorial/square.py b/examples/tutorial/square.py new file mode 100644 index 0000000..5c7592e --- /dev/null +++ b/examples/tutorial/square.py @@ -0,0 +1,5 @@ +from __future__ import print_function +import sys +x = float(sys.argv[1]) +print(x, x*x) +
Add sphinx documentation

This would be a lot more accessible than reading the formal paper. This was also suggested here: https://www.authorea.com/users/99991/articles/307464-review-automan-a-python-based-automation-framework-for-numerical-computing
pypr/automan
diff --git a/automan/tests/test_automation.py b/automan/tests/test_automation.py index 8fef103..fefd34a 100644 --- a/automan/tests/test_automation.py +++ b/automan/tests/test_automation.py @@ -13,7 +13,7 @@ except ImportError: from automan.automation import ( Automator, CommandTask, PySPHProblem, Simulation, SolveProblem, - TaskRunner, compare_runs + TaskRunner, compare_runs, filter_cases ) try: from automan.jobs import Scheduler, RemoteWorker @@ -356,6 +356,69 @@ def test_compare_runs_works_when_given_callables(): s0.get_labels.assert_called_once_with(['x']) +def test_filter_cases_works_with_params(): + # Given + sims = [Simulation(root='', base_command='python', param1=i, param2=i+1) + for i in range(5)] + # When + result = filter_cases(sims, param1=2) + + # Then + assert len(result) == 1 + assert result[0].params['param1'] == 2 + + # When + result = filter_cases(sims, param1=2, param2=2) + + # Then + assert len(result) == 0 + + # When + result = filter_cases(sims, param1=3, param2=4) + + # Then + assert len(result) == 1 + assert result[0].params['param1'] == 3 + assert result[0].params['param2'] == 4 + + +def test_filter_cases_works_with_predicate(): + # Given + sims = [Simulation(root='', base_command='python', param1=i, param2=i+1) + for i in range(5)] + + # When + result = filter_cases( + sims, predicate=lambda x: x.params.get('param1', 0) % 2 + ) + + # Then + assert len(result) == 2 + assert result[0].params['param1'] == 1 + assert result[1].params['param1'] == 3 + + # When + result = filter_cases( + sims, predicate=2 + ) + + # Then + assert len(result) == 0 + + # Given + sims = [Simulation(root='', base_command='python', predicate=i) + for i in range(5)] + + # When + result = filter_cases( + sims, predicate=2 + ) + + # Then + assert len(result) == 1 + assert result[0].params['predicate'] == 2 + + class TestAutomator(TestAutomationBase): def setUp(self): super(TestAutomator, self).setUp()
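The test diff above is the only documentation of `filter_cases` in this record. The following standalone sketch is inferred purely from that test code: the import path, the way extra keyword arguments land in `Simulation.params`, and the `predicate` handling are all assumptions taken from the tests, not from any official automan documentation.

```python
# Usage sketch inferred from automan/tests/test_automation.py above.
from automan.automation import Simulation, filter_cases

# Extra keyword arguments passed to Simulation end up in its `params` dict.
sims = [
    Simulation(root='case_%d' % i, base_command='python powers.py',
               power=float(i))
    for i in range(5)
]

# Keyword filtering: keep only the cases whose params match the given values.
power_two = filter_cases(sims, power=2.0)

# Predicate filtering: keep the cases for which the callable is truthy.
odd_powers = filter_cases(sims,
                          predicate=lambda case: case.params['power'] % 2 == 1)

print(len(power_two), len(odd_powers))  # expected: 1 2
```

The tests also suggest that a non-callable `predicate` argument is treated as an ordinary parameter filter rather than as a callable.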
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 4 }
0.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work -e git+https://github.com/pypr/automan.git@3043553f4e39b4def791e353b015d815bc35357c#egg=automan certifi==2021.5.30 execnet==1.9.0 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work psutil==7.0.0 py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: automan channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - execnet==1.9.0 - psutil==7.0.0 prefix: /opt/conda/envs/automan
[ "automan/tests/test_automation.py::test_filter_cases_works_with_predicate" ]
[]
[ "automan/tests/test_automation.py::TestLocalAutomation::test_automation", "automan/tests/test_automation.py::TestLocalAutomation::test_nothing_is_run_when_output_exists", "automan/tests/test_automation.py::TestRemoteAutomation::test_automation", "automan/tests/test_automation.py::TestRemoteAutomation::test_job_with_error_is_handled_correctly", "automan/tests/test_automation.py::TestRemoteAutomation::test_nothing_is_run_when_output_exists", "automan/tests/test_automation.py::TestCommandTask::test_command_tasks_converts_dollar_output_dir", "automan/tests/test_automation.py::TestCommandTask::test_command_tasks_executes_simple_command", "automan/tests/test_automation.py::TestCommandTask::test_command_tasks_handles_errors_correctly", "automan/tests/test_automation.py::test_simulation_get_labels", "automan/tests/test_automation.py::test_compare_runs_calls_methods_when_given_names", "automan/tests/test_automation.py::test_compare_runs_works_when_given_callables", "automan/tests/test_automation.py::test_filter_cases_works_with_params", "automan/tests/test_automation.py::TestAutomator::test_automator" ]
[]
BSD-3-Clause
2,988
[ "examples/tutorial/automate3.py", "examples/tutorial/square.py", "MANIFEST.in", "examples/tutorial/automate1.py", ".gitignore", "examples/tutorial/automate2.py", "docs/source/overview.rst", "docs/source/reference/automan.rst", "automan/cluster_manager.py", "docs/source/tutorial.rst", "automan/automation.py", "docs/source/index.rst", "examples/tutorial/powers.py", "docs/source/reference/index.rst", "docs/make.bat", "examples/tutorial/automate4.py", "docs/Makefile", "automan/jobs.py", "docs/source/conf.py" ]
[ "examples/tutorial/automate3.py", "examples/tutorial/square.py", "MANIFEST.in", "examples/tutorial/automate1.py", ".gitignore", "examples/tutorial/automate2.py", "docs/source/overview.rst", "docs/source/reference/automan.rst", "automan/cluster_manager.py", "docs/source/tutorial.rst", "automan/automation.py", "docs/source/index.rst", "examples/tutorial/powers.py", "docs/source/reference/index.rst", "docs/make.bat", "examples/tutorial/automate4.py", "docs/Makefile", "automan/jobs.py", "docs/source/conf.py" ]
joblib__joblib-765
f64e1b8cf286c02d2782c6dd5bf7a42e0518c51d
2018-08-29 14:56:56
cbb660126d2ad8ac9f9ae9ffc16dd551ca937ebd
codecov[bot]: # [Codecov](https://codecov.io/gh/joblib/joblib/pull/765?src=pr&el=h1) Report > Merging [#765](https://codecov.io/gh/joblib/joblib/pull/765?src=pr&el=desc) into [master](https://codecov.io/gh/joblib/joblib/commit/f64e1b8cf286c02d2782c6dd5bf7a42e0518c51d?src=pr&el=desc) will **decrease** coverage by `0.1%`. > The diff coverage is `100%`. [![Impacted file tree graph](https://codecov.io/gh/joblib/joblib/pull/765/graphs/tree.svg?width=650&token=gA6LF5DGTW&height=150&src=pr)](https://codecov.io/gh/joblib/joblib/pull/765?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #765 +/- ## ========================================== - Coverage 95.42% 95.31% -0.11% ========================================== Files 44 44 Lines 6224 6235 +11 ========================================== + Hits 5939 5943 +4 - Misses 285 292 +7 ``` | [Impacted Files](https://codecov.io/gh/joblib/joblib/pull/765?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [joblib/\_store\_backends.py](https://codecov.io/gh/joblib/joblib/pull/765/diff?src=pr&el=tree#diff-am9ibGliL19zdG9yZV9iYWNrZW5kcy5weQ==) | `90.57% <100%> (+0.62%)` | :arrow_up: | | [joblib/test/test\_memory.py](https://codecov.io/gh/joblib/joblib/pull/765/diff?src=pr&el=tree#diff-am9ibGliL3Rlc3QvdGVzdF9tZW1vcnkucHk=) | `98.11% <100%> (+0.02%)` | :arrow_up: | | [joblib/disk.py](https://codecov.io/gh/joblib/joblib/pull/765/diff?src=pr&el=tree#diff-am9ibGliL2Rpc2sucHk=) | `81.66% <0%> (-6.67%)` | :arrow_down: | | [joblib/test/test\_dask.py](https://codecov.io/gh/joblib/joblib/pull/765/diff?src=pr&el=tree#diff-am9ibGliL3Rlc3QvdGVzdF9kYXNrLnB5) | `96.22% <0%> (-1.89%)` | :arrow_down: | | [joblib/\_parallel\_backends.py](https://codecov.io/gh/joblib/joblib/pull/765/diff?src=pr&el=tree#diff-am9ibGliL19wYXJhbGxlbF9iYWNrZW5kcy5weQ==) | `95.6% <0%> (-0.41%)` | :arrow_down: | ------ [Continue to review full report at Codecov](https://codecov.io/gh/joblib/joblib/pull/765?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/joblib/joblib/pull/765?src=pr&el=footer). Last update [f64e1b8...bf5f24e](https://codecov.io/gh/joblib/joblib/pull/765?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments). lesteve: Can you check that self.store_backend is not used in other classes `__repr__`. I seem to remember it is. aabadie: > Can you check that self.store_backend is not used in other classes __repr__. I seem to remember it is. I just did and you are right it was used in `MemorizedResult` and `MemorizedFunc` classes. It's fixed now. aabadie: @ogrisel, is it ok to merge ?
diff --git a/README.rst b/README.rst index 472a5fd..1ed63ef 100644 --- a/README.rst +++ b/README.rst @@ -58,6 +58,8 @@ Dependencies - Joblib has an optional dependency on `python-lz4 <https://pypi.python.org/pypi/lz4>`_ as a faster alternative to zlib and gzip for compressed serialization. +- Joblib has an optional dependency on psutil to mitigate memory leaks in + parallel worker processes. - Some examples require external dependencies such as pandas. See the instructions in the `Building the docs`_ section for details. diff --git a/joblib/_store_backends.py b/joblib/_store_backends.py index d48edcd..9196f0a 100644 --- a/joblib/_store_backends.py +++ b/joblib/_store_backends.py @@ -35,6 +35,8 @@ class StoreBackendBase(with_metaclass(ABCMeta)): """Helper Abstract Base Class which defines all methods that a StorageBackend must implement.""" + location = None + @abstractmethod def _open_item(self, f, mode): """Opens an item on the store and return a file-like object. @@ -327,7 +329,8 @@ class StoreBackendMixin(object): def __repr__(self): """Printable representation of the store location.""" - return self.location + return '{class_name}(location="{location}")'.format( + class_name=self.__class__.__name__, location=self.location) class FileSystemStoreBackend(StoreBackendBase, StoreBackendMixin): diff --git a/joblib/memory.py b/joblib/memory.py index 491238c..5ae6940 100644 --- a/joblib/memory.py +++ b/joblib/memory.py @@ -258,7 +258,7 @@ class MemorizedResult(Logger): return ('{class_name}(location="{location}", func="{func}", ' 'args_id="{args_id}")' .format(class_name=self.__class__.__name__, - location=self.store_backend, + location=self.store_backend.location, func=self.func, args_id=self.args_id )) @@ -769,9 +769,10 @@ class MemorizedFunc(Logger): # ------------------------------------------------------------------------ def __repr__(self): - return ("{0}(func={1}, location={2})".format(self.__class__.__name__, - self.func, - self.store_backend,)) + return '{class_name}(func={func}, location={location})'.format( + class_name=self.__class__.__name__, + func=self.func, + location=self.store_backend.location,) ############################################################################### @@ -963,9 +964,10 @@ class Memory(Logger): # ------------------------------------------------------------------------ def __repr__(self): - return '{0}(location={1})'.format( - self.__class__.__name__, (repr(None) if self.store_backend is None - else repr(self.store_backend))) + return '{class_name}(location={location})'.format( + class_name=self.__class__.__name__, + location=(None if self.store_backend is None + else self.store_backend.location)) def __getstate__(self): """ We don't store the timestamp when pickling, to avoid the hash
FileSystemStoreBackend.__repr__ should not return a string

Something that tripped me up when working on https://github.com/joblib/joblib/pull/746 is that `FileSystemStoreBackend.__repr__` makes it look like a plain string. I could not find a good reason for this. Do you remember something about this, @aabadie?

Here is a snippet to show what I mean:

```
In [1]: from joblib import Memory

In [2]: mem = Memory('/tmp/test')

In [3]: mem.store_backend
Out[3]: /tmp/test/joblib
```

When I was debugging I lost at least one hour until I thought of doing `type(mem.store_backend)` ...

Also, slightly less important:

```py
from joblib._store_backends import FileSystemStoreBackend
backend = FileSystemStoreBackend()
backend
# Exception in __repr__
AttributeError: 'FileSystemStoreBackend' object has no attribute 'location'
```
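Not part of the original issue or patch: a minimal standalone sketch of the repr pattern the diff above adopts (a class-level `location = None` default plus a `__repr__` that embeds the class name), so the behaviour can be tried without installing joblib. The class name below is a placeholder, not joblib's.

```python
# Standalone sketch of the __repr__ pattern used in the patch above.
class StoreBackend(object):
    # Class-level default so __repr__ works even before configure() is called.
    location = None

    def configure(self, location):
        self.location = location

    def __repr__(self):
        # Embedding the class name keeps the object from being mistaken
        # for a plain string such as '/tmp/test/joblib'.
        return '{class_name}(location="{location}")'.format(
            class_name=self.__class__.__name__, location=self.location)


backend = StoreBackend()
print(repr(backend))   # StoreBackend(location="None")
backend.configure('/tmp/test/joblib')
print(repr(backend))   # StoreBackend(location="/tmp/test/joblib")
```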
joblib/joblib
diff --git a/joblib/test/test_memory.py b/joblib/test/test_memory.py index b3ac081..2f0c1b7 100644 --- a/joblib/test/test_memory.py +++ b/joblib/test/test_memory.py @@ -24,7 +24,7 @@ from joblib.memory import register_store_backend, _STORE_BACKENDS from joblib.memory import _build_func_identifier, _store_backend_factory from joblib.memory import JobLibCollisionWarning from joblib.parallel import Parallel, delayed -from joblib._store_backends import StoreBackendBase +from joblib._store_backends import StoreBackendBase, FileSystemStoreBackend from joblib.test.common import with_numpy, np from joblib.test.common import with_multiprocessing from joblib.testing import parametrize, raises, warns @@ -989,6 +989,55 @@ def test_dummy_store_backend(): assert isinstance(backend_obj, DummyStoreBackend) +def test_filesystem_store_backend_repr(tmpdir): + # Verify string representation of a filesystem store backend. + + repr_pattern = 'FileSystemStoreBackend(location="{location}")' + backend = FileSystemStoreBackend() + assert backend.location is None + + repr(backend) # Should not raise an exception + + assert str(backend) == repr_pattern.format(location=None) + + # backend location is passed explicitely via the configure method (called + # by the internal _store_backend_factory function) + backend.configure(tmpdir.strpath) + + assert str(backend) == repr_pattern.format(location=tmpdir.strpath) + + repr(backend) # Should not raise an exception + + +def test_memory_objects_repr(tmpdir): + # Verify printable reprs of MemorizedResult, MemorizedFunc and Memory. + + def my_func(a, b): + return a + b + + memory = Memory(location=tmpdir.strpath, verbose=0) + memorized_func = memory.cache(my_func) + + memorized_func_repr = 'MemorizedFunc(func={func}, location={location})' + + assert str(memorized_func) == memorized_func_repr.format( + func=my_func, + location=memory.store_backend.location) + + memorized_result = memorized_func.call_and_shelve(42, 42) + + memorized_result_repr = ('MemorizedResult(location="{location}", ' + 'func="{func}", args_id="{args_id}")') + + assert str(memorized_result) == memorized_result_repr.format( + location=memory.store_backend.location, + func=memorized_result.func_id, + args_id=memorized_result.args_id) + + assert str(memory) == 'Memory(location={location})'.format( + location=memory.store_backend.location) + + def test_memorized_result_pickle(tmpdir): # Verify a MemoryResult object can be pickled/depickled. Non regression # test introduced following issue
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 3 }
0.12
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work -e git+https://github.com/joblib/joblib.git@f64e1b8cf286c02d2782c6dd5bf7a42e0518c51d#egg=joblib more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: joblib channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 prefix: /opt/conda/envs/joblib
[ "joblib/test/test_memory.py::test_filesystem_store_backend_repr" ]
[]
[ "joblib/test/test_memory.py::test_memory_integration", "joblib/test/test_memory.py::test_no_memory", "joblib/test/test_memory.py::test_memory_kwarg", "joblib/test/test_memory.py::test_memory_lambda", "joblib/test/test_memory.py::test_memory_name_collision", "joblib/test/test_memory.py::test_memory_warning_lambda_collisions", "joblib/test/test_memory.py::test_memory_warning_collision_detection", "joblib/test/test_memory.py::test_memory_partial", "joblib/test/test_memory.py::test_memory_eval", "joblib/test/test_memory.py::test_argument_change", "joblib/test/test_memory.py::test_memory_exception", "joblib/test/test_memory.py::test_memory_ignore", "joblib/test/test_memory.py::test_memory_args_as_kwargs", "joblib/test/test_memory.py::test_partial_decoration[ignore0-100-r]", "joblib/test/test_memory.py::test_partial_decoration[ignore1-10-None]", "joblib/test/test_memory.py::test_func_dir", "joblib/test/test_memory.py::test_persistence", "joblib/test/test_memory.py::test_call_and_shelve", "joblib/test/test_memory.py::test_call_and_shelve_argument_hash", "joblib/test/test_memory.py::test_memorized_pickling", "joblib/test/test_memory.py::test_memorized_repr", "joblib/test/test_memory.py::test_memory_file_modification", "joblib/test/test_memory.py::test_memory_in_memory_function_code_change", "joblib/test/test_memory.py::test_clear_memory_with_none_location", "joblib/test/test_memory.py::test_memory_func_with_kwonly_args", "joblib/test/test_memory.py::test_memory_func_with_signature", "joblib/test/test_memory.py::test__get_items", "joblib/test/test_memory.py::test__get_items_to_delete", "joblib/test/test_memory.py::test_memory_reduce_size", "joblib/test/test_memory.py::test_memory_clear", "joblib/test/test_memory.py::test_cached_function_race_condition_when_persisting_output", "joblib/test/test_memory.py::test_cached_function_race_condition_when_persisting_output_2", "joblib/test/test_memory.py::test_memory_recomputes_after_an_error_why_loading_results", "joblib/test/test_memory.py::test_deprecated_cachedir_behaviour", "joblib/test/test_memory.py::test_register_invalid_store_backends_key[None]", "joblib/test/test_memory.py::test_register_invalid_store_backends_key[invalid_prefix1]", "joblib/test/test_memory.py::test_register_invalid_store_backends_key[invalid_prefix2]", "joblib/test/test_memory.py::test_register_invalid_store_backends_object", "joblib/test/test_memory.py::test_memory_default_store_backend", "joblib/test/test_memory.py::test_instanciate_incomplete_store_backend", "joblib/test/test_memory.py::test_dummy_store_backend", "joblib/test/test_memory.py::test_memory_objects_repr", "joblib/test/test_memory.py::test_memorized_result_pickle", "joblib/test/test_memory.py::test_memory_pickle_dump_load[memory_kwargs0]", "joblib/test/test_memory.py::test_memory_pickle_dump_load[memory_kwargs1]" ]
[]
BSD 3-Clause "New" or "Revised" License
2,989
[ "README.rst", "joblib/memory.py", "joblib/_store_backends.py" ]
[ "README.rst", "joblib/memory.py", "joblib/_store_backends.py" ]
peterbe__hashin-74
a80c5efbb1630f75a9cfd4492627e9aff051c3e1
2018-08-29 15:07:40
a80c5efbb1630f75a9cfd4492627e9aff051c3e1
peterbe: @mythmon r? mythmon: Perhaps the log output above should be in a code block? I think Github is messing with your formatting by erasing the newlines, or something. peterbe: Ah, I didn't have the backticks inside there. And [apprently you need a leading newline before the ticks](https://github.com/dear-github/dear-github/issues/166#issuecomment-322367328). peterbe: You're right about the English prefixes before printing those things. It could be better but I'm also wary of explaining the verbose output too much. The most important thing is there and more English verbiage isn't likely to be much use. By the way, the original intent of the `--verbose` was for the "paranoid" to manually check the downloaded files and see that each one doesn't have malicious stuff in it. That feature kinda went away (slowly and quietly) when using pypi.org and extracting the `sha256` straight from the JSON. Now, if you're paranoid, you'd have to manually `wget` each of those files and basically check that what pypi is hosting is files that haven't been tampered with.
diff --git a/README.rst b/README.rst index 91b83ea..4be9060 100644 --- a/README.rst +++ b/README.rst @@ -221,6 +221,11 @@ put it directly into ``pip``. Version History =============== +0.13.x + + * Don't show URLs when using ``--verbose`` if files don't need to be + downloaded. See https://github.com/peterbe/hashin/issues/73 + 0.13.3 * Makes it possible to install ``nltk`` on Windows. `Thanks @chrispbailey! <https://github.com/peterbe/hashin/pull/72>`_ diff --git a/hashin.py b/hashin.py index c2a4a0d..12918f7 100755 --- a/hashin.py +++ b/hashin.py @@ -364,15 +364,17 @@ def get_package_data(package, verbose=False): def get_releases_hashes(releases, algorithm, verbose=False): for found in releases: - url = found['url'] - if verbose: - _verbose('Found URL', url) digests = found['digests'] try: found['hash'] = digests[algorithm] + if verbose: + _verbose('Found hash for', found['url']) except KeyError: # The algorithm is NOT in the 'digests' dict. # We have to download the file and use pip + url = found['url'] + if verbose: + _verbose('Found URL', url) download_dir = tempfile.gettempdir() filename = os.path.join( download_dir, @@ -390,7 +392,6 @@ def get_releases_hashes(releases, algorithm, verbose=False): if verbose: _verbose(' Hash', found['hash']) yield { - 'url': url, 'hash': found['hash'] }
Verbose option doesn't make sense any more when the algorithm is in the release

When we [started using pypi.org instead](https://github.com/peterbe/hashin/commit/66aa748a6d80e436283f26526b5af2b3b9a0d62f), one important change was that now we can (almost) always get the digests as part of the JSON payload. Before, we had to get each URL for each release file, download that file, and run `pip` on it to get the checksum. Now we can just get it from `.releases[<desired version>][N].digests.sha256`, for example. In https://pypi.org/pypi/psycopg2/json, for example, every release has an `md5` and a `sha256` digest. So we don't need to process a specific URL.
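Not part of the original issue or patch: a rough sketch of reading `sha256` digests straight from the PyPI JSON endpoint the issue refers to. It assumes Python 3, network access, and that the chosen release (the version string below is only an example) actually publishes a `sha256` digest.

```python
# Rough sketch: read per-file sha256 digests for one release from PyPI's JSON API.
import json
from urllib.request import urlopen

package = 'psycopg2'   # example package taken from the issue text
version = '2.7.5'      # example version, for illustration only

with urlopen('https://pypi.org/pypi/{}/json'.format(package)) as response:
    data = json.loads(response.read().decode('utf-8'))

for release_file in data['releases'][version]:
    # Each file entry carries a 'digests' mapping with 'md5' and 'sha256' keys.
    print(release_file['filename'], release_file['digests']['sha256'])
```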
peterbe/hashin
diff --git a/tests/test_cli.py b/tests/test_cli.py index 336eb2e..ad4ecfb 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -417,7 +417,7 @@ selenium==2.53.1 \ out_lines[1] ) self.assertTrue( - 'Found URL https://pypi.org/packages' in out_lines[1], + 'Found hash for https://pypi.org/packages/2.7/p/hashin/hashin-0.10-py2-none-any.whl' in out_lines[1], out_lines[1] ) # hash it got @@ -986,15 +986,12 @@ selenium==2.53.1 \ 'version': '0.10', 'hashes': [ { - 'url': 'https://pypi.org/packages/2.7/p/hashin/hashin-0.10-py2-none-any.whl', 'hash': 'aaaaa' }, { - 'url': 'https://pypi.org/packages/3.3/p/hashin/hashin-0.10-py3-none-any.whl', 'hash': 'bbbbb' }, { - 'url': 'https://pypi.org/packages/source/p/hashin/hashin-0.10.tar.gz', 'hash': 'ccccc' } ] @@ -1047,10 +1044,17 @@ selenium==2.53.1 \ murlopen.side_effect = mocked_get - result = hashin.get_package_hashes( - package='hashin', - version='0.10', - algorithm='sha512', + my_stdout = StringIO() + with redirect_stdout(my_stdout): + result = hashin.get_package_hashes( + package='hashin', + version='0.10', + algorithm='sha512', + verbose=True, + ) + out_lines = my_stdout.getvalue().splitlines() + self.assertTrue( + 'Found URL https://pypi.org/packages/2.7/p/hashin/hashin-0.10-py2-none-any.whl' in out_lines[1] ) expected = { @@ -1058,15 +1062,12 @@ selenium==2.53.1 \ 'version': '0.10', 'hashes': [ { - 'url': 'https://pypi.org/packages/3.3/p/hashin/hashin-0.10-py3-none-any.whl', 'hash': '0d63bf4c115154781846ecf573049324f06b021a1d4b92da4fae2bf491da2b83a13096b14d73e73cefad36855f4fa936bac4b2357dabf05a2b1e7329ff1e5455' }, { - 'url': 'https://pypi.org/packages/2.7/p/hashin/hashin-0.10-py2-none-any.whl', 'hash': '45d1c5d2237a3b4f78b4198709fb2ecf1f781c8234ce3d94356f2100a36739433952c6c13b2843952f608949e6baa9f95055a314487cd8fb3f9d76522d8edb50' }, { - 'url': 'https://pypi.org/packages/source/p/hashin/hashin-0.10.tar.gz', 'hash': 'c32e6d9fb09dc36ab9222c4606a1f43a2dcc183a8c64bdd9199421ef779072c174fa044b155babb12860cf000e36bc4d358694fa22420c997b1dd75b623d4daa' } ]
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 2 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "mock" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
exceptiongroup==1.2.2 -e git+https://github.com/peterbe/hashin.git@a80c5efbb1630f75a9cfd4492627e9aff051c3e1#egg=hashin iniconfig==2.1.0 mock==5.2.0 packaging==24.2 pip-api==0.0.34 pluggy==1.5.0 pytest==8.3.5 tomli==2.2.1
name: hashin channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - exceptiongroup==1.2.2 - iniconfig==2.1.0 - mock==5.2.0 - packaging==24.2 - pip-api==0.0.34 - pluggy==1.5.0 - pytest==8.3.5 - tomli==2.2.1 prefix: /opt/conda/envs/hashin
[ "tests/test_cli.py::Tests::test_get_package_hashes", "tests/test_cli.py::Tests::test_get_package_hashes_unknown_algorithm", "tests/test_cli.py::Tests::test_run" ]
[]
[ "tests/test_cli.py::Tests::test_amend_requirements_content_new", "tests/test_cli.py::Tests::test_amend_requirements_content_new_similar_name", "tests/test_cli.py::Tests::test_amend_requirements_content_replacement", "tests/test_cli.py::Tests::test_amend_requirements_content_replacement_2", "tests/test_cli.py::Tests::test_amend_requirements_content_replacement_amonst_others", "tests/test_cli.py::Tests::test_amend_requirements_content_replacement_amonst_others_2", "tests/test_cli.py::Tests::test_amend_requirements_content_replacement_single_to_multi", "tests/test_cli.py::Tests::test_expand_python_version", "tests/test_cli.py::Tests::test_filter_releases", "tests/test_cli.py::Tests::test_get_hashes_error", "tests/test_cli.py::Tests::test_get_latest_version_non_pre_release", "tests/test_cli.py::Tests::test_get_latest_version_non_pre_release_leading_zeros", "tests/test_cli.py::Tests::test_get_latest_version_only_pre_release", "tests/test_cli.py::Tests::test_get_latest_version_simple", "tests/test_cli.py::Tests::test_get_package_hashes_without_version", "tests/test_cli.py::Tests::test_main_packageerrors_stderr", "tests/test_cli.py::Tests::test_main_version", "tests/test_cli.py::Tests::test_non_200_ok_download", "tests/test_cli.py::Tests::test_release_url_metadata_python", "tests/test_cli.py::Tests::test_run_case_insensitive", "tests/test_cli.py::Tests::test_run_contained_names", "tests/test_cli.py::Tests::test_run_pep_0496", "tests/test_cli.py::Tests::test_run_without_specific_version" ]
[]
MIT License
2,990
[ "README.rst", "hashin.py" ]
[ "README.rst", "hashin.py" ]
zopefoundation__zope.schema-48
19c95112eea707be0e40166c59f0702a280a14f0
2018-08-29 15:14:33
0a719f2ded189630a0a77e9292a66a3662c6512c
diff --git a/CHANGES.rst b/CHANGES.rst index 9f9da33..d83b78b 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -66,6 +66,17 @@ to the collections ABCs of the same name; ``Tuple`` now extends ``Sequence`` and ``List`` now extends ``MutableSequence``. +- Add new field ``Collection``, implementing ``ICollection``. This is + the base class of ``Sequence``. Previously this was known as + ``AbstractCollection`` and was not public. It can be subclassed to + add ``value_type``, ``_type`` and ``unique`` attributes at the class + level, enabling a simpler constructor call. See `issue 23 + <https://github.com/zopefoundation/zope.schema/issues/23>`_. + +- Make ``Object`` respect a ``schema`` attribute defined by a + subclass, enabling a simpler constructor call. See `issue 23 + <https://github.com/zopefoundation/zope.schema/issues/23>`_. + 4.5.0 (2017-07-10) ================== diff --git a/docs/api.rst b/docs/api.rst index 35bd2be..a133111 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -139,6 +139,7 @@ Fields ====== .. autoclass:: zope.schema.Field +.. autoclass:: zope.schema.Collection .. autoclass:: zope.schema._field.AbstractCollection .. autoclass:: zope.schema.ASCII :no-show-inheritance: diff --git a/src/zope/schema/__init__.py b/src/zope/schema/__init__.py index 895459b..8fc6825 100644 --- a/src/zope/schema/__init__.py +++ b/src/zope/schema/__init__.py @@ -20,6 +20,7 @@ from zope.schema._field import Bool from zope.schema._field import Bytes from zope.schema._field import BytesLine from zope.schema._field import Choice +from zope.schema._field import Collection from zope.schema._field import Container from zope.schema._field import Date from zope.schema._field import Datetime @@ -75,6 +76,7 @@ __all__ = [ 'Bytes', 'BytesLine', 'Choice', + 'Collection', 'Container', 'Date', 'Datetime', diff --git a/src/zope/schema/_bootstrapfields.py b/src/zope/schema/_bootstrapfields.py index 6501310..6fee839 100644 --- a/src/zope/schema/_bootstrapfields.py +++ b/src/zope/schema/_bootstrapfields.py @@ -39,6 +39,15 @@ from zope.schema._compat import integer_types from zope.schema._schema import getFields +class _NotGiven(object): + + def __repr__(self): # pragma: no cover + return "<Not Given>" + + +_NotGiven = _NotGiven() + + class ValidatedProperty(object): def __init__(self, name, check=None, allow_none=False): @@ -97,7 +106,7 @@ class Field(Attribute): # Field constructor. A marker is helpful since we don't want to # overwrite missing_value if it is set differently on a Field # subclass and isn't specified via the constructor. - __missing_value_marker = object() + __missing_value_marker = _NotGiven # Note that the "order" field has a dual existance: # 1. 
The class variable Field.order is used as a source for the diff --git a/src/zope/schema/_field.py b/src/zope/schema/_field.py index 9570ce8..d80ee3c 100644 --- a/src/zope/schema/_field.py +++ b/src/zope/schema/_field.py @@ -45,6 +45,7 @@ from zope.schema.interfaces import IBool from zope.schema.interfaces import IBytes from zope.schema.interfaces import IBytesLine from zope.schema.interfaces import IChoice +from zope.schema.interfaces import ICollection from zope.schema.interfaces import IContextSourceBinder from zope.schema.interfaces import IDate from zope.schema.interfaces import IDatetime @@ -99,6 +100,7 @@ from zope.schema._bootstrapfields import Bool from zope.schema._bootstrapfields import Int from zope.schema._bootstrapfields import Password from zope.schema._bootstrapfields import MinMaxLen +from zope.schema._bootstrapfields import _NotGiven from zope.schema.fieldproperty import FieldProperty from zope.schema.vocabulary import getVocabularyRegistry from zope.schema.vocabulary import VocabularyRegistryError @@ -133,6 +135,7 @@ classImplements(Bool, IFromUnicode) classImplements(Int, IInt) + @implementer(ISourceText) class SourceText(Text): __doc__ = ISourceText.__doc__ @@ -538,21 +541,40 @@ def _validate_uniqueness(self, value): temp_values.append(item) -class AbstractCollection(MinMaxLen, Iterable): +@implementer(ICollection) +class Collection(MinMaxLen, Iterable): + """ + A generic collection implementing :class:`zope.schema.interfaces.ICollection`. + + Subclasses can define the attribute ``value_type`` to be a field + such as an :class:`Object` that will be checked for each member of + the collection. This can then be omitted from the constructor call. + + They can also define the attribute ``_type`` to be a concrete + class (or tuple of classes) that the collection itself will + be checked to be an instance of. This cannot be set in the constructor. + + .. versionchanged:: 4.6.0 + Add the ability for subclasses to specify ``value_type`` + and ``unique``, and allow eliding them from the constructor. + """ value_type = None unique = False - def __init__(self, value_type=None, unique=False, **kw): - super(AbstractCollection, self).__init__(**kw) + def __init__(self, value_type=_NotGiven, unique=_NotGiven, **kw): + super(Collection, self).__init__(**kw) # whine if value_type is not a field - if value_type is not None and not IField.providedBy(value_type): + if value_type is not _NotGiven: + self.value_type = value_type + + if self.value_type is not None and not IField.providedBy(self.value_type): raise ValueError("'value_type' must be field instance.") - self.value_type = value_type - self.unique = unique + if unique is not _NotGiven: + self.unique = unique def bind(self, object): """See zope.schema._bootstrapinterfaces.IField.""" - clone = super(AbstractCollection, self).bind(object) + clone = super(Collection, self).bind(object) # binding value_type is necessary for choices with named vocabularies, # and possibly also for other fields. if clone.value_type is not None: @@ -560,7 +582,7 @@ class AbstractCollection(MinMaxLen, Iterable): return clone def _validate(self, value): - super(AbstractCollection, self)._validate(value) + super(Collection, self)._validate(value) errors = _validate_sequence(self.value_type, value) if errors: try: @@ -572,8 +594,15 @@ class AbstractCollection(MinMaxLen, Iterable): _validate_uniqueness(self, value) +#: An alternate name for :class:`.Collection`. +#: +#: .. deprecated:: 4.6.0 +#: Use :class:`.Collection` instead. 
+AbstractCollection = Collection + + @implementer(ISequence) -class Sequence(AbstractCollection): +class Sequence(Collection): """ A field representing an ordered sequence. @@ -604,28 +633,26 @@ class List(MutableSequence): _type = list -@implementer(ISet) -class Set(AbstractCollection): - """A field representing a set.""" - _type = set +class _AbstractSet(Collection): + unique = True - def __init__(self, **kw): - if 'unique' in kw: # set members are always unique + def __init__(self, *args, **kwargs): + super(_AbstractSet, self).__init__(*args, **kwargs) + if not self.unique: # set members are always unique raise TypeError( "__init__() got an unexpected keyword argument 'unique'") - super(Set, self).__init__(unique=True, **kw) + + +@implementer(ISet) +class Set(_AbstractSet): + """A field representing a set.""" + _type = set @implementer(IFrozenSet) -class FrozenSet(AbstractCollection): +class FrozenSet(_AbstractSet): _type = frozenset - def __init__(self, **kw): - if 'unique' in kw: # set members are always unique - raise TypeError( - "__init__() got an unexpected keyword argument 'unique'") - super(FrozenSet, self).__init__(unique=True, **kw) - VALIDATED_VALUES = threading.local() @@ -677,10 +704,11 @@ def _validate_fields(schema, value): @implementer(IObject) class Object(Field): __doc__ = IObject.__doc__ + schema = None - def __init__(self, schema, **kw): + def __init__(self, schema=_NotGiven, **kw): """ - Object(schema, *, validate_invariants=True, **kwargs) + Object(schema=<Not Given>, *, validate_invariants=True, **kwargs) Create an `~.IObject` field. The keyword arguments are as for `~.Field`. @@ -688,7 +716,13 @@ class Object(Field): Add the keyword argument *validate_invariants*. When true (the default), the schema's ``validateInvariants`` method will be invoked to check the ``@invariant`` properties of the schema. + .. versionchanged:: 4.6.0 + The *schema* argument can be ommitted in a subclass + that specifies a ``schema`` attribute. """ + if schema is _NotGiven: + schema = self.schema + if not IInterface.providedBy(schema): raise WrongType
"AbstractCollection" should allow to define "value_type" in a derived class Currently, "AbstractCollection" looks like: ``` class AbstractCollection(MinMaxLen, Iterable) value_type = None unique = False def __init__(self, value_type=None, unique=False, **kw): super(AbstractCollection, self).__init__(**kw) # whine if value_type is not a field if value_type is not None and not IField.providedBy(value_type): raise ValueError("'value_type' must be field instance.") self.value_type = value_type self.unique = unique ``` This makes it difficult to define "value_type" in a derived class. I suggest to use a "marker" as default value for the "value_type" parameter of `AbstractCollection.__init__` and use the class default in this case.
zopefoundation/zope.schema
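Not part of the original issue or patch: a standalone sketch of the "marker" (sentinel) default suggested in the issue above and implemented in the patch as `_NotGiven`. The class names and the placeholder value type are illustrative only; the point is that a unique sentinel distinguishes "argument omitted" from "explicitly passed None", so a class-level default defined in a subclass survives.

```python
# Standalone sketch of the sentinel-default pattern from the issue and patch above.
_marker = object()


class Collection(object):
    # Class-level defaults that subclasses may override.
    value_type = None
    unique = False

    def __init__(self, value_type=_marker, unique=_marker):
        if value_type is not _marker:
            # Only an explicitly passed argument overrides the class default.
            self.value_type = value_type
        if unique is not _marker:
            self.unique = unique


class LinesOfText(Collection):
    # The subclass fixes value_type at class level, so callers may omit it.
    value_type = 'placeholder for a real field, e.g. Text()'


print(Collection().value_type)          # None (class default)
print(LinesOfText().value_type)         # the class-level placeholder
print(LinesOfText(unique=True).unique)  # True; value_type keeps the class default
```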
diff --git a/src/zope/schema/tests/test__field.py b/src/zope/schema/tests/test__field.py index dd9bd14..c035e85 100644 --- a/src/zope/schema/tests/test__field.py +++ b/src/zope/schema/tests/test__field.py @@ -1375,19 +1375,66 @@ class InterfaceFieldTests(unittest.TestCase): self.assertRaises(RequiredMissing, field.validate, None) -class AbstractCollectionTests(unittest.TestCase): +class CollectionTests(unittest.TestCase): + + _DEFAULT_UNIQUE = False def _getTargetClass(self): - from zope.schema._field import AbstractCollection - return AbstractCollection + from zope.schema._field import Collection + return Collection + + def _getTargetInterface(self): + from zope.schema.interfaces import ICollection + return ICollection def _makeOne(self, *args, **kw): return self._getTargetClass()(*args, **kw) + _makeCollection = list + + def test_class_conforms_to_iface(self): + from zope.interface.verify import verifyClass + verifyClass(self._getTargetInterface(), self._getTargetClass()) + + def test_instance_conforms_to_iface(self): + from zope.interface.verify import verifyObject + verifyObject(self._getTargetInterface(), self._makeOne()) + + + def test_schema_defined_by_subclass(self): + from zope import interface + from zope.schema import Object + from zope.schema.interfaces import WrongContainedType + + class IValueType(interface.Interface): + "The value type schema" + + the_value_type = Object(IValueType) + + class Field(self._getTargetClass()): + value_type = the_value_type + + field = Field() + self.assertIs(field.value_type, the_value_type) + + # Empty collection is fine + field.validate(self._makeCollection([])) + + # Collection with a non-implemented object is bad + self.assertRaises(WrongContainedType, field.validate, self._makeCollection([object()])) + + # Actual implementation works + @interface.implementer(IValueType) + class ValueType(object): + "The value type" + + + field.validate(self._makeCollection([ValueType()])) + def test_ctor_defaults(self): absc = self._makeOne() self.assertEqual(absc.value_type, None) - self.assertEqual(absc.unique, False) + self.assertEqual(absc.unique, self._DEFAULT_UNIQUE) def test_ctor_explicit(self): from zope.schema._bootstrapfields import Text @@ -1407,7 +1454,7 @@ class AbstractCollectionTests(unittest.TestCase): bound = absc.bind(context) self.assertEqual(bound.context, context) self.assertEqual(bound.value_type, None) - self.assertEqual(bound.unique, False) + self.assertEqual(bound.unique, self._DEFAULT_UNIQUE) def test_bind_w_value_Type(self): from zope.schema._bootstrapfields import Text @@ -1426,117 +1473,73 @@ class AbstractCollectionTests(unittest.TestCase): text = Text() absc = self._makeOne(text) with self.assertRaises(WrongContainedType) as exc: - absc.validate([1]) + absc.validate(self._makeCollection([1])) wct = exc.exception self.assertIs(wct.field, absc) - self.assertEqual(wct.value, [1]) + self.assertEqual(wct.value, self._makeCollection([1])) def test__validate_miss_uniqueness(self): from zope.schema.interfaces import NotUnique + from zope.schema.interfaces import WrongType from zope.schema._bootstrapfields import Text text = Text() absc = self._makeOne(text, True) - with self.assertRaises(NotUnique) as exc: + with self.assertRaises((NotUnique, WrongType)) as exc: absc.validate([u'a', u'a']) not_uniq = exc.exception self.assertIs(not_uniq.field, absc) - self.assertEqual(not_uniq.value, [u'a', u'a']) - - -class TupleTests(unittest.TestCase): - - def _getTargetClass(self): - from zope.schema._field import Tuple - return Tuple - - def 
_makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_class_conforms_to_ITuple(self): - from zope.interface.verify import verifyClass - from zope.schema.interfaces import ITuple - verifyClass(ITuple, self._getTargetClass()) - - def test_instance_conforms_to_ITuple(self): - from zope.interface.verify import verifyObject - from zope.schema.interfaces import ITuple - verifyObject(ITuple, self._makeOne()) - - def test_validate_wrong_types(self): - from zope.schema.interfaces import WrongType - - - field = self._makeOne() - self.assertRaises(WrongType, field.validate, u'') - self.assertRaises(WrongType, field.validate, b'') - self.assertRaises(WrongType, field.validate, 1) - self.assertRaises(WrongType, field.validate, 1.0) - self.assertRaises(WrongType, field.validate, []) - self.assertRaises(WrongType, field.validate, {}) - self.assertRaises(WrongType, field.validate, set()) - self.assertRaises(WrongType, field.validate, frozenset()) - self.assertRaises(WrongType, field.validate, object()) - - def test_validate_not_required(self): - field = self._makeOne(required=False) - field.validate(()) - field.validate((1, 2)) - field.validate((3,)) - field.validate(None) - - def test_validate_required(self): - from zope.schema.interfaces import RequiredMissing - field = self._makeOne() - field.validate(()) - field.validate((1, 2)) - field.validate((3,)) - with self.assertRaises(RequiredMissing) as exc: - field.validate(None) - - req_missing = exc.exception - self.assertIs(req_missing.field, field) - self.assertEqual(req_missing.value, None) + self.assertEqual(not_uniq.value, + [u'a', u'a']) def test_validate_min_length(self): from zope.schema.interfaces import TooShort field = self._makeOne(min_length=2) - field.validate((1, 2)) - field.validate((1, 2, 3)) - self.assertRaises(TooShort, field.validate, ()) - with self.assertRaises(TooShort) as exc: - field.validate((1,)) - - too_short = exc.exception - self.assertIs(too_short.field, field) - self.assertEqual(too_short.value, (1,)) + field.validate(self._makeCollection((1, 2))) + field.validate(self._makeCollection((1, 2, 3))) + self.assertRaises(TooShort, field.validate, self._makeCollection()) + self.assertRaises(TooShort, field.validate, self._makeCollection((1,))) def test_validate_max_length(self): from zope.schema.interfaces import TooLong field = self._makeOne(max_length=2) - field.validate(()) - field.validate((1, 2)) - self.assertRaises(TooLong, field.validate, (1, 2, 3, 4)) - with self.assertRaises(TooLong) as exc: - field.validate((1, 2, 3)) - - too_long = exc.exception - self.assertIs(too_long.field, field) - self.assertEqual(too_long.value, (1, 2, 3)) + field.validate(self._makeCollection()) + field.validate(self._makeCollection((1,))) + field.validate(self._makeCollection((1, 2))) + self.assertRaises(TooLong, field.validate, self._makeCollection((1, 2, 3, 4))) + self.assertRaises(TooLong, field.validate, self._makeCollection((1, 2, 3))) def test_validate_min_length_and_max_length(self): from zope.schema.interfaces import TooLong from zope.schema.interfaces import TooShort field = self._makeOne(min_length=1, max_length=2) - field.validate((1, )) - field.validate((1, 2)) - self.assertRaises(TooShort, field.validate, ()) - self.assertRaises(TooLong, field.validate, (1, 2, 3)) + field.validate(self._makeCollection((1,))) + field.validate(self._makeCollection((1, 2))) + self.assertRaises(TooShort, field.validate, self._makeCollection()) + self.assertRaises(TooLong, field.validate, self._makeCollection((1, 2, 3))) 
+ + def test_validate_not_required(self): + field = self._makeOne(required=False) + field.validate(self._makeCollection()) + field.validate(self._makeCollection((1, 2))) + field.validate(self._makeCollection((3,))) + field.validate(None) + def test_validate_required(self): + from zope.schema.interfaces import RequiredMissing + field = self._makeOne() + field.validate(self._makeCollection()) + field.validate(self._makeCollection((1, 2))) + field.validate(self._makeCollection((3,))) + field.validate(self._makeCollection()) + field.validate(self._makeCollection((1, 2))) + field.validate(self._makeCollection((3,))) + self.assertRaises(RequiredMissing, field.validate, None) -class SequenceTests(unittest.TestCase): + +class SequenceTests(CollectionTests): def _getTargetClass(self): from zope.schema._field import Sequence @@ -1546,17 +1549,6 @@ class SequenceTests(unittest.TestCase): from zope.schema.interfaces import ISequence return ISequence - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_class_conforms_to_iface(self): - from zope.interface.verify import verifyClass - verifyClass(self._getTargetInterface(), self._getTargetClass()) - - def test_instance_conforms_to_IList(self): - from zope.interface.verify import verifyObject - verifyObject(self._getTargetInterface(), self._makeOne()) - def test_validate_wrong_types(self): from zope.schema.interfaces import WrongType @@ -1568,47 +1560,6 @@ class SequenceTests(unittest.TestCase): self.assertRaises(WrongType, field.validate, frozenset()) self.assertRaises(WrongType, field.validate, object()) - def test_validate_not_required(self): - field = self._makeOne(required=False) - field.validate([]) - field.validate([1, 2]) - field.validate([3]) - field.validate(None) - - def test_validate_required(self): - from zope.schema.interfaces import RequiredMissing - field = self._makeOne() - field.validate([]) - field.validate([1, 2]) - field.validate([3]) - self.assertRaises(RequiredMissing, field.validate, None) - - def test_validate_min_length(self): - from zope.schema.interfaces import TooShort - field = self._makeOne(min_length=2) - field.validate([1, 2]) - field.validate([1, 2, 3]) - self.assertRaises(TooShort, field.validate, []) - self.assertRaises(TooShort, field.validate, [1, ]) - - def test_validate_max_length(self): - from zope.schema.interfaces import TooLong - field = self._makeOne(max_length=2) - field.validate([]) - field.validate([1]) - field.validate([1, 2]) - self.assertRaises(TooLong, field.validate, [1, 2, 3, 4]) - self.assertRaises(TooLong, field.validate, [1, 2, 3]) - - def test_validate_min_length_and_max_length(self): - from zope.schema.interfaces import TooLong - from zope.schema.interfaces import TooShort - field = self._makeOne(min_length=1, max_length=2) - field.validate([1]) - field.validate([1, 2]) - self.assertRaises(TooShort, field.validate, []) - self.assertRaises(TooLong, field.validate, [1, 2, 3]) - def test_sequence(self): from zope.schema._field import abc @@ -1643,6 +1594,38 @@ class SequenceTests(unittest.TestCase): field = self._makeOne() field.validate(sequence) + +class TupleTests(SequenceTests): + + _makeCollection = tuple + + def _getTargetClass(self): + from zope.schema._field import Tuple + return Tuple + + def _getTargetInterface(self): + from zope.schema.interfaces import ITuple + return ITuple + + def test_mutable_sequence(self): + from zope.schema.interfaces import WrongType + with self.assertRaises(WrongType): + super(TupleTests, self).test_mutable_sequence() + + def 
test_sequence(self): + from zope.schema.interfaces import WrongType + with self.assertRaises(WrongType): + super(TupleTests, self).test_sequence() + + def test_validate_wrong_types(self): + from zope.schema.interfaces import WrongType + field = self._makeOne() + self.assertRaises(WrongType, field.validate, u'') + self.assertRaises(WrongType, field.validate, b'') + self.assertRaises(WrongType, field.validate, []) + super(TupleTests, self).test_validate_wrong_types() + + class MutableSequenceTests(SequenceTests): def _getTargetClass(self): @@ -1659,7 +1642,6 @@ class MutableSequenceTests(SequenceTests): self.assertRaises(WrongType, field.validate, u'') self.assertRaises(WrongType, field.validate, b'') self.assertRaises(WrongType, field.validate, ()) - super(MutableSequenceTests, self).test_validate_wrong_types() def test_sequence(self): @@ -1684,34 +1666,28 @@ class ListTests(MutableSequenceTests): super(ListTests, self).test_mutable_sequence() -class SetTests(unittest.TestCase): +class SetTests(CollectionTests): + + _DEFAULT_UNIQUE = True + _makeCollection = set + _makeWrongSet = frozenset def _getTargetClass(self): from zope.schema._field import Set return Set - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_class_conforms_to_ISet(self): - from zope.interface.verify import verifyClass - from zope.schema.interfaces import ISet - verifyClass(ISet, self._getTargetClass()) - - def test_instance_conforms_to_ISet(self): - from zope.interface.verify import verifyObject + def _getTargetInterface(self): from zope.schema.interfaces import ISet - verifyObject(ISet, self._makeOne()) + return ISet def test_ctor_disallows_unique(self): self.assertRaises(TypeError, self._makeOne, unique=False) - self.assertRaises(TypeError, self._makeOne, unique=True) + self._makeOne(unique=True) # restating the obvious is allowed self.assertTrue(self._makeOne().unique) def test_validate_wrong_types(self): from zope.schema.interfaces import WrongType - field = self._makeOne() self.assertRaises(WrongType, field.validate, u'') self.assertRaises(WrongType, field.validate, b'') @@ -1720,133 +1696,22 @@ class SetTests(unittest.TestCase): self.assertRaises(WrongType, field.validate, ()) self.assertRaises(WrongType, field.validate, []) self.assertRaises(WrongType, field.validate, {}) - self.assertRaises(WrongType, field.validate, frozenset()) + self.assertRaises(WrongType, field.validate, self._makeWrongSet()) self.assertRaises(WrongType, field.validate, object()) - def test_validate_not_required(self): - field = self._makeOne(required=False) - field.validate(set()) - field.validate(set((1, 2))) - field.validate(set((3,))) - field.validate(None) - - def test_validate_required(self): - from zope.schema.interfaces import RequiredMissing - field = self._makeOne() - field.validate(set()) - field.validate(set((1, 2))) - field.validate(set((3,))) - field.validate(set()) - field.validate(set((1, 2))) - field.validate(set((3,))) - self.assertRaises(RequiredMissing, field.validate, None) - - def test_validate_min_length(self): - from zope.schema.interfaces import TooShort - field = self._makeOne(min_length=2) - field.validate(set((1, 2))) - field.validate(set((1, 2, 3))) - self.assertRaises(TooShort, field.validate, set()) - self.assertRaises(TooShort, field.validate, set((1,))) - - def test_validate_max_length(self): - from zope.schema.interfaces import TooLong - field = self._makeOne(max_length=2) - field.validate(set()) - field.validate(set((1,))) - field.validate(set((1, 2))) - 
self.assertRaises(TooLong, field.validate, set((1, 2, 3, 4))) - self.assertRaises(TooLong, field.validate, set((1, 2, 3))) - def test_validate_min_length_and_max_length(self): - from zope.schema.interfaces import TooLong - from zope.schema.interfaces import TooShort - field = self._makeOne(min_length=1, max_length=2) - field.validate(set((1,))) - field.validate(set((1, 2))) - self.assertRaises(TooShort, field.validate, set()) - self.assertRaises(TooLong, field.validate, set((1, 2, 3))) +class FrozenSetTests(SetTests): - -class FrozenSetTests(unittest.TestCase): + _makeCollection = frozenset + _makeWrongSet = set def _getTargetClass(self): from zope.schema._field import FrozenSet return FrozenSet - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_class_conforms_to_IFrozenSet(self): - from zope.interface.verify import verifyClass - from zope.schema.interfaces import IFrozenSet - verifyClass(IFrozenSet, self._getTargetClass()) - - def test_instance_conforms_to_IFrozenSet(self): - from zope.interface.verify import verifyObject + def _getTargetInterface(self): from zope.schema.interfaces import IFrozenSet - verifyObject(IFrozenSet, self._makeOne()) - - def test_ctor_disallows_unique(self): - self.assertRaises(TypeError, self._makeOne, unique=False) - self.assertRaises(TypeError, self._makeOne, unique=True) - self.assertTrue(self._makeOne().unique) - - def test_validate_wrong_types(self): - from zope.schema.interfaces import WrongType - - - field = self._makeOne() - self.assertRaises(WrongType, field.validate, u'') - self.assertRaises(WrongType, field.validate, b'') - self.assertRaises(WrongType, field.validate, 1) - self.assertRaises(WrongType, field.validate, 1.0) - self.assertRaises(WrongType, field.validate, ()) - self.assertRaises(WrongType, field.validate, []) - self.assertRaises(WrongType, field.validate, {}) - self.assertRaises(WrongType, field.validate, set()) - self.assertRaises(WrongType, field.validate, object()) - - def test_validate_not_required(self): - field = self._makeOne(required=False) - field.validate(frozenset()) - field.validate(frozenset((1, 2))) - field.validate(frozenset((3,))) - field.validate(None) - - def test_validate_required(self): - from zope.schema.interfaces import RequiredMissing - field = self._makeOne() - field.validate(frozenset()) - field.validate(frozenset((1, 2))) - field.validate(frozenset((3,))) - self.assertRaises(RequiredMissing, field.validate, None) - - def test_validate_min_length(self): - from zope.schema.interfaces import TooShort - field = self._makeOne(min_length=2) - field.validate(frozenset((1, 2))) - field.validate(frozenset((1, 2, 3))) - self.assertRaises(TooShort, field.validate, frozenset()) - self.assertRaises(TooShort, field.validate, frozenset((1,))) - - def test_validate_max_length(self): - from zope.schema.interfaces import TooLong - field = self._makeOne(max_length=2) - field.validate(frozenset()) - field.validate(frozenset((1,))) - field.validate(frozenset((1, 2))) - self.assertRaises(TooLong, field.validate, frozenset((1, 2, 3, 4))) - self.assertRaises(TooLong, field.validate, frozenset((1, 2, 3))) - - def test_validate_min_length_and_max_length(self): - from zope.schema.interfaces import TooLong - from zope.schema.interfaces import TooShort - field = self._makeOne(min_length=1, max_length=2) - field.validate(frozenset((1,))) - field.validate(frozenset((1, 2))) - self.assertRaises(TooShort, field.validate, frozenset()) - self.assertRaises(TooLong, field.validate, frozenset((1, 2, 3))) + 
return IFrozenSet class ObjectTests(unittest.TestCase): @@ -2191,13 +2056,13 @@ class ObjectTests(unittest.TestCase): bar = Bytes() @invariant - def check_foo(o): - if o.foo == u'bar': + def check_foo(self): + if self.foo == u'bar': raise Invalid("Foo is not valid") @invariant - def check_bar(o): - if o.bar == b'foo': + def check_bar(self): + if self.bar == b'foo': raise Invalid("Bar is not valid") @implementer(ISchema) @@ -2235,6 +2100,30 @@ class ObjectTests(unittest.TestCase): field = self._makeOne(ISchema, validate_invariants=False) field.validate(inst) + def test_schema_defined_by_subclass(self): + from zope import interface + from zope.schema.interfaces import SchemaNotProvided + + class IValueType(interface.Interface): + "The value type schema" + + class Field(self._getTargetClass()): + schema = IValueType + + field = Field() + self.assertIs(field.schema, IValueType) + + # Non implementation is bad + self.assertRaises(SchemaNotProvided, field.validate, object()) + + # Actual implementation works + @interface.implementer(IValueType) + class ValueType(object): + "The value type" + + + field.validate(ValueType()) + class MappingTests(unittest.TestCase): @@ -2452,6 +2341,7 @@ def _makeDummyRegistry(v): class DummyRegistry(VocabularyRegistry): def __init__(self, vocabulary): + VocabularyRegistry.__init__(self) self._vocabulary = vocabulary def get(self, object, name):
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 3 }, "num_modified_files": 5 }
4.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[test]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 importlib-metadata==4.8.3 iniconfig==1.1.1 packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 six==1.17.0 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0 zope.event==4.6 zope.exceptions==4.6 zope.interface==5.5.2 -e git+https://github.com/zopefoundation/zope.schema.git@19c95112eea707be0e40166c59f0702a280a14f0#egg=zope.schema zope.testing==5.0.1 zope.testrunner==5.6
name: zope.schema channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - six==1.17.0 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 - zope-event==4.6 - zope-exceptions==4.6 - zope-interface==5.5.2 - zope-testing==5.0.1 - zope-testrunner==5.6 prefix: /opt/conda/envs/zope.schema
[ "src/zope/schema/tests/test__field.py::CollectionTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::CollectionTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::CollectionTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::CollectionTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::CollectionTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::CollectionTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::CollectionTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::CollectionTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::CollectionTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::CollectionTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::CollectionTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::CollectionTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::CollectionTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::CollectionTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::CollectionTests::test_validate_required", "src/zope/schema/tests/test__field.py::SequenceTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::TupleTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::ListTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::SetTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::SetTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::SetTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::SetTests::test_ctor_disallows_unique", "src/zope/schema/tests/test__field.py::SetTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::SetTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::SetTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::FrozenSetTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::FrozenSetTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_ctor_disallows_unique", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::ObjectTests::test_schema_defined_by_subclass" ]
[]
[ "src/zope/schema/tests/test__field.py::BytesTests::test_class_conforms_to_IBytes", "src/zope/schema/tests/test__field.py::BytesTests::test_fromUnicode_hit", "src/zope/schema/tests/test__field.py::BytesTests::test_fromUnicode_miss", "src/zope/schema/tests/test__field.py::BytesTests::test_instance_conforms_to_IBytes", "src/zope/schema/tests/test__field.py::BytesTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::BytesTests::test_validate_required", "src/zope/schema/tests/test__field.py::BytesTests::test_validate_w_invalid_default", "src/zope/schema/tests/test__field.py::BytesTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::ASCIITests::test__validate_empty", "src/zope/schema/tests/test__field.py::ASCIITests::test__validate_non_empty_hit", "src/zope/schema/tests/test__field.py::ASCIITests::test__validate_non_empty_miss", "src/zope/schema/tests/test__field.py::ASCIITests::test_class_conforms_to_IASCII", "src/zope/schema/tests/test__field.py::ASCIITests::test_instance_conforms_to_IASCII", "src/zope/schema/tests/test__field.py::ASCIITests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::BytesLineTests::test_class_conforms_to_IBytesLine", "src/zope/schema/tests/test__field.py::BytesLineTests::test_constraint", "src/zope/schema/tests/test__field.py::BytesLineTests::test_instance_conforms_to_IBytesLine", "src/zope/schema/tests/test__field.py::BytesLineTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::BytesLineTests::test_validate_required", "src/zope/schema/tests/test__field.py::BytesLineTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_class_conforms_to_IASCIILine", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_constraint", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_instance_conforms_to_IASCIILine", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_validate_required", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::FloatTests::test_class_conforms_to_IFloat", "src/zope/schema/tests/test__field.py::FloatTests::test_fromUnicode_hit", "src/zope/schema/tests/test__field.py::FloatTests::test_fromUnicode_miss", "src/zope/schema/tests/test__field.py::FloatTests::test_instance_conforms_to_IFloat", "src/zope/schema/tests/test__field.py::FloatTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::FloatTests::test_validate_max", "src/zope/schema/tests/test__field.py::FloatTests::test_validate_min", "src/zope/schema/tests/test__field.py::FloatTests::test_validate_min_and_max", "src/zope/schema/tests/test__field.py::FloatTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::FloatTests::test_validate_required", "src/zope/schema/tests/test__field.py::DecimalTests::test_class_conforms_to_IDecimal", "src/zope/schema/tests/test__field.py::DecimalTests::test_fromUnicode_hit", "src/zope/schema/tests/test__field.py::DecimalTests::test_fromUnicode_miss", "src/zope/schema/tests/test__field.py::DecimalTests::test_instance_conforms_to_IDecimal", "src/zope/schema/tests/test__field.py::DecimalTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::DecimalTests::test_validate_max", "src/zope/schema/tests/test__field.py::DecimalTests::test_validate_min", 
"src/zope/schema/tests/test__field.py::DecimalTests::test_validate_min_and_max", "src/zope/schema/tests/test__field.py::DecimalTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::DecimalTests::test_validate_required", "src/zope/schema/tests/test__field.py::DatetimeTests::test_class_conforms_to_IDatetime", "src/zope/schema/tests/test__field.py::DatetimeTests::test_instance_conforms_to_IDatetime", "src/zope/schema/tests/test__field.py::DatetimeTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_required", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_w_max", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_w_min", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_w_min_and_max", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::DateTests::test_class_conforms_to_IDate", "src/zope/schema/tests/test__field.py::DateTests::test_instance_conforms_to_IDate", "src/zope/schema/tests/test__field.py::DateTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::DateTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::DateTests::test_validate_required", "src/zope/schema/tests/test__field.py::DateTests::test_validate_w_max", "src/zope/schema/tests/test__field.py::DateTests::test_validate_w_min", "src/zope/schema/tests/test__field.py::DateTests::test_validate_w_min_and_max", "src/zope/schema/tests/test__field.py::DateTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_class_conforms_to_ITimedelta", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_instance_conforms_to_ITimedelta", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_validate_max", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_validate_min", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_validate_min_and_max", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_validate_required", "src/zope/schema/tests/test__field.py::TimeTests::test_class_conforms_to_ITime", "src/zope/schema/tests/test__field.py::TimeTests::test_instance_conforms_to_ITime", "src/zope/schema/tests/test__field.py::TimeTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::TimeTests::test_validate_max", "src/zope/schema/tests/test__field.py::TimeTests::test_validate_min", "src/zope/schema/tests/test__field.py::TimeTests::test_validate_min_and_max", "src/zope/schema/tests/test__field.py::TimeTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::TimeTests::test_validate_required", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_int", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_mixed", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_source_is_ICSB_bound", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_source_is_ICSB_unbound", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_string", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_tuple", 
"src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_w_named_vocabulary", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_w_named_vocabulary_invalid", "src/zope/schema/tests/test__field.py::ChoiceTests::test_bind_w_preconstructed_vocabulary", "src/zope/schema/tests/test__field.py::ChoiceTests::test_bind_w_voc_is_ICSB", "src/zope/schema/tests/test__field.py::ChoiceTests::test_bind_w_voc_is_ICSB_but_not_ISource", "src/zope/schema/tests/test__field.py::ChoiceTests::test_bind_w_voc_not_ICSB", "src/zope/schema/tests/test__field.py::ChoiceTests::test_class_conforms_to_IChoice", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_both_vocabulary_and_source", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_both_vocabulary_and_values", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_invalid_source", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_invalid_vocabulary", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_w_named_vocabulary", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_w_preconstructed_vocabulary", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_w_unicode_non_ascii_values", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_w_values", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_wo_values_vocabulary_or_source", "src/zope/schema/tests/test__field.py::ChoiceTests::test_fromUnicode_hit", "src/zope/schema/tests/test__field.py::ChoiceTests::test_fromUnicode_miss", "src/zope/schema/tests/test__field.py::ChoiceTests::test_instance_conforms_to_IChoice", "src/zope/schema/tests/test__field.py::URITests::test_class_conforms_to_IURI", "src/zope/schema/tests/test__field.py::URITests::test_fromUnicode_invalid", "src/zope/schema/tests/test__field.py::URITests::test_fromUnicode_ok", "src/zope/schema/tests/test__field.py::URITests::test_instance_conforms_to_IURI", "src/zope/schema/tests/test__field.py::URITests::test_validate_not_a_uri", "src/zope/schema/tests/test__field.py::URITests::test_validate_not_required", "src/zope/schema/tests/test__field.py::URITests::test_validate_required", "src/zope/schema/tests/test__field.py::URITests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::DottedNameTests::test_class_conforms_to_IDottedName", "src/zope/schema/tests/test__field.py::DottedNameTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::DottedNameTests::test_ctor_max_dots_invalid", "src/zope/schema/tests/test__field.py::DottedNameTests::test_ctor_max_dots_valid", "src/zope/schema/tests/test__field.py::DottedNameTests::test_ctor_min_dots_invalid", "src/zope/schema/tests/test__field.py::DottedNameTests::test_ctor_min_dots_valid", "src/zope/schema/tests/test__field.py::DottedNameTests::test_fromUnicode_dotted_name_ok", "src/zope/schema/tests/test__field.py::DottedNameTests::test_fromUnicode_invalid", "src/zope/schema/tests/test__field.py::DottedNameTests::test_instance_conforms_to_IDottedName", "src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_not_a_dotted_name", "src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_required", "src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_w_max_dots", "src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_w_min_dots", "src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_wrong_types", 
"src/zope/schema/tests/test__field.py::IdTests::test_class_conforms_to_IId", "src/zope/schema/tests/test__field.py::IdTests::test_fromUnicode_dotted_name_ok", "src/zope/schema/tests/test__field.py::IdTests::test_fromUnicode_invalid", "src/zope/schema/tests/test__field.py::IdTests::test_fromUnicode_url_ok", "src/zope/schema/tests/test__field.py::IdTests::test_instance_conforms_to_IId", "src/zope/schema/tests/test__field.py::IdTests::test_validate_not_a_uri", "src/zope/schema/tests/test__field.py::IdTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::IdTests::test_validate_required", "src/zope/schema/tests/test__field.py::IdTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_class_conforms_to_IInterfaceField", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_instance_conforms_to_IInterfaceField", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_validate_required", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::SequenceTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::SequenceTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::SequenceTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::SequenceTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::SequenceTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::SequenceTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::SequenceTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::SequenceTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::SequenceTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::SequenceTests::test_mutable_sequence", "src/zope/schema/tests/test__field.py::SequenceTests::test_sequence", "src/zope/schema/tests/test__field.py::SequenceTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::SequenceTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::SequenceTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::SequenceTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::SequenceTests::test_validate_required", "src/zope/schema/tests/test__field.py::SequenceTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::TupleTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::TupleTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::TupleTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::TupleTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::TupleTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::TupleTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::TupleTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::TupleTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::TupleTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::TupleTests::test_mutable_sequence", "src/zope/schema/tests/test__field.py::TupleTests::test_sequence", "src/zope/schema/tests/test__field.py::TupleTests::test_validate_max_length", 
"src/zope/schema/tests/test__field.py::TupleTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::TupleTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::TupleTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::TupleTests::test_validate_required", "src/zope/schema/tests/test__field.py::TupleTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_mutable_sequence", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_sequence", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_required", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::ListTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::ListTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::ListTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::ListTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::ListTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::ListTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::ListTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::ListTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::ListTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::ListTests::test_mutable_sequence", "src/zope/schema/tests/test__field.py::ListTests::test_sequence", "src/zope/schema/tests/test__field.py::ListTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::ListTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::ListTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::ListTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::ListTests::test_validate_required", "src/zope/schema/tests/test__field.py::ListTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::SetTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::SetTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::SetTests::test_ctor_defaults", 
"src/zope/schema/tests/test__field.py::SetTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::SetTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::SetTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::SetTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::SetTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::SetTests::test_validate_required", "src/zope/schema/tests/test__field.py::SetTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_required", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::ObjectTests::test__validate_w_empty_schema", "src/zope/schema/tests/test__field.py::ObjectTests::test__validate_w_value_not_providing_schema", "src/zope/schema/tests/test__field.py::ObjectTests::test__validate_w_value_providing_schema", "src/zope/schema/tests/test__field.py::ObjectTests::test__validate_w_value_providing_schema_but_invalid_fields", "src/zope/schema/tests/test__field.py::ObjectTests::test__validate_w_value_providing_schema_but_missing_fields", "src/zope/schema/tests/test__field.py::ObjectTests::test_class_conforms_to_IObject", "src/zope/schema/tests/test__field.py::ObjectTests::test_ctor_w_bad_schema", "src/zope/schema/tests/test__field.py::ObjectTests::test_instance_conforms_to_IObject", "src/zope/schema/tests/test__field.py::ObjectTests::test_set_allows_IBOAE_subscr_to_replace_value", "src/zope/schema/tests/test__field.py::ObjectTests::test_set_emits_IBOAE", "src/zope/schema/tests/test__field.py::ObjectTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::ObjectTests::test_validate_required", "src/zope/schema/tests/test__field.py::ObjectTests::test_validate_w_cycles", "src/zope/schema/tests/test__field.py::ObjectTests::test_validate_w_cycles_collection_not_valid", "src/zope/schema/tests/test__field.py::ObjectTests::test_validate_w_cycles_object_not_valid", "src/zope/schema/tests/test__field.py::ObjectTests::test_validates_invariants_by_default", "src/zope/schema/tests/test__field.py::MappingTests::test_bind_binds_key_and_value_types", "src/zope/schema/tests/test__field.py::MappingTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::MappingTests::test_ctor_key_type_not_IField", "src/zope/schema/tests/test__field.py::MappingTests::test_ctor_value_type_not_IField", "src/zope/schema/tests/test__field.py::MappingTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::MappingTests::test_mapping", "src/zope/schema/tests/test__field.py::MappingTests::test_mutable_mapping", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_invalid_key_type", 
"src/zope/schema/tests/test__field.py::MappingTests::test_validate_invalid_value_type", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_required", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_bind_binds_key_and_value_types", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_ctor_key_type_not_IField", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_ctor_value_type_not_IField", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_mapping", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_mutable_mapping", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_invalid_key_type", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_invalid_value_type", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_required", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::DictTests::test_bind_binds_key_and_value_types", "src/zope/schema/tests/test__field.py::DictTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::DictTests::test_ctor_key_type_not_IField", "src/zope/schema/tests/test__field.py::DictTests::test_ctor_value_type_not_IField", "src/zope/schema/tests/test__field.py::DictTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::DictTests::test_mapping", "src/zope/schema/tests/test__field.py::DictTests::test_mutable_mapping", "src/zope/schema/tests/test__field.py::DictTests::test_validate_invalid_key_type", "src/zope/schema/tests/test__field.py::DictTests::test_validate_invalid_value_type", "src/zope/schema/tests/test__field.py::DictTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::DictTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::DictTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::DictTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::DictTests::test_validate_required", "src/zope/schema/tests/test__field.py::DictTests::test_validate_wrong_types" ]
[]
Zope Public License 2.1
2,991
[ "src/zope/schema/_field.py", "src/zope/schema/_bootstrapfields.py", "CHANGES.rst", "docs/api.rst", "src/zope/schema/__init__.py" ]
[ "src/zope/schema/_field.py", "src/zope/schema/_bootstrapfields.py", "CHANGES.rst", "docs/api.rst", "src/zope/schema/__init__.py" ]
dask__dask-3919
09100d02ad8f2b23da70234707888b1374dd46bd
2018-08-29 16:23:29
df1cee3b55706443303b85563e7c01e26611603d
TomAugspurger: I *think* this is what we want to do here. This may surprise some users, if their callable either expects or incidentally creates a pandas dataframe. If a concrete dataframe really is needed, then `df.map_blocks(pd.DataFrame.assign, column=callable)` should be used instead I think. mrocklin: Is anyone available to review this?
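The pattern Tom mentions — applying `pd.DataFrame.assign` to each concrete partition so the callable sees a real pandas object — can be sketched as below. This is only an illustration, not code from this PR: it uses `map_partitions` (the dask.dataframe counterpart of the `map_blocks` name used in the comment), and the column name `B` and the doubling callable are made up for the example.

```python
import pandas as pd
import dask.dataframe as dd

df = dd.from_pandas(pd.DataFrame({"A": range(10)}), npartitions=2)

# pd.DataFrame.assign runs once per partition, so the callable receives a
# concrete pandas DataFrame rather than a dask DataFrame.
per_partition = df.map_partitions(pd.DataFrame.assign, B=lambda part: part.A * 2)
print(per_partition.compute())
```

This per-partition evaluation is also exactly what makes operations that need to see the whole frame (such as `shift(freq=...)`) behave differently from `df.assign(B=callable)` once the callable is evaluated against the dask frame itself.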
diff --git a/dask/dataframe/categorical.py b/dask/dataframe/categorical.py index 9900b54af..f372aa2ca 100644 --- a/dask/dataframe/categorical.py +++ b/dask/dataframe/categorical.py @@ -184,7 +184,7 @@ class CategoricalAccessor(Accessor): Keywords to pass on to the call to `compute`. """ if self.known: - return self + return self._series categories = self._property_map('categories').unique().compute(**kwargs) return self.set_categories(categories.values) diff --git a/dask/dataframe/core.py b/dask/dataframe/core.py index 807111334..855eed317 100644 --- a/dask/dataframe/core.py +++ b/dask/dataframe/core.py @@ -2681,6 +2681,9 @@ class DataFrame(_Frame): callable(v) or pd.api.types.is_scalar(v)): raise TypeError("Column assignment doesn't support type " "{0}".format(type(v).__name__)) + if callable(v): + kwargs[k] = v(self) + pairs = list(sum(kwargs.items(), ())) # Figure out columns of the output
`.assign` leads to different results when using lambda vs not using lambda

`.assign` leads to different results when using lambda vs not using lambda. Please see the example below. The difference seems to happen only in the divisions limits. I guess this is a bug? Any idea on why this is happening?

### Imports

```python
import pandas as pd
import numpy as np
import dask.dataframe as dd
```

### Create Dataframe

```python
idx = pd.date_range(start='2013-01-01 00:00:00', end='2013-01-01 06:00:00', freq='30t')
df_pd = pd.DataFrame({'col_1':[1,2,3,4,5,6,7,8,9,10,11,12,13,]}, index=idx)
df = dd.from_pandas(df_pd, npartitions=2)
df.compute()
```

|                     | col_1 |
| ------------------- | ----- |
| 2013-01-01 00:00:00 | 1 |
| 2013-01-01 00:30:00 | 2 |
| 2013-01-01 01:00:00 | 3 |
| 2013-01-01 01:30:00 | 4 |
| 2013-01-01 02:00:00 | 5 |
| 2013-01-01 02:30:00 | 6 |
| 2013-01-01 03:00:00 | 7 |
| 2013-01-01 03:30:00 | 8 |
| 2013-01-01 04:00:00 | 9 |
| 2013-01-01 04:30:00 | 10 |
| 2013-01-01 05:00:00 | 11 |
| 2013-01-01 05:30:00 | 12 |
| 2013-01-01 06:00:00 | 13 |

```python
df.divisions
```

(Timestamp('2013-01-01 00:00:00', freq='30T'), Timestamp('2013-01-01 03:30:00', freq='30T'), Timestamp('2013-01-01 06:00:00', freq='30T'))

### This is what the shifted column looks like:

```python
df.col_1.shift(freq='1h').compute().to_frame()
```

|                     | col_1 |
| ------------------- | ----- |
| 2013-01-01 01:00:00 | 1 |
| 2013-01-01 01:30:00 | 2 |
| 2013-01-01 02:00:00 | 3 |
| 2013-01-01 02:30:00 | 4 |
| 2013-01-01 03:00:00 | 5 |
| 2013-01-01 03:30:00 | 6 |
| 2013-01-01 04:00:00 | 7 |
| 2013-01-01 04:30:00 | 8 |
| 2013-01-01 05:00:00 | 9 |
| 2013-01-01 05:30:00 | 10 |
| 2013-01-01 06:00:00 | 11 |
| 2013-01-01 06:30:00 | 12 |
| 2013-01-01 07:00:00 | 13 |

### This is what assigning the shifted column without using lambda, looks like:

```python
df.assign(shifted=df.col_1.shift(freq='1h')).compute()
```

|                     | col_1 | shifted |
| ------------------- | ----- | ------- |
| 2013-01-01 00:00:00 | 1 | NaN |
| 2013-01-01 00:30:00 | 2 | NaN |
| 2013-01-01 01:00:00 | 3 | 1.0 |
| 2013-01-01 01:30:00 | 4 | 2.0 |
| 2013-01-01 02:00:00 | 5 | 3.0 |
| 2013-01-01 02:30:00 | 6 | 4.0 |
| 2013-01-01 03:00:00 | 7 | 5.0 |
| 2013-01-01 03:30:00 | 8 | 6.0 |
| 2013-01-01 04:00:00 | 9 | 7.0 |
| 2013-01-01 04:30:00 | 10 | 8.0 |
| 2013-01-01 05:00:00 | 11 | 9.0 |
| 2013-01-01 05:30:00 | 12 | 10.0 |
| 2013-01-01 06:00:00 | 13 | 11.0 |

### This is what assigning the shifted column using lambda, looks like:

```python
df.assign(shifted=lambda x: x.col_1.shift(freq='1h')).compute()
```

|                     | col_1 | shifted |
| ------------------- | ----- | ------- |
| 2013-01-01 00:00:00 | 1 | NaN |
| 2013-01-01 00:30:00 | 2 | NaN |
| 2013-01-01 01:00:00 | 3 | 1.0 |
| 2013-01-01 01:30:00 | 4 | 2.0 |
| 2013-01-01 02:00:00 | 5 | 3.0 |
| 2013-01-01 02:30:00 | 6 | 4.0 |
| 2013-01-01 03:00:00 | 7 | 5.0 |
| 2013-01-01 03:30:00 | 8 | NaN |
| 2013-01-01 04:00:00 | 9 | NaN |
| 2013-01-01 04:30:00 | 10 | 8.0 |
| 2013-01-01 05:00:00 | 11 | 9.0 |
| 2013-01-01 05:30:00 | 12 | 10.0 |
| 2013-01-01 06:00:00 | 13 | 11.0 |
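The NaNs at the division boundary in the lambda case are consistent with the callable being evaluated inside each pandas partition rather than against the whole dask frame. The rough sketch below reproduces that per-partition behaviour explicitly; it is an illustration of the suspected mechanism (reusing the `map_partitions` + `pd.DataFrame.assign` pattern from the hint above on the same toy frame), not code taken from dask itself.

```python
import pandas as pd
import dask.dataframe as dd

idx = pd.date_range('2013-01-01 00:00:00', '2013-01-01 06:00:00', freq='30min')
df = dd.from_pandas(pd.DataFrame({'col_1': range(1, 14)}, index=idx), npartitions=2)

# Evaluating the callable partition by partition: each partition shifts
# independently, so the rows just after the internal division come back NaN,
# matching the lambda output above.
per_partition = df.map_partitions(
    pd.DataFrame.assign, shifted=lambda part: part.col_1.shift(freq='1h')
)
print(per_partition.compute())
```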
dask/dask
diff --git a/dask/dataframe/tests/test_categorical.py b/dask/dataframe/tests/test_categorical.py index b84011818..3b643e82e 100644 --- a/dask/dataframe/tests/test_categorical.py +++ b/dask/dataframe/tests/test_categorical.py @@ -271,6 +271,14 @@ def assert_array_index_eq(left, right): assert_eq(left, pd.Index(right) if isinstance(right, np.ndarray) else right) +def test_return_type_known_categories(): + df = pd.DataFrame({"A": ['a', 'b', 'c']}) + df['A'] = df['A'].astype('category') + dask_df = dd.from_pandas(df, 2) + ret_type = dask_df.A.cat.as_known() + assert isinstance(ret_type, dd.core.Series) + + class TestCategoricalAccessor: @pytest.mark.parametrize('series', cat_series) diff --git a/dask/dataframe/tests/test_dataframe.py b/dask/dataframe/tests/test_dataframe.py index 41a50385d..a9e61cc86 100644 --- a/dask/dataframe/tests/test_dataframe.py +++ b/dask/dataframe/tests/test_dataframe.py @@ -925,6 +925,13 @@ def test_assign(): d.assign(foo=d_unknown.a) +def test_assign_callable(): + df = dd.from_pandas(pd.DataFrame({"A": range(10)}), npartitions=2) + a = df.assign(B=df.A.shift()) + b = df.assign(B=lambda x: x.A.shift()) + assert_eq(a, b) + + def test_map(): assert_eq(d.a.map(lambda x: x + 1), full.a.map(lambda x: x + 1)) lk = dict((v, v + 1) for v in full.a.values)
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 1 }, "num_modified_files": 2 }
1.23
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[complete]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-xdist", "pytest-cov", "pytest-mock" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 click==8.0.4 cloudpickle==2.2.1 coverage==6.2 -e git+https://github.com/dask/dask.git@09100d02ad8f2b23da70234707888b1374dd46bd#egg=dask distributed==1.28.1 execnet==1.9.0 HeapDict==1.0.1 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work locket==1.0.0 more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work msgpack==1.0.5 numpy==1.19.5 packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pandas==1.1.5 partd==1.2.0 pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work psutil==7.0.0 py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 pytest-cov==4.0.0 pytest-mock==3.6.1 pytest-xdist==3.0.2 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==6.0.1 six==1.17.0 sortedcontainers==2.4.0 tblib==1.7.0 toml @ file:///tmp/build/80754af9/toml_1616166611790/work tomli==1.2.3 toolz==0.12.0 tornado==6.1 typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zict==2.1.0 zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: dask channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - click==8.0.4 - cloudpickle==2.2.1 - coverage==6.2 - distributed==1.28.1 - execnet==1.9.0 - heapdict==1.0.1 - locket==1.0.0 - msgpack==1.0.5 - numpy==1.19.5 - pandas==1.1.5 - partd==1.2.0 - psutil==7.0.0 - pytest-cov==4.0.0 - pytest-mock==3.6.1 - pytest-xdist==3.0.2 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==6.0.1 - six==1.17.0 - sortedcontainers==2.4.0 - tblib==1.7.0 - tomli==1.2.3 - toolz==0.12.0 - tornado==6.1 - zict==2.1.0 prefix: /opt/conda/envs/dask
[ "dask/dataframe/tests/test_categorical.py::test_return_type_known_categories", "dask/dataframe/tests/test_dataframe.py::test_assign_callable" ]
[ "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_callable[remove_unused_categories-kwargs8-series2]", "dask/dataframe/tests/test_dataframe.py::test_attributes", "dask/dataframe/tests/test_dataframe.py::test_timezone_freq[npartitions1]", "dask/dataframe/tests/test_dataframe.py::test_clip[2-5]", "dask/dataframe/tests/test_dataframe.py::test_clip[2.5-3.5]", "dask/dataframe/tests/test_dataframe.py::test_dataframe_picklable", "dask/dataframe/tests/test_dataframe.py::test_repartition_freq_divisions", "dask/dataframe/tests/test_dataframe.py::test_repartition_freq_month", "dask/dataframe/tests/test_dataframe.py::test_select_dtypes[include0-None]", "dask/dataframe/tests/test_dataframe.py::test_select_dtypes[None-exclude1]", "dask/dataframe/tests/test_dataframe.py::test_select_dtypes[include2-exclude2]", "dask/dataframe/tests/test_dataframe.py::test_select_dtypes[include3-None]", "dask/dataframe/tests/test_dataframe.py::test_to_timestamp", "dask/dataframe/tests/test_dataframe.py::test_to_dask_array_raises[False0]", "dask/dataframe/tests/test_dataframe.py::test_to_dask_array_raises[False1]", "dask/dataframe/tests/test_dataframe.py::test_apply", "dask/dataframe/tests/test_dataframe.py::test_apply_warns", "dask/dataframe/tests/test_dataframe.py::test_apply_infer_columns", "dask/dataframe/tests/test_dataframe.py::test_info", "dask/dataframe/tests/test_dataframe.py::test_groupby_multilevel_info", "dask/dataframe/tests/test_dataframe.py::test_idxmaxmin[idx2-True]", "dask/dataframe/tests/test_dataframe.py::test_idxmaxmin[idx2-False]", "dask/dataframe/tests/test_dataframe.py::test_shift", "dask/dataframe/tests/test_dataframe.py::test_shift_with_freq", "dask/dataframe/tests/test_dataframe.py::test_first_and_last[first]", "dask/dataframe/tests/test_dataframe.py::test_first_and_last[last]", "dask/dataframe/tests/test_dataframe.py::test_datetime_loc_open_slicing" ]
[ "dask/dataframe/tests/test_categorical.py::test_concat_unions_categoricals", "dask/dataframe/tests/test_categorical.py::test_unknown_categoricals", "dask/dataframe/tests/test_categorical.py::test_is_categorical_dtype", "dask/dataframe/tests/test_categorical.py::test_categorize", "dask/dataframe/tests/test_categorical.py::test_categorize_index", "dask/dataframe/tests/test_categorical.py::test_categorical_set_index[disk]", "dask/dataframe/tests/test_categorical.py::test_categorical_set_index[tasks]", "dask/dataframe/tests/test_categorical.py::test_repartition_on_categoricals[1]", "dask/dataframe/tests/test_categorical.py::test_repartition_on_categoricals[4]", "dask/dataframe/tests/test_categorical.py::test_categorical_accessor_presence", "dask/dataframe/tests/test_categorical.py::test_categorize_nan", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_properties[categories-assert_array_index_eq-series0]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_properties[categories-assert_array_index_eq-series1]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_properties[categories-assert_array_index_eq-series2]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_properties[ordered-assert_eq-series0]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_properties[ordered-assert_eq-series1]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_properties[ordered-assert_eq-series2]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_properties[codes-assert_array_index_eq-series0]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_properties[codes-assert_array_index_eq-series1]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_properties[codes-assert_array_index_eq-series2]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_callable[add_categories-kwargs0-series0]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_callable[add_categories-kwargs0-series1]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_callable[add_categories-kwargs0-series2]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_callable[as_ordered-kwargs1-series0]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_callable[as_ordered-kwargs1-series1]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_callable[as_ordered-kwargs1-series2]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_callable[as_unordered-kwargs2-series0]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_callable[as_unordered-kwargs2-series1]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_callable[as_unordered-kwargs2-series2]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_callable[as_ordered-kwargs3-series0]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_callable[as_ordered-kwargs3-series1]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_callable[as_ordered-kwargs3-series2]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_callable[remove_categories-kwargs4-series0]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_callable[remove_categories-kwargs4-series1]", 
"dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_callable[remove_categories-kwargs4-series2]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_callable[rename_categories-kwargs5-series0]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_callable[rename_categories-kwargs5-series1]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_callable[rename_categories-kwargs5-series2]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_callable[reorder_categories-kwargs6-series0]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_callable[reorder_categories-kwargs6-series1]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_callable[reorder_categories-kwargs6-series2]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_callable[set_categories-kwargs7-series0]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_callable[set_categories-kwargs7-series1]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_callable[set_categories-kwargs7-series2]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_callable[remove_unused_categories-kwargs8-series0]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_callable[remove_unused_categories-kwargs8-series1]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_categorical_empty", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_unknown_categories[series0]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_unknown_categories[series1]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_unknown_categories[series2]", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_categorical_string_ops", "dask/dataframe/tests/test_categorical.py::TestCategoricalAccessor::test_categorical_non_string_raises", "dask/dataframe/tests/test_dataframe.py::test_Dataframe", "dask/dataframe/tests/test_dataframe.py::test_head_tail", "dask/dataframe/tests/test_dataframe.py::test_head_npartitions", "dask/dataframe/tests/test_dataframe.py::test_head_npartitions_warn", "dask/dataframe/tests/test_dataframe.py::test_index_head", "dask/dataframe/tests/test_dataframe.py::test_Series", "dask/dataframe/tests/test_dataframe.py::test_Index", "dask/dataframe/tests/test_dataframe.py::test_Scalar", "dask/dataframe/tests/test_dataframe.py::test_column_names", "dask/dataframe/tests/test_dataframe.py::test_index_names", "dask/dataframe/tests/test_dataframe.py::test_timezone_freq[1]", "dask/dataframe/tests/test_dataframe.py::test_rename_columns", "dask/dataframe/tests/test_dataframe.py::test_rename_series", "dask/dataframe/tests/test_dataframe.py::test_rename_series_method", "dask/dataframe/tests/test_dataframe.py::test_describe", "dask/dataframe/tests/test_dataframe.py::test_describe_empty", "dask/dataframe/tests/test_dataframe.py::test_cumulative", "dask/dataframe/tests/test_dataframe.py::test_dropna", "dask/dataframe/tests/test_dataframe.py::test_squeeze", "dask/dataframe/tests/test_dataframe.py::test_where_mask", "dask/dataframe/tests/test_dataframe.py::test_map_partitions_multi_argument", "dask/dataframe/tests/test_dataframe.py::test_map_partitions", "dask/dataframe/tests/test_dataframe.py::test_map_partitions_names", "dask/dataframe/tests/test_dataframe.py::test_map_partitions_column_info", 
"dask/dataframe/tests/test_dataframe.py::test_map_partitions_method_names", "dask/dataframe/tests/test_dataframe.py::test_map_partitions_keeps_kwargs_readable", "dask/dataframe/tests/test_dataframe.py::test_metadata_inference_single_partition_aligned_args", "dask/dataframe/tests/test_dataframe.py::test_drop_duplicates", "dask/dataframe/tests/test_dataframe.py::test_drop_duplicates_subset", "dask/dataframe/tests/test_dataframe.py::test_get_partition", "dask/dataframe/tests/test_dataframe.py::test_ndim", "dask/dataframe/tests/test_dataframe.py::test_dtype", "dask/dataframe/tests/test_dataframe.py::test_value_counts", "dask/dataframe/tests/test_dataframe.py::test_unique", "dask/dataframe/tests/test_dataframe.py::test_isin", "dask/dataframe/tests/test_dataframe.py::test_len", "dask/dataframe/tests/test_dataframe.py::test_size", "dask/dataframe/tests/test_dataframe.py::test_shape", "dask/dataframe/tests/test_dataframe.py::test_nbytes", "dask/dataframe/tests/test_dataframe.py::test_quantile", "dask/dataframe/tests/test_dataframe.py::test_quantile_missing", "dask/dataframe/tests/test_dataframe.py::test_empty_quantile", "dask/dataframe/tests/test_dataframe.py::test_dataframe_quantile", "dask/dataframe/tests/test_dataframe.py::test_index", "dask/dataframe/tests/test_dataframe.py::test_assign", "dask/dataframe/tests/test_dataframe.py::test_map", "dask/dataframe/tests/test_dataframe.py::test_concat", "dask/dataframe/tests/test_dataframe.py::test_args", "dask/dataframe/tests/test_dataframe.py::test_known_divisions", "dask/dataframe/tests/test_dataframe.py::test_unknown_divisions", "dask/dataframe/tests/test_dataframe.py::test_align[inner]", "dask/dataframe/tests/test_dataframe.py::test_align[outer]", "dask/dataframe/tests/test_dataframe.py::test_align[left]", "dask/dataframe/tests/test_dataframe.py::test_align[right]", "dask/dataframe/tests/test_dataframe.py::test_align_axis[inner]", "dask/dataframe/tests/test_dataframe.py::test_align_axis[outer]", "dask/dataframe/tests/test_dataframe.py::test_align_axis[left]", "dask/dataframe/tests/test_dataframe.py::test_align_axis[right]", "dask/dataframe/tests/test_dataframe.py::test_combine", "dask/dataframe/tests/test_dataframe.py::test_combine_first", "dask/dataframe/tests/test_dataframe.py::test_random_partitions", "dask/dataframe/tests/test_dataframe.py::test_series_round", "dask/dataframe/tests/test_dataframe.py::test_repartition_divisions", "dask/dataframe/tests/test_dataframe.py::test_repartition_on_pandas_dataframe", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-1-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-1-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-2-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-2-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-4-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-4-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-5-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-5-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-1-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-1-False]", 
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-2-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-2-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-4-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-4-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-5-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-5-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-1-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-1-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-2-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-2-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-4-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-4-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-5-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-5-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-1-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-1-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-2-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-2-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-4-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-4-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-5-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-5-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-1-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-1-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-2-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-2-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-4-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-4-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-5-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-5-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-1-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-1-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-2-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-2-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-4-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-4-False]", 
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-5-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-5-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-1-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-1-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-2-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-2-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-4-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-4-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-5-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-5-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-1-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-1-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-2-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-2-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-4-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-4-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-5-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-5-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-1-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-1-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-2-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-2-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-4-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-4-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-5-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-5-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-1-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-1-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-2-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-2-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-4-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-4-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-5-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-5-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-1-True]", 
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-1-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-2-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-2-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-4-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-4-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-5-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-5-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-1-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-1-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-2-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-2-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-4-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-4-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-5-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-5-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-1-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-1-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-2-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-2-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-4-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-4-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-5-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-5-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-1-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-1-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-2-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-2-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-4-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-4-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-5-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-5-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-1-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-1-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-2-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-2-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-4-True]", 
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-4-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-5-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-5-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-1-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-1-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-2-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-2-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-4-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-4-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-5-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-5-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-1-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-1-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-2-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-2-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-4-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-4-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-5-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-5-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-1-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-1-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-2-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-2-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-4-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-4-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-5-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-5-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-1-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-1-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-2-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-2-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-4-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-4-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-5-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-5-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-1-True]", 
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-1-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-2-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-2-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-4-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-4-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-5-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-5-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-1-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-1-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-2-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-2-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-4-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-4-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-5-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-5-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-1-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-1-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-2-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-2-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-4-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-4-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-5-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-5-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-1-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-1-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-2-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-2-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-4-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-4-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-5-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-5-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-1-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-1-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-2-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-2-False]", 
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-4-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-4-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-5-True]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-5-False]", "dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions_same_limits", "dask/dataframe/tests/test_dataframe.py::test_repartition_object_index", "dask/dataframe/tests/test_dataframe.py::test_repartition_freq_errors", "dask/dataframe/tests/test_dataframe.py::test_embarrassingly_parallel_operations", "dask/dataframe/tests/test_dataframe.py::test_fillna", "dask/dataframe/tests/test_dataframe.py::test_fillna_multi_dataframe", "dask/dataframe/tests/test_dataframe.py::test_ffill_bfill", "dask/dataframe/tests/test_dataframe.py::test_fillna_series_types", "dask/dataframe/tests/test_dataframe.py::test_sample", "dask/dataframe/tests/test_dataframe.py::test_sample_without_replacement", "dask/dataframe/tests/test_dataframe.py::test_sample_raises", "dask/dataframe/tests/test_dataframe.py::test_datetime_accessor", "dask/dataframe/tests/test_dataframe.py::test_str_accessor", "dask/dataframe/tests/test_dataframe.py::test_empty_max", "dask/dataframe/tests/test_dataframe.py::test_deterministic_apply_concat_apply_names", "dask/dataframe/tests/test_dataframe.py::test_aca_meta_infer", "dask/dataframe/tests/test_dataframe.py::test_aca_split_every", "dask/dataframe/tests/test_dataframe.py::test_reduction_method", "dask/dataframe/tests/test_dataframe.py::test_reduction_method_split_every", "dask/dataframe/tests/test_dataframe.py::test_pipe", "dask/dataframe/tests/test_dataframe.py::test_gh_517", "dask/dataframe/tests/test_dataframe.py::test_drop_axis_1", "dask/dataframe/tests/test_dataframe.py::test_gh580", "dask/dataframe/tests/test_dataframe.py::test_rename_dict", "dask/dataframe/tests/test_dataframe.py::test_rename_function", "dask/dataframe/tests/test_dataframe.py::test_rename_index", "dask/dataframe/tests/test_dataframe.py::test_to_frame", "dask/dataframe/tests/test_dataframe.py::test_to_dask_array_unknown[False0]", "dask/dataframe/tests/test_dataframe.py::test_to_dask_array_unknown[False1]", "dask/dataframe/tests/test_dataframe.py::test_to_dask_array[False0-lengths0]", "dask/dataframe/tests/test_dataframe.py::test_to_dask_array[False0-True]", "dask/dataframe/tests/test_dataframe.py::test_to_dask_array[False1-lengths0]", "dask/dataframe/tests/test_dataframe.py::test_to_dask_array[False1-True]", "dask/dataframe/tests/test_dataframe.py::test_applymap", "dask/dataframe/tests/test_dataframe.py::test_abs", "dask/dataframe/tests/test_dataframe.py::test_round", "dask/dataframe/tests/test_dataframe.py::test_cov", "dask/dataframe/tests/test_dataframe.py::test_corr", "dask/dataframe/tests/test_dataframe.py::test_cov_corr_meta", "dask/dataframe/tests/test_dataframe.py::test_cov_corr_mixed", "dask/dataframe/tests/test_dataframe.py::test_autocorr", "dask/dataframe/tests/test_dataframe.py::test_index_time_properties", "dask/dataframe/tests/test_dataframe.py::test_nlargest_nsmallest", "dask/dataframe/tests/test_dataframe.py::test_reset_index", "dask/dataframe/tests/test_dataframe.py::test_dataframe_compute_forward_kwargs", "dask/dataframe/tests/test_dataframe.py::test_series_iteritems", "dask/dataframe/tests/test_dataframe.py::test_dataframe_iterrows", "dask/dataframe/tests/test_dataframe.py::test_dataframe_itertuples", 
"dask/dataframe/tests/test_dataframe.py::test_astype", "dask/dataframe/tests/test_dataframe.py::test_astype_categoricals", "dask/dataframe/tests/test_dataframe.py::test_astype_categoricals_known", "dask/dataframe/tests/test_dataframe.py::test_groupby_callable", "dask/dataframe/tests/test_dataframe.py::test_methods_tokenize_differently", "dask/dataframe/tests/test_dataframe.py::test_categorize_info", "dask/dataframe/tests/test_dataframe.py::test_gh_1301", "dask/dataframe/tests/test_dataframe.py::test_timeseries_sorted", "dask/dataframe/tests/test_dataframe.py::test_column_assignment", "dask/dataframe/tests/test_dataframe.py::test_columns_assignment", "dask/dataframe/tests/test_dataframe.py::test_attribute_assignment", "dask/dataframe/tests/test_dataframe.py::test_setitem_triggering_realign", "dask/dataframe/tests/test_dataframe.py::test_inplace_operators", "dask/dataframe/tests/test_dataframe.py::test_idxmaxmin[idx0-True]", "dask/dataframe/tests/test_dataframe.py::test_idxmaxmin[idx0-False]", "dask/dataframe/tests/test_dataframe.py::test_idxmaxmin[idx1-True]", "dask/dataframe/tests/test_dataframe.py::test_idxmaxmin[idx1-False]", "dask/dataframe/tests/test_dataframe.py::test_idxmaxmin_empty_partitions", "dask/dataframe/tests/test_dataframe.py::test_getitem_meta", "dask/dataframe/tests/test_dataframe.py::test_getitem_multilevel", "dask/dataframe/tests/test_dataframe.py::test_getitem_string_subclass", "dask/dataframe/tests/test_dataframe.py::test_getitem_column_types[list]", "dask/dataframe/tests/test_dataframe.py::test_getitem_column_types[array]", "dask/dataframe/tests/test_dataframe.py::test_getitem_column_types[Series]", "dask/dataframe/tests/test_dataframe.py::test_getitem_column_types[Index]", "dask/dataframe/tests/test_dataframe.py::test_ipython_completion", "dask/dataframe/tests/test_dataframe.py::test_diff", "dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[None-2-1]", "dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[None-2-4]", "dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[None-2-20]", "dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[None-5-1]", "dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[None-5-4]", "dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[None-5-20]", "dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[1-2-1]", "dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[1-2-4]", "dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[1-2-20]", "dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[1-5-1]", "dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[1-5-4]", "dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[1-5-20]", "dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[5-2-1]", "dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[5-2-4]", "dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[5-2-20]", "dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[5-5-1]", "dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[5-5-4]", "dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[5-5-20]", "dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[20-2-1]", "dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[20-2-4]", "dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[20-2-20]", "dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[20-5-1]", "dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[20-5-4]", 
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[20-5-20]", "dask/dataframe/tests/test_dataframe.py::test_split_out_drop_duplicates[None]", "dask/dataframe/tests/test_dataframe.py::test_split_out_drop_duplicates[2]", "dask/dataframe/tests/test_dataframe.py::test_split_out_value_counts[None]", "dask/dataframe/tests/test_dataframe.py::test_split_out_value_counts[2]", "dask/dataframe/tests/test_dataframe.py::test_values", "dask/dataframe/tests/test_dataframe.py::test_copy", "dask/dataframe/tests/test_dataframe.py::test_del", "dask/dataframe/tests/test_dataframe.py::test_memory_usage[True-True]", "dask/dataframe/tests/test_dataframe.py::test_memory_usage[True-False]", "dask/dataframe/tests/test_dataframe.py::test_memory_usage[False-True]", "dask/dataframe/tests/test_dataframe.py::test_memory_usage[False-False]", "dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[sum]", "dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[mean]", "dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[std]", "dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[var]", "dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[count]", "dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[min]", "dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[max]", "dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[idxmin]", "dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[idxmax]", "dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[prod]", "dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[all]", "dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[sem]", "dask/dataframe/tests/test_dataframe.py::test_to_datetime", "dask/dataframe/tests/test_dataframe.py::test_to_timedelta", "dask/dataframe/tests/test_dataframe.py::test_isna[values0]", "dask/dataframe/tests/test_dataframe.py::test_isna[values1]", "dask/dataframe/tests/test_dataframe.py::test_slice_on_filtered_boundary[0]", "dask/dataframe/tests/test_dataframe.py::test_slice_on_filtered_boundary[9]", "dask/dataframe/tests/test_dataframe.py::test_boundary_slice_nonmonotonic", "dask/dataframe/tests/test_dataframe.py::test_with_boundary[-1-None-False-False-drop0]", "dask/dataframe/tests/test_dataframe.py::test_with_boundary[-1-None-False-True-drop1]", "dask/dataframe/tests/test_dataframe.py::test_with_boundary[None-3-False-False-drop2]", "dask/dataframe/tests/test_dataframe.py::test_with_boundary[None-3-True-False-drop3]", "dask/dataframe/tests/test_dataframe.py::test_with_boundary[-0.5-None-False-False-drop4]", "dask/dataframe/tests/test_dataframe.py::test_with_boundary[-0.5-None-False-True-drop5]", "dask/dataframe/tests/test_dataframe.py::test_with_boundary[-1.5-None-False-True-drop6]", "dask/dataframe/tests/test_dataframe.py::test_with_boundary[None-3.5-False-False-drop7]", "dask/dataframe/tests/test_dataframe.py::test_with_boundary[None-3.5-True-False-drop8]", "dask/dataframe/tests/test_dataframe.py::test_with_boundary[None-2.5-False-False-drop9]", "dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index0-0-9]", "dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index1--1-None]", "dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index2-None-10]", 
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index3-None-None]", "dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index4--1-None]", "dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index5-None-2]", "dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index6--2-3]", "dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index7-None-None]", "dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index8-left8-None]", "dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index9-None-right9]", "dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index10-left10-None]", "dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index11-None-right11]", "dask/dataframe/tests/test_dataframe.py::test_better_errors_object_reductions", "dask/dataframe/tests/test_dataframe.py::test_sample_empty_partitions", "dask/dataframe/tests/test_dataframe.py::test_coerce", "dask/dataframe/tests/test_dataframe.py::test_bool", "dask/dataframe/tests/test_dataframe.py::test_cumulative_multiple_columns", "dask/dataframe/tests/test_dataframe.py::test_map_partition_array[asarray]", "dask/dataframe/tests/test_dataframe.py::test_map_partition_array[func1]", "dask/dataframe/tests/test_dataframe.py::test_mixed_dask_array_operations", "dask/dataframe/tests/test_dataframe.py::test_mixed_dask_array_operations_errors", "dask/dataframe/tests/test_dataframe.py::test_mixed_dask_array_multi_dimensional", "dask/dataframe/tests/test_dataframe.py::test_meta_raises" ]
[]
BSD 3-Clause "New" or "Revised" License
2,992
[ "dask/dataframe/core.py", "dask/dataframe/categorical.py" ]
[ "dask/dataframe/core.py", "dask/dataframe/categorical.py" ]
ev3dev__ev3dev-lang-python-500
e2dd9a1180eed72e20a22f5f53744b06754b9048
2018-08-30 05:11:17
c4cbe75edd69310dc2a81c10e76f03274b3fc383
WasabiFan: Updated. I didn't change the `SpeedNativeUnits` name because we didn't come up with anything that was very clear and I figure people won't be using it much anyway. All our CI builds are failing now, both on this branch and `ev3dev-stretch`. I'm not sure what changed, but it isn't our code. ddemidov: Looks like this line is the source of the CI troubles: https://github.com/ev3dev/ev3dev-lang-python/blob/e2dd9a1180eed72e20a22f5f53744b06754b9048/.travis.yml#L9 ddemidov: git blame says the commit that is responsible for that is https://github.com/ev3dev/ev3dev-lang-python/commit/6af0b14145bc5ebc6ec516bc2127efc5ba191971, but the commit was tested successfully here: https://travis-ci.org/ev3dev/ev3dev-lang-python/builds/404695800?utm_source=github_status&utm_medium=notification ddemidov: Installing evdev 1.0.0 fixed Travis. (1.1.0 was released [about 4 days ago](https://pypi.org/project/evdev/#history)) ddemidov: https://github.com/gvalkov/python-evdev/issues/98 WasabiFan: Thanks for the investigation @ddemidov; everything looks good for us now; at least we have a workaround.
diff --git a/.travis.yml b/.travis.yml index 4235441..d447dcd 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,8 +6,8 @@ sudo: false git: depth: 100 install: -- pip install -q Pillow evdev Sphinx sphinx_bootstrap_theme recommonmark evdev -- pip install -q -r ./docs/requirements.txt +- pip install Pillow Sphinx sphinx_bootstrap_theme recommonmark evdev==1.0.0 +- pip install -r ./docs/requirements.txt script: - chmod -R g+rw ./tests/fake-sys/devices/**/* - ./tests/api_tests.py diff --git a/docs/motors.rst b/docs/motors.rst index 514070b..6973d73 100644 --- a/docs/motors.rst +++ b/docs/motors.rst @@ -10,9 +10,9 @@ Motor classes Units ----- -Most methods which run motors with accept a ``speed`` or ``speed_pct`` argument. While this can be provided as an integer which will be interpreted as a percentage of max speed, you can also specify an instance of any of the following classes, each of which represents a different unit system: +Most methods which run motors will accept a ``speed`` argument. While this can be provided as an integer which will be interpreted as a percentage of max speed, you can also specify an instance of any of the following classes, each of which represents a different unit system: -.. autoclass:: SpeedInteger +.. autoclass:: SpeedValue .. autoclass:: SpeedPercent .. autoclass:: SpeedNativeUnits .. autoclass:: SpeedRPS diff --git a/ev3dev2/motor.py b/ev3dev2/motor.py index e2aa7f5..b39d00c 100644 --- a/ev3dev2/motor.py +++ b/ev3dev2/motor.py @@ -67,7 +67,7 @@ else: raise Exception("Unsupported platform '%s'" % platform) -class SpeedInteger(int): +class SpeedValue(): """ A base class for other unit types. Don't use this directly; instead, see :class:`SpeedPercent`, :class:`SpeedRPS`, :class:`SpeedRPM`, @@ -75,97 +75,118 @@ class SpeedInteger(int): """ pass -class SpeedPercent(SpeedInteger): +class SpeedPercent(SpeedValue): """ Speed as a percentage of the motor's maximum rated speed. """ + def __init__(self, percent): + assert -100 <= percent <= 100,\ + "{} is an invalid percentage, must be between -100 and 100 (inclusive)".format(percent) + + self.percent = percent + def __str__(self): - return int.__str__(self) + "%" + return str(self.percent) + "%" - def get_speed_pct(self, motor): + def to_native_units(self, motor): """ - Return the motor speed percentage represented by this SpeedPercent + Return this SpeedPercent in native motor units """ - return self + return self.percent / 100 * motor.max_speed -class SpeedNativeUnits(SpeedInteger): +class SpeedNativeUnits(SpeedValue): """ Speed in tacho counts per second. """ + def __init__(self, native_counts): + self.native_counts = native_counts + def __str__(self): - return int.__str__(self) + "% (counts/sec)" + return str(self.native_counts) + " counts/sec" - def get_speed_pct(self, motor): + def to_native_units(self, motor): """ - Return the motor speed percentage represented by this SpeedNativeUnits + Return this SpeedNativeUnits as a number """ - return self/motor.max_speed * 100 + return self.native_counts -class SpeedRPS(SpeedInteger): +class SpeedRPS(SpeedValue): """ Speed in rotations-per-second. 
""" + def __init__(self, rotations_per_second): + self.rotations_per_second = rotations_per_second + def __str__(self): - return int.__str__(self) + " rps" + return str(self.rotations_per_second) + " rot/sec" - def get_speed_pct(self, motor): + def to_native_units(self, motor): """ - Return the motor speed percentage to achieve desired rotations-per-second + Return the native speed measurement required to achieve desired rotations-per-second """ - assert self <= motor.max_rps, "{} max RPS is {}, {} was requested".format(motor, motor.max_rps, self) - return (self/motor.max_rps) * 100 + assert abs(self.rotations_per_second) <= motor.max_rps, "invalid rotations-per-second: {} max RPS is {}, {} was requested".format(motor, motor.max_rps, self.rotations_per_second) + return self.rotations_per_second/motor.max_rps * motor.max_speed -class SpeedRPM(SpeedInteger): +class SpeedRPM(SpeedValue): """ Speed in rotations-per-minute. """ + def __init__(self, rotations_per_minute): + self.rotations_per_minute = rotations_per_minute + def __str__(self): - return int.__str__(self) + " rpm" + return str(self) + " rot/min" - def get_speed_pct(self, motor): + def to_native_units(self, motor): """ - Return the motor speed percentage to achieve desired rotations-per-minute + Return the native speed measurement required to achieve desired rotations-per-minute """ - assert self <= motor.max_rpm, "{} max RPM is {}, {} was requested".format(motor, motor.max_rpm, self) - return (self/motor.max_rpm) * 100 + assert abs(self.rotations_per_minute) <= motor.max_rpm, "invalid rotations-per-minute: {} max RPM is {}, {} was requested".format(motor, motor.max_rpm, self.rotations_per_minute) + return self.rotations_per_minute/motor.max_rpm * motor.max_speed -class SpeedDPS(SpeedInteger): +class SpeedDPS(SpeedValue): """ Speed in degrees-per-second. """ + def __init__(self, degrees_per_second): + self.degrees_per_second = degrees_per_second + def __str__(self): - return int.__str__(self) + " dps" + return str(self) + " deg/sec" - def get_speed_pct(self, motor): + def to_native_units(self, motor): """ - Return the motor speed percentage to achieve desired degrees-per-second + Return the native speed measurement required to achieve desired degrees-per-second """ - assert self <= motor.max_dps, "{} max DPS is {}, {} was requested".format(motor, motor.max_dps, self) - return (self/motor.max_dps) * 100 + assert abs(self.degrees_per_second) <= motor.max_dps, "invalid degrees-per-second: {} max DPS is {}, {} was requested".format(motor, motor.max_dps, self.degrees_per_second) + return self.degrees_per_second/motor.max_dps * motor.max_speed -class SpeedDPM(SpeedInteger): +class SpeedDPM(SpeedValue): """ Speed in degrees-per-minute. 
""" + def __init__(self, degrees_per_minute): + self.degrees_per_minute = degrees_per_minute + def __str__(self): - return int.__str__(self) + " dpm" + return int.__str__(self) + " deg/min" - def get_speed_pct(self, motor): + def to_native_units(self, motor): """ - Return the motor speed percentage to achieve desired degrees-per-minute + Return the native speed measurement required to achieve desired degrees-per-minute """ - assert self <= motor.max_dpm, "{} max DPM is {}, {} was requested".format(motor, motor.max_dpm, self) - return (self/motor.max_dpm) * 100 + assert abs(self.degrees_per_minute) <= motor.max_dpm, "invalid degrees-per-minute: {} max DPM is {}, {} was requested".format(motor, motor.max_dpm, self.degrees_per_minute) + return self.degrees_per_minute/motor.max_dpm * motor.max_speed class Motor(Device): @@ -175,11 +196,6 @@ class Motor(Device): positional and directional feedback such as the EV3 and NXT motors. This feedback allows for precise control of the motors. This is the most common type of motor, so we just call it `motor`. - - The way to configure a motor is to set the '_sp' attributes when - calling a command or before. Only in 'run_direct' mode attribute - changes are processed immediately, in the other modes they only - take place when a new command is issued. """ SYSTEM_CLASS_NAME = 'tacho-motor' @@ -841,34 +857,32 @@ class Motor(Device): """ return self.wait(lambda state: s not in state, timeout) - def _speed_pct(self, speed_pct, label=None): - - # If speed_pct is SpeedInteger object we must convert - # SpeedRPS, etc to an actual speed percentage - if isinstance(speed_pct, SpeedInteger): - speed_pct = speed_pct.get_speed_pct(self) + def _speed_native_units(self, speed, label=None): - assert -100 <= speed_pct <= 100,\ - "{}{} is an invalid speed_pct, must be between -100 and 100 (inclusive)".format(None if label is None else (label + ": ") , speed_pct) + # If speed is not a SpeedValue object we treat it as a percentage + if not isinstance(speed, SpeedValue): + assert -100 <= speed <= 100,\ + "{}{} is an invalid speed percentage, must be between -100 and 100 (inclusive)".format("" if label is None else (label + ": ") , speed) + speed = SpeedPercent(speed) - return speed_pct + return speed.to_native_units(self) - def _set_position_rotations(self, speed_pct, rotations): + def _set_position_rotations(self, speed, rotations): # +/- speed is used to control direction, rotations must be positive assert rotations >= 0, "rotations is {}, must be >= 0".format(rotations) - if speed_pct > 0: - self.position_sp = self.position + int(rotations * self.count_per_rot) + if speed > 0: + self.position_sp = self.position + int(round(rotations * self.count_per_rot)) else: - self.position_sp = self.position - int(rotations * self.count_per_rot) + self.position_sp = self.position - int(round(rotations * self.count_per_rot)) - def _set_position_degrees(self, speed_pct, degrees): + def _set_position_degrees(self, speed, degrees): # +/- speed is used to control direction, degrees must be positive assert degrees >= 0, "degrees is %s, must be >= 0" % degrees - if speed_pct > 0: + if speed > 0: self.position_sp = self.position + int((degrees * self.count_per_rot)/360) else: self.position_sp = self.position - int((degrees * self.count_per_rot)/360) @@ -879,22 +893,22 @@ class Motor(Device): else: self.stop_action = self.STOP_ACTION_COAST - def on_for_rotations(self, speed_pct, rotations, brake=True, block=True): + def on_for_rotations(self, speed, rotations, brake=True, block=True): """ - 
Rotate the motor at ``speed_pct`` for ``rotations`` + Rotate the motor at ``speed`` for ``rotations`` - ``speed_pct`` can be an integer percentage or a :class:`ev3dev2.motor.SpeedInteger` + ``speed`` can be a percentage or a :class:`ev3dev2.motor.SpeedValue` object, enabling use of other units. """ - speed_pct = self._speed_pct(speed_pct) + speed = self._speed_native_units(speed) - if not speed_pct or not rotations: - log.warning("({}) Either speed_pct ({}) or rotations ({}) is invalid, motor will not move" .format(self, speed_pct, rotations)) + if not speed or not rotations: + log.warning("({}) Either speed ({}) or rotations ({}) is invalid, motor will not move" .format(self, speed, rotations)) self._set_brake(brake) return - self.speed_sp = int((speed_pct * self.max_speed) / 100) - self._set_position_rotations(speed_pct, rotations) + self.speed_sp = int(round(speed)) + self._set_position_rotations(speed, rotations) self._set_brake(brake) self.run_to_abs_pos() @@ -902,22 +916,22 @@ class Motor(Device): self.wait_until('running', timeout=WAIT_RUNNING_TIMEOUT) self.wait_until_not_moving() - def on_for_degrees(self, speed_pct, degrees, brake=True, block=True): + def on_for_degrees(self, speed, degrees, brake=True, block=True): """ - Rotate the motor at ``speed_pct`` for ``degrees`` + Rotate the motor at ``speed`` for ``degrees`` - ``speed_pct`` can be an integer percentage or a :class:`ev3dev2.motor.SpeedInteger` + ``speed`` can be a percentage or a :class:`ev3dev2.motor.SpeedValue` object, enabling use of other units. """ - speed_pct = self._speed_pct(speed_pct) + speed = self._speed_native_units(speed) - if not speed_pct or not degrees: - log.warning("({}) Either speed_pct ({}) or degrees ({}) is invalid, motor will not move" .format(self, speed_pct, degrees)) + if not speed or not degrees: + log.warning("({}) Either speed ({}) or degrees ({}) is invalid, motor will not move".format(self, speed, degrees)) self._set_brake(brake) return - self.speed_sp = int((speed_pct * self.max_speed) / 100) - self._set_position_degrees(speed_pct, degrees) + self.speed_sp = int(round(speed)) + self._set_position_degrees(speed, degrees) self._set_brake(brake) self.run_to_abs_pos() @@ -925,21 +939,21 @@ class Motor(Device): self.wait_until('running', timeout=WAIT_RUNNING_TIMEOUT) self.wait_until_not_moving() - def on_to_position(self, speed_pct, position, brake=True, block=True): + def on_to_position(self, speed, position, brake=True, block=True): """ - Rotate the motor at ``speed_pct`` to ``position`` + Rotate the motor at ``speed`` to ``position`` - ``speed_pct`` can be an integer percentage or a :class:`ev3dev2.motor.SpeedInteger` + ``speed`` can be a percentage or a :class:`ev3dev2.motor.SpeedValue` object, enabling use of other units. 
""" - speed_pct = self._speed_pct(speed_pct) + speed = self._speed_native_units(speed) - if not speed_pct: - log.warning("({}) speed_pct is invalid ({}), motor will not move".format(self, speed_pct)) + if not speed: + log.warning("({}) speed is invalid ({}), motor will not move".format(self, speed)) self._set_brake(brake) return - self.speed_sp = int((speed_pct * self.max_speed) / 100) + self.speed_sp = int(round(speed)) self.position_sp = position self._set_brake(brake) self.run_to_abs_pos() @@ -948,21 +962,21 @@ class Motor(Device): self.wait_until('running', timeout=WAIT_RUNNING_TIMEOUT) self.wait_until_not_moving() - def on_for_seconds(self, speed_pct, seconds, brake=True, block=True): + def on_for_seconds(self, speed, seconds, brake=True, block=True): """ - Rotate the motor at ``speed_pct`` for ``seconds`` + Rotate the motor at ``speed`` for ``seconds`` - ``speed_pct`` can be an integer percentage or a :class:`ev3dev2.motor.SpeedInteger` + ``speed`` can be a percentage or a :class:`ev3dev2.motor.SpeedValue` object, enabling use of other units. """ - speed_pct = self._speed_pct(speed_pct) + speed = self._speed_native_units(speed) - if not speed_pct or not seconds: - log.warning("({}) Either speed_pct ({}) or seconds ({}) is invalid, motor will not move" .format(self, speed_pct, seconds)) + if not speed or not seconds: + log.warning("({}) Either speed ({}) or seconds ({}) is invalid, motor will not move" .format(self, speed, seconds)) self._set_brake(brake) return - self.speed_sp = int((speed_pct * self.max_speed) / 100) + self.speed_sp = int(round(speed)) self.time_sp = int(seconds * 1000) self._set_brake(brake) self.run_timed() @@ -971,24 +985,24 @@ class Motor(Device): self.wait_until('running', timeout=WAIT_RUNNING_TIMEOUT) self.wait_until_not_moving() - def on(self, speed_pct, brake=True, block=False): + def on(self, speed, brake=True, block=False): """ - Rotate the motor at ``speed_pct`` for forever + Rotate the motor at ``speed`` for forever - ``speed_pct`` can be an integer percentage or a :class:`ev3dev2.motor.SpeedInteger` + ``speed`` can be a percentage or a :class:`ev3dev2.motor.SpeedValue` object, enabling use of other units. Note that `block` is False by default, this is different from the other `on_for_XYZ` methods. 
""" - speed_pct = self._speed_pct(speed_pct) + speed = self._speed_native_units(speed) - if not speed_pct: - log.warning("({}) speed_pct is invalid ({}), motor will not move".format(self, speed_pct)) + if not speed: + log.warning("({}) speed is invalid ({}), motor will not move".format(self, speed)) self._set_brake(brake) return - self.speed_sp = int((speed_pct * self.max_speed) / 100) + self.speed_sp = int(round(speed)) self._set_brake(brake) self.run_forever() @@ -1732,21 +1746,21 @@ class MoveTank(MotorSet): self.right_motor.wait_until_not_moving() def _unpack_speeds_to_native_units(self, left_speed, right_speed): - left_speed_pct = self.left_motor._speed_pct(left_speed, "left_speed") - right_speed_pct = self.right_motor._speed_pct(right_speed, "right_speed") + left_speed = self.left_motor._speed_native_units(left_speed, "left_speed") + right_speed = self.right_motor._speed_native_units(right_speed, "right_speed") - assert left_speed_pct or right_speed_pct,\ + assert left_speed or right_speed,\ "Either left_speed or right_speed must be non-zero" return ( - int((left_speed_pct * self.left_motor.max_speed) / 100), - int((right_speed_pct * self.right_motor.max_speed) / 100) + left_speed, + right_speed ) def on_for_rotations(self, left_speed, right_speed, rotations, brake=True, block=True): """ Rotate the motors at 'left_speed & right_speed' for 'rotations'. Speeds - can be integer percentages or any SpeedInteger implementation. + can be percentages or any SpeedValue implementation. If the left speed is not equal to the right speed (i.e., the robot will turn), the motor on the outside of the turn will rotate for the full @@ -1766,10 +1780,10 @@ class MoveTank(MotorSet): right_rotations = rotations # Set all parameters - self.left_motor.speed_sp = left_speed_native_units + self.left_motor.speed_sp = int(round(left_speed_native_units)) self.left_motor._set_position_rotations(left_speed_native_units, left_rotations) self.left_motor._set_brake(brake) - self.right_motor.speed_sp = right_speed_native_units + self.right_motor.speed_sp = int(round(right_speed_native_units)) self.right_motor._set_position_rotations(right_speed_native_units, right_rotations) self.right_motor._set_brake(brake) @@ -1783,7 +1797,7 @@ class MoveTank(MotorSet): def on_for_degrees(self, left_speed, right_speed, degrees, brake=True, block=True): """ Rotate the motors at 'left_speed & right_speed' for 'degrees'. Speeds - can be integer percentages or any SpeedInteger implementation. + can be percentages or any SpeedValue implementation. If the left speed is not equal to the right speed (i.e., the robot will turn), the motor on the outside of the turn will rotate for the full @@ -1800,10 +1814,10 @@ class MoveTank(MotorSet): right_degrees = degrees # Set all parameters - self.left_motor.speed_sp = left_speed_native_units + self.left_motor.speed_sp = int(round(left_speed_native_units)) self.left_motor._set_position_degrees(left_speed_native_units, left_degrees) self.left_motor._set_brake(brake) - self.right_motor.speed_sp = right_speed_native_units + self.right_motor.speed_sp = int(round(right_speed_native_units)) self.right_motor._set_position_degrees(right_speed_native_units, right_degrees) self.right_motor._set_brake(brake) @@ -1817,15 +1831,15 @@ class MoveTank(MotorSet): def on_for_seconds(self, left_speed, right_speed, seconds, brake=True, block=True): """ Rotate the motors at 'left_speed & right_speed' for 'seconds'. Speeds - can be integer percentages or any SpeedInteger implementation. 
+ can be percentages or any SpeedValue implementation. """ (left_speed_native_units, right_speed_native_units) = self._unpack_speeds_to_native_units(left_speed, right_speed) # Set all parameters - self.left_motor.speed_sp = left_speed_native_units + self.left_motor.speed_sp = int(round(left_speed_native_units)) self.left_motor.time_sp = int(seconds * 1000) self.left_motor._set_brake(brake) - self.right_motor.speed_sp = right_speed_native_units + self.right_motor.speed_sp = int(round(right_speed_native_units)) self.right_motor.time_sp = int(seconds * 1000) self.right_motor._set_brake(brake) @@ -1839,12 +1853,12 @@ class MoveTank(MotorSet): def on(self, left_speed, right_speed): """ Start rotating the motors according to ``left_speed`` and ``right_speed`` forever. - Speeds can be integer percentages or any SpeedInteger implementation. + Speeds can be percentages or any SpeedValue implementation. """ (left_speed_native_units, right_speed_native_units) = self._unpack_speeds_to_native_units(left_speed, right_speed) - self.left_motor.speed_sp = left_speed_native_units - self.right_motor.speed_sp = right_speed_native_units + self.left_motor.speed_sp = int(round(left_speed_native_units)) + self.right_motor.speed_sp = int(round(right_speed_native_units)) # Start the motors self.left_motor.run_forever() @@ -1936,9 +1950,9 @@ class MoveSteering(MoveTank): # We don't have a good way to make this generic for the pair... so we # assume that the left motor's speed stats are the same as the right # motor's. - speed_pct = self.left_motor._speed_pct(speed) - left_speed = int((speed_pct * self.max_speed) / 100) - right_speed = left_speed + speed = self.left_motor._speed_native_units(speed) + left_speed = speed + right_speed = speed speed_factor = (50 - abs(float(steering))) / 50 if steering >= 0: @@ -1970,7 +1984,7 @@ class MoveJoystick(MoveTank): (0,0) representing the center position. X is horizontal and Y is vertical. max_speed (default 100%): - A percentage or other SpeedInteger, controlling the maximum motor speed. + A percentage or other SpeedValue, controlling the maximum motor speed. radius (default 100): The radius of the joystick, controlling the range of the input (x, y) values. @@ -2012,7 +2026,7 @@ class MoveJoystick(MoveTank): # init_left_speed_percentage, init_right_speed_percentage, # left_speed_percentage, right_speed_percentage)) - MoveTank.on(self, SpeedPercent(left_speed_percentage * self.left_motor._speed_pct(max_speed) / 100), SpeedPercent(right_speed_percentage * self.right_motor._speed_pct(max_speed) / 100)) + MoveTank.on(self, SpeedNativeUnits(left_speed_percentage / 100 * self.left_motor._speed_native_units(max_speed)), SpeedNativeUnits(right_speed_percentage / 100 * self.right_motor._speed_native_units(max_speed))) @staticmethod
Should SpeedInteger actually be floating-point? We have these new `SpeedInteger` classes. But... as it turns out, it might make more sense to have them be `SpeedFloat`s (or, more likely, a better name like `SpeedMeasure` or simply `Speed`). My thinking is: given that we're converting from whatever input units are provided to native units (eventually), we should minimize intermediate truncations to enable the granularity that ticks-per-second provides. This should just be a matter of changing the name, inheritance and documentation mentions.
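The issue above argues for dropping the `int` subclassing so unit conversions keep float precision until the final write to `speed_sp`. A minimal sketch of that idea follows, mirroring the direction the patch in this record takes; it is illustrative only (not part of the dataset record), and `FakeMotor` plus the usage line are made-up stand-ins, with the 1050 counts/sec and 360 counts-per-rotation figures taken from the record's tests.

```python
# Sketch: speed-unit wrappers that do NOT subclass int, so conversion to
# native tacho counts stays floating-point and is rounded exactly once
# by the caller, e.g. motor.speed_sp = int(round(speed.to_native_units(motor))).

class SpeedValue:
    """Base class for motor speed units (see SpeedPercent, SpeedRPS, ...)."""


class SpeedRPS(SpeedValue):
    def __init__(self, rotations_per_second):
        self.rotations_per_second = rotations_per_second

    def to_native_units(self, motor):
        # No intermediate truncation here; full float precision is preserved.
        assert abs(self.rotations_per_second) <= motor.max_rps
        return self.rotations_per_second / motor.max_rps * motor.max_speed


class FakeMotor:
    # Hypothetical stand-in for an EV3 large motor, for demonstration only.
    max_speed = 1050            # tacho counts per second
    max_rps = max_speed / 360   # 360 tacho counts per rotation


print(SpeedRPS(2).to_native_units(FakeMotor()))  # 720.0, no truncation
```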
ev3dev/ev3dev-lang-python
diff --git a/tests/api_tests.py b/tests/api_tests.py index 0f91999..c70fce1 100755 --- a/tests/api_tests.py +++ b/tests/api_tests.py @@ -36,6 +36,9 @@ class TestAPI(unittest.TestCase): with self.assertRaises(ev3dev2.DeviceNotFound): d = ev3dev2.Device('tacho-motor', 'motor*', address='outA', driver_name='not-valid') + with self.assertRaises(ev3dev2.DeviceNotFound): + d = ev3dev2.Device('tacho-motor', 'motor*', address='this-does-not-exist') + d = ev3dev2.Device('lego-sensor', 'sensor*') with self.assertRaises(ev3dev2.DeviceNotFound): @@ -113,8 +116,8 @@ class TestAPI(unittest.TestCase): self.assertEqual(drive.left_motor.speed_sp, 1050 / 2) self.assertEqual(drive.right_motor.position, 0) - self.assertAlmostEqual(drive.right_motor.position_sp, 5 * 360, delta=5) - self.assertAlmostEqual(drive.right_motor.speed_sp, 1050 / 4, delta=1) + self.assertEqual(drive.right_motor.position_sp, 5 * 360) + self.assertAlmostEqual(drive.right_motor.speed_sp, 1050 / 4, delta=0.5) def test_tank_units(self): clean_arena() @@ -128,8 +131,8 @@ class TestAPI(unittest.TestCase): self.assertEqual(drive.left_motor.speed_sp, 400) self.assertEqual(drive.right_motor.position, 0) - self.assertAlmostEqual(drive.right_motor.position_sp, 10 * 360 * ((10000 / 60) / 400), delta=7) - self.assertAlmostEqual(drive.right_motor.speed_sp, 10000 / 60, delta=1) + self.assertAlmostEqual(drive.right_motor.position_sp, 10 * 360 * ((10000 / 60) / 400)) + self.assertAlmostEqual(drive.right_motor.speed_sp, 10000 / 60, delta=0.5) def test_steering_units(self): clean_arena() @@ -162,12 +165,12 @@ class TestAPI(unittest.TestCase): m = Motor() - self.assertEqual(SpeedPercent(35).get_speed_pct(m), 35) - self.assertEqual(SpeedDPS(300).get_speed_pct(m), 300 / 1050 * 100) - self.assertEqual(SpeedNativeUnits(300).get_speed_pct(m), 300 / 1050 * 100) - self.assertEqual(SpeedDPM(30000).get_speed_pct(m), (30000 / 60) / 1050 * 100) - self.assertEqual(SpeedRPS(2).get_speed_pct(m), 360 * 2 / 1050 * 100) - self.assertEqual(SpeedRPM(100).get_speed_pct(m), (360 * 100 / 60) / 1050 * 100) + self.assertEqual(SpeedPercent(35).to_native_units(m), 35 / 100 * m.max_speed) + self.assertEqual(SpeedDPS(300).to_native_units(m), 300) + self.assertEqual(SpeedNativeUnits(300).to_native_units(m), 300) + self.assertEqual(SpeedDPM(30000).to_native_units(m), (30000 / 60)) + self.assertEqual(SpeedRPS(2).to_native_units(m), 360 * 2) + self.assertEqual(SpeedRPM(100).to_native_units(m), (360 * 100 / 60)) if __name__ == "__main__":
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 3 }
2.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc", "git submodule init", "git submodule update" ], "python": "3.9", "reqs_path": [ "docs/requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
docutils==0.14 exceptiongroup==1.2.2 iniconfig==2.1.0 packaging==24.2 pillow==11.1.0 pluggy==1.5.0 pytest==8.3.5 -e git+https://github.com/ev3dev/ev3dev-lang-python.git@e2dd9a1180eed72e20a22f5f53744b06754b9048#egg=python_ev3dev2 tomli==2.2.1
name: ev3dev-lang-python channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - docutils==0.14 - exceptiongroup==1.2.2 - iniconfig==2.1.0 - packaging==24.2 - pillow==11.1.0 - pluggy==1.5.0 - pytest==8.3.5 - tomli==2.2.1 prefix: /opt/conda/envs/ev3dev-lang-python
[ "tests/api_tests.py::TestAPI::test_units" ]
[ "tests/api_tests.py::TestAPI::test_joystick_units", "tests/api_tests.py::TestAPI::test_medium_motor_write", "tests/api_tests.py::TestAPI::test_move_tank", "tests/api_tests.py::TestAPI::test_steering_units", "tests/api_tests.py::TestAPI::test_tank_units" ]
[ "tests/api_tests.py::TestAPI::test_device", "tests/api_tests.py::TestAPI::test_infrared_sensor", "tests/api_tests.py::TestAPI::test_medium_motor" ]
[]
MIT License
2,993
[ ".travis.yml", "ev3dev2/motor.py", "docs/motors.rst" ]
[ ".travis.yml", "ev3dev2/motor.py", "docs/motors.rst" ]
neogeny__TatSu-78
4a6d893c8755bcd06a322365b35c129d16543f60
2018-08-30 09:17:22
4aa9636ab1a77a24a5b60eeb06575aee5cf20dd7
diff --git a/docs/mini-tutorial.rst b/docs/mini-tutorial.rst index b32c62c..c73380c 100644 --- a/docs/mini-tutorial.rst +++ b/docs/mini-tutorial.rst @@ -305,7 +305,7 @@ Semantics for |TatSu| parsers are not specified in the grammar, but in a separat parser = tatsu.compile(grammar) ast = parser.parse( '3 + 5 * ( 10 - 20 )', - parse_with_basic_semantics=CalcBasicSemantics() + semantics=CalcBasicSemantics() ) print('# BASIC SEMANTICS RESULT') diff --git a/grammar/tatsu.ebnf b/grammar/tatsu.ebnf index d087e40..3ea4896 100644 --- a/grammar/tatsu.ebnf +++ b/grammar/tatsu.ebnf @@ -378,7 +378,7 @@ token::Token literal = - string | raw_string | word | hex | float | int + string | raw_string | boolean | word | hex | float | int ; diff --git a/tatsu/bootstrap.py b/tatsu/bootstrap.py index 50ecd1f..5fcba66 100644 --- a/tatsu/bootstrap.py +++ b/tatsu/bootstrap.py @@ -152,7 +152,7 @@ class EBNFBootstrapParser(Parser): self._boolean_() self.name_last_node('value') with self._option(): - self._constant('True') + self._constant(True) self.name_last_node('value') self._error('no available options') with self._option(): @@ -728,7 +728,7 @@ class EBNFBootstrapParser(Parser): def _special_(self): # noqa self._token('?(') self._cut() - self._pattern(r'.*?(?!\)\?)') + self._pattern('.*?(?!\\)\\?)') self.name_last_node('@') self._token(')?') self._cut() @@ -803,11 +803,11 @@ class EBNFBootstrapParser(Parser): @tatsumasu('Constant') def _constant_(self): # noqa - self._pattern(r'`') + self._pattern('`') self._cut() self._literal_() self.name_last_node('@') - self._pattern(r'`') + self._pattern('`') @tatsumasu('Token') def _token_(self): # noqa @@ -825,6 +825,8 @@ class EBNFBootstrapParser(Parser): self._string_() with self._option(): self._raw_string_() + with self._option(): + self._boolean_() with self._option(): self._word_() with self._option(): @@ -851,14 +853,14 @@ class EBNFBootstrapParser(Parser): with self._option(): self._token('"') self._cut() - self._pattern(r'([^"\n]|\\"|\\\\)*') + self._pattern('([^"\\n]|\\\\"|\\\\\\\\)*') self.name_last_node('@') self._token('"') self._cut() with self._option(): self._token("'") self._cut() - self._pattern(r"([^'\n]|\\'|\\\\)*") + self._pattern("([^'\\n]|\\\\'|\\\\\\\\)*") self.name_last_node('@') self._token("'") self._cut() @@ -866,23 +868,23 @@ class EBNFBootstrapParser(Parser): @tatsumasu() def _hex_(self): # noqa - self._pattern(r'0[xX](\d|[a-fA-F])+') + self._pattern('0[xX](\\d|[a-fA-F])+') @tatsumasu() def _float_(self): # noqa - self._pattern(r'[-+]?(?:\d+\.\d*|\d*\.\d+)(?:[Ee][-+]?\d+)?') + self._pattern('[-+]?(?:\\d+\\.\\d*|\\d*\\.\\d+)(?:[Ee][-+]?\\d+)?') @tatsumasu() def _int_(self): # noqa - self._pattern(r'[-+]?\d+') + self._pattern('[-+]?\\d+') @tatsumasu() def _path_(self): # noqa - self._pattern(r'(?!\d)\w+(::(?!\d)\w+)+') + self._pattern('(?!\\d)\\w+(::(?!\\d)\\w+)+') @tatsumasu() def _word_(self): # noqa - self._pattern(r'(?!\d)\w+') + self._pattern('(?!\\d)\\w+') @tatsumasu('Any') def _any_(self): # noqa @@ -908,16 +910,16 @@ class EBNFBootstrapParser(Parser): with self._option(): self._token('/') self._cut() - self._pattern(r'([^/\\]|\\/|\\.)+') + self._pattern('([^/\\\\]|\\\\/|\\\\.)+') self.name_last_node('@') self._token('/') self._cut() with self._option(): self._token('?/') self._cut() - self._pattern(r'(.|\n)+?(?=/\?)') + self._pattern('(.|\\n)+?(?=/\\?)') self.name_last_node('@') - self._pattern(r'/\?+') + self._pattern('/\\?+') self._cut() with self._option(): self._token('?') @@ -1140,14 +1142,16 @@ class EBNFBootstrapSemantics(object): 
return ast -def main(filename, start='start', **kwargs): +def main(filename, start=None, **kwargs): + if start is None: + start = 'start' if not filename or filename == '-': text = sys.stdin.read() else: with open(filename) as f: text = f.read() parser = EBNFBootstrapParser() - return parser.parse(text, start=start, filename=filename, **kwargs) + return parser.parse(text, rule_name=start, filename=filename, **kwargs) if __name__ == '__main__': diff --git a/tatsu/codegen/cgbase.py b/tatsu/codegen/cgbase.py index c31972e..b858636 100644 --- a/tatsu/codegen/cgbase.py +++ b/tatsu/codegen/cgbase.py @@ -90,6 +90,7 @@ class CodeGenerator(object): ``ModelRenderer`` class with the same name as each model's node and uses it to render the node. """ + def __init__(self, modules=None): self.formatter = DelegatingRenderingFormatter(self) self._renderers = {} diff --git a/tatsu/codegen/objectmodel.py b/tatsu/codegen/objectmodel.py index d081332..e1f87e7 100644 --- a/tatsu/codegen/objectmodel.py +++ b/tatsu/codegen/objectmodel.py @@ -1,6 +1,8 @@ # -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function, unicode_literals +import inspect + from datetime import datetime from collections import namedtuple @@ -15,16 +17,22 @@ from tatsu.objectmodel import BASE_CLASS_TOKEN from tatsu.exceptions import CodegenError from tatsu.rendering import Renderer from tatsu.codegen.cgbase import ModelRenderer, CodeGenerator +from tatsu.util import PY33 -NODE_NAME_PATTERN = '(?!\d)\w+(' + BASE_CLASS_TOKEN + '(?!\d)\w+)*' +NODE_NAME_PATTERN = r'(?!\d)\w+(' + BASE_CLASS_TOKEN + r'(?!\d)\w+)*' TypeSpec = namedtuple('TypeSpec', ['class_name', 'base']) +DEFAULT_BASE_TYPE = ''' +class ModelBase(Node): + pass +''' + -def codegen(model): - return ObjectModelCodeGenerator().render(model) +def codegen(model, base_type=None): + return ObjectModelCodeGenerator().render(model, base_type=base_type) def _get_node_class_name(rule): @@ -39,28 +47,79 @@ def _get_node_class_name(rule): return typespec -def _typespec(rule, default_base=True): +def _typespec(rule): if not _get_node_class_name(rule): - return TypeSpec(None, None) + return [] spec = rule.params[0].split(BASE_CLASS_TOKEN) - class_name = safe_name(spec[0]) - base = None - bases = spec[1:] - if bases: - base = safe_name(bases[0]) - elif default_base: - base = 'ModelBase' - return TypeSpec(class_name, base) + class_names = [safe_name(n) for n in spec] + ['ModelBase'] + + typespec = [] + for i, class_name in enumerate(class_names[:-1]): + base = class_names[i + 1] + typespec.append(TypeSpec(class_name, base)) + + return typespec + + +def _get_full_name(cls): + if not inspect.isclass(cls): + raise CodegenError("Base type has to be a class") + module = inspect.getmodule(cls) + if not module: + raise CodegenError("Base type has to be inside a module") + modulename = module.__name__ + + if PY33: + name = cls.__qualname__ + else: + name = cls.__name__ + + # Try to reference the class + try: + idents = name.split('.') + _cls = getattr(module, idents[0]) + for ident in idents[1:]: + _cls = getattr(_cls, ident) + + assert _cls == cls + except AttributeError: + raise CodegenError("Couldn't find base type, it has to be importable") + + return modulename, name + + +class BaseTypeRenderer(Renderer): + def __init__(self, base_type): + self.base_type = base_type + + def render_fields(self, fields): + module, name = _get_full_name(self.base_type) + if '.' 
in name: + lookup = "\nModelBase = %s" % name + name = name.split('.')[0] + else: + lookup = " as ModelBase" + + fields.update( + module=module, + name=name, + lookup=lookup + ) + + template = ''' + from {module} import {name}{lookup}\ + ''' class BaseClassRenderer(Renderer): - def __init__(self, class_name): - self.class_name = class_name + def __init__(self, spec): + self.class_name = spec.class_name + self.base = spec.base template = ''' - class {class_name}(ModelBase): - pass + class {class_name}({base}): + pass\ ''' @@ -80,6 +139,7 @@ class Rule(ModelRenderer): def render_fields(self, fields): defs = [safe_name(d) for d, l in compress_seq(self.defines())] defs = list(sorted(set(defs))) + spec = fields["spec"] kwargs = '\n'.join('%s = None' % d for d in defs) if kwargs: @@ -87,8 +147,6 @@ class Rule(ModelRenderer): else: kwargs = indent('pass') - spec = _typespec(self.node) - fields.update( class_name=spec.class_name, base=spec.base, @@ -108,24 +166,29 @@ class Grammar(ModelRenderer): bases = [] model_rules = [] for rule in self.node.rules: - spec = _typespec(rule, False) - if not spec.class_name: + specs = _typespec(rule) + if not specs: continue - if spec.class_name not in node_class_names: - model_rules.append(rule) - if spec.base and spec.base not in node_class_names: - bases.append(spec.base) - node_class_names.add(spec.class_name) - node_class_names.add(spec.base) + + node_spec = specs[0] + base_spec = reversed(specs[1:]) + + if node_spec.class_name not in node_class_names: + model_rules.append((rule, node_spec)) + + bases.extend(base for base in base_spec + if base.class_name not in node_class_names) + + node_class_names.update(s.class_name for s in specs) base_class_declarations = [ - BaseClassRenderer(base).render() - for base in bases + BaseClassRenderer(spec).render() + for spec in bases ] model_class_declarations = [ - self.get_renderer(rule).render() - for rule in model_rules + self.get_renderer(rule).render(spec=spec) + for rule, spec in model_rules ] base_class_declarations = '\n\n\n'.join(base_class_declarations) @@ -135,10 +198,13 @@ class Grammar(ModelRenderer): version = datetime.now().strftime('%Y.%m.%d.%H') + base_type = fields["base_type"] + fields.update( base_class_declarations=base_class_declarations, model_class_declarations=model_class_declarations, version=version, + base_type=BaseTypeRenderer(base_type).render() if base_type else DEFAULT_BASE_TYPE ) template = '''\ @@ -159,19 +225,16 @@ class Grammar(ModelRenderer): from tatsu.objectmodel import Node from tatsu.semantics import ModelBuilderSemantics + {base_type} class {name}ModelBuilderSemantics(ModelBuilderSemantics): - def __init__(self): + def __init__(self, context=None, types=None): types = [ t for t in globals().values() if type(t) is type and issubclass(t, ModelBase) - ] - super({name}ModelBuilderSemantics, self).__init__(types=types) - - - class ModelBase(Node): - pass - + ] + (types or []) + super({name}ModelBuilderSemantics, self).__init__(context=context, types=types) - {base_class_declarations}{model_class_declarations} + {base_class_declarations} + {model_class_declarations} ''' diff --git a/tatsu/contexts.py b/tatsu/contexts.py index e479945..c4ac81d 100644 --- a/tatsu/contexts.py +++ b/tatsu/contexts.py @@ -195,7 +195,7 @@ class ParseContext(object): filename=filename, buffer_class=buffer_class, semantics=semantics, - trace=trace or self.trace, + trace=trace if trace is not None else self.trace, whitespace=whitespace if whitespace is not None else self.whitespace, **kwargs ) diff --git 
a/tatsu/grammars.py b/tatsu/grammars.py index 9e36ed5..039cb22 100644 --- a/tatsu/grammars.py +++ b/tatsu/grammars.py @@ -52,7 +52,7 @@ class EBNFBuffer(EBNFBootstrapBuffer): if re.match(PRAGMA_RE, line): directive, arg = line.split('#', 1)[1], '' if '::' in directive: - directive, arg = directive.split('::') + directive, arg = directive.split('::', 1) directive, arg = directive.strip(), arg.strip() i = self.pragma(name, directive, arg, lines, index, i) else: diff --git a/tatsu/objectmodel.py b/tatsu/objectmodel.py index 5be7337..3cd262c 100644 --- a/tatsu/objectmodel.py +++ b/tatsu/objectmodel.py @@ -25,8 +25,8 @@ class Node(object): parseinfo = ast.parseinfo if not parseinfo else None self._parseinfo = parseinfo - attributes = ast or {} - # asume that kwargs contains node attributes of interest + attributes = ast if ast is not None else {} + # assume that kwargs contains node attributes of interest if isinstance(attributes, MutableMapping): attributes.update(kwargs) @@ -39,7 +39,10 @@ class Node(object): return for name in set(ast) - {'parseinfo'}: - setattr(self, name, ast[name]) + try: + setattr(self, name, ast[name]) + except AttributeError: + raise AttributeError("'%s' is a reserved name" % name) @property def ast(self): diff --git a/tatsu/parser_semantics.py b/tatsu/parser_semantics.py index c15b48b..3d8daf8 100644 --- a/tatsu/parser_semantics.py +++ b/tatsu/parser_semantics.py @@ -46,7 +46,7 @@ class EBNFGrammarSemantics(ModelBuilderSemantics): def string(self, ast): return eval_escapes(ast) - def hext(self, ast): + def hex(self, ast): return int(ast, 16) def float(self, ast): diff --git a/tatsu/semantics.py b/tatsu/semantics.py index 6cf88ba..7930f4e 100644 --- a/tatsu/semantics.py +++ b/tatsu/semantics.py @@ -31,6 +31,7 @@ class ModelBuilderSemantics(object): nodes using the class name given as first parameter to a grammar rule, and synthesizes the class/type if it's not known. 
""" + def __init__(self, context=None, base_type=Node, types=None): self.ctx = context self.base_type = base_type @@ -79,11 +80,11 @@ class ModelBuilderSemantics(object): typespec = args[0].split(BASE_CLASS_TOKEN) typename = typespec[0] - bases = typespec[1:] + bases = typespec[-1:0:-1] base = self.base_type - for base in bases: - base = self._get_constructor(bases[0], base) + for base_ in bases: + base = self._get_constructor(base_, base) constructor = self._get_constructor(typename, base) try: diff --git a/tatsu/synth.py b/tatsu/synth.py index 20a8663..03de139 100644 --- a/tatsu/synth.py +++ b/tatsu/synth.py @@ -18,7 +18,7 @@ def synthesize(name, bases): bases = (bases,) if _Synthetic not in bases: - bases = (_Synthetic,) + bases + bases = bases + (_Synthetic,) constructor = __REGISTRY.get(typename) if not constructor: diff --git a/tatsu/tool.py b/tatsu/tool.py index f071974..a146421 100644 --- a/tatsu/tool.py +++ b/tatsu/tool.py @@ -8,6 +8,7 @@ import codecs import argparse import os import sys +import importlib from tatsu._version import __version__ from tatsu.util import eval_escapes @@ -111,6 +112,24 @@ def parse_args(): help='characters to skip during parsing (use "" to disable)', ) + def import_class(path): + try: + spath = path.rsplit('.', 1) + module = importlib.import_module(spath[0]) + + return getattr(module, spath[1]) + except Exception: + raise argparse.ArgumentTypeError( + "Couldn't find class %s" % path + ) + + generation_opts.add_argument( + '--base-type', + metavar='CLASSPATH', + help='class to use as base type for the object model, for example "mymodule.MyNode"', + type=import_class + ) + std_args = argparser.add_argument_group('common options') std_args.add_argument( '--help', '-h', @@ -157,9 +176,9 @@ def to_python_sourcecode(grammar, name=None, filename=None, **kwargs): return pythoncg(model) -def to_python_model(grammar, name=None, filename=None, **kwargs): +def to_python_model(grammar, name=None, filename=None, base_type=None, **kwargs): model = compile(grammar, name=name, filename=filename, **kwargs) - return objectmodel.codegen(model) + return objectmodel.codegen(model, base_type=base_type) # for backwards compatibility. Use `compile()` instead @@ -223,7 +242,7 @@ def main(codegen=pythoncg): elif args.pretty_lean: result = model.pretty_lean() elif args.object_model: - result = objectmodel.codegen(model) + result = objectmodel.codegen(model, base_type=args.base_type) else: result = codegen(model) @@ -234,7 +253,7 @@ def main(codegen=pythoncg): # if requested, always save it if args.object_model_outfile: - save(args.object_model_outfile, objectmodel.codegen(model)) + save(args.object_model_outfile, objectmodel.codegen(model, base_type=args.base_type)) print('-' * 72, file=sys.stderr) print('{:12,d} lines in grammar'.format(len(grammar.split())), file=sys.stderr)
ModelBuilderSemantics Subclassing

```ebnf
start = expression $;
expression = | addition | subtraction | number;
addition::BinaryOp::Op = left:expression op:'+' ~ right:number;
subtraction::BinaryOp::Op = left:expression op:'-' ~ right:number;
number = /\d+/;
```

This triggers an exception: https://pastebin.com/raw/qMUUXEKb

It works when you remove `left:expression` in addition and subtraction. I'm not sure how this feature is supposed to work in the first place: how does it decide what to put on the base class and what on the inherited one?

I suppose the workaround would be to create the base class manually and pass it to the `ModelBuilderSemantics` instance. In either case, this should be documented better.
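For illustration, a minimal sketch of the workaround described above, using the grammar from this report. It assumes TatSu's `tatsu.compile()`/`model.parse()` API and the `types` parameter visible on `ModelBuilderSemantics` in the patch; the `GRAMMAR` variable and the print are illustrative, not taken from TatSu's documentation.

```python
# Hedged sketch of the manual-base-class workaround: define the node hierarchy by hand
# and register it with ModelBuilderSemantics instead of relying on Rule::Class::Base
# to synthesize it.
import tatsu
from tatsu.objectmodel import Node
from tatsu.semantics import ModelBuilderSemantics


class Op(Node):          # hand-written base class instead of a synthesized one
    pass


class BinaryOp(Op):      # matches the BinaryOp name used in the grammar rules
    pass


GRAMMAR = r'''
    start = expression $;
    expression = | addition | subtraction | number;
    addition::BinaryOp::Op = left:expression op:'+' ~ right:number;
    subtraction::BinaryOp::Op = left:expression op:'-' ~ right:number;
    number = /\d+/;
'''

model = tatsu.compile(GRAMMAR)
semantics = ModelBuilderSemantics(types=[Op, BinaryOp])
ast = model.parse('3 + 5', semantics=semantics)
print(type(ast).__name__)   # expected to be BinaryOp once subclassing works
```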
neogeny/TatSu
diff --git a/test/grammar/parameter_test.py b/test/grammar/parameter_test.py index 7c7b777..a8c4b94 100644 --- a/test/grammar/parameter_test.py +++ b/test/grammar/parameter_test.py @@ -192,7 +192,7 @@ class ParameterTests(unittest.TestCase): def test_numbers_and_unicode(self): grammar = ''' - rúle(1, -23, 4.56, 7.89e-11, 0xABCDEF, Añez) + rúle(1, -23, 4.56, 7.89e-11, Añez) = 'a' ; diff --git a/test/grammar/semantics_test.py b/test/grammar/semantics_test.py index c23478c..7ee837f 100644 --- a/test/grammar/semantics_test.py +++ b/test/grammar/semantics_test.py @@ -7,6 +7,11 @@ from tatsu.tool import compile from tatsu.semantics import ModelBuilderSemantics +class MyNode(object): + def __init__(self, ast): + pass + + class SemanticsTests(unittest.TestCase): def test_builder_semantics(self): @@ -34,3 +39,55 @@ class SemanticsTests(unittest.TestCase): model = compile(grammar, 'test') ast = model.parse(text, semantics=semantics) self.assertEqual('5.4.3.2.1', ast) + + def test_builder_subclassing(self): + from tatsu import synth + from tatsu.model import Node + + registry = getattr(synth, "__REGISTRY") + + grammar = ''' + @@grammar :: Test + start::A::B::C = $ ; + ''' + + model = compile(grammar, asmodel=True) + model.parse("") + + A = registry["A"] + B = registry["B"] + C = registry["C"] + + self.assertTrue(issubclass(A, B) and issubclass(A, synth._Synthetic) and issubclass(A, Node)) + self.assertTrue(issubclass(B, C) and issubclass(B, synth._Synthetic) and issubclass(A, Node)) + self.assertTrue(issubclass(C, synth._Synthetic) and issubclass(C, Node)) + + def test_builder_basetype_codegen(self): + grammar = ''' + @@grammar :: Test + start::A::B::C = a:() b:() $ ; + second::D::A = (); + third = (); + ''' + + from tatsu.tool import to_python_model + src = to_python_model(grammar, base_type=MyNode) + + globals = {} + exec(src, globals) + semantics = globals["TestModelBuilderSemantics"]() + + A = globals["A"] + B = globals["B"] + C = globals["C"] + D = globals["D"] + + model = compile(grammar, semantics=semantics) + ast = model.parse("", semantics=semantics) + + self.assertIsInstance(ast, MyNode) + self.assertIsInstance(ast, (A, B, C)) + self.assertTrue(hasattr(ast, "a")) + self.assertTrue(hasattr(ast, "b")) + + self.assertTrue(issubclass(D, (A, B, C))) diff --git a/test/grammar/syntax_test.py b/test/grammar/syntax_test.py index b733b19..5f007dc 100644 --- a/test/grammar/syntax_test.py +++ b/test/grammar/syntax_test.py @@ -320,3 +320,26 @@ class SyntaxTests(unittest.TestCase): model = compile(grammar, "start") ast = model.parse("1xx 2 yy") self.assertEqual(['1', 'xx', ' ', '2', 'yy'], ast) + + def test_constant(self): + grammar = ''' + start = () + _0:`0` _1:`+1` _n123:`-123` + _xF:`0xF` + _string:`string` + _string_space:`'string space'` + _true:`True` _false:`False` + $; + ''' + + model = compile(grammar) + ast = model.parse("") + + self.assertEqual(ast._0, 0) + self.assertEqual(ast._1, 1) + self.assertEqual(ast._n123, -123) + self.assertEqual(ast._xF, 0xF) + self.assertEqual(ast._string, "string") + self.assertEqual(ast._string_space, "string space") + self.assertEqual(ast._true, True) + self.assertEqual(ast._false, False)
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 12 }
4.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-flake8", "pytest-mypy", "pytest-pylint" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements-dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
astroid==3.3.9 dill==0.3.9 exceptiongroup==1.2.2 filelock==3.18.0 flake8==7.2.0 iniconfig==2.1.0 isort==6.0.1 mccabe==0.7.0 mypy==1.15.0 mypy-extensions==1.0.0 packaging==24.2 platformdirs==4.3.7 pluggy==1.5.0 pycodestyle==2.13.0 pyflakes==3.3.1 pylint==3.3.6 pytest==8.3.5 pytest-flake8==1.3.0 pytest-mypy==1.0.0 pytest-pylint==0.21.0 -e git+https://github.com/neogeny/TatSu.git@4a6d893c8755bcd06a322365b35c129d16543f60#egg=TatSu tomli==2.2.1 tomlkit==0.13.2 typing_extensions==4.13.0
name: TatSu channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - astroid==3.3.9 - dill==0.3.9 - exceptiongroup==1.2.2 - filelock==3.18.0 - flake8==7.2.0 - iniconfig==2.1.0 - isort==6.0.1 - mccabe==0.7.0 - mypy==1.15.0 - mypy-extensions==1.0.0 - packaging==24.2 - platformdirs==4.3.7 - pluggy==1.5.0 - pycodestyle==2.13.0 - pyflakes==3.3.1 - pylint==3.3.6 - pytest==8.3.5 - pytest-flake8==1.3.0 - pytest-mypy==1.0.0 - pytest-pylint==0.21.0 - tomli==2.2.1 - tomlkit==0.13.2 - typing-extensions==4.13.0 prefix: /opt/conda/envs/TatSu
[ "test/grammar/semantics_test.py::SemanticsTests::test_builder_basetype_codegen", "test/grammar/semantics_test.py::SemanticsTests::test_builder_subclassing", "test/grammar/syntax_test.py::SyntaxTests::test_constant" ]
[]
[ "test/grammar/parameter_test.py::ParameterTests::test_35_only_keyword_params", "test/grammar/parameter_test.py::ParameterTests::test_36_param_combinations", "test/grammar/parameter_test.py::ParameterTests::test_36_params_and_keyword_params", "test/grammar/parameter_test.py::ParameterTests::test_36_unichars", "test/grammar/parameter_test.py::ParameterTests::test_keyword_params", "test/grammar/parameter_test.py::ParameterTests::test_numbers_and_unicode", "test/grammar/semantics_test.py::SemanticsTests::test_builder_semantics", "test/grammar/syntax_test.py::SyntaxTests::test_48_rule_override", "test/grammar/syntax_test.py::SyntaxTests::test_any", "test/grammar/syntax_test.py::SyntaxTests::test_ast_assignment", "test/grammar/syntax_test.py::SyntaxTests::test_based_rule", "test/grammar/syntax_test.py::SyntaxTests::test_empty_closure", "test/grammar/syntax_test.py::SyntaxTests::test_empty_match_token", "test/grammar/syntax_test.py::SyntaxTests::test_failed_ref", "test/grammar/syntax_test.py::SyntaxTests::test_group_ast", "test/grammar/syntax_test.py::SyntaxTests::test_include_and_override", "test/grammar/syntax_test.py::SyntaxTests::test_list_override", "test/grammar/syntax_test.py::SyntaxTests::test_new_override", "test/grammar/syntax_test.py::SyntaxTests::test_optional_closure", "test/grammar/syntax_test.py::SyntaxTests::test_optional_sequence", "test/grammar/syntax_test.py::SyntaxTests::test_parseinfo", "test/grammar/syntax_test.py::SyntaxTests::test_partial_choice", "test/grammar/syntax_test.py::SyntaxTests::test_partial_options", "test/grammar/syntax_test.py::SyntaxTests::test_raw_string", "test/grammar/syntax_test.py::SyntaxTests::test_rule_include", "test/grammar/syntax_test.py::SyntaxTests::test_update_ast" ]
[]
BSD License
2,994
[ "tatsu/codegen/cgbase.py", "grammar/tatsu.ebnf", "tatsu/grammars.py", "tatsu/objectmodel.py", "tatsu/parser_semantics.py", "tatsu/semantics.py", "tatsu/contexts.py", "docs/mini-tutorial.rst", "tatsu/codegen/objectmodel.py", "tatsu/tool.py", "tatsu/synth.py", "tatsu/bootstrap.py" ]
[ "tatsu/codegen/cgbase.py", "grammar/tatsu.ebnf", "tatsu/grammars.py", "tatsu/objectmodel.py", "tatsu/parser_semantics.py", "tatsu/semantics.py", "tatsu/contexts.py", "docs/mini-tutorial.rst", "tatsu/codegen/objectmodel.py", "tatsu/tool.py", "tatsu/synth.py", "tatsu/bootstrap.py" ]
elastic__rally-560
b47309fc63d70f25e9cc71628fc1755aa1e5c67b
2018-08-30 10:52:11
799e0642c27a0067931f305359a615cbf9fe2e20
diff --git a/docs/configuration.rst b/docs/configuration.rst index 7bd36dcb..108c735c 100644 --- a/docs/configuration.rst +++ b/docs/configuration.rst @@ -119,7 +119,19 @@ Logging in Rally is configured in ``~/.rally/logging.json``. For more informatio * The Python reference documentation on the `logging configuration schema <https://docs.python.org/3/library/logging.config.html#logging-config-dictschema>`_ explains the file format. * The `logging handler documentation <https://docs.python.org/3/library/logging.handlers.html>`_ describes how to customize where log output is written to. -By default, Rally will log all output to ``~/.rally/logs/rally.log`` and rotate this file daily. +By default, Rally will log all output to ``~/.rally/logs/rally.log``. + +The log file will not be rotated automatically as this is problematic due to Rally's multi-process architecture. Setup an external tool like `logrotate <https://linux.die.net/man/8/logrotate>`_ to achieve that. See the following example as a starting point for your own ``logrotate`` configuration and ensure to replace the path ``/home/user/.rally/logs/rally.log`` with the proper one:: + + /home/user/.rally/logs/rally.log { + daily # rotate daily + rotate 7 # keep the last seven log files + maxage 14 # remove logs older than 14 days + compress # compress old logs ... + delaycompress # ... after moving them + missingok # ignore missing log files + notifempty # don't attempt to rotate empty ones + } Example ~~~~~~~ diff --git a/docs/migrate.rst b/docs/migrate.rst index cb8a7d7c..648c3042 100644 --- a/docs/migrate.rst +++ b/docs/migrate.rst @@ -1,6 +1,26 @@ Migration Guide =============== +Migrating to Rally 1.0.1 +------------------------ + +Logs are not rotated +^^^^^^^^^^^^^^^^^^^^ + +With Rally 1.0.1 we have disabled automatic rotation of logs by default because it can lead to race conditions due to Rally's multi-process architecture. If you did not change the default out-of-the-box logging configuration, Rally will automatically fix your configuration. Otherwise, you need to replace all instances of ``logging.handlers.TimedRotatingFileHandler`` with ``logging.handlers.WatchedFileHandler`` to disable log rotation. + +To rotate logs we recommend to use external tools like `logrotate <https://linux.die.net/man/8/logrotate>`_. See the following example as a starting point for your own ``logrotate`` configuration and ensure to replace the path ``/home/user/.rally/logs/rally.log`` with the proper one:: + + /home/user/.rally/logs/rally.log { + daily # rotate daily + rotate 7 # keep the last seven log files + maxage 14 # remove logs older than 14 days + compress # compress old logs ... + delaycompress # ... after moving them + missingok # ignore missing log files + notifempty # don't attempt to rotate empty ones + } + Migrating to Rally 1.0.0 ------------------------ diff --git a/esrally/actor.py b/esrally/actor.py index c4eb3697..ac945c5d 100644 --- a/esrally/actor.py +++ b/esrally/actor.py @@ -69,7 +69,9 @@ def no_retry(f, actor_name): msg = "Error in {}".format(actor_name) # log here as the full trace might get lost. logging.getLogger(__name__).exception(msg) - self.send(sender, BenchmarkFailure(msg, e)) + # don't forward the exception as is because the main process might not have this class available on the load path + # and will fail then while deserializing the cause. 
+ self.send(sender, BenchmarkFailure("{} ({})".format(msg, str(e)))) return guard diff --git a/esrally/log.py b/esrally/log.py index 55e726ac..5320e1da 100644 --- a/esrally/log.py +++ b/esrally/log.py @@ -3,6 +3,7 @@ import logging.config import json import time import os +import hashlib from esrally.utils import io @@ -31,6 +32,26 @@ def default_log_path(): return os.path.join(os.path.expanduser("~"), ".rally", "logs") +def remove_obsolete_default_log_config(): + """ + Log rotation is problematic because Rally uses multiple processes and there is a lurking race condition when + rolling log files. Hence, we do not rotate logs from within Rally and leverage established tools like logrotate for that. + + Checks whether the user has a problematic out-of-the-box logging configuration delivered with Rally 1.0.0 which + used log rotation and removes it so it can be replaced by a new one in a later step. + """ + log_config = log_config_path() + if io.exists(log_config): + source_path = io.normalize_path(os.path.join(os.path.dirname(__file__), "resources", "logging_1_0_0.json")) + with open(source_path, "r", encoding="UTF-8") as src: + contents = src.read().replace("${LOG_PATH}", default_log_path()) + source_hash = hashlib.sha512(contents.encode()).hexdigest() + with open(log_config, "r", encoding="UTF-8") as target: + target_hash = hashlib.sha512(target.read().encode()).hexdigest() + if source_hash == target_hash: + os.rename(log_config, "{}.bak".format(log_config)) + + def install_default_log_config(): """ Ensures a log configuration file is present on this machine. The default diff --git a/esrally/mechanic/telemetry.py b/esrally/mechanic/telemetry.py index e5f5316f..d9cd9b0c 100644 --- a/esrally/mechanic/telemetry.py +++ b/esrally/mechanic/telemetry.py @@ -675,7 +675,8 @@ class DiskIo(InternalTelemetryDevice): else: try: self.disk_start = sysstats.disk_io_counters() - self.logger.warning("Process I/O counters are unsupported on this platform. Falling back to less accurate disk I/O counters.") + self.logger.warning("Process I/O counters are not supported on this platform. Falling back to less accurate disk " + "I/O counters.") except RuntimeError: self.logger.exception("Could not determine I/O stats at benchmark start.") diff --git a/esrally/racecontrol.py b/esrally/racecontrol.py index 9fafc602..281c3564 100644 --- a/esrally/racecontrol.py +++ b/esrally/racecontrol.py @@ -97,9 +97,11 @@ class BenchmarkActor(actor.RallyActor): def receiveUnrecognizedMessage(self, msg, sender): self.logger.info("BenchmarkActor received unknown message [%s] (ignoring).", (str(msg))) + @actor.no_retry("race control") def receiveMsg_Setup(self, msg, sender): self.setup(msg, sender) + @actor.no_retry("race control") def receiveMsg_EngineStarted(self, msg, sender): self.logger.info("Mechanic has started engine successfully.") self.metrics_store.meta_info = msg.system_meta_info @@ -114,6 +116,7 @@ class BenchmarkActor(actor.RallyActor): # start running we assume that each race has at least one lap self.run() + @actor.no_retry("race control") def receiveMsg_TaskFinished(self, msg, sender): self.logger.info("Task has finished.") self.logger.info("Bulk adding request metrics to metrics store.") @@ -122,17 +125,20 @@ class BenchmarkActor(actor.RallyActor): # other stores (used by driver and mechanic). Hence there is no need to reset the timer in our own metrics store. 
self.send(self.mechanic, mechanic.ResetRelativeTime(msg.next_task_scheduled_in)) + @actor.no_retry("race control") def receiveMsg_BenchmarkCancelled(self, msg, sender): self.cancelled = True # even notify the start sender if it is the originator. The reason is that we call #ask() which waits for a reply. # We also need to ask in order to avoid races between this notification and the following ActorExitRequest. self.send(self.start_sender, msg) + @actor.no_retry("race control") def receiveMsg_BenchmarkFailure(self, msg, sender): self.logger.info("Received a benchmark failure from [%s] and will forward it now.", sender) self.error = True self.send(self.start_sender, msg) + @actor.no_retry("race control") def receiveMsg_BenchmarkComplete(self, msg, sender): self.logger.info("Benchmark is complete.") self.logger.info("Bulk adding request metrics to metrics store.") @@ -141,6 +147,7 @@ class BenchmarkActor(actor.RallyActor): self.main_driver = None self.send(self.mechanic, mechanic.OnBenchmarkStop()) + @actor.no_retry("race control") def receiveMsg_BenchmarkStopped(self, msg, sender): self.logger.info("Bulk adding system metrics to metrics store.") self.metrics_store.bulk_add(msg.system_metrics) @@ -153,6 +160,7 @@ class BenchmarkActor(actor.RallyActor): else: self.teardown() + @actor.no_retry("race control") def receiveMsg_EngineStopped(self, msg, sender): self.logger.info("Mechanic has stopped engine successfully.") self.logger.info("Bulk adding system metrics to metrics store.") @@ -170,8 +178,6 @@ class BenchmarkActor(actor.RallyActor): def setup(self, msg, sender): self.start_sender = sender - self.mechanic = self.createActor(mechanic.MechanicActor, targetActorRequirements={"coordinator": True}) - self.cfg = msg.cfg # to load the track we need to know the correct cluster distribution version. Usually, this value should be set but there are rare # cases (external pipeline and user did not specify the distribution version) where we need to derive it ourselves. 
For source @@ -203,6 +209,7 @@ class BenchmarkActor(actor.RallyActor): self.race_store = metrics.race_store(self.cfg) self.logger.info("Asking mechanic to start the engine.") cluster_settings = challenge.cluster_settings + self.mechanic = self.createActor(mechanic.MechanicActor, targetActorRequirements={"coordinator": True}) self.send(self.mechanic, mechanic.StartEngine(self.cfg, self.metrics_store.open_context, cluster_settings, msg.sources, msg.build, msg.distribution, msg.external, msg.docker)) diff --git a/esrally/rally.py b/esrally/rally.py index 51d7f14c..4e22bd51 100644 --- a/esrally/rally.py +++ b/esrally/rally.py @@ -483,6 +483,7 @@ def dispatch_sub_command(cfg, sub_command): def main(): check_python_version() + log.remove_obsolete_default_log_config() log.install_default_log_config() log.configure_logging() logger = logging.getLogger(__name__) diff --git a/esrally/rallyd.py b/esrally/rallyd.py index c4eb948d..bb446730 100644 --- a/esrally/rallyd.py +++ b/esrally/rallyd.py @@ -52,6 +52,7 @@ def status(): def main(): check_python_version() + log.remove_obsolete_default_log_config() log.install_default_log_config() log.configure_logging() console.init() diff --git a/esrally/resources/logging.json b/esrally/resources/logging.json index 8277b033..57965787 100644 --- a/esrally/resources/logging.json +++ b/esrally/resources/logging.json @@ -19,11 +19,8 @@ }, "handlers": { "rally_log_handler": { - "class": "logging.handlers.TimedRotatingFileHandler", + "class": "logging.handlers.WatchedFileHandler", "filename": "${LOG_PATH}/rally.log", - "utc": true, - "when": "midnight", - "backupCount": 14, "encoding": "UTF-8", "formatter": "normal", "filters": ["isActorLog"] diff --git a/esrally/resources/logging_1_0_0.json b/esrally/resources/logging_1_0_0.json new file mode 100644 index 00000000..8277b033 --- /dev/null +++ b/esrally/resources/logging_1_0_0.json @@ -0,0 +1,55 @@ +{ + "version": 1, + "formatters": { + "normal": { + "format": "%(asctime)s,%(msecs)d %(actorAddress)s/PID:%(process)d %(name)s %(levelname)s %(message)s", + "datefmt": "%Y-%m-%d %H:%M:%S", + "()": "esrally.log.configure_utc_formatter" + }, + "profile": { + "format": "%(asctime)s,%(msecs)d PID:%(process)d %(name)s %(levelname)s %(message)s", + "datefmt": "%Y-%m-%d %H:%M:%S", + "()": "esrally.log.configure_utc_formatter" + } + }, + "filters": { + "isActorLog": { + "()": "thespian.director.ActorAddressLogFilter" + } + }, + "handlers": { + "rally_log_handler": { + "class": "logging.handlers.TimedRotatingFileHandler", + "filename": "${LOG_PATH}/rally.log", + "utc": true, + "when": "midnight", + "backupCount": 14, + "encoding": "UTF-8", + "formatter": "normal", + "filters": ["isActorLog"] + }, + "rally_profile_handler": { + "class": "logging.FileHandler", + "filename": "${LOG_PATH}/profile.log", + "delay": true, + "encoding": "UTF-8", + "formatter": "profile" + } + }, + "root": { + "handlers": ["rally_log_handler"], + "level": "INFO" + }, + "loggers": { + "elasticsearch": { + "handlers": ["rally_log_handler"], + "level": "WARNING", + "propagate": false + }, + "rally.profile": { + "handlers": ["rally_profile_handler"], + "level": "INFO", + "propagate": false + } + } +} \ No newline at end of file diff --git a/esrally/track/loader.py b/esrally/track/loader.py index 8194ec17..e24c4c4b 100644 --- a/esrally/track/loader.py +++ b/esrally/track/loader.py @@ -231,14 +231,15 @@ def operation_parameters(t, op): def used_corpora(t, cfg): corpora = {} - challenge = t.find_challenge_or_default(cfg.opts("track", "challenge.name")) - for task in 
challenge.schedule: - for sub_task in task: - param_source = operation_parameters(t, sub_task.operation) - if hasattr(param_source, "corpora"): - for c in param_source.corpora: - # We might have the same corpus *but* they contain different doc sets. Therefore also need to union over doc sets. - corpora[c.name] = corpora.get(c.name, c).union(c) + if t.corpora: + challenge = t.find_challenge_or_default(cfg.opts("track", "challenge.name")) + for task in challenge.schedule: + for sub_task in task: + param_source = operation_parameters(t, sub_task.operation) + if hasattr(param_source, "corpora"): + for c in param_source.corpora: + # We might have the same corpus *but* they contain different doc sets. Therefore also need to union over doc sets. + corpora[c.name] = corpora.get(c.name, c).union(c) return corpora.values() @@ -633,7 +634,7 @@ class TrackFileReader: except jinja2.exceptions.TemplateNotFound: self.logger.exception("Could not load [%s]", track_spec_file) raise exceptions.SystemSetupError("Track {} does not exist".format(track_name)) - except (json.JSONDecodeError, jinja2.exceptions.TemplateError) as e: + except Exception as e: self.logger.exception("Could not load [%s].", track_spec_file) # Convert to string early on to avoid serialization errors with Jinja exceptions. raise TrackSyntaxError("Could not load '{}'".format(track_spec_file), str(e)) @@ -775,7 +776,7 @@ class TrackSpecificationReader: template_name="default", template_vars=self.track_params) return json.loads(rendered) - except (json.JSONDecodeError, jinja2.exceptions.TemplateError) as e: + except Exception as e: self.logger.exception("Could not load file template for %s.", description) raise TrackSyntaxError("Could not load file template for '%s'" % description, str(e)) diff --git a/esrally/utils/git.py b/esrally/utils/git.py index b5d044e8..83c90c3f 100644 --- a/esrally/utils/git.py +++ b/esrally/utils/git.py @@ -84,6 +84,11 @@ def head_revision(src_dir): return process.run_subprocess_with_output("git -C {0} rev-parse --short HEAD".format(src_dir))[0].strip() +@probed +def current_branch(src_dir): + return process.run_subprocess_with_output("git -C {0} rev-parse --abbrev-ref HEAD".format(src_dir))[0].strip() + + @probed def branches(src_dir, remote=True): if remote: diff --git a/esrally/utils/repo.py b/esrally/utils/repo.py index 46eba13b..511fd96d 100644 --- a/esrally/utils/repo.py +++ b/esrally/utils/repo.py @@ -57,8 +57,9 @@ class RallyRepository: self.logger.warning(msg) branch = versions.best_match(git.branches(self.repo_dir, remote=False), distribution_version) if branch: - self.logger.info("Checking out [%s] in [%s] for distribution version [%s].", branch, self.repo_dir, distribution_version) - git.checkout(self.repo_dir, branch=branch) + if git.current_branch(self.repo_dir) != branch: + self.logger.info("Checking out [%s] in [%s] for distribution version [%s].", branch, self.repo_dir, distribution_version) + git.checkout(self.repo_dir, branch=branch) else: raise exceptions.SystemSetupError("Cannot find %s for distribution version %s" % (self.resource_name, distribution_version)) except exceptions.SupplyError as e:
Externalize log rotation

In #503 we made it possible to configure Rally's logging via a file. Our default configuration contains the following handler definition:

```json
"rally_log_handler": {
  "class": "logging.handlers.TimedRotatingFileHandler",
  "filename": "${LOG_PATH}/rally.log",
  "utc": true,
  "when": "midnight",
  "backupCount": 14,
  "encoding": "UTF-8",
  "formatter": "normal",
  "filters": ["isActorLog"]
}
```

This is problematic because Rally uses multiple processes and there is a lurking race condition when rolling log files (which we had already found in #263). Hence, we will stop rotating logs from within Rally and leverage established tools like `logrotate` for that.

### Tasks

* [x] Change the current log configuration template to use [`WatchedFileHandler`](https://docs.python.org/3/library/logging.handlers.html#watchedfilehandler), which is meant to be used with tools like `logrotate`.
* [x] Determine a suitable `logrotate` configuration for Rally and provide a snippet in our docs.
* [x] Implement a check on Rally startup that ensures that the new configuration file template is used (back up the old one if present and replace it with the new one).

Note: `WatchedFileHandler` uses the `stat` system call to check whether the inode of the log file has changed to determine whether to reopen the file. The file system metadata are cached. Also, this check is done every time a log line is written. In steady state this happens only a couple of times per minute in the default logging configuration. Therefore, we consider the performance impact of using `WatchedFileHandler` by default negligible.
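As a rough sketch of what the switch amounts to on the Python side, the same kind of handler can be expressed directly with the stdlib `logging.config` API. This is not Rally's actual configuration code, and the file name used below is a placeholder.

```python
# Illustrative stand-in for the proposed template change: a WatchedFileHandler reopens
# the log file after an external tool such as logrotate has rotated it.
import logging
import logging.config

logging.config.dictConfig({
    "version": 1,
    "handlers": {
        "rally_log_handler": {
            "class": "logging.handlers.WatchedFileHandler",
            "filename": "rally.log",  # placeholder; Rally's default is ~/.rally/logs/rally.log
            "encoding": "UTF-8",
        },
    },
    "root": {"handlers": ["rally_log_handler"], "level": "INFO"},
})

logging.getLogger(__name__).info("logging through the watched file handler")
```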
elastic/rally
diff --git a/tests/track/loader_test.py b/tests/track/loader_test.py index f08bd286..bb8e799c 100644 --- a/tests/track/loader_test.py +++ b/tests/track/loader_test.py @@ -1367,6 +1367,58 @@ class TrackSpecificationReaderTests(TestCase): "'duplicate-task-name'. Please use the task's name property to assign a unique name for each task.", ctx.exception.args[0]) + def test_load_invalid_index_body(self): + track_specification = { + "description": "description for unit test", + "indices": [ + { + "name": "index-historical", + "body": "body.json", + "types": ["_doc"] + } + ], + "corpora": [ + { + "name": "test", + "documents": [ + { + "source-file": "documents-main.json.bz2", + "document-count": 10, + "compressed-bytes": 100, + "uncompressed-bytes": 10000 + } + ] + } + ], + "schedule": [ + { + "clients": 8, + "operation": { + "name": "index-append", + "operation-type": "index", + "bulk-size": 5000 + } + } + ] + } + reader = loader.TrackSpecificationReader( + track_params={"number_of_shards": 3}, + source=io.DictStringFileSourceFactory({ + "/mappings/body.json": [""" + { + "settings": { + "number_of_shards": {{ number_of_shards }} + }, + "mappings": { + "_doc": "no closing quotation mark!!, + } + } + """] + })) + with self.assertRaises(loader.TrackSyntaxError) as ctx: + reader("unittest", track_specification, "/mappings") + self.assertEqual("Could not load file template for 'definition for index index-historical in body.json'", ctx.exception.args[0]) + def test_parse_unique_task_names(self): track_specification = { "description": "description for unit test", diff --git a/tests/utils/repo_test.py b/tests/utils/repo_test.py index 765eddf3..ab67962c 100644 --- a/tests/utils/repo_test.py +++ b/tests/utils/repo_test.py @@ -125,7 +125,9 @@ class RallyRepositoryTests(TestCase): @mock.patch("esrally.utils.git.branches", autospec=True) @mock.patch("esrally.utils.git.checkout", autospec=True) @mock.patch("esrally.utils.git.rebase") - def test_updates_locally(self, rebase, checkout, branches, fetch, is_working_copy): + @mock.patch("esrally.utils.git.current_branch") + def test_updates_locally(self, curr_branch, rebase, checkout, branches, fetch, is_working_copy): + curr_branch.return_value = "5" branches.return_value = ["1", "2", "5", "master"] is_working_copy.return_value = True @@ -181,7 +183,9 @@ class RallyRepositoryTests(TestCase): @mock.patch("esrally.utils.git.branches", autospec=True) @mock.patch("esrally.utils.git.checkout", autospec=True) @mock.patch("esrally.utils.git.rebase") - def test_does_not_update_unknown_branch_remotely_local_fallback(self, rebase, checkout, branches, fetch, is_working_copy): + @mock.patch("esrally.utils.git.current_branch") + def test_does_not_update_unknown_branch_remotely_local_fallback(self, curr_branch, rebase, checkout, branches, fetch, is_working_copy): + curr_branch.return_value = "master" # we have only "master" remotely but a few more branches locally branches.side_effect = ["5", ["1", "2", "5", "master"]] is_working_copy.return_value = True
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_issue_reference", "has_added_files", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 12 }
1.0
{ "env_vars": null, "env_yml_path": null, "install": "pip3 install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-benchmark" ], "pre_install": [ "apt-get update", "apt-get install -y gcc curl vim openssh-client git make build-essential libssl-dev zlib1g-dev libbz2-dev libreadline-dev libsqlite3-dev" ], "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 elasticsearch==6.2.0 -e git+https://github.com/elastic/rally.git@b47309fc63d70f25e9cc71628fc1755aa1e5c67b#egg=esrally importlib-metadata==4.8.3 iniconfig==1.1.1 Jinja2==2.9.5 jsonschema==2.5.1 MarkupSafe==2.0.1 packaging==21.3 pluggy==1.0.0 psutil==5.4.0 py==1.11.0 py-cpuinfo==3.2.0 pyparsing==3.1.4 pytest==7.0.1 pytest-benchmark==3.4.1 tabulate==0.8.1 thespian==3.9.3 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.22 zipp==3.6.0
name: rally channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - elasticsearch==6.2.0 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - jinja2==2.9.5 - jsonschema==2.5.1 - markupsafe==2.0.1 - packaging==21.3 - pluggy==1.0.0 - psutil==5.4.0 - py==1.11.0 - py-cpuinfo==3.2.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-benchmark==3.4.1 - tabulate==0.8.1 - thespian==3.9.3 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.22 - zipp==3.6.0 prefix: /opt/conda/envs/rally
[ "tests/utils/repo_test.py::RallyRepositoryTests::test_does_not_update_unknown_branch_remotely_local_fallback", "tests/utils/repo_test.py::RallyRepositoryTests::test_updates_locally" ]
[]
[ "tests/track/loader_test.py::SimpleTrackRepositoryTests::test_track_from_directory", "tests/track/loader_test.py::SimpleTrackRepositoryTests::test_track_from_directory_without_track", "tests/track/loader_test.py::SimpleTrackRepositoryTests::test_track_from_file", "tests/track/loader_test.py::SimpleTrackRepositoryTests::test_track_from_file_but_not_json", "tests/track/loader_test.py::SimpleTrackRepositoryTests::test_track_from_named_pipe", "tests/track/loader_test.py::SimpleTrackRepositoryTests::test_track_from_non_existing_path", "tests/track/loader_test.py::GitRepositoryTests::test_track_from_existing_repo", "tests/track/loader_test.py::TrackPreparationTests::test_decompresses_if_archive_available", "tests/track/loader_test.py::TrackPreparationTests::test_does_nothing_if_document_file_available", "tests/track/loader_test.py::TrackPreparationTests::test_download_document_archive_if_no_file_available", "tests/track/loader_test.py::TrackPreparationTests::test_download_document_file_if_no_file_available", "tests/track/loader_test.py::TrackPreparationTests::test_prepare_bundled_document_set_decompresses_compressed_docs", "tests/track/loader_test.py::TrackPreparationTests::test_prepare_bundled_document_set_does_nothing_if_no_document_files", "tests/track/loader_test.py::TrackPreparationTests::test_prepare_bundled_document_set_error_compressed_docs_wrong_size", "tests/track/loader_test.py::TrackPreparationTests::test_prepare_bundled_document_set_if_document_file_available", "tests/track/loader_test.py::TrackPreparationTests::test_prepare_bundled_document_set_uncompressed_docs_wrong_size", "tests/track/loader_test.py::TrackPreparationTests::test_raise_download_error_if_no_url_provided_and_file_missing", "tests/track/loader_test.py::TrackPreparationTests::test_raise_download_error_if_no_url_provided_and_wrong_file_size", "tests/track/loader_test.py::TrackPreparationTests::test_raise_download_error_if_offline", "tests/track/loader_test.py::TrackPreparationTests::test_raise_download_error_no_test_mode_file", "tests/track/loader_test.py::TrackPreparationTests::test_raise_download_error_on_connection_problems", "tests/track/loader_test.py::TrackPreparationTests::test_raise_error_if_compressed_does_not_contain_expected_document_file", "tests/track/loader_test.py::TrackPreparationTests::test_raise_error_on_wrong_uncompressed_file_size", "tests/track/loader_test.py::TrackPreparationTests::test_used_corpora", "tests/track/loader_test.py::TemplateRenderTests::test_render_simple_template", "tests/track/loader_test.py::TemplateRenderTests::test_render_template_with_external_variables", "tests/track/loader_test.py::TemplateRenderTests::test_render_template_with_globbing", "tests/track/loader_test.py::TemplateRenderTests::test_render_template_with_variables", "tests/track/loader_test.py::TrackPostProcessingTests::test_post_processes_track_spec", "tests/track/loader_test.py::TrackPathTests::test_sets_absolute_path", "tests/track/loader_test.py::TrackFilterTests::test_create_filters_from_empty_included_tasks", "tests/track/loader_test.py::TrackFilterTests::test_create_filters_from_mixed_included_tasks", "tests/track/loader_test.py::TrackFilterTests::test_filters_tasks", "tests/track/loader_test.py::TrackFilterTests::test_rejects_invalid_syntax", "tests/track/loader_test.py::TrackFilterTests::test_rejects_unknown_filter_type", "tests/track/loader_test.py::TrackSpecificationReaderTests::test_at_least_one_default_challenge", 
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_auto_generates_challenge_from_schedule", "tests/track/loader_test.py::TrackSpecificationReaderTests::test_can_read_track_info", "tests/track/loader_test.py::TrackSpecificationReaderTests::test_description_is_optional", "tests/track/loader_test.py::TrackSpecificationReaderTests::test_document_count_mandatory_if_file_present", "tests/track/loader_test.py::TrackSpecificationReaderTests::test_exactly_one_default_challenge", "tests/track/loader_test.py::TrackSpecificationReaderTests::test_inline_operations", "tests/track/loader_test.py::TrackSpecificationReaderTests::test_load_invalid_index_body", "tests/track/loader_test.py::TrackSpecificationReaderTests::test_not_more_than_one_default_challenge_possible", "tests/track/loader_test.py::TrackSpecificationReaderTests::test_parallel_tasks_with_completed_by_set", "tests/track/loader_test.py::TrackSpecificationReaderTests::test_parallel_tasks_with_completed_by_set_multiple_tasks_match", "tests/track/loader_test.py::TrackSpecificationReaderTests::test_parallel_tasks_with_completed_by_set_no_task_matches", "tests/track/loader_test.py::TrackSpecificationReaderTests::test_parallel_tasks_with_default_clients_does_not_propagate", "tests/track/loader_test.py::TrackSpecificationReaderTests::test_parallel_tasks_with_default_values", "tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_challenge_and_challenges_are_defined", "tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_duplicate_explicit_task_names", "tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_duplicate_implicit_task_names", "tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_missing_challenge_or_challenges", "tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_unique_task_names", "tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_valid_track_specification", "tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_valid_track_specification_with_index_template", "tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_with_mixed_warmup_iterations_and_measurement", "tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_with_mixed_warmup_time_period_and_iterations", "tests/track/loader_test.py::TrackSpecificationReaderTests::test_selects_sole_challenge_implicitly_as_default", "tests/track/loader_test.py::TrackSpecificationReaderTests::test_supports_target_interval", "tests/track/loader_test.py::TrackSpecificationReaderTests::test_supports_target_throughput", "tests/track/loader_test.py::TrackSpecificationReaderTests::test_unique_challenge_names", "tests/utils/repo_test.py::RallyRepositoryTests::test_clones_initially", "tests/utils/repo_test.py::RallyRepositoryTests::test_does_not_fetch_if_suppressed", "tests/utils/repo_test.py::RallyRepositoryTests::test_does_not_update_unknown_branch_locally", "tests/utils/repo_test.py::RallyRepositoryTests::test_does_not_update_unknown_branch_remotely", "tests/utils/repo_test.py::RallyRepositoryTests::test_does_nothing_if_working_copy_present", "tests/utils/repo_test.py::RallyRepositoryTests::test_fails_in_offline_mode_if_not_a_git_repo", "tests/utils/repo_test.py::RallyRepositoryTests::test_fetches_if_already_cloned", "tests/utils/repo_test.py::RallyRepositoryTests::test_ignores_fetch_errors", "tests/utils/repo_test.py::RallyRepositoryTests::test_updates_from_remote" ]
[]
Apache License 2.0
2,995
[ "esrally/rallyd.py", "esrally/resources/logging_1_0_0.json", "esrally/utils/repo.py", "docs/configuration.rst", "esrally/track/loader.py", "esrally/resources/logging.json", "esrally/racecontrol.py", "esrally/actor.py", "esrally/utils/git.py", "esrally/log.py", "esrally/rally.py", "esrally/mechanic/telemetry.py", "docs/migrate.rst" ]
[ "esrally/rallyd.py", "esrally/resources/logging_1_0_0.json", "esrally/utils/repo.py", "docs/configuration.rst", "esrally/track/loader.py", "esrally/resources/logging.json", "esrally/racecontrol.py", "esrally/actor.py", "esrally/utils/git.py", "esrally/log.py", "esrally/rally.py", "esrally/mechanic/telemetry.py", "docs/migrate.rst" ]
getsentry__sentry-python-37
33115bf5c9c835e5488aa57baf34e2b8ee7585fe
2018-08-30 14:18:34
33115bf5c9c835e5488aa57baf34e2b8ee7585fe
diff --git a/sentry_sdk/consts.py b/sentry_sdk/consts.py index 475cd478..6c9ae117 100644 --- a/sentry_sdk/consts.py +++ b/sentry_sdk/consts.py @@ -24,6 +24,7 @@ DEFAULT_OPTIONS = { "ignore_errors": [], "request_bodies": "medium", "before_send": None, + "debug": False, } SDK_INFO = {"name": "sentry-python", "version": VERSION} diff --git a/sentry_sdk/hub.py b/sentry_sdk/hub.py index ba2b4647..45881295 100644 --- a/sentry_sdk/hub.py +++ b/sentry_sdk/hub.py @@ -4,11 +4,13 @@ from contextlib import contextmanager from ._compat import with_metaclass from .scope import Scope -from .utils import exc_info_from_error, event_from_exception, ContextVar +from .utils import exc_info_from_error, event_from_exception, get_logger, ContextVar _local = ContextVar("sentry_current_hub") +logger = get_logger(__name__) + @contextmanager def _internal_exceptions(): @@ -134,7 +136,9 @@ class Hub(with_metaclass(HubMeta)): def capture_internal_exception(self, exc_info): """Capture an exception that is likely caused by a bug in the SDK itself.""" - pass + client = self.client + if client is not None and client.options["debug"]: + logger.debug("Internal error in sentry_sdk", exc_info=exc_info) def add_breadcrumb(self, *args, **kwargs): """Adds a breadcrumb.""" diff --git a/sentry_sdk/utils.py b/sentry_sdk/utils.py index b5dbf6eb..eeb5e98b 100644 --- a/sentry_sdk/utils.py +++ b/sentry_sdk/utils.py @@ -1,6 +1,7 @@ import os import sys import linecache +import logging from datetime import datetime from collections import Mapping, Sequence @@ -520,6 +521,14 @@ def strip_string(value, assume_length=None, max_length=512): return value[:max_length] +def get_logger(name): + rv = logging.getLogger(name) + if not rv.handlers: + rv.addHandler(logging.StreamHandler(sys.stderr)) + rv.setLevel(logging.DEBUG) + return rv + + try: from contextvars import ContextVar except ImportError:
Add debug mode and do not use _internal_exceptions on expected fails. A debug mode should be added that logs when events are dropped and when internal exceptions are ignored.
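The accompanying patch wires a `debug` option into `DEFAULT_OPTIONS` and makes `Hub.capture_internal_exception` log only when that option is set on the bound client. A minimal sketch of how this might be exercised, assuming `sentry_sdk.init(...)` is available to construct and bind a client the way the `sentry_init` test fixture does (the fixture, not this call, is what the test patch actually uses):

```python
import sentry_sdk                 # assumed public entry point; the tests go
from sentry_sdk.hub import Hub    # through a sentry_init fixture instead

# Assumption: init() builds a Client carrying the new "debug" option and binds
# it so that Hub.current.client is set, mirroring the sentry_init fixture.
sentry_sdk.init(debug=True)

# With debug enabled, capture_internal_exception() logs the error to stderr
# (via the get_logger() helper added in the patch) instead of ignoring it.
Hub.current.capture_internal_exception(
    (ValueError, ValueError("internal SDK error"), None)
)
```

With `debug` left at its default of `False`, or with no client bound at all, the same call is a no-op, which is exactly what `test_client_debug_option_disabled` asserts.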
getsentry/sentry-python
diff --git a/tests/conftest.py b/tests/conftest.py index a33aa855..b98a2c86 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -15,7 +15,10 @@ if not os.path.isfile(SEMAPHORE): @pytest.fixture(autouse=True) -def reraise_internal_exceptions(monkeypatch): +def reraise_internal_exceptions(request, monkeypatch): + if "tests_internal_exceptions" in request.keywords: + return + def capture_internal_exception(exc_info): reraise(*exc_info) diff --git a/tests/test_client.py b/tests/test_client.py index dbf1cde4..d65f13c6 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -138,7 +138,7 @@ def test_configure_scope_unavailable(no_sdk, monkeypatch): assert not calls -def test_transport_works(sentry_init, httpserver, request, capsys): +def test_transport_works(httpserver, request, capsys): httpserver.serve_content("ok", 200) client = Client("http://foobar@{}/123".format(httpserver.url[len("http://") :])) @@ -152,3 +152,21 @@ def test_transport_works(sentry_init, httpserver, request, capsys): out, err = capsys.readouterr() assert not err and not out assert httpserver.requests + + [email protected]_internal_exceptions +def test_client_debug_option_enabled(sentry_init, caplog): + sentry_init(debug=True) + + Hub.current.capture_internal_exception((ValueError, ValueError("OK"), None)) + assert "OK" in caplog.text + + [email protected]_internal_exceptions [email protected]("with_client", (True, False)) +def test_client_debug_option_disabled(with_client, sentry_init, caplog): + if with_client: + sentry_init() + + Hub.current.capture_internal_exception((ValueError, ValueError("OK"), None)) + assert "OK" not in caplog.text
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 1 }, "num_modified_files": 3 }
0.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[flask]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-xdist", "pytest-cov", "pytest-localserver", "pytest-mock", "pytest-asyncio" ], "pre_install": [], "python": "3.7", "reqs_path": [ "test-requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
atomicwrites==1.4.1 attrs==24.2.0 blinker==1.6.3 certifi @ file:///croot/certifi_1671487769961/work/certifi click==8.1.8 coverage==7.2.7 distlib==0.3.9 exceptiongroup==1.2.2 execnet==2.0.2 filelock==3.12.2 Flask==2.2.5 hypothesis==3.69.9 importlib-metadata==6.7.0 iniconfig==2.0.0 itsdangerous==2.1.2 Jinja2==3.1.6 MarkupSafe==2.1.5 more-itertools==9.1.0 packaging==24.0 platformdirs==4.0.0 pluggy==0.13.1 py==1.11.0 pytest==7.4.4 pytest-asyncio==0.21.2 pytest-cov==4.1.0 pytest-forked==1.2.0 pytest-localserver==0.4.1 pytest-mock==3.11.1 pytest-xdist==1.23.0 -e git+https://github.com/getsentry/sentry-python.git@33115bf5c9c835e5488aa57baf34e2b8ee7585fe#egg=sentry_sdk six==1.11.0 tomli==2.0.1 tox==3.2.1 typing_extensions==4.7.1 urllib3==2.0.7 virtualenv==20.26.6 Werkzeug==2.2.3 zipp==3.15.0
name: sentry-python channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - atomicwrites==1.4.1 - attrs==24.2.0 - blinker==1.6.3 - click==8.1.8 - coverage==7.2.7 - distlib==0.3.9 - exceptiongroup==1.2.2 - execnet==2.0.2 - filelock==3.12.2 - flask==2.2.5 - hypothesis==3.69.9 - importlib-metadata==6.7.0 - iniconfig==2.0.0 - itsdangerous==2.1.2 - jinja2==3.1.6 - markupsafe==2.1.5 - more-itertools==9.1.0 - packaging==24.0 - platformdirs==4.0.0 - pluggy==0.13.1 - py==1.11.0 - pytest==7.4.4 - pytest-asyncio==0.21.2 - pytest-cov==4.1.0 - pytest-forked==1.2.0 - pytest-localserver==0.4.1 - pytest-mock==3.11.1 - pytest-xdist==1.23.0 - sentry-sdk==0.1.3 - six==1.11.0 - tomli==2.0.1 - tox==3.2.1 - typing-extensions==4.7.1 - urllib3==2.0.7 - virtualenv==20.26.6 - werkzeug==2.2.3 - zipp==3.15.0 prefix: /opt/conda/envs/sentry-python
[ "tests/test_client.py::test_client_debug_option_enabled" ]
[]
[ "tests/test_client.py::test_transport_option", "tests/test_client.py::test_ignore_errors", "tests/test_client.py::test_capture_event_works", "tests/test_client.py::test_atexit[10]", "tests/test_client.py::test_atexit[20]", "tests/test_client.py::test_configure_scope_available", "tests/test_client.py::test_configure_scope_unavailable[True]", "tests/test_client.py::test_configure_scope_unavailable[False]", "tests/test_client.py::test_transport_works", "tests/test_client.py::test_client_debug_option_disabled[True]", "tests/test_client.py::test_client_debug_option_disabled[False]" ]
[]
MIT License
2,996
[ "sentry_sdk/hub.py", "sentry_sdk/consts.py", "sentry_sdk/utils.py" ]
[ "sentry_sdk/hub.py", "sentry_sdk/consts.py", "sentry_sdk/utils.py" ]
oasis-open__cti-python-stix2-207
b5a301ff28b6a73054741d5cd9a90a0036a7f6e5
2018-08-30 16:06:25
3084c9f51fcd00cf6b0ed76827af90d0e86746d5
diff --git a/README.rst b/README.rst index 6770ec6..79b8e7e 100644 --- a/README.rst +++ b/README.rst @@ -170,6 +170,9 @@ repositories/maintainers-guide#additionalMaintainers>`__. **Current Maintainers of this TC Open Repository** +- `Greg Back <mailto:[email protected]>`__; GitHub ID: + https://github.com/gtback/; WWW: `MITRE + Corporation <http://www.mitre.org/>`__ - `Chris Lenk <mailto:[email protected]>`__; GitHub ID: https://github.com/clenk/; WWW: `MITRE Corporation <http://www.mitre.org/>`__ diff --git a/docs/guide/creating.ipynb b/docs/guide/creating.ipynb index 61bbe15..058aae3 100644 --- a/docs/guide/creating.ipynb +++ b/docs/guide/creating.ipynb @@ -881,7 +881,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.3" + "version": "3.6.5" } }, "nbformat": 4, diff --git a/docs/guide/patterns.ipynb b/docs/guide/patterns.ipynb new file mode 100644 index 0000000..ee06675 --- /dev/null +++ b/docs/guide/patterns.ipynb @@ -0,0 +1,509 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# STIX2 Patterns\n", + "\n", + "The Python ``stix2`` library supports STIX 2 patterning insofar that patterns may be used for the pattern property of Indicators, identical to the STIX 2 specification. ``stix2`` does not evaluate patterns against STIX 2 content; for that functionality see [cti-pattern-matcher](https://github.com/oasis-open/cti-pattern-matcher).\n", + "\n", + "Patterns in the ``stix2`` library are built compositely from the bottom up, creating subcomponent expressions first before those at higher levels.\n", + "\n", + "## API Tips\n", + "\n", + "### ObservationExpression\n", + "\n", + "Within the STIX 2 Patterning specification, Observation Expressions denote a complete expression to be evaluated against a discrete observation. In other words, an Observation Expression must be created to apply to a single Observation instance. This is further made clear by the visual brackets(```[]```) that encapsulate an Observation Expression. Thus, whatever sub expressions that are within the Observation Expression are meant to be matched against the same Observable instance.\n", + "\n", + "This requirement manifests itself within the ``stix2`` library via ```ObservationExpression```. When creating STIX 2 observation expressions, whenever the current expression is complete, wrap it with ```ObservationExpression()```. This allows the complete pattern expression - no matter its complexity - to be rendered as a proper specification-adhering string. __*Note: When pattern expressions are added to Indicator objects, the expression objects are implicitly converted to string representations*__. While the extra step may seem tedious in the construction of simple pattern expressions, this explicit marking of observation expressions becomes vital when converting the pattern expressions to strings. \n", + "\n", + "In all the examples, you can observe how in the process of building pattern expressions, when an Observation Expression is completed, it is wrapped with ```ObservationExpression()```.\n", + "\n", + "### ParentheticalExpression\n", + "\n", + "Do not be confused by the ```ParentheticalExpression``` object. It is not a distinct expression type but is also used to properly craft pattern expressions by denoting order priority and grouping of expression components. 
Use it in a similar manner as ```ObservationExpression```, wrapping completed subcomponent expressions with ```ParentheticalExpression()``` if explicit ordering is required. For usage examples with ```ParentheticalExpression```'s, see [here](#Compound-Observation-Expressions).\n", + "\n", + "### BooleanExpressions vs CompoundObservationExpressions\n", + "\n", + "Be careful to note the difference between these two very similar pattern components. \n", + "\n", + "__BooleanExpressions__\n", + "\n", + " - [AndBooleanExpression](../api/stix2.patterns.rst#stix2.patterns.AndBooleanExpression)\n", + " - [OrbooleanExpression](../api/stix2.patterns.rst#stix2.patterns.OrBooleanExpression)\n", + " \n", + " __Usage__: When the boolean sub-expressions refer to the *same* root object \n", + "\n", + " __Example__:\n", + " ```[domain-name:value = \"www.5z8.info\" AND domain-name:resolvess_to_refs[*].value = \"'198.51.100.1/32'\"]```\n", + " \n", + " __Rendering__: when pattern is rendered, brackets or parenthesis will encapsulate boolean expression\n", + " \n", + "__CompoundObservationExpressions__\n", + "\n", + " - [AndObservationExpression](../api/stix2.patterns.rst#stix2.patterns.AndObservationExpression)\n", + " - [OrObservationExpression](../api/stix2.patterns.rst#stix2.patterns.OrObservationExpression)\n", + " \n", + " __Usage__: When the boolean sub-expressions refer to *different* root objects\n", + "\n", + " __Example__:\n", + " ```[file:name=\"foo.dll\"] AND [process:name = \"procfoo\"]```\n", + " \n", + " __Rendering__: when pattern is rendered, brackets will encapsulate each boolean sub-expression\n", + "\n", + "\n", + "\n", + "## Examples\n", + "\n", + "### Comparison Expressions" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "from stix2 import DomainName, File, IPv4Address\n", + "from stix2 import (ObjectPath, EqualityComparisonExpression, ObservationExpression,\n", + " GreaterThanComparisonExpression, IsSubsetComparisonExpression,\n", + " FloatConstant, StringConstant)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Equality Comparison expressions" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\t[domain-name:value = 'site.of.interest.zaz']\n", + "\n", + "\t[file:parent_directory_ref.path = 'C:\\\\Windows\\\\System32']\n", + "\n" + ] + } + ], + "source": [ + "lhs = ObjectPath(\"domain-name\", [\"value\"])\n", + "ece_1 = ObservationExpression(EqualityComparisonExpression(lhs, \"site.of.interest.zaz\"))\n", + "print(\"\\t{}\\n\".format(ece_1))\n", + "\n", + "lhs = ObjectPath(\"file\", [\"parent_directory_ref\",\"path\"])\n", + "ece_2 = ObservationExpression(EqualityComparisonExpression(lhs, \"C:\\\\Windows\\\\System32\"))\n", + "print(\"\\t{}\\n\".format(ece_2))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Greater-than Comparison expressions" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\t[file:extensions.windows-pebinary-ext.sections[*].entropy > 7.0]\n", + "\n" + ] + } + ], + "source": [ + "lhs = ObjectPath(\"file\", [\"extensions\", \"windows-pebinary-ext\", \"sections[*]\", \"entropy\"])\n", + "gte = ObservationExpression(GreaterThanComparisonExpression(lhs, FloatConstant(\"7.0\")))\n", + "print(\"\\t{}\\n\".format(gte))" + ] + }, + { + 
"cell_type": "markdown", + "metadata": {}, + "source": [ + "#### IsSubset Comparison expressions" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\t[network-traffic:dst_ref.value ISSUBSET '2001:0db8:dead:beef:0000:0000:0000:0000/64']\n", + "\n" + ] + } + ], + "source": [ + "lhs = ObjectPath(\"network-traffic\", [\"dst_ref\", \"value\"])\n", + "iss = ObservationExpression(IsSubsetComparisonExpression(lhs, StringConstant(\"2001:0db8:dead:beef:0000:0000:0000:0000/64\")))\n", + "print(\"\\t{}\\n\".format(iss))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Compound Observation Expressions" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from stix2 import (IntegerConstant, HashConstant, ObjectPath,\n", + " EqualityComparisonExpression, AndBooleanExpression,\n", + " OrBooleanExpression, ParentheticalExpression,\n", + " AndObservationExpression, OrObservationExpression,\n", + " FollowedByObservationExpression, ObservationExpression)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### AND boolean" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(AND)\n", + "[email-message:sender_ref.value = '[email protected]' AND email-message:subject = 'Conference Info']\n", + "\n" + ] + } + ], + "source": [ + "ece3 = EqualityComparisonExpression(ObjectPath(\"email-message\", [\"sender_ref\", \"value\"]), \"[email protected]\")\n", + "ece4 = EqualityComparisonExpression(ObjectPath(\"email-message\", [\"subject\"]), \"Conference Info\")\n", + "abe = ObservationExpression(AndBooleanExpression([ece3, ece4]))\n", + "print(\"(AND)\\n{}\\n\".format(abe))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### OR boolean" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(OR)\n", + "[url:value = 'http://example.com/foo' OR url:value = 'http://example.com/bar']\n", + "\n" + ] + } + ], + "source": [ + "ece5 = EqualityComparisonExpression(ObjectPath(\"url\", [\"value\"]), \"http://example.com/foo\")\n", + "ece6 = EqualityComparisonExpression(ObjectPath(\"url\", [\"value\"]), \"http://example.com/bar\")\n", + "obe = ObservationExpression(OrBooleanExpression([ece5, ece6]))\n", + "print(\"(OR)\\n{}\\n\".format(obe))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### ( OR ) AND boolean" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(OR,AND)\n", + "[(file:name = 'pdf.exe' OR file:size = 371712) AND file:created = 2014-01-13 07:03:17+00:00]\n", + "\n" + ] + } + ], + "source": [ + "ece7 = EqualityComparisonExpression(ObjectPath(\"file\", [\"name\"]), \"pdf.exe\")\n", + "ece8 = EqualityComparisonExpression(ObjectPath(\"file\", [\"size\"]), IntegerConstant(\"371712\"))\n", + "ece9 = EqualityComparisonExpression(ObjectPath(\"file\", [\"created\"]), \"2014-01-13T07:03:17Z\")\n", + "obe1 = OrBooleanExpression([ece7, ece8])\n", + "pobe = ParentheticalExpression(obe1)\n", + "abe1 = ObservationExpression(AndBooleanExpression([pobe, ece9]))\n", + "print(\"(OR,AND)\\n{}\\n\".format(abe1))" + ] + }, + { + "cell_type": 
"markdown", + "metadata": {}, + "source": [ + "#### ( AND ) OR ( OR ) observation" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(AND,OR,OR)\n", + "([file:name = 'foo.dll'] AND [win-registry-key:key = 'HKEY_LOCAL_MACHINE\\\\foo\\\\bar']) OR [process:name = 'fooproc' OR process:name = 'procfoo']\n", + "\n" + ] + } + ], + "source": [ + "ece20 = ObservationExpression(EqualityComparisonExpression(ObjectPath(\"file\", [\"name\"]), \"foo.dll\"))\n", + "ece21 = ObservationExpression(EqualityComparisonExpression(ObjectPath(\"win-registry-key\", [\"key\"]), \"HKEY_LOCAL_MACHINE\\\\foo\\\\bar\"))\n", + "ece22 = EqualityComparisonExpression(ObjectPath(\"process\", [\"name\"]), \"fooproc\")\n", + "ece23 = EqualityComparisonExpression(ObjectPath(\"process\", [\"name\"]), \"procfoo\")\n", + "# NOTE: we need to use AND/OR observation expression instead of just boolean \n", + "# expressions as the operands are not on the same object-type\n", + "aoe = ParentheticalExpression(AndObservationExpression([ece20, ece21]))\n", + "obe2 = ObservationExpression(OrBooleanExpression([ece22, ece23]))\n", + "ooe = OrObservationExpression([aoe, obe2])\n", + "print(\"(AND,OR,OR)\\n{}\\n\".format(ooe))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### FOLLOWED-BY" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(FollowedBy)\n", + "[file:hashes.MD5 = '79054025255fb1a26e4bc422aef54eb4'] FOLLOWEDBY [win-registry-key:key = 'HKEY_LOCAL_MACHINE\\\\foo\\\\bar']\n", + "\n" + ] + } + ], + "source": [ + "ece10 = ObservationExpression(EqualityComparisonExpression(ObjectPath(\"file\", [\"hashes\", \"MD5\"]), HashConstant(\"79054025255fb1a26e4bc422aef54eb4\", \"MD5\")))\n", + "ece11 = ObservationExpression(EqualityComparisonExpression(ObjectPath(\"win-registry-key\", [\"key\"]), \"HKEY_LOCAL_MACHINE\\\\foo\\\\bar\"))\n", + "fbe = FollowedByObservationExpression([ece10, ece11])\n", + "print(\"(FollowedBy)\\n{}\\n\".format(fbe))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Qualified Observation Expressions" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "from stix2 import (TimestampConstant, HashConstant, ObjectPath, EqualityComparisonExpression,\n", + " AndBooleanExpression, WithinQualifier, RepeatQualifier, StartStopQualifier,\n", + " QualifiedObservationExpression, FollowedByObservationExpression,\n", + " ParentheticalExpression, ObservationExpression)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### WITHIN" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(WITHIN)\n", + "([file:hashes.MD5 = '79054025255fb1a26e4bc422aef54eb4'] FOLLOWEDBY [win-registry-key:key = 'HKEY_LOCAL_MACHINE\\\\foo\\\\bar']) WITHIN 300 SECONDS\n", + "\n" + ] + } + ], + "source": [ + "ece10 = ObservationExpression(EqualityComparisonExpression(ObjectPath(\"file\", [\"hashes\", \"MD5\"]), HashConstant(\"79054025255fb1a26e4bc422aef54eb4\", \"MD5\")))\n", + "ece11 = ObservationExpression(EqualityComparisonExpression(ObjectPath(\"win-registry-key\", [\"key\"]), \"HKEY_LOCAL_MACHINE\\\\foo\\\\bar\"))\n", + "fbe = FollowedByObservationExpression([ece10, ece11])\n", + "par = 
ParentheticalExpression(fbe)\n", + "qoe = QualifiedObservationExpression(par, WithinQualifier(300))\n", + "print(\"(WITHIN)\\n{}\\n\".format(qoe))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### REPEATS, WITHIN" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(REPEAT, WITHIN)\n", + "[network-traffic:dst_ref.type = 'domain-name' AND network-traffic:dst_ref.value = 'example.com'] REPEATS 5 TIMES WITHIN 180 SECONDS\n", + "\n" + ] + } + ], + "source": [ + "ece12 = EqualityComparisonExpression(ObjectPath(\"network-traffic\", [\"dst_ref\", \"type\"]), \"domain-name\")\n", + "ece13 = EqualityComparisonExpression(ObjectPath(\"network-traffic\", [\"dst_ref\", \"value\"]), \"example.com\")\n", + "abe2 = ObservationExpression(AndBooleanExpression([ece12, ece13]))\n", + "qoe1 = QualifiedObservationExpression(QualifiedObservationExpression(abe2, RepeatQualifier(5)), WithinQualifier(180))\n", + "print(\"(REPEAT, WITHIN)\\n{}\\n\".format(qoe1))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### START, STOP" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(START-STOP)\n", + "[file:name = 'foo.dll'] START t'2016-06-01T00:00:00Z' STOP t'2016-07-01T00:00:00Z'\n", + "\n" + ] + } + ], + "source": [ + "ece14 = ObservationExpression(EqualityComparisonExpression(ObjectPath(\"file\", [\"name\"]), \"foo.dll\"))\n", + "ssq = StartStopQualifier(TimestampConstant('2016-06-01T00:00:00Z'), TimestampConstant('2016-07-01T00:00:00Z'))\n", + "qoe2 = QualifiedObservationExpression(ece14, ssq)\n", + "print(\"(START-STOP)\\n{}\\n\".format(qoe2))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Attaching patterns to STIX2 Domain objects\n", + "\n", + "\n", + "### Example" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"type\": \"indicator\",\n", + " \"id\": \"indicator--219bc5fc-fdbf-4b54-a2fc-921be7ab3acb\",\n", + " \"created\": \"2018-08-29T23:58:00.548Z\",\n", + " \"modified\": \"2018-08-29T23:58:00.548Z\",\n", + " \"name\": \"Cryptotorch\",\n", + " \"pattern\": \"[file:name = '$$t00rzch$$.elf']\",\n", + " \"valid_from\": \"2018-08-29T23:58:00.548391Z\",\n", + " \"labels\": [\n", + " \"malware\",\n", + " \"ransomware\"\n", + " ]\n", + "}\n" + ] + } + ], + "source": [ + "from stix2 import Indicator, EqualityComparisonExpression, ObservationExpression\n", + "\n", + "ece14 = ObservationExpression(EqualityComparisonExpression(ObjectPath(\"file\", [\"name\"]), \"$$t00rzch$$.elf\"))\n", + "ind = Indicator(name=\"Cryptotorch\", labels=[\"malware\", \"ransomware\"], pattern=ece14)\n", + "print(ind)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/stix2/patterns.py b/stix2/patterns.py index 3f9cbd9..146ec04 100644 --- a/stix2/patterns.py +++ b/stix2/patterns.py @@ -18,6 +18,11 @@ class _Constant(object): class 
StringConstant(_Constant): + """Pattern string constant + + Args: + value (str): string value + """ def __init__(self, value): self.value = value @@ -26,17 +31,27 @@ class StringConstant(_Constant): class TimestampConstant(_Constant): + """Pattern timestamp constant + + Args: + value (datetime.datetime OR str): if string, must be a timestamp string + """ def __init__(self, value): try: self.value = parse_into_datetime(value) except Exception: - raise ValueError("must be a datetime object or timestamp string.") + raise ValueError("Must be a datetime object or timestamp string.") def __str__(self): return "t%s" % repr(self.value) class IntegerConstant(_Constant): + """Pattern interger constant + + Args: + value (int): integer value + """ def __init__(self, value): try: self.value = int(value) @@ -59,6 +74,13 @@ class FloatConstant(_Constant): class BooleanConstant(_Constant): + """Pattern boolean constant + + Args: + value (str OR int): + (str) 'true', 't' for True; 'false', 'f' for False + (int) 1 for True; 0 for False + """ def __init__(self, value): if isinstance(value, bool): self.value = value @@ -106,6 +128,15 @@ _HASH_REGEX = { class HashConstant(StringConstant): + """Pattern hash constant + + Args: + value (str): hash value + type (str): hash algorithm name. Supported hash algorithms: + "MD5", "MD6", RIPEMD160", "SHA1", "SHA224", "SHA256", + "SHA384", "SHA512", "SHA3224", "SHA3256", "SHA3384", + "SHA3512", "SSDEEP", "WHIRLPOOL" + """ def __init__(self, value, type): key = type.upper().replace('-', '') if key in _HASH_REGEX: @@ -116,7 +147,11 @@ class HashConstant(StringConstant): class BinaryConstant(_Constant): + """Pattern binary constant + Args: + value (str): base64 encoded string value + """ def __init__(self, value): try: base64.b64decode(value) @@ -129,6 +164,11 @@ class BinaryConstant(_Constant): class HexConstant(_Constant): + """Pattern hexadecimal constant + + Args: + value (str): hexadecimal value + """ def __init__(self, value): if not re.match('^([a-fA-F0-9]{2})+$', value): raise ValueError("must contain an even number of hexadecimal characters") @@ -139,6 +179,11 @@ class HexConstant(_Constant): class ListConstant(_Constant): + """Pattern list constant + + Args: + value (list): list of values + """ def __init__(self, values): self.value = values @@ -147,6 +192,12 @@ class ListConstant(_Constant): def make_constant(value): + """Convert value to Pattern constant, best effort attempt + at determining root value type and corresponding conversion + + Args: + value: value to convert to Pattern constant + """ if isinstance(value, _Constant): return value @@ -182,6 +233,16 @@ class _ObjectPathComponent(object): class BasicObjectPathComponent(_ObjectPathComponent): + """Basic object path component (for an observation or expression) + + By "Basic", implies that the object path component is not a + list, object reference or futher referenced property, i.e. 
terminal + component + + Args: + property_name (str): object property name + is_key (bool): is dictionary key, default: False + """ def __init__(self, property_name, is_key=False): self.property_name = property_name # TODO: set is_key to True if this component is a dictionary key @@ -192,6 +253,12 @@ class BasicObjectPathComponent(_ObjectPathComponent): class ListObjectPathComponent(_ObjectPathComponent): + """List object path component (for an observation or expression) + + Args: + property_name (str): list object property name + index (int): index of the list property's value that is specified + """ def __init__(self, property_name, index): self.property_name = property_name self.index = index @@ -201,6 +268,11 @@ class ListObjectPathComponent(_ObjectPathComponent): class ReferenceObjectPathComponent(_ObjectPathComponent): + """Reference object path component (for an observation or expression) + + Args: + reference_property_name (str): reference object property name + """ def __init__(self, reference_property_name): self.property_name = reference_property_name @@ -209,6 +281,12 @@ class ReferenceObjectPathComponent(_ObjectPathComponent): class ObjectPath(object): + """Pattern operand object (property) path + + Args: + object_type_name (str): name of object type for corresponding object path component + property_path (_ObjectPathComponent OR str): object path + """ def __init__(self, object_type_name, property_path): self.object_type_name = object_type_name self.property_path = [x if isinstance(x, _ObjectPathComponent) else @@ -219,11 +297,17 @@ class ObjectPath(object): return "%s:%s" % (self.object_type_name, ".".join(["%s" % x for x in self.property_path])) def merge(self, other): + """Extend the object property with that of the supplied object property path""" self.property_path.extend(other.property_path) return self @staticmethod def make_object_path(lhs): + """Create ObjectPath from string encoded object path + + Args: + lhs (str): object path of left-hand-side component of expression + """ path_as_parts = lhs.split(":") return ObjectPath(path_as_parts[0], path_as_parts[1].split(".")) @@ -233,6 +317,14 @@ class _PatternExpression(object): class _ComparisonExpression(_PatternExpression): + """Pattern Comparison Expression + + Args: + operator (str): operator of comparison expression + lhs (ObjectPath OR str): object path of left-hand-side component of expression + rhs (ObjectPath OR str): object path of right-hand-side component of expression + negated (bool): comparison expression negated. Default: False + """ def __init__(self, operator, lhs, rhs, negated=False): if operator == "=" and isinstance(rhs, (ListConstant, list)): self.operator = "IN" @@ -257,56 +349,134 @@ class _ComparisonExpression(_PatternExpression): class EqualityComparisonExpression(_ComparisonExpression): + """Pattern Equality Comparison Expression + + Args: + lhs (ObjectPath OR str): object path of left-hand-side component of expression + rhs (ObjectPath OR str): object path of right-hand-side component of expression + negated (bool): comparison expression negated. 
Default: False + """ def __init__(self, lhs, rhs, negated=False): super(EqualityComparisonExpression, self).__init__("=", lhs, rhs, negated) class GreaterThanComparisonExpression(_ComparisonExpression): + """Pattern Greater-than Comparison Expression + + Args: + lhs (ObjectPath OR str): object path of left-hand-side component of expression + rhs (ObjectPath OR str): object path of right-hand-side component of expression + negated (bool): comparison expression negated. Default: False + """ def __init__(self, lhs, rhs, negated=False): super(GreaterThanComparisonExpression, self).__init__(">", lhs, rhs, negated) class LessThanComparisonExpression(_ComparisonExpression): + """Pattern Less-than Comparison Expression + + Args: + lhs (ObjectPath OR str): object path of left-hand-side component of expression + rhs (ObjectPath OR str): object path of right-hand-side component of expression + negated (bool): comparison expression negated. Default: False + """ def __init__(self, lhs, rhs, negated=False): super(LessThanComparisonExpression, self).__init__("<", lhs, rhs, negated) class GreaterThanEqualComparisonExpression(_ComparisonExpression): + """Pattern Greater-Than-or-Equal-to Comparison Expression + + Args: + lhs (ObjectPath OR str): object path of left-hand-side component of expression + rhs (ObjectPath OR str): object path of right-hand-side component of expression + negated (bool): comparison expression negated. Default: False + """ def __init__(self, lhs, rhs, negated=False): super(GreaterThanEqualComparisonExpression, self).__init__(">=", lhs, rhs, negated) class LessThanEqualComparisonExpression(_ComparisonExpression): + """Pattern Less-Than-or-Equal-to Comparison Expression + + Args: + lhs (ObjectPath OR str): object path of left-hand-side component of expression + rhs (ObjectPath OR str): object path of right-hand-side component of expression + negated (bool): comparison expression negated. Default: False + """ + def __init__(self, lhs, rhs, negated=False): super(LessThanEqualComparisonExpression, self).__init__("<=", lhs, rhs, negated) class InComparisonExpression(_ComparisonExpression): + """'in' Comparison Expression + + Args: + lhs (ObjectPath OR str): object path of left-hand-side component of expression + rhs (ObjectPath OR str): object path of right-hand-side component of expression + negated (bool): comparison expression negated. Default: False + """ def __init__(self, lhs, rhs, negated=False): super(InComparisonExpression, self).__init__("IN", lhs, rhs, negated) class LikeComparisonExpression(_ComparisonExpression): + """'like' Comparison Expression + + Args: + lhs (ObjectPath OR str): object path of left-hand-side component of expression + rhs (ObjectPath OR str): object path of right-hand-side component of expression + negated (bool): comparison expression negated. Default: False + """ + def __init__(self, lhs, rhs, negated=False): super(LikeComparisonExpression, self).__init__("LIKE", lhs, rhs, negated) class MatchesComparisonExpression(_ComparisonExpression): + """'Matches' Comparison Expression + + Args: + lhs (ObjectPath OR str): object path of left-hand-side component of expression + rhs (ObjectPath OR str): object path of right-hand-side component of expression + negated (bool): comparison expression negated. 
Default: False + """ def __init__(self, lhs, rhs, negated=False): super(MatchesComparisonExpression, self).__init__("MATCHES", lhs, rhs, negated) class IsSubsetComparisonExpression(_ComparisonExpression): - def __init__(self, lhs, rhs, negated=False): - super(IsSubsetComparisonExpression, self).__init__("ISSUBSET", lhs, rhs, negated) + """ 'is subset' Comparison Expression + + Args: + lhs (ObjectPath OR str): object path of left-hand-side component of expression + rhs (ObjectPath OR str): object path of right-hand-side component of expression + negated (bool): comparison expression negated. Default: False + """ + def __init__(self, lhs, rhs, negated=False): + super(IsSubsetComparisonExpression, self).__init__("ISSUBSET", lhs, rhs, negated) class IsSupersetComparisonExpression(_ComparisonExpression): - def __init__(self, lhs, rhs, negated=False): - super(IsSupersetComparisonExpression, self).__init__("ISSUPERSET", lhs, rhs, negated) + """ 'is super set' Comparison Expression + + Args: + lhs (ObjectPath OR str): object path of left-hand-side component of expression + rhs (ObjectPath OR str): object path of right-hand-side component of expression + negated (bool): comparison expression negated. Default: False + """ + def __init__(self, lhs, rhs, negated=False): + super(IsSupersetComparisonExpression, self).__init__("ISSUPERSET", lhs, rhs, negated) class _BooleanExpression(_PatternExpression): + """Boolean Pattern Expression + + Args: + operator (str): boolean operator + operands (list): boolean operands + """ def __init__(self, operator, operands): self.operator = operator self.operands = [] @@ -322,21 +492,37 @@ class _BooleanExpression(_PatternExpression): def __str__(self): sub_exprs = [] for o in self.operands: - sub_exprs.append("%s" % o) + sub_exprs.append(str(o)) return (" " + self.operator + " ").join(sub_exprs) class AndBooleanExpression(_BooleanExpression): + """'AND' Boolean Pattern Expression. Only use if both operands are of + the same root object. + + Args: + operands (list): AND operands + """ def __init__(self, operands): super(AndBooleanExpression, self).__init__("AND", operands) class OrBooleanExpression(_BooleanExpression): + """'OR' Boolean Pattern Expression. 
Only use if both operands are of the same root object + + Args: + operands (list): OR operands + """ def __init__(self, operands): super(OrBooleanExpression, self).__init__("OR", operands) class ObservationExpression(_PatternExpression): + """Observation Expression + + Args: + operand (str): observation expression operand + """ def __init__(self, operand): self.operand = operand @@ -345,6 +531,12 @@ class ObservationExpression(_PatternExpression): class _CompoundObservationExpression(_PatternExpression): + """Compound Observation Expression + + Args: + operator (str): compound observation operator + operands (str): compound observation operands + """ def __init__(self, operator, operands): self.operator = operator self.operands = operands @@ -357,21 +549,41 @@ class _CompoundObservationExpression(_PatternExpression): class AndObservationExpression(_CompoundObservationExpression): + """'AND' Compound Observation Pattern Expression + + Args: + operands (str): compound observation operands + """ def __init__(self, operands): super(AndObservationExpression, self).__init__("AND", operands) class OrObservationExpression(_CompoundObservationExpression): + """Pattern 'OR' Compound Observation Expression + + Args: + operands (str): compound observation operands + """ def __init__(self, operands): super(OrObservationExpression, self).__init__("OR", operands) class FollowedByObservationExpression(_CompoundObservationExpression): + """Pattern 'Followed by' Compound Observation Expression + + Args: + operands (str): compound observation operands + """ def __init__(self, operands): super(FollowedByObservationExpression, self).__init__("FOLLOWEDBY", operands) class ParentheticalExpression(_PatternExpression): + """Pattern Parenthetical Observation Expression + + Args: + exp (str): observation expression + """ def __init__(self, exp): self.expression = exp if hasattr(exp, "root_type"): @@ -386,6 +598,11 @@ class _ExpressionQualifier(_PatternExpression): class RepeatQualifier(_ExpressionQualifier): + """Pattern Repeat Qualifier + + Args: + times_to_repeat (int): times the qualifiers is repeated + """ def __init__(self, times_to_repeat): if isinstance(times_to_repeat, IntegerConstant): self.times_to_repeat = times_to_repeat @@ -399,6 +616,11 @@ class RepeatQualifier(_ExpressionQualifier): class WithinQualifier(_ExpressionQualifier): + """Pattern 'Within' Qualifier + + Args: + number_of_seconds (int): seconds value for 'within' qualifier + """ def __init__(self, number_of_seconds): if isinstance(number_of_seconds, IntegerConstant): self.number_of_seconds = number_of_seconds @@ -412,6 +634,12 @@ class WithinQualifier(_ExpressionQualifier): class StartStopQualifier(_ExpressionQualifier): + """Pattern Start/Stop Qualifier + + Args: + start_time (TimestampConstant OR datetime.date): start timestamp for qualifier + stop_time (TimestampConstant OR datetime.date): stop timestamp for qualifier + """ def __init__(self, start_time, stop_time): if isinstance(start_time, TimestampConstant): self.start_time = start_time @@ -431,6 +659,12 @@ class StartStopQualifier(_ExpressionQualifier): class QualifiedObservationExpression(_PatternExpression): + """Pattern Qualified Observation Expression + + Args: + observation_expression (PatternExpression OR _CompoundObservationExpression OR ): pattern expression + qualifier (_ExpressionQualifier): pattern expression qualifier + """ def __init__(self, observation_expression, qualifier): self.observation_expression = observation_expression self.qualifier = qualifier
Add documentation for the `patterns` module. https://stix2.readthedocs.io/en/latest/api/stix2.patterns.html is pretty barebones, and we don't have anything in the User's Guide about it.
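The patch above addresses this by adding `docs/guide/patterns.ipynb` and docstrings throughout `stix2/patterns.py`. As a quick illustration of the API being documented, taken from the expressions used in the new notebook, building and rendering a simple observation expression looks like this:

```python
from stix2 import EqualityComparisonExpression, ObjectPath, ObservationExpression

# A comparison expression is built from an object path and a constant, then
# wrapped in ObservationExpression() once the expression is complete so it
# renders with the surrounding brackets required by the patterning spec.
lhs = ObjectPath("file", ["name"])
pattern = ObservationExpression(EqualityComparisonExpression(lhs, "foo.dll"))

print(pattern)  # [file:name = 'foo.dll']
```

When such an expression is passed as the `pattern` of an `Indicator`, it is implicitly converted to its string form, as the notebook's final example shows.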
oasis-open/cti-python-stix2
diff --git a/stix2/test/test_pattern_expressions.py b/stix2/test/test_pattern_expressions.py index 14e3774..a4d0a5c 100644 --- a/stix2/test/test_pattern_expressions.py +++ b/stix2/test/test_pattern_expressions.py @@ -9,6 +9,7 @@ def test_create_comparison_expression(): exp = stix2.EqualityComparisonExpression("file:hashes.'SHA-256'", stix2.HashConstant("aec070645fe53ee3b3763059376134f058cc337247c978add178b6ccdfb0019f", "SHA-256")) # noqa + assert str(exp) == "file:hashes.'SHA-256' = 'aec070645fe53ee3b3763059376134f058cc337247c978add178b6ccdfb0019f'" @@ -18,6 +19,7 @@ def test_boolean_expression(): exp2 = stix2.MatchesComparisonExpression("email-message:body_multipart[*].body_raw_ref.name", stix2.StringConstant("^Final Report.+\\.exe$")) exp = stix2.AndBooleanExpression([exp1, exp2]) + assert str(exp) == "email-message:from_ref.value MATCHES '.+\\\\@example\\\\.com$' AND email-message:body_multipart[*].body_raw_ref.name MATCHES '^Final Report.+\\\\.exe$'" # noqa @@ -66,9 +68,8 @@ def test_file_observable_expression(): "aec070645fe53ee3b3763059376134f058cc337247c978add178b6ccdfb0019f", 'SHA-256')) exp2 = stix2.EqualityComparisonExpression("file:mime_type", stix2.StringConstant("application/x-pdf")) - bool_exp = stix2.AndBooleanExpression([exp1, exp2]) - exp = stix2.ObservationExpression(bool_exp) - assert str(exp) == "[file:hashes.'SHA-256' = 'aec070645fe53ee3b3763059376134f058cc337247c978add178b6ccdfb0019f' AND file:mime_type = 'application/x-pdf']" # noqa + bool_exp = stix2.ObservationExpression(stix2.AndBooleanExpression([exp1, exp2])) + assert str(bool_exp) == "[file:hashes.'SHA-256' = 'aec070645fe53ee3b3763059376134f058cc337247c978add178b6ccdfb0019f' AND file:mime_type = 'application/x-pdf']" # noqa @pytest.mark.parametrize("observation_class, op", [ @@ -109,14 +110,12 @@ def test_artifact_payload(): "application/vnd.tcpdump.pcap") exp2 = stix2.MatchesComparisonExpression("artifact:payload_bin", stix2.StringConstant("\\xd4\\xc3\\xb2\\xa1\\x02\\x00\\x04\\x00")) - and_exp = stix2.AndBooleanExpression([exp1, exp2]) - exp = stix2.ObservationExpression(and_exp) - assert str(exp) == "[artifact:mime_type = 'application/vnd.tcpdump.pcap' AND artifact:payload_bin MATCHES '\\\\xd4\\\\xc3\\\\xb2\\\\xa1\\\\x02\\\\x00\\\\x04\\\\x00']" # noqa + and_exp = stix2.ObservationExpression(stix2.AndBooleanExpression([exp1, exp2])) + assert str(and_exp) == "[artifact:mime_type = 'application/vnd.tcpdump.pcap' AND artifact:payload_bin MATCHES '\\\\xd4\\\\xc3\\\\xb2\\\\xa1\\\\x02\\\\x00\\\\x04\\\\x00']" # noqa def test_greater_than_python_constant(): - exp1 = stix2.GreaterThanComparisonExpression("file:extensions.windows-pebinary-ext.sections[*].entropy", - 7.0) + exp1 = stix2.GreaterThanComparisonExpression("file:extensions.windows-pebinary-ext.sections[*].entropy", 7.0) exp = stix2.ObservationExpression(exp1) assert str(exp) == "[file:extensions.windows-pebinary-ext.sections[*].entropy > 7.0]" @@ -129,14 +128,14 @@ def test_greater_than(): def test_less_than(): - exp = stix2.LessThanComparisonExpression("file:size", - 1024) + exp = stix2.LessThanComparisonExpression("file:size", 1024) assert str(exp) == "file:size < 1024" def test_greater_than_or_equal(): exp = stix2.GreaterThanEqualComparisonExpression("file:size", 1024) + assert str(exp) == "file:size >= 1024" @@ -261,7 +260,7 @@ def test_invalid_integer_constant(): def test_invalid_timestamp_constant(): with pytest.raises(ValueError) as excinfo: stix2.TimestampConstant('foo') - assert 'must be a datetime object or timestamp string' in str(excinfo) + assert 
'Must be a datetime object or timestamp string' in str(excinfo) def test_invalid_float_constant():
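One behavioural detail the test patch pins down is the newly capitalized error message for an invalid `TimestampConstant`. A small sketch of what that looks like at runtime, based on the changed `raise` in `stix2/patterns.py`:

```python
import stix2

try:
    stix2.TimestampConstant("foo")
except ValueError as exc:
    # The patch changes the message from "must be a datetime object or
    # timestamp string." to "Must be a datetime object or timestamp string.",
    # which is what the updated test_invalid_timestamp_constant asserts.
    print(exc)
```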
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 3 }
1.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": null, "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 antlr4-python3-runtime==4.9.3 async-generator==1.10 attrs==22.2.0 Babel==2.11.0 backcall==0.2.0 bleach==4.1.0 bump2version==1.0.1 bumpversion==0.6.0 certifi==2021.5.30 cfgv==3.3.1 charset-normalizer==2.0.12 coverage==6.2 decorator==5.1.1 defusedxml==0.7.1 distlib==0.3.9 docutils==0.18.1 entrypoints==0.4 filelock==3.4.1 identify==2.4.4 idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 importlib-resources==5.2.3 iniconfig==1.1.1 ipython==7.16.3 ipython-genutils==0.2.0 jedi==0.17.2 Jinja2==3.0.3 jsonschema==3.2.0 jupyter-client==7.1.2 jupyter-core==4.9.2 jupyterlab-pygments==0.1.2 MarkupSafe==2.0.1 mistune==0.8.4 nbclient==0.5.9 nbconvert==6.0.7 nbformat==5.1.3 nbsphinx==0.3.2 nest-asyncio==1.6.0 nodeenv==1.6.0 packaging==21.3 pandocfilters==1.5.1 parso==0.7.1 pexpect==4.9.0 pickleshare==0.7.5 platformdirs==2.4.0 pluggy==1.0.0 pre-commit==2.17.0 prompt-toolkit==3.0.36 ptyprocess==0.7.0 py==1.11.0 Pygments==2.14.0 pyparsing==3.1.4 pyrsistent==0.18.0 pytest==7.0.1 pytest-cov==4.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==6.0.1 pyzmq==25.1.2 requests==2.27.1 simplejson==3.20.1 six==1.17.0 snowballstemmer==2.2.0 Sphinx==1.5.6 sphinx-prompt==1.5.0 -e git+https://github.com/oasis-open/cti-python-stix2.git@b5a301ff28b6a73054741d5cd9a90a0036a7f6e5#egg=stix2 stix2-patterns==2.0.0 testpath==0.6.0 toml==0.10.2 tomli==1.2.3 tornado==6.1 tox==3.28.0 traitlets==4.3.3 typing_extensions==4.1.1 urllib3==1.26.20 virtualenv==20.16.2 wcwidth==0.2.13 webencodings==0.5.1 zipp==3.6.0
name: cti-python-stix2 channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - antlr4-python3-runtime==4.9.3 - async-generator==1.10 - attrs==22.2.0 - babel==2.11.0 - backcall==0.2.0 - bleach==4.1.0 - bump2version==1.0.1 - bumpversion==0.6.0 - cfgv==3.3.1 - charset-normalizer==2.0.12 - coverage==6.2 - decorator==5.1.1 - defusedxml==0.7.1 - distlib==0.3.9 - docutils==0.18.1 - entrypoints==0.4 - filelock==3.4.1 - identify==2.4.4 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - importlib-resources==5.2.3 - iniconfig==1.1.1 - ipython==7.16.3 - ipython-genutils==0.2.0 - jedi==0.17.2 - jinja2==3.0.3 - jsonschema==3.2.0 - jupyter-client==7.1.2 - jupyter-core==4.9.2 - jupyterlab-pygments==0.1.2 - markupsafe==2.0.1 - mistune==0.8.4 - nbclient==0.5.9 - nbconvert==6.0.7 - nbformat==5.1.3 - nbsphinx==0.3.2 - nest-asyncio==1.6.0 - nodeenv==1.6.0 - packaging==21.3 - pandocfilters==1.5.1 - parso==0.7.1 - pexpect==4.9.0 - pickleshare==0.7.5 - platformdirs==2.4.0 - pluggy==1.0.0 - pre-commit==2.17.0 - prompt-toolkit==3.0.36 - ptyprocess==0.7.0 - py==1.11.0 - pygments==2.14.0 - pyparsing==3.1.4 - pyrsistent==0.18.0 - pytest==7.0.1 - pytest-cov==4.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==6.0.1 - pyzmq==25.1.2 - requests==2.27.1 - simplejson==3.20.1 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==1.5.6 - sphinx-prompt==1.5.0 - stix2-patterns==2.0.0 - testpath==0.6.0 - toml==0.10.2 - tomli==1.2.3 - tornado==6.1 - tox==3.28.0 - traitlets==4.3.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - virtualenv==20.16.2 - wcwidth==0.2.13 - webencodings==0.5.1 - zipp==3.6.0 prefix: /opt/conda/envs/cti-python-stix2
[ "stix2/test/test_pattern_expressions.py::test_invalid_timestamp_constant" ]
[]
[ "stix2/test/test_pattern_expressions.py::test_create_comparison_expression", "stix2/test/test_pattern_expressions.py::test_boolean_expression", "stix2/test/test_pattern_expressions.py::test_boolean_expression_with_parentheses", "stix2/test/test_pattern_expressions.py::test_hash_followed_by_registryKey_expression_python_constant", "stix2/test/test_pattern_expressions.py::test_hash_followed_by_registryKey_expression", "stix2/test/test_pattern_expressions.py::test_file_observable_expression", "stix2/test/test_pattern_expressions.py::test_multiple_file_observable_expression[AndObservationExpression-AND]", "stix2/test/test_pattern_expressions.py::test_multiple_file_observable_expression[OrObservationExpression-OR]", "stix2/test/test_pattern_expressions.py::test_root_types", "stix2/test/test_pattern_expressions.py::test_artifact_payload", "stix2/test/test_pattern_expressions.py::test_greater_than_python_constant", "stix2/test/test_pattern_expressions.py::test_greater_than", "stix2/test/test_pattern_expressions.py::test_less_than", "stix2/test/test_pattern_expressions.py::test_greater_than_or_equal", "stix2/test/test_pattern_expressions.py::test_less_than_or_equal", "stix2/test/test_pattern_expressions.py::test_not", "stix2/test/test_pattern_expressions.py::test_and_observable_expression", "stix2/test/test_pattern_expressions.py::test_invalid_and_observable_expression", "stix2/test/test_pattern_expressions.py::test_hex", "stix2/test/test_pattern_expressions.py::test_multiple_qualifiers", "stix2/test/test_pattern_expressions.py::test_set_op", "stix2/test/test_pattern_expressions.py::test_timestamp", "stix2/test/test_pattern_expressions.py::test_boolean", "stix2/test/test_pattern_expressions.py::test_binary", "stix2/test/test_pattern_expressions.py::test_list", "stix2/test/test_pattern_expressions.py::test_list2", "stix2/test/test_pattern_expressions.py::test_invalid_constant_type", "stix2/test/test_pattern_expressions.py::test_invalid_integer_constant", "stix2/test/test_pattern_expressions.py::test_invalid_float_constant", "stix2/test/test_pattern_expressions.py::test_boolean_constant[True-True0]", "stix2/test/test_pattern_expressions.py::test_boolean_constant[False-False0]", "stix2/test/test_pattern_expressions.py::test_boolean_constant[True-True1]", "stix2/test/test_pattern_expressions.py::test_boolean_constant[False-False1]", "stix2/test/test_pattern_expressions.py::test_boolean_constant[true-True]", "stix2/test/test_pattern_expressions.py::test_boolean_constant[false-False]", "stix2/test/test_pattern_expressions.py::test_boolean_constant[t-True]", "stix2/test/test_pattern_expressions.py::test_boolean_constant[f-False]", "stix2/test/test_pattern_expressions.py::test_boolean_constant[T-True]", "stix2/test/test_pattern_expressions.py::test_boolean_constant[F-False]", "stix2/test/test_pattern_expressions.py::test_boolean_constant[1-True]", "stix2/test/test_pattern_expressions.py::test_boolean_constant[0-False]", "stix2/test/test_pattern_expressions.py::test_invalid_boolean_constant", "stix2/test/test_pattern_expressions.py::test_invalid_hash_constant[MD5-zzz]", "stix2/test/test_pattern_expressions.py::test_invalid_hash_constant[ssdeep-zzz==]", "stix2/test/test_pattern_expressions.py::test_invalid_hex_constant", "stix2/test/test_pattern_expressions.py::test_invalid_binary_constant", "stix2/test/test_pattern_expressions.py::test_escape_quotes_and_backslashes", "stix2/test/test_pattern_expressions.py::test_like", "stix2/test/test_pattern_expressions.py::test_issuperset", 
"stix2/test/test_pattern_expressions.py::test_repeat_qualifier", "stix2/test/test_pattern_expressions.py::test_invalid_repeat_qualifier", "stix2/test/test_pattern_expressions.py::test_invalid_within_qualifier", "stix2/test/test_pattern_expressions.py::test_startstop_qualifier", "stix2/test/test_pattern_expressions.py::test_invalid_startstop_qualifier", "stix2/test/test_pattern_expressions.py::test_make_constant_already_a_constant" ]
[]
BSD 3-Clause "New" or "Revised" License
2,997
[ "README.rst", "docs/guide/patterns.ipynb", "stix2/patterns.py", "docs/guide/creating.ipynb" ]
[ "README.rst", "docs/guide/patterns.ipynb", "stix2/patterns.py", "docs/guide/creating.ipynb" ]
zopefoundation__zope.schema-50
ff8f7293507663e463a01034b506c73ca9af0718
2018-08-30 16:20:35
0a719f2ded189630a0a77e9292a66a3662c6512c
diff --git a/CHANGES.rst b/CHANGES.rst index 500e178..624d87f 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -87,6 +87,16 @@ subclass, enabling a simpler constructor call. See `issue 23 <https://github.com/zopefoundation/zope.schema/issues/23>`_. +- Add fields and interfaces representing Python's numeric tower. In + descending order of generality these are ``Number``, ``Complex``, + ``Real``, ``Rational`` and ``Integral``. The ``Int`` class extends + ``Integral``, the ``Float`` class extends ``Real``, and the + ``Decimal`` class extends ``Number``. See `issue 49 + <https://github.com/zopefoundation/zope.schema/issues/49>`_. + +- Make ``Iterable`` and ``Container`` properly implement ``IIterable`` + and ``IContainer``, respectively. + 4.5.0 (2017-07-10) ================== diff --git a/docs/api.rst b/docs/api.rst index a133111..284fae3 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -41,6 +41,12 @@ Strings Numbers ------- +.. autoclass:: zope.schema.interfaces.INumber +.. autoclass:: zope.schema.interfaces.IComplex +.. autoclass:: zope.schema.interfaces.IReal +.. autoclass:: zope.schema.interfaces.IRational +.. autoclass:: zope.schema.interfaces.IIntegral + .. autoclass:: zope.schema.interfaces.IInt .. autoclass:: zope.schema.interfaces.IFloat .. autoclass:: zope.schema.interfaces.IDecimal @@ -141,16 +147,9 @@ Fields .. autoclass:: zope.schema.Field .. autoclass:: zope.schema.Collection .. autoclass:: zope.schema._field.AbstractCollection -.. autoclass:: zope.schema.ASCII - :no-show-inheritance: -.. autoclass:: zope.schema.ASCIILine - :no-show-inheritance: + .. autoclass:: zope.schema.Bool :no-show-inheritance: -.. autoclass:: zope.schema.Bytes - :no-show-inheritance: -.. autoclass:: zope.schema.BytesLine - :no-show-inheritance: .. autoclass:: zope.schema.Choice :no-show-inheritance: .. autoclass:: zope.schema.Container @@ -159,20 +158,14 @@ Fields :no-show-inheritance: .. autoclass:: zope.schema.Datetime :no-show-inheritance: -.. autoclass:: zope.schema.Decimal - :no-show-inheritance: .. autoclass:: zope.schema.Dict .. autoclass:: zope.schema.DottedName :no-show-inheritance: -.. autoclass:: zope.schema.Float - :no-show-inheritance: .. autoclass:: zope.schema.FrozenSet :no-show-inheritance: .. autoclass:: zope.schema.Id :no-show-inheritance: -.. autoclass:: zope.schema.Int - :no-show-inheritance: .. autoclass:: zope.schema.InterfaceField :no-show-inheritance: .. autoclass:: zope.schema.Iterable @@ -192,12 +185,6 @@ Fields :no-show-inheritance: .. autoclass:: zope.schema.Set .. autoclass:: zope.schema.Sequence -.. autoclass:: zope.schema.SourceText - :no-show-inheritance: -.. autoclass:: zope.schema.Text - :no-show-inheritance: -.. autoclass:: zope.schema.TextLine - :no-show-inheritance: .. autoclass:: zope.schema.Time :no-show-inheritance: .. autoclass:: zope.schema.Timedelta @@ -206,6 +193,35 @@ Fields .. autoclass:: zope.schema.URI :no-show-inheritance: +Strings +------- +.. autoclass:: zope.schema.ASCII + :no-show-inheritance: +.. autoclass:: zope.schema.ASCIILine + :no-show-inheritance: +.. autoclass:: zope.schema.Bytes + :no-show-inheritance: +.. autoclass:: zope.schema.BytesLine + :no-show-inheritance: +.. autoclass:: zope.schema.SourceText + :no-show-inheritance: +.. autoclass:: zope.schema.Text + :no-show-inheritance: +.. autoclass:: zope.schema.TextLine + :no-show-inheritance: + +Numbers +------- +.. autoclass:: zope.schema.Number +.. autoclass:: zope.schema.Complex +.. autoclass:: zope.schema.Real +.. autoclass:: zope.schema.Rational +.. autoclass:: zope.schema.Integral +.. 
autoclass:: zope.schema.Float +.. autoclass:: zope.schema.Int +.. autoclass:: zope.schema.Decimal + + Accessors ========= diff --git a/src/zope/schema/__init__.py b/src/zope/schema/__init__.py index 8fc6825..92300fd 100644 --- a/src/zope/schema/__init__.py +++ b/src/zope/schema/__init__.py @@ -21,6 +21,7 @@ from zope.schema._field import Bytes from zope.schema._field import BytesLine from zope.schema._field import Choice from zope.schema._field import Collection +from zope.schema._field import Complex from zope.schema._field import Container from zope.schema._field import Date from zope.schema._field import Datetime @@ -32,20 +33,24 @@ from zope.schema._field import Float from zope.schema._field import FrozenSet from zope.schema._field import Id from zope.schema._field import Int +from zope.schema._field import Integral from zope.schema._field import InterfaceField from zope.schema._field import Iterable from zope.schema._field import List from zope.schema._field import Mapping +from zope.schema._field import MinMaxLen from zope.schema._field import MutableMapping from zope.schema._field import MutableSequence -from zope.schema._field import MinMaxLen from zope.schema._field import NativeString from zope.schema._field import NativeStringLine +from zope.schema._field import Number from zope.schema._field import Object from zope.schema._field import Orderable from zope.schema._field import Password -from zope.schema._field import Set +from zope.schema._field import Rational +from zope.schema._field import Real from zope.schema._field import Sequence +from zope.schema._field import Set from zope.schema._field import SourceText from zope.schema._field import Text from zope.schema._field import TextLine @@ -77,6 +82,7 @@ __all__ = [ 'BytesLine', 'Choice', 'Collection', + 'Complex', 'Container', 'Date', 'Datetime', @@ -88,6 +94,7 @@ __all__ = [ 'FrozenSet', 'Id', 'Int', + 'Integral', 'InterfaceField', 'Iterable', 'List', @@ -97,9 +104,12 @@ __all__ = [ 'MinMaxLen', 'NativeString', 'NativeStringLine', + 'Number', 'Object', 'Orderable', 'Password', + 'Rational', + 'Real', 'Set', 'Sequence', 'SourceText', diff --git a/src/zope/schema/_bootstrapfields.py b/src/zope/schema/_bootstrapfields.py index 6d613b4..5904068 100644 --- a/src/zope/schema/_bootstrapfields.py +++ b/src/zope/schema/_bootstrapfields.py @@ -15,6 +15,11 @@ """ __docformat__ = 'restructuredtext' +import decimal +import fractions +import numbers +from math import isinf + from zope.interface import Attribute from zope.interface import providedBy from zope.interface import implementer @@ -359,21 +364,24 @@ class Text(MinMaxLen, Field): def fromUnicode(self, str): """ + >>> from zope.schema.interfaces import WrongType + >>> from zope.schema.interfaces import ConstraintNotSatisfied >>> from zope.schema import Text + >>> from zope.schema._compat import text_type >>> t = Text(constraint=lambda v: 'x' in v) - >>> t.fromUnicode(b"foo x spam") + >>> t.fromUnicode(b"foo x spam") # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... - WrongType: ('foo x spam', <type 'unicode'>, '') + zope.schema._bootstrapinterfaces.WrongType: ('foo x spam', <type 'unicode'>, '') >>> result = t.fromUnicode(u"foo x spam") >>> isinstance(result, bytes) False >>> str(result) 'foo x spam' - >>> t.fromUnicode(u"foo spam") + >>> t.fromUnicode(u"foo spam") # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... 
- ConstraintNotSatisfied: (u'foo spam', '') + zope.schema._bootstrapinterfaces.ConstraintNotSatisfied: (u'foo spam', '') """ self.validate(str) return str @@ -453,32 +461,197 @@ class Bool(Field): self.validate(v) return v +class InvalidNumberLiteral(ValueError, ValidationError): + """Invalid number literal.""" + +@implementer(IFromUnicode) +class Number(Orderable, Field): + """ + A field representing a :class:`numbers.Number` and implementing + :class:`zope.schema.interfaces.INumber`. + + The :meth:`fromUnicode` method will attempt to use the smallest or + strictest possible type to represent incoming strings:: + + >>> from zope.schema._bootstrapfields import Number + >>> f = Number() + >>> f.fromUnicode("1") + 1 + >>> f.fromUnicode("125.6") + 125.6 + >>> f.fromUnicode("1+0j") + (1+0j) + >>> f.fromUnicode("1/2") + Fraction(1, 2) + >>> f.fromUnicode(str(2**31234) + '.' + str(2**256)) # doctest: +ELLIPSIS + Decimal('234...936') + >>> f.fromUnicode("not a number") # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + InvalidNumberLiteral: Invalid literal for Decimal: 'not a number' + + .. versionadded:: 4.6.0 + """ + _type = numbers.Number + + # An ordered sequence of conversion routines. These should accept + # a string and produce an object that is an instance of `_type`, or raise + # a ValueError. The order should be most specific/strictest towards least + # restrictive (in other words, lowest in the numeric tower towards highest). + # We break this rule with fractions, though: a floating point number is + # more generally useful and expected than a fraction, so we attempt to parse + # as a float before a fraction. + _unicode_converters = (int, float, fractions.Fraction, complex, decimal.Decimal) + + # The type of error we will raise if all conversions fail. + _validation_error = InvalidNumberLiteral + + def fromUnicode(self, value): + last_exc = None + for converter in self._unicode_converters: + try: + val = converter(value) + if converter is float and isinf(val) and decimal.Decimal in self._unicode_converters: + # Pass this on to decimal, if we're allowed + val = decimal.Decimal(value) + except (ValueError, decimal.InvalidOperation) as e: + last_exc = e + else: + self.validate(val) + return val + try: + raise self._validation_error(*last_exc.args).with_field_and_value(self, value) + finally: + last_exc = None + + +class Complex(Number): + """ + A field representing a :class:`numbers.Complex` and implementing + :class:`zope.schema.interfaces.IComplex`. + + The :meth:`fromUnicode` method is like that for :class:`Number`, + but doesn't allow Decimals:: + + >>> from zope.schema._bootstrapfields import Complex + >>> f = Complex() + >>> f.fromUnicode("1") + 1 + >>> f.fromUnicode("125.6") + 125.6 + >>> f.fromUnicode("1+0j") + (1+0j) + >>> f.fromUnicode("1/2") + Fraction(1, 2) + >>> f.fromUnicode(str(2**31234) + '.' + str(2**256)) # doctest: +ELLIPSIS + inf + >>> f.fromUnicode("not a number") # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + InvalidNumberLiteral: Invalid literal for Decimal: 'not a number' + + .. versionadded:: 4.6.0 + """ + _type = numbers.Complex + _unicode_converters = (int, float, complex, fractions.Fraction) + + +class Real(Complex): + """ + A field representing a :class:`numbers.Real` and implementing + :class:`zope.schema.interfaces.IReal`. 
+ + The :meth:`fromUnicode` method is like that for :class:`Complex`, + but doesn't allow Decimals or complex numbers:: + + >>> from zope.schema._bootstrapfields import Real + >>> f = Real() + >>> f.fromUnicode("1") + 1 + >>> f.fromUnicode("125.6") + 125.6 + >>> f.fromUnicode("1+0j") # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + InvalidNumberLiteral: Invalid literal for Fraction: '1+0j' + >>> f.fromUnicode("1/2") + Fraction(1, 2) + >>> f.fromUnicode(str(2**31234) + '.' + str(2**256)) # doctest: +ELLIPSIS + inf + >>> f.fromUnicode("not a number") # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + InvalidNumberLiteral: Invalid literal for Decimal: 'not a number' + + .. versionadded:: 4.6.0 + """ + _type = numbers.Real + _unicode_converters = (int, float, fractions.Fraction) + + +class Rational(Real): + """ + A field representing a :class:`numbers.Rational` and implementing + :class:`zope.schema.interfaces.IRational`. + + The :meth:`fromUnicode` method is like that for :class:`Real`, + but does not allow arbitrary floating point numbers:: + + >>> from zope.schema._bootstrapfields import Rational + >>> f = Rational() + >>> f.fromUnicode("1") + 1 + >>> f.fromUnicode("1/2") + Fraction(1, 2) + >>> f.fromUnicode("125.6") + Fraction(628, 5) + >>> f.fromUnicode("1+0j") # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + InvalidNumberLiteral: Invalid literal for Fraction: '1+0j' + >>> f.fromUnicode(str(2**31234) + '.' + str(2**256)) # doctest: +ELLIPSIS + Fraction(777..., 330...) + >>> f.fromUnicode("not a number") # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + InvalidNumberLiteral: Invalid literal for Decimal: 'not a number' + + .. versionadded:: 4.6.0 + """ + _type = numbers.Rational + _unicode_converters = (int, fractions.Fraction) + + class InvalidIntLiteral(ValueError, ValidationError): """Invalid int literal.""" -@implementer(IFromUnicode) -class Int(Orderable, Field): - """A field representing an Integer.""" - _type = integer_types +class Integral(Rational): + """ + A field representing a :class:`numbers.Integral` and implementing + :class:`zope.schema.interfaces.IIntegral`. - def __init__(self, *args, **kw): - super(Int, self).__init__(*args, **kw) + The :meth:`fromUnicode` method only allows integral values:: - def fromUnicode(self, str): - """ - >>> from zope.schema._bootstrapfields import Int - >>> f = Int() + >>> from zope.schema._bootstrapfields import Integral + >>> f = Integral() >>> f.fromUnicode("125") 125 >>> f.fromUnicode("125.6") #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... InvalidIntLiteral: invalid literal for int(): 125.6 - """ - try: - v = int(str) - except ValueError as v: - raise InvalidIntLiteral(*v.args).with_field_and_value(self, str) - self.validate(v) - return v + + .. versionadded:: 4.6.0 + """ + _type = numbers.Integral + _unicode_converters = (int,) + _validation_error = InvalidIntLiteral + + +class Int(Integral): + """A field representing a native integer type. and implementing + :class:`zope.schema.interfaces.IInt`. 
+ """ + _type = integer_types + _unicode_converters = (int,) diff --git a/src/zope/schema/_field.py b/src/zope/schema/_field.py index d80ee3c..5ec0d1b 100644 --- a/src/zope/schema/_field.py +++ b/src/zope/schema/_field.py @@ -46,6 +46,8 @@ from zope.schema.interfaces import IBytes from zope.schema.interfaces import IBytesLine from zope.schema.interfaces import IChoice from zope.schema.interfaces import ICollection +from zope.schema.interfaces import IComplex +from zope.schema.interfaces import IContainer from zope.schema.interfaces import IContextSourceBinder from zope.schema.interfaces import IDate from zope.schema.interfaces import IDatetime @@ -57,7 +59,9 @@ from zope.schema.interfaces import IFloat from zope.schema.interfaces import IFromUnicode from zope.schema.interfaces import IFrozenSet from zope.schema.interfaces import IId +from zope.schema.interfaces import IIterable from zope.schema.interfaces import IInt +from zope.schema.interfaces import IIntegral from zope.schema.interfaces import IInterfaceField from zope.schema.interfaces import IList from zope.schema.interfaces import IMinMaxLen @@ -65,7 +69,10 @@ from zope.schema.interfaces import IMapping from zope.schema.interfaces import IMutableMapping from zope.schema.interfaces import IMutableSequence from zope.schema.interfaces import IObject +from zope.schema.interfaces import INumber from zope.schema.interfaces import IPassword +from zope.schema.interfaces import IReal +from zope.schema.interfaces import IRational from zope.schema.interfaces import ISet from zope.schema.interfaces import ISequence from zope.schema.interfaces import ISource @@ -91,6 +98,7 @@ from zope.schema.interfaces import InvalidDottedName from zope.schema.interfaces import ConstraintNotSatisfied from zope.schema._bootstrapfields import Field +from zope.schema._bootstrapfields import Complex from zope.schema._bootstrapfields import Container # API import for __init__ from zope.schema._bootstrapfields import Iterable from zope.schema._bootstrapfields import Orderable @@ -98,7 +106,11 @@ from zope.schema._bootstrapfields import Text from zope.schema._bootstrapfields import TextLine from zope.schema._bootstrapfields import Bool from zope.schema._bootstrapfields import Int +from zope.schema._bootstrapfields import Integral +from zope.schema._bootstrapfields import Number from zope.schema._bootstrapfields import Password +from zope.schema._bootstrapfields import Rational +from zope.schema._bootstrapfields import Real from zope.schema._bootstrapfields import MinMaxLen from zope.schema._bootstrapfields import _NotGiven from zope.schema.fieldproperty import FieldProperty @@ -113,9 +125,6 @@ from zope.schema._compat import binary_type from zope.schema._compat import PY3 from zope.schema._compat import make_binary -# pep 8 friendlyness -Container - # Fix up bootstrap field types Field.title = FieldProperty(IField['title']) Field.description = FieldProperty(IField['description']) @@ -132,6 +141,14 @@ classImplements(TextLine, ITextLine) classImplements(Password, IPassword) classImplements(Bool, IBool) classImplements(Bool, IFromUnicode) +classImplements(Iterable, IIterable) +classImplements(Container, IContainer) + +classImplements(Number, INumber) +classImplements(Complex, IComplex) +classImplements(Real, IReal) +classImplements(Rational, IRational) +classImplements(Integral, IIntegral) classImplements(Int, IInt) @@ -203,48 +220,80 @@ class InvalidFloatLiteral(ValueError, ValidationError): @implementer(IFloat, IFromUnicode) -class Float(Orderable, Field): - __doc__ = 
IFloat.__doc__ +class Float(Real): + """ + A field representing a native :class:`float` and implementing + :class:`zope.schema.interfaces.IFloat`. + + The class :class:`zope.schema.Real` is a more general version, + accepting floats, integers, and fractions. + + The :meth:`fromUnicode` method only accepts values that can be parsed + by the ``float`` constructor:: + + >>> from zope.schema._field import Float + >>> f = Float() + >>> f.fromUnicode("1") + 1.0 + >>> f.fromUnicode("125.6") + 125.6 + >>> f.fromUnicode("1+0j") # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + InvalidFloatLiteral: Invalid literal for float(): 1+0j + >>> f.fromUnicode("1/2") # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + InvalidFloatLiteral: invalid literal for float(): 1/2 + >>> f.fromUnicode(str(2**31234) + '.' + str(2**256)) # doctest: +ELLIPSIS + inf + >>> f.fromUnicode("not a number") # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + InvalidFloatLiteral: could not convert string to float: not a number + """ _type = float - - def __init__(self, *args, **kw): - super(Float, self).__init__(*args, **kw) - - def fromUnicode(self, uc): - """ See IFromUnicode. - """ - try: - v = float(uc) - except ValueError as v: - raise InvalidFloatLiteral(*v.args).with_field_and_value(self, uc) - self.validate(v) - return v + _unicode_converters = (float,) + _validation_error = InvalidFloatLiteral class InvalidDecimalLiteral(ValueError, ValidationError): - - def __init__(self, literal): - super(InvalidDecimalLiteral, self).__init__( - "invalid literal for Decimal(): %s" % literal) + "Raised by decimal fields" @implementer(IDecimal, IFromUnicode) -class Decimal(Orderable, Field): - __doc__ = IDecimal.__doc__ +class Decimal(Number): + """ + A field representing a native :class:`decimal.Decimal` and implementing + :class:`zope.schema.interfaces.IDecimal`. + + The :meth:`fromUnicode` method only accepts values that can be parsed + by the ``Decimal`` constructor:: + + >>> from zope.schema._field import Decimal + >>> f = Decimal() + >>> f.fromUnicode("1") + Decimal('1') + >>> f.fromUnicode("125.6") + Decimal('125.6') + >>> f.fromUnicode("1+0j") # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + InvalidDecimalLiteral: Invalid literal for Decimal(): 1+0j + >>> f.fromUnicode("1/2") # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + InvalidDecimalLiteral: Invalid literal for Decimal(): 1/2 + >>> f.fromUnicode(str(2**31234) + '.' + str(2**256)) # doctest: +ELLIPSIS + Decimal('2349...936') + >>> f.fromUnicode("not a number") # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + InvalidDecimalLiteral: could not convert string to float: not a number + """ _type = decimal.Decimal - - def __init__(self, *args, **kw): - super(Decimal, self).__init__(*args, **kw) - - def fromUnicode(self, uc): - """ See IFromUnicode. 
- """ - try: - v = decimal.Decimal(uc) - except decimal.InvalidOperation: - raise InvalidDecimalLiteral(uc).with_field_and_value(self, uc) - self.validate(v) - return v + _unicode_converters = (decimal.Decimal,) + _validation_error = InvalidDecimalLiteral @implementer(IDatetime) @@ -505,9 +554,10 @@ def _validate_sequence(value_type, value, errors=None): To validate a sequence of various values: - >>> errors = _validate_sequence(field, (b'foo', u'bar', 1)) - >>> errors # XXX assumes Python2 reprs - [WrongType('foo', <type 'unicode'>, ''), WrongType(1, <type 'unicode'>, '')] + >>> from zope.schema._compat import text_type + >>> errors = _validate_sequence(field, (bytearray(b'foo'), u'bar', 1)) + >>> errors + [WrongType(bytearray(b'foo'), <...>, ''), WrongType(1, <...>, '')] The only valid value in the sequence is the second item. The others generated errors. @@ -516,8 +566,8 @@ def _validate_sequence(value_type, value, errors=None): for a new sequence: >>> errors = _validate_sequence(field, (2, u'baz'), errors) - >>> errors # XXX assumes Python2 reprs - [WrongType('foo', <type 'unicode'>, ''), WrongType(1, <type 'unicode'>, ''), WrongType(2, <type 'unicode'>, '')] + >>> errors + [WrongType(bytearray(b'foo'), <...>, ''), WrongType(1, <...>, ''), WrongType(2, <...>, '')] """ if errors is None: diff --git a/src/zope/schema/interfaces.py b/src/zope/schema/interfaces.py index a1c7f35..2868b71 100644 --- a/src/zope/schema/interfaces.py +++ b/src/zope/schema/interfaces.py @@ -25,6 +25,11 @@ from zope.schema._bootstrapfields import Field from zope.schema._bootstrapfields import Text from zope.schema._bootstrapfields import TextLine from zope.schema._bootstrapfields import Bool +from zope.schema._bootstrapfields import Number +from zope.schema._bootstrapfields import Complex +from zope.schema._bootstrapfields import Rational +from zope.schema._bootstrapfields import Real +from zope.schema._bootstrapfields import Integral from zope.schema._bootstrapfields import Int from zope.schema._bootstrapinterfaces import StopValidation from zope.schema._bootstrapinterfaces import ValidationError @@ -364,9 +369,152 @@ else: # pragma: no cover class IPassword(ITextLine): "Field containing a unicode string without newlines that is a password." +### +# Numbers +### -class IInt(IMinMax, IField): - """Field containing an Integer Value.""" +## +# Abstract numbers +## + +class INumber(IMinMax, IField): + """ + Field containing a generic number: :class:`numbers.Number`. + + .. seealso:: :class:`zope.schema.Number` + .. versionadded:: 4.6.0 + """ + min = Number( + title=_("Start of the range"), + required=False, + default=None + ) + + max = Number( + title=_("End of the range (including the value itself)"), + required=False, + default=None + ) + + default = Number( + title=_("Default Value"), + description=_("""The field default value may be None or a legal + field value""") + ) + + +class IComplex(INumber): + """ + Field containing a complex number: :class:`numbers.Complex`. + + .. seealso:: :class:`zope.schema.Real` + .. versionadded:: 4.6.0 + """ + min = Complex( + title=_("Start of the range"), + required=False, + default=None + ) + + max = Complex( + title=_("End of the range (including the value itself)"), + required=False, + default=None + ) + + default = Complex( + title=_("Default Value"), + description=_("""The field default value may be None or a legal + field value""") + ) + + +class IReal(IComplex): + """ + Field containing a real number: :class:`numbers.IReal`. + + .. 
seealso:: :class:`zope.schema.Real` + .. versionadded:: 4.6.0 + """ + min = Real( + title=_("Start of the range"), + required=False, + default=None + ) + + max = Real( + title=_("End of the range (including the value itself)"), + required=False, + default=None + ) + + default = Real( + title=_("Default Value"), + description=_("""The field default value may be None or a legal + field value""") + ) + +class IRational(IReal): + """ + Field containing a rational number: :class:`numbers.IRational`. + + .. seealso:: :class:`zope.schema.Rational` + .. versionadded:: 4.6.0 + """ + + min = Rational( + title=_("Start of the range"), + required=False, + default=None + ) + + max = Rational( + title=_("End of the range (including the value itself)"), + required=False, + default=None + ) + + default = Rational( + title=_("Default Value"), + description=_("""The field default value may be None or a legal + field value""") + ) + +class IIntegral(IRational): + """ + Field containing an integral number: class:`numbers.Integral`. + + .. seealso:: :class:`zope.schema.Integral` + .. versionadded:: 4.6.0 + """ + min = Integral( + title=_("Start of the range"), + required=False, + default=None + ) + + max = Integral( + title=_("End of the range (including the value itself)"), + required=False, + default=None + ) + + default = Integral( + title=_("Default Value"), + description=_("""The field default value may be None or a legal + field value""") + ) +## +# Concrete numbers +## + +class IInt(IIntegral): + """ + Field containing exactly the native class :class:`int` (or, on + Python 2, ``long``). + + .. seealso:: :class:`zope.schema.Int` + """ min = Int( title=_("Start of the range"), @@ -387,13 +535,23 @@ class IInt(IMinMax, IField): ) -class IFloat(IMinMax, IField): - """Field containing a Float.""" +class IFloat(IReal): + """ + Field containing exactly the native class :class:`float`. + + :class:`IReal` is a more general interface, allowing all of + floats, ints, and fractions. + + .. seealso:: :class:`zope.schema.Float` + """ -class IDecimal(IMinMax, IField): - """Field containing a Decimal.""" +class IDecimal(INumber): + """Field containing a :class:`decimal.Decimal`""" +### +# End numbers +### class IDatetime(IMinMax, IField): """Field containing a datetime."""
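As a quick orientation for readers skimming the patch above: the new fields form a class hierarchy that mirrors Python's `numbers` abstract base classes. The sketch below is not part of the recorded patch; the class names come from the diff, while the asserts and sample values are illustrative only.

```python
# Illustrative sketch only; not part of the recorded patch.
# The class names come from the diff above; the asserts restate the
# hierarchy described in the CHANGES.rst entry.
from fractions import Fraction

from zope.schema import (
    Number, Complex, Real, Rational, Integral,  # the new abstract tower
    Int, Float, Decimal,                        # the existing concrete fields
)

# Each field subclasses the next more general one, mirroring numbers.*:
assert issubclass(Integral, Rational)
assert issubclass(Rational, Real)
assert issubclass(Real, Complex)
assert issubclass(Complex, Number)

# The concrete fields slot into the tower as documented:
assert issubclass(Int, Integral)    # exactly int (and long on Python 2)
assert issubclass(Float, Real)      # exactly float
assert issubclass(Decimal, Number)  # decimal.Decimal only registers as a Number

# Validation accepts anything registered with the matching numbers ABC:
Real().validate(Fraction(1, 2))     # a Fraction is a real number
Rational().validate(2)              # an int is rational as well
```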
Add fields/interfaces for the numeric tower

Similar to #11 and its implementation in #47, where we added fields and interfaces for `collections.Sequence`, `collections.MutableSequence`, `collections.Mapping` and `collections.MutableMapping`, I think it makes sense to have fields and interfaces for `numbers.Number`, `numbers.Real`, `numbers.Rational` and `numbers.Integral`. In our own usage, we commonly parse data from JSON, and depending on the browser/client, we can get floats or ints for the same field, so we use a `Number` field that allows `numbers.Number`. But it's badly specified; what we *really* want is a `numbers.Real` field.
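To make that motivation concrete, here is a minimal sketch of the JSON use case using the `Real` field added by the patch above. Only `Real` and `WrongType` come from zope.schema; the payloads and the "price" field are made-up examples, not anything defined by the project.

```python
# Minimal sketch of the use case; the JSON payloads and the "price"
# field below are hypothetical, only the zope.schema names are real.
import json

from zope.schema import Real
from zope.schema.interfaces import WrongType

# A Real field accepts ints, floats and Fractions alike, which is what
# we want for numbers coming out of json.loads().
price = Real(title=u"Price", min=0)

for raw in ('{"price": 10}', '{"price": 10.5}'):
    value = json.loads(raw)["price"]
    price.validate(value)   # passes for both the int and the float
    print(type(value).__name__, value)

try:
    price.validate(1 + 2j)  # complex numbers are rejected
except WrongType:
    print("complex values are not real numbers")
```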
zopefoundation/zope.schema
diff --git a/src/zope/schema/tests/test__bootstrapfields.py b/src/zope/schema/tests/test__bootstrapfields.py index 3e6365a..419378a 100644 --- a/src/zope/schema/tests/test__bootstrapfields.py +++ b/src/zope/schema/tests/test__bootstrapfields.py @@ -11,8 +11,132 @@ # FOR A PARTICULAR PURPOSE. # ############################################################################## +import doctest import unittest +# pylint:disable=protected-access + +class EqualityTestsMixin(object): + + def _getTargetClass(self): + raise NotImplementedError + + def _getTargetInterface(self): + raise NotImplementedError + + def _makeOne(self, *args, **kwargs): + return self._makeOneFromClass(self._getTargetClass(), + *args, + **kwargs) + + def _makeOneFromClass(self, cls, *args, **kwargs): + return cls(*args, **kwargs) + + def test_class_conforms_to_iface(self): + from zope.interface.verify import verifyClass + cls = self._getTargetClass() + __traceback_info__ = cls + verifyClass(self._getTargetInterface(), cls) + return verifyClass + + def test_instance_conforms_to_iface(self): + from zope.interface.verify import verifyObject + instance = self._makeOne() + __traceback_info__ = instance + verifyObject(self._getTargetInterface(), instance) + return verifyObject + + def test_is_hashable(self): + field = self._makeOne() + hash(field) # doesn't raise + + def test_equal_instances_have_same_hash(self): + # Equal objects should have equal hashes + field1 = self._makeOne() + field2 = self._makeOne() + self.assertIsNot(field1, field2) + self.assertEqual(field1, field2) + self.assertEqual(hash(field1), hash(field2)) + + def test_instances_in_different_interfaces_not_equal(self): + from zope import interface + + field1 = self._makeOne() + field2 = self._makeOne() + self.assertEqual(field1, field2) + self.assertEqual(hash(field1), hash(field2)) + + class IOne(interface.Interface): + one = field1 + + class ITwo(interface.Interface): + two = field2 + + self.assertEqual(field1, field1) + self.assertEqual(field2, field2) + self.assertNotEqual(field1, field2) + self.assertNotEqual(hash(field1), hash(field2)) + + def test_hash_across_unequal_instances(self): + # Hash equality does not imply equal objects. + # Our implementation only considers property names, + # not values. That's OK, a dict still does the right thing. + field1 = self._makeOne(title=u'foo') + field2 = self._makeOne(title=u'bar') + self.assertIsNot(field1, field2) + self.assertNotEqual(field1, field2) + self.assertEqual(hash(field1), hash(field2)) + + d = {field1: 42} + self.assertIn(field1, d) + self.assertEqual(42, d[field1]) + self.assertNotIn(field2, d) + with self.assertRaises(KeyError): + d.__getitem__(field2) + + def test___eq___different_type(self): + left = self._makeOne() + + class Derived(self._getTargetClass()): + pass + right = self._makeOneFromClass(Derived) + self.assertNotEqual(left, right) + self.assertTrue(left != right) + + def test___eq___same_type_different_attrs(self): + left = self._makeOne(required=True) + right = self._makeOne(required=False) + self.assertNotEqual(left, right) + self.assertTrue(left != right) + + def test___eq___same_type_same_attrs(self): + left = self._makeOne() + self.assertEqual(left, left) + + right = self._makeOne() + self.assertEqual(left, right) + self.assertFalse(left != right) + + +class OrderableMissingValueMixin(object): + mvm_missing_value = -1 + mvm_default = 0 + + def test_missing_value_no_min_or_max(self): + # We should be able to provide a missing_value without + # also providing a min or max. 
But note that we must still + # provide a default. + # See https://github.com/zopefoundation/zope.schema/issues/9 + Kind = self._getTargetClass() + self.assertTrue(Kind.min._allow_none) + self.assertTrue(Kind.max._allow_none) + + field = self._makeOne(missing_value=self.mvm_missing_value, + default=self.mvm_default) + self.assertIsNone(field.min) + self.assertIsNone(field.max) + self.assertEqual(self.mvm_missing_value, field.missing_value) + class ValidatedPropertyTests(unittest.TestCase): @@ -142,86 +266,6 @@ class DefaultPropertyTests(unittest.TestCase): self.assertEqual(_called_with, [inst.context]) -class EqualityTestsMixin(object): - - def _getTargetClass(self): - raise NotImplementedError - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_is_hashable(self): - field = self._makeOne() - hash(field) # doesn't raise - - def test_equal_instances_have_same_hash(self): - # Equal objects should have equal hashes - field1 = self._makeOne() - field2 = self._makeOne() - self.assertIsNot(field1, field2) - self.assertEqual(field1, field2) - self.assertEqual(hash(field1), hash(field2)) - - def test_instances_in_different_interfaces_not_equal(self): - from zope import interface - - field1 = self._makeOne() - field2 = self._makeOne() - self.assertEqual(field1, field2) - self.assertEqual(hash(field1), hash(field2)) - - class IOne(interface.Interface): - one = field1 - - class ITwo(interface.Interface): - two = field2 - - self.assertEqual(field1, field1) - self.assertEqual(field2, field2) - self.assertNotEqual(field1, field2) - self.assertNotEqual(hash(field1), hash(field2)) - - def test_hash_across_unequal_instances(self): - # Hash equality does not imply equal objects. - # Our implementation only considers property names, - # not values. That's OK, a dict still does the right thing. 
- field1 = self._makeOne(title=u'foo') - field2 = self._makeOne(title=u'bar') - self.assertIsNot(field1, field2) - self.assertNotEqual(field1, field2) - self.assertEqual(hash(field1), hash(field2)) - - d = {field1: 42} - self.assertIn(field1, d) - self.assertEqual(42, d[field1]) - self.assertNotIn(field2, d) - with self.assertRaises(KeyError): - d.__getitem__(field2) - - def test___eq___different_type(self): - left = self._makeOne() - - class Derived(self._getTargetClass()): - pass - right = Derived() - self.assertNotEqual(left, right) - self.assertTrue(left != right) - - def test___eq___same_type_different_attrs(self): - left = self._makeOne(required=True) - right = self._makeOne(required=False) - self.assertNotEqual(left, right) - self.assertTrue(left != right) - - def test___eq___same_type_same_attrs(self): - left = self._makeOne() - self.assertEqual(left, left) - - right = self._makeOne() - self.assertEqual(left, right) - self.assertFalse(left != right) - - class FieldTests(EqualityTestsMixin, unittest.TestCase): @@ -229,8 +273,9 @@ class FieldTests(EqualityTestsMixin, from zope.schema._bootstrapfields import Field return Field - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) + def _getTargetInterface(self): + from zope.schema.interfaces import IField + return IField def test_ctor_defaults(self): @@ -432,6 +477,10 @@ class ContainerTests(EqualityTestsMixin, from zope.schema._bootstrapfields import Container return Container + def _getTargetInterface(self): + from zope.schema.interfaces import IContainer + return IContainer + def test_validate_not_required(self): field = self._makeOne(required=False) field.validate(None) @@ -482,6 +531,10 @@ class IterableTests(ContainerTests): from zope.schema._bootstrapfields import Iterable return Iterable + def _getTargetInterface(self): + from zope.schema.interfaces import IIterable + return IIterable + def test__validate_collection_but_not_iterable(self): from zope.schema._bootstrapinterfaces import NotAnIterator itr = self._makeOne() @@ -566,6 +619,10 @@ class TextTests(EqualityTestsMixin, from zope.schema._bootstrapfields import Text return Text + def _getTargetInterface(self): + from zope.schema.interfaces import IText + return IText + def test_ctor_defaults(self): from zope.schema._compat import text_type txt = self._makeOne() @@ -628,15 +685,9 @@ class TextLineTests(EqualityTestsMixin, from zope.schema._field import TextLine return TextLine - def test_class_conforms_to_ITextLine(self): - from zope.interface.verify import verifyClass - from zope.schema.interfaces import ITextLine - verifyClass(ITextLine, self._getTargetClass()) - - def test_instance_conforms_to_ITextLine(self): - from zope.interface.verify import verifyObject + def _getTargetInterface(self): from zope.schema.interfaces import ITextLine - verifyObject(ITextLine, self._makeOne()) + return ITextLine def test_validate_wrong_types(self): from zope.schema.interfaces import WrongType @@ -675,14 +726,16 @@ class TextLineTests(EqualityTestsMixin, self.assertEqual(field.constraint(u'abc\ndef'), False) -class PasswordTests(unittest.TestCase): +class PasswordTests(EqualityTestsMixin, + unittest.TestCase): def _getTargetClass(self): from zope.schema._bootstrapfields import Password return Password - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) + def _getTargetInterface(self): + from zope.schema.interfaces import IPassword + return IPassword def test_set_unchanged(self): klass = self._getTargetClass() @@ -744,6 +797,10 @@ class 
BoolTests(EqualityTestsMixin, from zope.schema._bootstrapfields import Bool return Bool + def _getTargetInterface(self): + from zope.schema.interfaces import IBool + return IBool + def test_ctor_defaults(self): txt = self._makeOne() self.assertEqual(txt._type, bool) @@ -777,39 +834,87 @@ class BoolTests(EqualityTestsMixin, self.assertEqual(txt.fromUnicode(u'true'), True) -class OrderableMissingValueMixin(object): +class NumberTests(EqualityTestsMixin, + OrderableMissingValueMixin, + unittest.TestCase): - mvm_missing_value = -1 - mvm_default = 0 + def _getTargetClass(self): + from zope.schema._bootstrapfields import Number + return Number - def test_missing_value_no_min_or_max(self): - # We should be able to provide a missing_value without - # also providing a min or max. But note that we must still - # provide a default. - # See https://github.com/zopefoundation/zope.schema/issues/9 - Kind = self._getTargetClass() - self.assertTrue(Kind.min._allow_none) - self.assertTrue(Kind.max._allow_none) + def _getTargetInterface(self): + from zope.schema.interfaces import INumber + return INumber - field = self._makeOne(missing_value=self.mvm_missing_value, - default=self.mvm_default) - self.assertIsNone(field.min) - self.assertIsNone(field.max) - self.assertEqual(self.mvm_missing_value, field.missing_value) + def test_class_conforms_to_iface(self): + from zope.schema._bootstrapinterfaces import IFromUnicode + verifyClass = super(NumberTests, self).test_class_conforms_to_iface() + verifyClass(IFromUnicode, self._getTargetClass()) + + def test_instance_conforms_to_iface(self): + from zope.schema._bootstrapinterfaces import IFromUnicode + verifyObject = super(NumberTests, self).test_instance_conforms_to_iface() + verifyObject(IFromUnicode, self._makeOne()) -class IntTests(EqualityTestsMixin, - OrderableMissingValueMixin, - unittest.TestCase): +class ComplexTests(NumberTests): def _getTargetClass(self): - from zope.schema._bootstrapfields import Int - return Int + from zope.schema._bootstrapfields import Complex + return Complex - def test_ctor_defaults(self): - from zope.schema._compat import integer_types - txt = self._makeOne() - self.assertEqual(txt._type, integer_types) + def _getTargetInterface(self): + from zope.schema.interfaces import IComplex + return IComplex + +class RealTests(NumberTests): + + def _getTargetClass(self): + from zope.schema._bootstrapfields import Real + return Real + + def _getTargetInterface(self): + from zope.schema.interfaces import IReal + return IReal + + def test_ctor_real_min_max(self): + from zope.schema.interfaces import WrongType + from zope.schema.interfaces import TooSmall + from zope.schema.interfaces import TooBig + from fractions import Fraction + + with self.assertRaises(WrongType): + self._makeOne(min='') + with self.assertRaises(WrongType): + self._makeOne(max='') + + field = self._makeOne(min=Fraction(1, 2), max=2) + field.validate(1.0) + field.validate(2.0) + self.assertRaises(TooSmall, field.validate, 0) + self.assertRaises(TooSmall, field.validate, 0.4) + self.assertRaises(TooBig, field.validate, 2.1) + +class RationalTests(NumberTests): + + def _getTargetClass(self): + from zope.schema._bootstrapfields import Rational + return Rational + + def _getTargetInterface(self): + from zope.schema.interfaces import IRational + return IRational + + +class IntegralTests(RationalTests): + + def _getTargetClass(self): + from zope.schema._bootstrapfields import Integral + return Integral + + def _getTargetInterface(self): + from zope.schema.interfaces import 
IIntegral + return IIntegral def test_validate_not_required(self): field = self._makeOne(required=False) @@ -870,6 +975,22 @@ class IntTests(EqualityTestsMixin, self.assertEqual(txt.fromUnicode(u'-1'), -1) +class IntTests(IntegralTests): + + def _getTargetClass(self): + from zope.schema._bootstrapfields import Int + return Int + + def _getTargetInterface(self): + from zope.schema.interfaces import IInt + return IInt + + def test_ctor_defaults(self): + from zope.schema._compat import integer_types + txt = self._makeOne() + self.assertEqual(txt._type, integer_types) + + class DummyInst(object): missing_value = object() @@ -879,3 +1000,14 @@ class DummyInst(object): def validate(self, value): if self._exc is not None: raise self._exc() + + +def test_suite(): + import zope.schema._bootstrapfields + from zope.testing.renormalizing import IGNORE_EXCEPTION_MODULE_IN_PYTHON2 + suite = unittest.defaultTestLoader.loadTestsFromName(__name__) + suite.addTests(doctest.DocTestSuite( + zope.schema._bootstrapfields, + optionflags=doctest.ELLIPSIS|IGNORE_EXCEPTION_MODULE_IN_PYTHON2 + )) + return suite diff --git a/src/zope/schema/tests/test__field.py b/src/zope/schema/tests/test__field.py index c035e85..cb0e127 100644 --- a/src/zope/schema/tests/test__field.py +++ b/src/zope/schema/tests/test__field.py @@ -13,9 +13,12 @@ ############################################################################## import datetime import decimal +import doctest import unittest from zope.schema.tests.test__bootstrapfields import OrderableMissingValueMixin +from zope.schema.tests.test__bootstrapfields import EqualityTestsMixin + # pylint:disable=protected-access # pylint:disable=too-many-lines @@ -23,24 +26,15 @@ from zope.schema.tests.test__bootstrapfields import OrderableMissingValueMixin # pylint:disable=no-member # pylint:disable=blacklisted-name -class BytesTests(unittest.TestCase): +class BytesTests(EqualityTestsMixin, unittest.TestCase): def _getTargetClass(self): from zope.schema._field import Bytes return Bytes - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_class_conforms_to_IBytes(self): - from zope.interface.verify import verifyClass - from zope.schema.interfaces import IBytes - verifyClass(IBytes, self._getTargetClass()) - - def test_instance_conforms_to_IBytes(self): - from zope.interface.verify import verifyObject + def _getTargetInterface(self): from zope.schema.interfaces import IBytes - verifyObject(IBytes, self._makeOne()) + return IBytes def test_validate_wrong_types(self): from zope.schema.interfaces import WrongType @@ -89,24 +83,16 @@ class BytesTests(unittest.TestCase): self.assertEqual(byt.fromUnicode(u'DEADBEEF'), b'DEADBEEF') -class ASCIITests(unittest.TestCase): +class ASCIITests(EqualityTestsMixin, + unittest.TestCase): def _getTargetClass(self): from zope.schema._field import ASCII return ASCII - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_class_conforms_to_IASCII(self): - from zope.interface.verify import verifyClass - from zope.schema.interfaces import IASCII - verifyClass(IASCII, self._getTargetClass()) - - def test_instance_conforms_to_IASCII(self): - from zope.interface.verify import verifyObject + def _getTargetInterface(self): from zope.schema.interfaces import IASCII - verifyObject(IASCII, self._makeOne()) + return IASCII def test_validate_wrong_types(self): from zope.schema.interfaces import WrongType @@ -142,24 +128,16 @@ class ASCIITests(unittest.TestCase): asc._validate(chr(i)) # doesn't raise 
-class BytesLineTests(unittest.TestCase): +class BytesLineTests(EqualityTestsMixin, + unittest.TestCase): def _getTargetClass(self): from zope.schema._field import BytesLine return BytesLine - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_class_conforms_to_IBytesLine(self): - from zope.interface.verify import verifyClass - from zope.schema.interfaces import IBytesLine - verifyClass(IBytesLine, self._getTargetClass()) - - def test_instance_conforms_to_IBytesLine(self): - from zope.interface.verify import verifyObject + def _getTargetInterface(self): from zope.schema.interfaces import IBytesLine - verifyObject(IBytesLine, self._makeOne()) + return IBytesLine def test_validate_wrong_types(self): from zope.schema.interfaces import WrongType @@ -202,24 +180,16 @@ class BytesLineTests(unittest.TestCase): self.assertEqual(field.constraint(b'abc\ndef'), False) -class ASCIILineTests(unittest.TestCase): +class ASCIILineTests(EqualityTestsMixin, + unittest.TestCase): def _getTargetClass(self): from zope.schema._field import ASCIILine return ASCIILine - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_class_conforms_to_IASCIILine(self): - from zope.interface.verify import verifyClass - from zope.schema.interfaces import IASCIILine - verifyClass(IASCIILine, self._getTargetClass()) - - def test_instance_conforms_to_IASCIILine(self): - from zope.interface.verify import verifyObject + def _getTargetInterface(self): from zope.schema.interfaces import IASCIILine - verifyObject(IASCIILine, self._makeOne()) + return IASCIILine def test_validate_wrong_types(self): from zope.schema.interfaces import WrongType @@ -262,7 +232,7 @@ class ASCIILineTests(unittest.TestCase): self.assertEqual(field.constraint('abc\ndef'), False) -class FloatTests(OrderableMissingValueMixin, +class FloatTests(OrderableMissingValueMixin, EqualityTestsMixin, unittest.TestCase): mvm_missing_value = -1.0 @@ -272,18 +242,9 @@ class FloatTests(OrderableMissingValueMixin, from zope.schema._field import Float return Float - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_class_conforms_to_IFloat(self): - from zope.interface.verify import verifyClass - from zope.schema.interfaces import IFloat - verifyClass(IFloat, self._getTargetClass()) - - def test_instance_conforms_to_IFloat(self): - from zope.interface.verify import verifyObject + def _getTargetInterface(self): from zope.schema.interfaces import IFloat - verifyObject(IFloat, self._makeOne()) + return IFloat def test_validate_not_required(self): field = self._makeOne(required=False) @@ -343,7 +304,7 @@ class FloatTests(OrderableMissingValueMixin, self.assertEqual(flt.fromUnicode(u'1.23e6'), 1230000.0) -class DecimalTests(OrderableMissingValueMixin, +class DecimalTests(OrderableMissingValueMixin, EqualityTestsMixin, unittest.TestCase): mvm_missing_value = decimal.Decimal("-1") @@ -353,18 +314,9 @@ class DecimalTests(OrderableMissingValueMixin, from zope.schema._field import Decimal return Decimal - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_class_conforms_to_IDecimal(self): - from zope.interface.verify import verifyClass - from zope.schema.interfaces import IDecimal - verifyClass(IDecimal, self._getTargetClass()) - - def test_instance_conforms_to_IDecimal(self): - from zope.interface.verify import verifyObject + def _getTargetInterface(self): from zope.schema.interfaces import IDecimal - verifyObject(IDecimal, 
self._makeOne()) + return IDecimal def test_validate_not_required(self): field = self._makeOne(required=False) @@ -443,7 +395,7 @@ class DecimalTests(OrderableMissingValueMixin, self.assertEqual(flt.fromUnicode(u'12345.6'), Decimal('12345.6')) -class DatetimeTests(OrderableMissingValueMixin, +class DatetimeTests(OrderableMissingValueMixin, EqualityTestsMixin, unittest.TestCase): mvm_missing_value = datetime.datetime.now() @@ -453,18 +405,9 @@ class DatetimeTests(OrderableMissingValueMixin, from zope.schema._field import Datetime return Datetime - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_class_conforms_to_IDatetime(self): - from zope.interface.verify import verifyClass - from zope.schema.interfaces import IDatetime - verifyClass(IDatetime, self._getTargetClass()) - - def test_instance_conforms_to_IDatetime(self): - from zope.interface.verify import verifyObject + def _getTargetInterface(self): from zope.schema.interfaces import IDatetime - verifyObject(IDatetime, self._makeOne()) + return IDatetime def test_validate_wrong_types(self): from datetime import date @@ -529,7 +472,7 @@ class DatetimeTests(OrderableMissingValueMixin, self.assertRaises(TooBig, field.validate, d5) -class DateTests(OrderableMissingValueMixin, +class DateTests(OrderableMissingValueMixin, EqualityTestsMixin, unittest.TestCase): mvm_missing_value = datetime.date.today() @@ -539,18 +482,9 @@ class DateTests(OrderableMissingValueMixin, from zope.schema._field import Date return Date - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_class_conforms_to_IDate(self): - from zope.interface.verify import verifyClass - from zope.schema.interfaces import IDate - verifyClass(IDate, self._getTargetClass()) - - def test_instance_conforms_to_IDate(self): - from zope.interface.verify import verifyObject + def _getTargetInterface(self): from zope.schema.interfaces import IDate - verifyObject(IDate, self._makeOne()) + return IDate def test_validate_wrong_types(self): from zope.schema.interfaces import WrongType @@ -625,7 +559,7 @@ class DateTests(OrderableMissingValueMixin, self.assertRaises(TooBig, field.validate, d5) -class TimedeltaTests(OrderableMissingValueMixin, +class TimedeltaTests(OrderableMissingValueMixin, EqualityTestsMixin, unittest.TestCase): mvm_missing_value = datetime.timedelta(minutes=15) @@ -635,18 +569,9 @@ class TimedeltaTests(OrderableMissingValueMixin, from zope.schema._field import Timedelta return Timedelta - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_class_conforms_to_ITimedelta(self): - from zope.interface.verify import verifyClass - from zope.schema.interfaces import ITimedelta - verifyClass(ITimedelta, self._getTargetClass()) - - def test_instance_conforms_to_ITimedelta(self): - from zope.interface.verify import verifyObject + def _getTargetInterface(self): from zope.schema.interfaces import ITimedelta - verifyObject(ITimedelta, self._makeOne()) + return ITimedelta def test_validate_not_required(self): from datetime import timedelta @@ -699,7 +624,7 @@ class TimedeltaTests(OrderableMissingValueMixin, self.assertRaises(TooBig, field.validate, t5) -class TimeTests(OrderableMissingValueMixin, +class TimeTests(OrderableMissingValueMixin, EqualityTestsMixin, unittest.TestCase): mvm_missing_value = datetime.time(12, 15, 37) @@ -709,18 +634,9 @@ class TimeTests(OrderableMissingValueMixin, from zope.schema._field import Time return Time - def _makeOne(self, *args, **kw): - 
return self._getTargetClass()(*args, **kw) - - def test_class_conforms_to_ITime(self): - from zope.interface.verify import verifyClass - from zope.schema.interfaces import ITime - verifyClass(ITime, self._getTargetClass()) - - def test_instance_conforms_to_ITime(self): - from zope.interface.verify import verifyObject + def _getTargetInterface(self): from zope.schema.interfaces import ITime - verifyObject(ITime, self._makeOne()) + return ITime def test_validate_not_required(self): from datetime import time @@ -774,7 +690,8 @@ class TimeTests(OrderableMissingValueMixin, self.assertRaises(TooBig, field.validate, t5) -class ChoiceTests(unittest.TestCase): +class ChoiceTests(EqualityTestsMixin, + unittest.TestCase): def setUp(self): from zope.schema.vocabulary import _clear @@ -788,27 +705,31 @@ class ChoiceTests(unittest.TestCase): from zope.schema._field import Choice return Choice - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) + from zope.schema.vocabulary import SimpleVocabulary + # SimpleVocabulary uses identity semantics for equality + _default_vocabulary = SimpleVocabulary.fromValues([1, 2, 3]) - def test_class_conforms_to_IChoice(self): - from zope.interface.verify import verifyClass - from zope.schema.interfaces import IChoice - verifyClass(IChoice, self._getTargetClass()) + def _makeOneFromClass(self, cls, *args, **kwargs): + if (not args + and 'vocabulary' not in kwargs + and 'values' not in kwargs + and 'source' not in kwargs): + kwargs['vocabulary'] = self._default_vocabulary + return super(ChoiceTests, self)._makeOneFromClass(cls, *args, **kwargs) - def test_instance_conforms_to_IChoice(self): - from zope.interface.verify import verifyObject + def _getTargetInterface(self): from zope.schema.interfaces import IChoice - verifyObject(IChoice, self._makeOne(values=[1, 2, 3])) + return IChoice + def test_ctor_wo_values_vocabulary_or_source(self): - self.assertRaises(ValueError, self._makeOne) + self.assertRaises(ValueError, self._getTargetClass()) def test_ctor_invalid_vocabulary(self): - self.assertRaises(ValueError, self._makeOne, vocabulary=object()) + self.assertRaises(ValueError, self._getTargetClass(), vocabulary=object()) def test_ctor_invalid_source(self): - self.assertRaises(ValueError, self._makeOne, source=object()) + self.assertRaises(ValueError, self._getTargetClass(), source=object()) def test_ctor_both_vocabulary_and_source(self): self.assertRaises( @@ -1024,24 +945,16 @@ class ChoiceTests(unittest.TestCase): self.assertRaises(ConstraintNotSatisfied, clone._validate, 42) -class URITests(unittest.TestCase): +class URITests(EqualityTestsMixin, + unittest.TestCase): def _getTargetClass(self): from zope.schema._field import URI return URI - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_class_conforms_to_IURI(self): - from zope.interface.verify import verifyClass - from zope.schema.interfaces import IURI - verifyClass(IURI, self._getTargetClass()) - - def test_instance_conforms_to_IURI(self): - from zope.interface.verify import verifyObject + def _getTargetInterface(self): from zope.schema.interfaces import IURI - verifyObject(IURI, self._makeOne()) + return IURI def test_validate_wrong_types(self): from zope.schema.interfaces import WrongType @@ -1101,24 +1014,16 @@ class URITests(unittest.TestCase): field.fromUnicode, u'http://example.com/\nDAV:') -class DottedNameTests(unittest.TestCase): +class DottedNameTests(EqualityTestsMixin, + unittest.TestCase): def _getTargetClass(self): from 
zope.schema._field import DottedName return DottedName - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_class_conforms_to_IDottedName(self): - from zope.interface.verify import verifyClass - from zope.schema.interfaces import IDottedName - verifyClass(IDottedName, self._getTargetClass()) - - def test_instance_conforms_to_IDottedName(self): - from zope.interface.verify import verifyObject + def _getTargetInterface(self): from zope.schema.interfaces import IDottedName - verifyObject(IDottedName, self._makeOne()) + return IDottedName def test_ctor_defaults(self): dotted = self._makeOne() @@ -1219,24 +1124,16 @@ class DottedNameTests(unittest.TestCase): field.fromUnicode, u'http://example.com/\nDAV:') -class IdTests(unittest.TestCase): +class IdTests(EqualityTestsMixin, + unittest.TestCase): def _getTargetClass(self): from zope.schema._field import Id return Id - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_class_conforms_to_IId(self): - from zope.interface.verify import verifyClass - from zope.schema.interfaces import IId - verifyClass(IId, self._getTargetClass()) - - def test_instance_conforms_to_IId(self): - from zope.interface.verify import verifyObject + def _getTargetInterface(self): from zope.schema.interfaces import IId - verifyObject(IId, self._makeOne()) + return IId def test_validate_wrong_types(self): from zope.schema.interfaces import WrongType @@ -1310,24 +1207,16 @@ class IdTests(unittest.TestCase): field.fromUnicode, u'http://example.com/\nDAV:') -class InterfaceFieldTests(unittest.TestCase): +class InterfaceFieldTests(EqualityTestsMixin, + unittest.TestCase): def _getTargetClass(self): from zope.schema._field import InterfaceField return InterfaceField - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_class_conforms_to_IInterfaceField(self): - from zope.interface.verify import verifyClass - from zope.schema.interfaces import IInterfaceField - verifyClass(IInterfaceField, self._getTargetClass()) - - def test_instance_conforms_to_IInterfaceField(self): - from zope.interface.verify import verifyObject + def _getTargetInterface(self): from zope.schema.interfaces import IInterfaceField - verifyObject(IInterfaceField, self._makeOne()) + return IInterfaceField def test_validate_wrong_types(self): from datetime import date @@ -1375,7 +1264,8 @@ class InterfaceFieldTests(unittest.TestCase): self.assertRaises(RequiredMissing, field.validate, None) -class CollectionTests(unittest.TestCase): +class CollectionTests(EqualityTestsMixin, + unittest.TestCase): _DEFAULT_UNIQUE = False @@ -1387,19 +1277,8 @@ class CollectionTests(unittest.TestCase): from zope.schema.interfaces import ICollection return ICollection - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - _makeCollection = list - def test_class_conforms_to_iface(self): - from zope.interface.verify import verifyClass - verifyClass(self._getTargetInterface(), self._getTargetClass()) - - def test_instance_conforms_to_iface(self): - from zope.interface.verify import verifyObject - verifyObject(self._getTargetInterface(), self._makeOne()) - def test_schema_defined_by_subclass(self): from zope import interface @@ -1714,7 +1593,8 @@ class FrozenSetTests(SetTests): return IFrozenSet -class ObjectTests(unittest.TestCase): +class ObjectTests(EqualityTestsMixin, + unittest.TestCase): def setUp(self): from zope.event import subscribers @@ -1728,10 +1608,14 @@ class 
ObjectTests(unittest.TestCase): from zope.schema._field import Object return Object - def _makeOne(self, schema=None, *args, **kw): + def _getTargetInterface(self): + from zope.schema.interfaces import IObject + return IObject + + def _makeOneFromClass(self, cls, schema=None, *args, **kw): if schema is None: schema = self._makeSchema() - return self._getTargetClass()(schema, *args, **kw) + return super(ObjectTests, self)._makeOneFromClass(cls, schema, *args, **kw) def _makeSchema(self, **kw): from zope.interface import Interface @@ -2125,7 +2009,8 @@ class ObjectTests(unittest.TestCase): field.validate(ValueType()) -class MappingTests(unittest.TestCase): +class MappingTests(EqualityTestsMixin, + unittest.TestCase): def _getTargetClass(self): from zope.schema._field import Mapping @@ -2135,17 +2020,6 @@ class MappingTests(unittest.TestCase): from zope.schema.interfaces import IMapping return IMapping - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_class_conforms_to_iface(self): - from zope.interface.verify import verifyClass - verifyClass(self._getTargetInterface(), self._getTargetClass()) - - def test_instance_conforms_to_iface(self): - from zope.interface.verify import verifyObject - verifyObject(self._getTargetInterface(), self._makeOne()) - def test_ctor_key_type_not_IField(self): self.assertRaises(ValueError, self._makeOne, key_type=object()) @@ -2347,3 +2221,13 @@ def _makeDummyRegistry(v): def get(self, object, name): return self._vocabulary return DummyRegistry(v) + + +def test_suite(): + import zope.schema._field + suite = unittest.defaultTestLoader.loadTestsFromName(__name__) + suite.addTests(doctest.DocTestSuite( + zope.schema._field, + optionflags=doctest.ELLIPSIS + )) + return suite
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_issue_reference", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 6 }
4.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[test]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///croot/attrs_1668696182826/work certifi @ file:///croot/certifi_1671487769961/work/certifi flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work packaging @ file:///croot/packaging_1671697413597/work pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pytest==7.1.2 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work typing_extensions @ file:///croot/typing_extensions_1669924550328/work zipp @ file:///croot/zipp_1672387121353/work zope.event==5.0 zope.exceptions==5.1 zope.interface==6.4.post2 -e git+https://github.com/zopefoundation/zope.schema.git@ff8f7293507663e463a01034b506c73ca9af0718#egg=zope.schema zope.testing==5.0.1 zope.testrunner==6.5
name: zope.schema channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=22.1.0=py37h06a4308_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - flit-core=3.6.0=pyhd3eb1b0_0 - importlib-metadata=4.11.3=py37h06a4308_0 - importlib_metadata=4.11.3=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=22.0=py37h06a4308_0 - pip=22.3.1=py37h06a4308_0 - pluggy=1.0.0=py37h06a4308_1 - py=1.11.0=pyhd3eb1b0_0 - pytest=7.1.2=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py37h06a4308_0 - typing_extensions=4.4.0=py37h06a4308_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zipp=3.11.0=py37h06a4308_0 - zlib=1.2.13=h5eee18b_1 - pip: - zope-event==5.0 - zope-exceptions==5.1 - zope-interface==6.4.post2 - zope-testing==5.0.1 - zope-testrunner==6.5 prefix: /opt/conda/envs/zope.schema
[ "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_ctor_real_min_max", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test___eq___different_type", 
"src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_fromUnicode_hit", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_fromUnicode_miss", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_validate_max", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_validate_min", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_validate_min_and_max", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_validate_required" ]
[ "src/zope/schema/tests/test__bootstrapfields.py::test_suite" ]
[ "src/zope/schema/tests/test__bootstrapfields.py::ValidatedPropertyTests::test___get__", "src/zope/schema/tests/test__bootstrapfields.py::ValidatedPropertyTests::test___set___not_missing_w_check", "src/zope/schema/tests/test__bootstrapfields.py::ValidatedPropertyTests::test___set___not_missing_wo_check", "src/zope/schema/tests/test__bootstrapfields.py::ValidatedPropertyTests::test___set___w_missing_wo_check", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test___get___w_defaultFactory_not_ICAF_no_check", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test___get___w_defaultFactory_w_ICAF_w_check", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test___get___wo_defaultFactory_hit", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test___get___wo_defaultFactory_miss", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test__get___wo_defaultFactory_in_dict", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_bind", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_order_madness", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_w_both_title_and_description", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_w_title_wo_description", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_wo_title_w_description", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_explicit_constraint_default", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_explicit_defaultFactory", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_explicit_defaultFactory_returning_missing_value", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_explicit_required_readonly_missingValue", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_get_hit", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_get_miss", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_query_hit", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_query_miss_no_default", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_query_miss_w_default", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_set_hit", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_set_readonly", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_constraint_fails", 
"src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_constraint_raises_StopValidation", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_missing_and_required", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_missing_not_required", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_wrong_type", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test__validate_collection_but_not_iterable", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test__validate_not_collection_but_iterable", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test__validate_not_collection_not_iterable", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test__validate_w_collections", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test__validate_collection_but_not_iterable", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test__validate_not_collection_but_iterable", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test__validate_not_collection_not_iterable", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test__validate_w_collections", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::OrderableTests::test_ctor_default_too_large", "src/zope/schema/tests/test__bootstrapfields.py::OrderableTests::test_ctor_default_too_small", "src/zope/schema/tests/test__bootstrapfields.py::OrderableTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::MinMaxLenTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::MinMaxLenTests::test_validate_too_long", "src/zope/schema/tests/test__bootstrapfields.py::MinMaxLenTests::test_validate_too_short", 
"src/zope/schema/tests/test__bootstrapfields.py::TextTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_fromUnicode_hit", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_fromUnicode_miss", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_validate_w_invalid_default", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_validate_wrong_types", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_constraint", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_validate_wrong_types", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_constraint", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_instance_conforms_to_iface", 
"src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_set_normal", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_set_unchanged", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_validate_unchanged_already_set", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_validate_unchanged_not_already_set", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test__validate_w_int", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_fromUnicode_hit", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_fromUnicode_miss", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_set_w_int", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_fromUnicode_hit", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_fromUnicode_miss", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_max", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_min", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_min_and_max", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_not_required", 
"src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_required", "src/zope/schema/tests/test__field.py::BytesTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::BytesTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::BytesTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::BytesTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::BytesTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::BytesTests::test_fromUnicode_hit", "src/zope/schema/tests/test__field.py::BytesTests::test_fromUnicode_miss", "src/zope/schema/tests/test__field.py::BytesTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::BytesTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::BytesTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::BytesTests::test_is_hashable", "src/zope/schema/tests/test__field.py::BytesTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::BytesTests::test_validate_required", "src/zope/schema/tests/test__field.py::BytesTests::test_validate_w_invalid_default", "src/zope/schema/tests/test__field.py::BytesTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::ASCIITests::test___eq___different_type", "src/zope/schema/tests/test__field.py::ASCIITests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::ASCIITests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::ASCIITests::test__validate_empty", "src/zope/schema/tests/test__field.py::ASCIITests::test__validate_non_empty_hit", "src/zope/schema/tests/test__field.py::ASCIITests::test__validate_non_empty_miss", "src/zope/schema/tests/test__field.py::ASCIITests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::ASCIITests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::ASCIITests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::ASCIITests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::ASCIITests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::ASCIITests::test_is_hashable", "src/zope/schema/tests/test__field.py::ASCIITests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::BytesLineTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::BytesLineTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::BytesLineTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::BytesLineTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::BytesLineTests::test_constraint", "src/zope/schema/tests/test__field.py::BytesLineTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::BytesLineTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::BytesLineTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::BytesLineTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::BytesLineTests::test_is_hashable", "src/zope/schema/tests/test__field.py::BytesLineTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::BytesLineTests::test_validate_required", "src/zope/schema/tests/test__field.py::BytesLineTests::test_validate_wrong_types", 
"src/zope/schema/tests/test__field.py::ASCIILineTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::ASCIILineTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::ASCIILineTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_constraint", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_is_hashable", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_validate_required", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::FloatTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::FloatTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::FloatTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::FloatTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::FloatTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::FloatTests::test_fromUnicode_hit", "src/zope/schema/tests/test__field.py::FloatTests::test_fromUnicode_miss", "src/zope/schema/tests/test__field.py::FloatTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::FloatTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::FloatTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::FloatTests::test_is_hashable", "src/zope/schema/tests/test__field.py::FloatTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::FloatTests::test_validate_max", "src/zope/schema/tests/test__field.py::FloatTests::test_validate_min", "src/zope/schema/tests/test__field.py::FloatTests::test_validate_min_and_max", "src/zope/schema/tests/test__field.py::FloatTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::FloatTests::test_validate_required", "src/zope/schema/tests/test__field.py::DecimalTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::DecimalTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::DecimalTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::DecimalTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::DecimalTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::DecimalTests::test_fromUnicode_hit", "src/zope/schema/tests/test__field.py::DecimalTests::test_fromUnicode_miss", "src/zope/schema/tests/test__field.py::DecimalTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::DecimalTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::DecimalTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::DecimalTests::test_is_hashable", "src/zope/schema/tests/test__field.py::DecimalTests::test_missing_value_no_min_or_max", 
"src/zope/schema/tests/test__field.py::DecimalTests::test_validate_max", "src/zope/schema/tests/test__field.py::DecimalTests::test_validate_min", "src/zope/schema/tests/test__field.py::DecimalTests::test_validate_min_and_max", "src/zope/schema/tests/test__field.py::DecimalTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::DecimalTests::test_validate_required", "src/zope/schema/tests/test__field.py::DatetimeTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::DatetimeTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::DatetimeTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::DatetimeTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::DatetimeTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::DatetimeTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::DatetimeTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::DatetimeTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::DatetimeTests::test_is_hashable", "src/zope/schema/tests/test__field.py::DatetimeTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_required", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_w_max", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_w_min", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_w_min_and_max", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::DateTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::DateTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::DateTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::DateTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::DateTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::DateTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::DateTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::DateTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::DateTests::test_is_hashable", "src/zope/schema/tests/test__field.py::DateTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::DateTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::DateTests::test_validate_required", "src/zope/schema/tests/test__field.py::DateTests::test_validate_w_max", "src/zope/schema/tests/test__field.py::DateTests::test_validate_w_min", "src/zope/schema/tests/test__field.py::DateTests::test_validate_w_min_and_max", "src/zope/schema/tests/test__field.py::DateTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::TimedeltaTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::TimedeltaTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::TimedeltaTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_equal_instances_have_same_hash", 
"src/zope/schema/tests/test__field.py::TimedeltaTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_is_hashable", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_validate_max", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_validate_min", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_validate_min_and_max", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_validate_required", "src/zope/schema/tests/test__field.py::TimeTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::TimeTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::TimeTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::TimeTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::TimeTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::TimeTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::TimeTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::TimeTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::TimeTests::test_is_hashable", "src/zope/schema/tests/test__field.py::TimeTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::TimeTests::test_validate_max", "src/zope/schema/tests/test__field.py::TimeTests::test_validate_min", "src/zope/schema/tests/test__field.py::TimeTests::test_validate_min_and_max", "src/zope/schema/tests/test__field.py::TimeTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::TimeTests::test_validate_required", "src/zope/schema/tests/test__field.py::ChoiceTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::ChoiceTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::ChoiceTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_int", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_mixed", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_source_is_ICSB_bound", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_source_is_ICSB_unbound", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_string", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_tuple", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_w_named_vocabulary", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_w_named_vocabulary_invalid", "src/zope/schema/tests/test__field.py::ChoiceTests::test_bind_w_preconstructed_vocabulary", "src/zope/schema/tests/test__field.py::ChoiceTests::test_bind_w_voc_is_ICSB", "src/zope/schema/tests/test__field.py::ChoiceTests::test_bind_w_voc_is_ICSB_but_not_ISource", "src/zope/schema/tests/test__field.py::ChoiceTests::test_bind_w_voc_not_ICSB", "src/zope/schema/tests/test__field.py::ChoiceTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_both_vocabulary_and_source", 
"src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_both_vocabulary_and_values", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_invalid_source", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_invalid_vocabulary", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_w_named_vocabulary", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_w_preconstructed_vocabulary", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_w_unicode_non_ascii_values", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_w_values", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_wo_values_vocabulary_or_source", "src/zope/schema/tests/test__field.py::ChoiceTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::ChoiceTests::test_fromUnicode_hit", "src/zope/schema/tests/test__field.py::ChoiceTests::test_fromUnicode_miss", "src/zope/schema/tests/test__field.py::ChoiceTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::ChoiceTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::ChoiceTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::ChoiceTests::test_is_hashable", "src/zope/schema/tests/test__field.py::URITests::test___eq___different_type", "src/zope/schema/tests/test__field.py::URITests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::URITests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::URITests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::URITests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::URITests::test_fromUnicode_invalid", "src/zope/schema/tests/test__field.py::URITests::test_fromUnicode_ok", "src/zope/schema/tests/test__field.py::URITests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::URITests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::URITests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::URITests::test_is_hashable", "src/zope/schema/tests/test__field.py::URITests::test_validate_not_a_uri", "src/zope/schema/tests/test__field.py::URITests::test_validate_not_required", "src/zope/schema/tests/test__field.py::URITests::test_validate_required", "src/zope/schema/tests/test__field.py::URITests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::DottedNameTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::DottedNameTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::DottedNameTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::DottedNameTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::DottedNameTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::DottedNameTests::test_ctor_max_dots_invalid", "src/zope/schema/tests/test__field.py::DottedNameTests::test_ctor_max_dots_valid", "src/zope/schema/tests/test__field.py::DottedNameTests::test_ctor_min_dots_invalid", "src/zope/schema/tests/test__field.py::DottedNameTests::test_ctor_min_dots_valid", "src/zope/schema/tests/test__field.py::DottedNameTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::DottedNameTests::test_fromUnicode_dotted_name_ok", "src/zope/schema/tests/test__field.py::DottedNameTests::test_fromUnicode_invalid", 
"src/zope/schema/tests/test__field.py::DottedNameTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::DottedNameTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::DottedNameTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::DottedNameTests::test_is_hashable", "src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_not_a_dotted_name", "src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_required", "src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_w_max_dots", "src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_w_min_dots", "src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::IdTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::IdTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::IdTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::IdTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::IdTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::IdTests::test_fromUnicode_dotted_name_ok", "src/zope/schema/tests/test__field.py::IdTests::test_fromUnicode_invalid", "src/zope/schema/tests/test__field.py::IdTests::test_fromUnicode_url_ok", "src/zope/schema/tests/test__field.py::IdTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::IdTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::IdTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::IdTests::test_is_hashable", "src/zope/schema/tests/test__field.py::IdTests::test_validate_not_a_uri", "src/zope/schema/tests/test__field.py::IdTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::IdTests::test_validate_required", "src/zope/schema/tests/test__field.py::IdTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_is_hashable", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_validate_required", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::CollectionTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::CollectionTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::CollectionTests::test___eq___same_type_same_attrs", 
"src/zope/schema/tests/test__field.py::CollectionTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::CollectionTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::CollectionTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::CollectionTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::CollectionTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::CollectionTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::CollectionTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::CollectionTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::CollectionTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::CollectionTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::CollectionTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::CollectionTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::CollectionTests::test_is_hashable", "src/zope/schema/tests/test__field.py::CollectionTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::CollectionTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::CollectionTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::CollectionTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::CollectionTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::CollectionTests::test_validate_required", "src/zope/schema/tests/test__field.py::SequenceTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::SequenceTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::SequenceTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::SequenceTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::SequenceTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::SequenceTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::SequenceTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::SequenceTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::SequenceTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::SequenceTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::SequenceTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::SequenceTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::SequenceTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::SequenceTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::SequenceTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::SequenceTests::test_is_hashable", "src/zope/schema/tests/test__field.py::SequenceTests::test_mutable_sequence", "src/zope/schema/tests/test__field.py::SequenceTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::SequenceTests::test_sequence", "src/zope/schema/tests/test__field.py::SequenceTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::SequenceTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::SequenceTests::test_validate_min_length_and_max_length", 
"src/zope/schema/tests/test__field.py::SequenceTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::SequenceTests::test_validate_required", "src/zope/schema/tests/test__field.py::SequenceTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::TupleTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::TupleTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::TupleTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::TupleTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::TupleTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::TupleTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::TupleTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::TupleTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::TupleTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::TupleTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::TupleTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::TupleTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::TupleTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::TupleTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::TupleTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::TupleTests::test_is_hashable", "src/zope/schema/tests/test__field.py::TupleTests::test_mutable_sequence", "src/zope/schema/tests/test__field.py::TupleTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::TupleTests::test_sequence", "src/zope/schema/tests/test__field.py::TupleTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::TupleTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::TupleTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::TupleTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::TupleTests::test_validate_required", "src/zope/schema/tests/test__field.py::TupleTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_hash_across_unequal_instances", 
"src/zope/schema/tests/test__field.py::MutableSequenceTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_is_hashable", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_mutable_sequence", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_sequence", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_required", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::ListTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::ListTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::ListTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::ListTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::ListTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::ListTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::ListTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::ListTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::ListTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::ListTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::ListTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::ListTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::ListTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::ListTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::ListTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::ListTests::test_is_hashable", "src/zope/schema/tests/test__field.py::ListTests::test_mutable_sequence", "src/zope/schema/tests/test__field.py::ListTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::ListTests::test_sequence", "src/zope/schema/tests/test__field.py::ListTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::ListTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::ListTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::ListTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::ListTests::test_validate_required", "src/zope/schema/tests/test__field.py::ListTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::SetTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::SetTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::SetTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::SetTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::SetTests::test__validate_wrong_contained_type", 
"src/zope/schema/tests/test__field.py::SetTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::SetTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::SetTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::SetTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::SetTests::test_ctor_disallows_unique", "src/zope/schema/tests/test__field.py::SetTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::SetTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::SetTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::SetTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::SetTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::SetTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::SetTests::test_is_hashable", "src/zope/schema/tests/test__field.py::SetTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::SetTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::SetTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::SetTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::SetTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::SetTests::test_validate_required", "src/zope/schema/tests/test__field.py::SetTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::FrozenSetTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::FrozenSetTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::FrozenSetTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::FrozenSetTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::FrozenSetTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_ctor_disallows_unique", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_is_hashable", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_required", 
"src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::ObjectTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::ObjectTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::ObjectTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::ObjectTests::test__validate_w_empty_schema", "src/zope/schema/tests/test__field.py::ObjectTests::test__validate_w_value_not_providing_schema", "src/zope/schema/tests/test__field.py::ObjectTests::test__validate_w_value_providing_schema", "src/zope/schema/tests/test__field.py::ObjectTests::test__validate_w_value_providing_schema_but_invalid_fields", "src/zope/schema/tests/test__field.py::ObjectTests::test__validate_w_value_providing_schema_but_missing_fields", "src/zope/schema/tests/test__field.py::ObjectTests::test_class_conforms_to_IObject", "src/zope/schema/tests/test__field.py::ObjectTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::ObjectTests::test_ctor_w_bad_schema", "src/zope/schema/tests/test__field.py::ObjectTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::ObjectTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::ObjectTests::test_instance_conforms_to_IObject", "src/zope/schema/tests/test__field.py::ObjectTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::ObjectTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::ObjectTests::test_is_hashable", "src/zope/schema/tests/test__field.py::ObjectTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::ObjectTests::test_set_allows_IBOAE_subscr_to_replace_value", "src/zope/schema/tests/test__field.py::ObjectTests::test_set_emits_IBOAE", "src/zope/schema/tests/test__field.py::ObjectTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::ObjectTests::test_validate_required", "src/zope/schema/tests/test__field.py::ObjectTests::test_validate_w_cycles", "src/zope/schema/tests/test__field.py::ObjectTests::test_validate_w_cycles_collection_not_valid", "src/zope/schema/tests/test__field.py::ObjectTests::test_validate_w_cycles_object_not_valid", "src/zope/schema/tests/test__field.py::ObjectTests::test_validates_invariants_by_default", "src/zope/schema/tests/test__field.py::MappingTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::MappingTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::MappingTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::MappingTests::test_bind_binds_key_and_value_types", "src/zope/schema/tests/test__field.py::MappingTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::MappingTests::test_ctor_key_type_not_IField", "src/zope/schema/tests/test__field.py::MappingTests::test_ctor_value_type_not_IField", "src/zope/schema/tests/test__field.py::MappingTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::MappingTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::MappingTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::MappingTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::MappingTests::test_is_hashable", "src/zope/schema/tests/test__field.py::MappingTests::test_mapping", 
"src/zope/schema/tests/test__field.py::MappingTests::test_mutable_mapping", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_invalid_key_type", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_invalid_value_type", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_required", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::MutableMappingTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::MutableMappingTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::MutableMappingTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_bind_binds_key_and_value_types", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_ctor_key_type_not_IField", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_ctor_value_type_not_IField", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_is_hashable", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_mapping", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_mutable_mapping", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_invalid_key_type", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_invalid_value_type", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_required", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::DictTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::DictTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::DictTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::DictTests::test_bind_binds_key_and_value_types", "src/zope/schema/tests/test__field.py::DictTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::DictTests::test_ctor_key_type_not_IField", "src/zope/schema/tests/test__field.py::DictTests::test_ctor_value_type_not_IField", "src/zope/schema/tests/test__field.py::DictTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::DictTests::test_hash_across_unequal_instances", 
"src/zope/schema/tests/test__field.py::DictTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::DictTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::DictTests::test_is_hashable", "src/zope/schema/tests/test__field.py::DictTests::test_mapping", "src/zope/schema/tests/test__field.py::DictTests::test_mutable_mapping", "src/zope/schema/tests/test__field.py::DictTests::test_validate_invalid_key_type", "src/zope/schema/tests/test__field.py::DictTests::test_validate_invalid_value_type", "src/zope/schema/tests/test__field.py::DictTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::DictTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::DictTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::DictTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::DictTests::test_validate_required", "src/zope/schema/tests/test__field.py::DictTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::test_suite" ]
[]
Zope Public License 2.1
2,998
[ "src/zope/schema/_field.py", "src/zope/schema/_bootstrapfields.py", "CHANGES.rst", "docs/api.rst", "src/zope/schema/interfaces.py", "src/zope/schema/__init__.py" ]
[ "src/zope/schema/_field.py", "src/zope/schema/_bootstrapfields.py", "CHANGES.rst", "docs/api.rst", "src/zope/schema/interfaces.py", "src/zope/schema/__init__.py" ]
google__mobly-495
c86a37b358b2a04f5838bb8721ce136db1ba356d
2018-08-30 21:57:36
95286a01a566e056d44acfa9577a45bc7f37f51d
winterfroststrom: build failure?
diff --git a/mobly/records.py b/mobly/records.py index 051ff9c..171c0ca 100644 --- a/mobly/records.py +++ b/mobly/records.py @@ -21,6 +21,7 @@ import io import logging import sys import threading +import time import traceback import yaml @@ -52,7 +53,7 @@ class TestSummaryEntryType(enum.Enum): RECORD = 'Record' # A summary of the test run stats, e.g. how many test failed. SUMMARY = 'Summary' - # Information on the controllers used in the test. + # Information on the controllers used in a test class. CONTROLLER_INFO = 'ControllerInfo' # Additional data added by users during test. # This can be added at any point in the test, so do not assume the location @@ -155,8 +156,34 @@ class TestResultEnums(object): TEST_RESULT_ERROR = 'ERROR' +class ControllerInfoRecord(object): + """A record representing the controller info in test results.""" + + KEY_TEST_CLASS = TestResultEnums.RECORD_CLASS + KEY_CONTROLLER_NAME = 'Controller Name' + KEY_CONTROLLER_INFO = 'Controller Info' + KEY_TIMESTAMP = 'Timestamp' + + def __init__(self, test_class, controller_name, info): + self.test_class = test_class + self.controller_name = controller_name + self.controller_info = info + self.timestamp = time.time() + + def to_dict(self): + result = {} + result[self.KEY_TEST_CLASS] = self.test_class + result[self.KEY_CONTROLLER_NAME] = self.controller_name + result[self.KEY_CONTROLLER_INFO] = self.controller_info + result[self.KEY_TIMESTAMP] = self.timestamp + return result + + def __repr__(self): + return str(self.to_dict()) + + class ExceptionRecord(object): - """A wrapper class for representing exception objects in TestResultRecord. + """A record representing exception objects in TestResultRecord. Attributes: exception: Exception object, the original Exception. @@ -235,7 +262,7 @@ class ExceptionRecord(object): result = ExceptionRecord(exception, self.position) result.stacktrace = self.stacktrace result.details = self.details - result.extras = self.extras + result.extras = copy.deepcopy(self.extras) result.position = self.position return result @@ -447,13 +474,14 @@ class TestResult(object): This class is essentially a container of TestResultRecord objects. Attributes: - self.requested: A list of strings, each is the name of a test requested + requested: A list of strings, each is the name of a test requested by user. - self.failed: A list of records for tests failed. - self.executed: A list of records for tests that were actually executed. - self.passed: A list of records for tests passed. - self.skipped: A list of records for tests skipped. - self.error: A list of records for tests with error result token. + failed: A list of records for tests failed. + executed: A list of records for tests that were actually executed. + passed: A list of records for tests passed. + skipped: A list of records for tests skipped. + error: A list of records for tests with error result token. + controller_info: list of ControllerInfoRecord. """ def __init__(self): @@ -463,7 +491,7 @@ class TestResult(object): self.passed = [] self.skipped = [] self.error = [] - self.controller_info = {} + self.controller_info = [] def __add__(self, r): """Overrides '+' operator for TestResult class. @@ -486,12 +514,6 @@ class TestResult(object): l_value = getattr(self, name) if isinstance(r_value, list): setattr(sum_result, name, l_value + r_value) - elif isinstance(r_value, dict): - # '+' operator for TestResult is only valid when multiple - # TestResult objs were created in the same test run, use the - # r-value which is more up to date. 
- # TODO(xpconanfan): have a better way to validate this situation. - setattr(sum_result, name, r_value) return sum_result def add_record(self, record): @@ -517,15 +539,28 @@ class TestResult(object): else: self.error.append(record) - def add_controller_info(self, name, info): + def add_controller_info(self, controller_name, controller_info, + test_class): + """Adds controller info to results. + + This can be called multiple times for each + + Args: + controller_name: string, name of the controller. + controller_info: yaml serializable info about the controller. + test_class: string, a tag for identifying a class. This should be + the test class's own `TAG` attribute. + """ + info = controller_info try: - yaml.dump(info) + yaml.dump(controller_info) except TypeError: - logging.warning('Controller info for %s is not YAML serializable!' - ' Coercing it to string.' % name) - self.controller_info[name] = str(info) - return - self.controller_info[name] = info + logging.warning('The info of controller %s in class "%s" is not ' + 'YAML serializable! Coercing it to string.', + controller_name, test_class) + info = str(controller_info) + self.controller_info.append( + ControllerInfoRecord(test_class, controller_name, info)) def add_class_error(self, test_record): """Add a record to indicate a test class has failed before any test
Custom controller info may not be recorded in summary yaml Controller modules may choose to allow customization of controller info during the test. E.g. `AndroidDevice.add_device_info` adds extra info about the controller objects to the summary yaml. But `TestRunner` only records the controller info at controller registration time, so the customized info may never get recorded, depending on the implementation of the controller module. I.e. if the info is a static string, it will be set at registration time, and subsequent changes to the info field won't be picked up.
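For illustration only, here is a hedged, self-contained sketch of the timing gap described above. `FakeDevice`, its `add_device_info` method, and the standalone `get_info` function are hypothetical stand-ins rather than Mobly's actual classes; the real hook is the optional module-level `get_info(objects)` exercised in the accompanying patch and test patch.

```python
# Hypothetical sketch of why a registration-time snapshot misses runtime info.
class FakeDevice(object):
    """Stand-in for a controller object whose info can grow mid-test."""

    def __init__(self):
        self.user_added_info = {}

    def add_device_info(self, name, value):
        # Mirrors the idea behind AndroidDevice.add_device_info in the issue.
        self.user_added_info[name] = value


def get_info(devices):
    """Stand-in for a controller module's optional get_info hook."""
    return [dict(d.user_added_info) for d in devices]


device = FakeDevice()
snapshot_at_registration = get_info([device])   # captured too early: [{}]
device.add_device_info('build_id', 'ABC123')    # info added during the test
snapshot_at_teardown = get_info([device])       # [{'build_id': 'ABC123'}]

assert snapshot_at_registration == [{}]
assert snapshot_at_teardown == [{'build_id': 'ABC123'}]
```

Recording only the registration-time snapshot drops `build_id`; querying `get_info` again when the test class finishes, as the accompanying `base_test.py` change does, picks it up.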
google/mobly
diff --git a/mobly/base_test.py b/mobly/base_test.py index e4139d9..045fee8 100644 --- a/mobly/base_test.py +++ b/mobly/base_test.py @@ -134,8 +134,9 @@ class BaseTestClass(object): self.current_test_name = None self._generated_test_table = collections.OrderedDict() # Controller object management. - self._controller_registry = {} - self._controller_destructors = {} + self._controller_registry = collections.OrderedDict( + ) # controller_name: objects + self._controller_modules = {} # controller_name: module def __enter__(self): return self @@ -312,22 +313,9 @@ class BaseTestClass(object): # Save a shallow copy of the list for internal usage, so tests can't # affect internal registry by manipulating the object list. self._controller_registry[module_ref_name] = copy.copy(objects) - # Collect controller information and write to test result. - # Implementation of 'get_info' is optional for a controller module. - if hasattr(module, 'get_info'): - controller_info = module.get_info(copy.copy(objects)) - logging.debug('Controller %s: %s', module_config_name, - controller_info) - self.results.add_controller_info(module_config_name, - controller_info) - else: - logging.warning('No optional debug info found for controller %s. ' - 'To provide it, implement get_info in this ' - 'controller module.', module_config_name) logging.debug('Found %d objects for controller %s', len(objects), module_config_name) - destroy_func = module.destroy - self._controller_destructors[module_ref_name] = destroy_func + self._controller_modules[module_ref_name] = module return objects def _unregister_controllers(self): @@ -337,14 +325,30 @@ class BaseTestClass(object): """ # TODO(xpconanfan): actually record these errors instead of just # logging them. - for name, destroy in self._controller_destructors.items(): + for name, module in self._controller_modules.items(): try: logging.debug('Destroying %s.', name) - destroy(self._controller_registry[name]) + module.destroy(self._controller_registry[name]) except: logging.exception('Exception occurred destroying %s.', name) - self._controller_registry = {} - self._controller_destructors = {} + self._controller_registry = collections.OrderedDict() + self._controller_modules = {} + + def _record_controller_info(self): + # Collect controller information and write to test result. + for module_ref_name, objects in self._controller_registry.items(): + module = self._controller_modules[module_ref_name] + try: + controller_info = module.get_info(copy.copy(objects)) + except AttributeError: + logging.warning('No optional debug info found for controller ' + '%s. To provide it, implement `get_info`.', + module_ref_name) + continue + self.results.add_controller_info( + controller_name=module.MOBLY_CONTROLLER_CONFIG_NAME, + controller_info=controller_info, + test_class=self.TAG) def _setup_generated_tests(self): """Proxy function to guarantee the base implementation of @@ -901,6 +905,12 @@ class BaseTestClass(object): setattr(e, 'results', self.results) raise e finally: + # Write controller info and summary to summary file. 
+ self._record_controller_info() + for controller_info in self.results.controller_info: + self.summary_writer.dump( + controller_info.to_dict(), + records.TestSummaryEntryType.CONTROLLER_INFO) self._teardown_class() self._unregister_controllers() logging.info('Summary for test class %s: %s', self.TAG, diff --git a/mobly/test_runner.py b/mobly/test_runner.py index 9c57715..909385b 100644 --- a/mobly/test_runner.py +++ b/mobly/test_runner.py @@ -351,9 +351,6 @@ class TestRunner(object): 'Abort all subsequent test classes. Reason: %s', e) raise finally: - # Write controller info and summary to summary file. - summary_writer.dump(self.results.controller_info, - records.TestSummaryEntryType.CONTROLLER_INFO) summary_writer.dump(self.results.summary_dict(), records.TestSummaryEntryType.SUMMARY) # Stop and show summary. diff --git a/tests/lib/mock_second_controller.py b/tests/lib/mock_second_controller.py new file mode 100644 index 0000000..a4a847a --- /dev/null +++ b/tests/lib/mock_second_controller.py @@ -0,0 +1,59 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This is a second mock third-party controller module used for testing Mobly's +# handling of multiple controller modules. + +import logging + +MOBLY_CONTROLLER_CONFIG_NAME = "AnotherMagicDevice" + + +def create(configs): + objs = [] + for c in configs: + if isinstance(c, dict): + c.pop("serial") + objs.append(AnotherMagicDevice(c)) + return objs + + +def destroy(objs): + print("Destroying other magic") + + +def get_info(objs): + infos = [] + for obj in objs: + infos.append(obj.who_am_i()) + return infos + + +class AnotherMagicDevice(object): + """This controller supports adding controller's info during test. + + It is used for testing that this info is correctly recorded by Mobly. + """ + def __init__(self, config): + self.magic = config + + def get_magic(self): + logging.info("My other magic is %s.", self.magic) + return self.magic + + def set_magic(self, extra_magic): + self.magic['extra_magic'] = extra_magic + + def who_am_i(self): + return {"MyOtherMagic": self.magic} diff --git a/tests/mobly/base_test_test.py b/tests/mobly/base_test_test.py index 898ab5d..ead1bf7 100755 --- a/tests/mobly/base_test_test.py +++ b/tests/mobly/base_test_test.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import copy import io import os import mock @@ -30,6 +31,7 @@ from mobly import signals from tests.lib import utils from tests.lib import mock_controller +from tests.lib import mock_second_controller MSG_EXPECTED_EXCEPTION = "This is an expected exception." MSG_EXPECTED_TEST_FAILURE = "This is an expected test failure." @@ -1784,6 +1786,56 @@ class BaseTestTest(unittest.TestCase): self.assertIsNotNone(c['timestamp']) self.assertTrue(hit) + def test_record_controller_info(self): + """Verifies that controller info is correctly recorded. + + 1. Info added in test is recorded. + 2. Info of multiple controller types are recorded. 
+ """ + mock_test_config = self.mock_test_cls_configs.copy() + mock_ctrlr_config_name = mock_controller.MOBLY_CONTROLLER_CONFIG_NAME + mock_ctrlr_2_config_name = mock_second_controller.MOBLY_CONTROLLER_CONFIG_NAME + my_config = [{'serial': 'xxxx', 'magic': 'Magic'}] + mock_test_config.controller_configs[mock_ctrlr_config_name] = my_config + mock_test_config.controller_configs[ + mock_ctrlr_2_config_name] = copy.copy(my_config) + + class ControllerInfoTest(base_test.BaseTestClass): + """Registers two different controller types and modifies controller + info at runtime. + """ + + def setup_class(self): + self.register_controller(mock_controller) + second_controller = self.register_controller( + mock_second_controller)[0] + # This should appear in recorded controller info. + second_controller.set_magic('haha') + + def test_func(self): + pass + + bt_cls = ControllerInfoTest(mock_test_config) + bt_cls.run() + info1 = bt_cls.results.controller_info[0] + info2 = bt_cls.results.controller_info[1] + self.assertNotEqual(info1.timestamp, info2.timestamp) + self.assertEqual(info1.test_class, 'ControllerInfoTest') + self.assertEqual(info1.controller_name, 'MagicDevice') + self.assertEqual(info1.controller_info, [{ + 'MyMagic': { + 'magic': 'Magic' + } + }]) + self.assertEqual(info2.test_class, 'ControllerInfoTest') + self.assertEqual(info2.controller_name, 'AnotherMagicDevice') + self.assertEqual(info2.controller_info, [{ + 'MyOtherMagic': { + 'magic': 'Magic', + 'extra_magic': 'haha' + } + }]) + def test_register_controller_no_config(self): bt_cls = MockEmptyBaseTest(self.mock_test_cls_configs) with self.assertRaisesRegex(signals.ControllerError, @@ -1812,7 +1864,7 @@ class BaseTestTest(unittest.TestCase): mock_ctrlrs = bt_cls._controller_registry[registered_name] self.assertEqual(mock_ctrlrs[0].magic, 'magic1') self.assertEqual(mock_ctrlrs[1].magic, 'magic2') - self.assertTrue(bt_cls._controller_destructors[registered_name]) + self.assertTrue(bt_cls._controller_modules[registered_name]) expected_msg = 'Controller module .* has already been registered.' with self.assertRaisesRegex(signals.ControllerError, expected_msg): bt_cls.register_controller(mock_controller) @@ -1828,7 +1880,7 @@ class BaseTestTest(unittest.TestCase): } bt_cls = MockEmptyBaseTest(mock_test_config) bt_cls.register_controller(mock_controller) - self.assertEqual(bt_cls.results.controller_info, {}) + self.assertEqual(bt_cls.results.controller_info, []) finally: setattr(mock_controller, 'get_info', get_info) diff --git a/tests/mobly/records_test.py b/tests/mobly/records_test.py index f818e62..a967b8b 100755 --- a/tests/mobly/records_test.py +++ b/tests/mobly/records_test.py @@ -1,13 +1,13 @@ # Copyright 2016 Google Inc. # -# Licensed under the Apache License, Version 2.0 (the "License"); +# Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, +# distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
@@ -46,10 +46,10 @@ class RecordsTest(unittest.TestCase): """ def setUp(self): - self.tn = "test_name" - self.details = "Some details about the test execution." + self.tn = 'test_name' + self.details = 'Some details about the test execution.' self.float_extra = 12345.56789 - self.json_extra = {"ha": "whatever"} + self.json_extra = {'ha': 'whatever'} self.tmp_path = tempfile.mkdtemp() def tearDown(self): @@ -62,8 +62,8 @@ class RecordsTest(unittest.TestCase): self.assertEqual(record.result, result) self.assertEqual(record.details, details) self.assertEqual(record.extras, extras) - self.assertTrue(record.begin_time, "begin time should not be empty.") - self.assertTrue(record.end_time, "end time should not be empty.") + self.assertTrue(record.begin_time, 'begin time should not be empty.') + self.assertTrue(record.end_time, 'end time should not be empty.') # UID is not used at the moment, should always be None. self.assertIsNone(record.uid) # Verify to_dict. @@ -88,7 +88,7 @@ class RecordsTest(unittest.TestCase): self.assertDictEqual(actual_d, d) # Verify that these code paths do not cause crashes and yield non-empty # results. - self.assertTrue(str(record), "str of the record should not be empty.") + self.assertTrue(str(record), 'str of the record should not be empty.') self.assertTrue(repr(record), "the record's repr shouldn't be empty.") """ Begin of Tests """ @@ -239,17 +239,18 @@ class RecordsTest(unittest.TestCase): record1.test_pass(s) tr1 = records.TestResult() tr1.add_record(record1) - tr1.add_controller_info("MockDevice", ["magicA", "magicB"]) + tr1.add_controller_info('SomeClass', 'MockDevice', + ['magicA', 'magicB']) record2 = records.TestResultRecord(self.tn) record2.test_begin() s = signals.TestPass(self.details, self.json_extra) record2.test_pass(s) tr2 = records.TestResult() tr2.add_record(record2) - tr2.add_controller_info("MockDevice", ["magicC"]) + tr2.add_controller_info('SomeClass', 'MockDevice', ['magicC']) tr2 += tr1 self.assertTrue(tr2.passed, [tr1, tr2]) - self.assertTrue(tr2.controller_info, {"MockDevice": ["magicC"]}) + self.assertTrue(tr2.controller_info, {'MockDevice': ['magicC']}) def test_result_add_operator_type_mismatch(self): record1 = records.TestResultRecord(self.tn) @@ -258,9 +259,9 @@ class RecordsTest(unittest.TestCase): record1.test_pass(s) tr1 = records.TestResult() tr1.add_record(record1) - expected_msg = "Operand .* of type .* is not a TestResult." + expected_msg = 'Operand .* of type .* is not a TestResult.' 
with self.assertRaisesRegex(TypeError, expected_msg): - tr1 += "haha" + tr1 += 'haha' def test_result_add_class_error_with_test_signal(self): record1 = records.TestResultRecord(self.tn) @@ -270,7 +271,7 @@ class RecordsTest(unittest.TestCase): tr = records.TestResult() tr.add_record(record1) s = signals.TestFailure(self.details, self.float_extra) - record2 = records.TestResultRecord("SomeTest", s) + record2 = records.TestResultRecord('SomeTest', s) tr.add_class_error(record2) self.assertEqual(len(tr.passed), 1) self.assertEqual(len(tr.error), 1) @@ -289,10 +290,10 @@ class RecordsTest(unittest.TestCase): class SpecialError(Exception): def __init__(self, arg1, arg2): - self.msg = "%s %s" % (arg1, arg2) + self.msg = '%s %s' % (arg1, arg2) - se = SpecialError("haha", 42) - record2 = records.TestResultRecord("SomeTest", se) + se = SpecialError('haha', 42) + record2 = records.TestResultRecord('SomeTest', se) tr.add_class_error(record2) self.assertEqual(len(tr.passed), 1) self.assertEqual(len(tr.error), 1) @@ -334,7 +335,7 @@ class RecordsTest(unittest.TestCase): """ record1 = records.TestResultRecord(self.tn) record1.test_begin() - record1.test_fail(Exception("haha")) + record1.test_fail(Exception('haha')) tr = records.TestResult() tr.add_class_error(record1) self.assertFalse(tr.is_all_pass) @@ -342,7 +343,7 @@ class RecordsTest(unittest.TestCase): def test_is_test_executed(self): record1 = records.TestResultRecord(self.tn) record1.test_begin() - record1.test_fail(Exception("haha")) + record1.test_fail(Exception('haha')) tr = records.TestResult() tr.add_record(record1) self.assertTrue(tr.is_test_executed(record1.test_name)) @@ -415,20 +416,22 @@ class RecordsTest(unittest.TestCase): def test_add_controller_info(self): tr = records.TestResult() self.assertFalse(tr.controller_info) - tr.add_controller_info('MockDevice', ['magicA', 'magicB']) - self.assertTrue(tr.controller_info) - self.assertEqual(tr.controller_info['MockDevice'], + tr.add_controller_info('MockDevice', ['magicA', 'magicB'], 'MyTest') + self.assertTrue(tr.controller_info[0]) + self.assertEqual(tr.controller_info[0].controller_name, 'MockDevice') + self.assertEqual(tr.controller_info[0].controller_info, ['magicA', 'magicB']) @mock.patch('yaml.dump', side_effect=TypeError('ha')) def test_add_controller_info_not_serializable(self, mock_yaml_dump): tr = records.TestResult() self.assertFalse(tr.controller_info) - tr.add_controller_info('MockDevice', ['magicA', 'magicB']) - self.assertTrue(tr.controller_info) - self.assertEqual(tr.controller_info['MockDevice'], + tr.add_controller_info('MockDevice', ['magicA', 'magicB'], 'MyTest') + self.assertTrue(tr.controller_info[0]) + self.assertEqual(tr.controller_info[0].controller_name, 'MockDevice') + self.assertEqual(tr.controller_info[0].controller_info, "['magicA', 'magicB']") -if __name__ == "__main__": +if __name__ == '__main__': unittest.main() diff --git a/tests/mobly/test_runner_test.py b/tests/mobly/test_runner_test.py index d86f60c..9459f44 100755 --- a/tests/mobly/test_runner_test.py +++ b/tests/mobly/test_runner_test.py @@ -55,6 +55,13 @@ class TestRunnerTest(unittest.TestCase): def tearDown(self): shutil.rmtree(self.tmp_dir) + def _assertControllerInfoEqual(self, info, expected_info_dict): + self.assertEqual(expected_info_dict['Controller Name'], + info.controller_name) + self.assertEqual(expected_info_dict['Test Class'], info.test_class) + self.assertEqual(expected_info_dict['Controller Info'], + info.controller_info) + def test_run_twice(self): """Verifies that: 1. 
Repeated run works properly. @@ -81,8 +88,8 @@ class TestRunnerTest(unittest.TestCase): self.assertEqual(results['Requested'], 2) self.assertEqual(results['Executed'], 2) self.assertEqual(results['Passed'], 2) - expected_info = { - 'MagicDevice': [{ + expected_info_dict = { + 'Controller Info': [{ 'MyMagic': { 'magic': 'Magic1' } @@ -90,9 +97,18 @@ class TestRunnerTest(unittest.TestCase): 'MyMagic': { 'magic': 'Magic2' } - }] + }], + 'Controller Name': + 'MagicDevice', + 'Test Class': + 'IntegrationTest', } - self.assertEqual(tr.results.controller_info, expected_info) + self._assertControllerInfoEqual(tr.results.controller_info[0], + expected_info_dict) + self._assertControllerInfoEqual(tr.results.controller_info[1], + expected_info_dict) + self.assertNotEqual(tr.results.controller_info[0].timestamp, + tr.results.controller_info[1].timestamp) def test_summary_file_entries(self): """Verifies the output summary's file format. @@ -123,6 +139,10 @@ class TestRunnerTest(unittest.TestCase): records.TestSummaryEntryType.TEST_NAME_LIST.value) self.assertEqual(summary_entries[1]['Type'], records.TestSummaryEntryType.RECORD.value) + self.assertEqual(summary_entries[2]['Type'], + records.TestSummaryEntryType.CONTROLLER_INFO.value) + self.assertEqual(summary_entries[3]['Type'], + records.TestSummaryEntryType.SUMMARY.value) @mock.patch( 'mobly.controllers.android_device_lib.adb.AdbProxy',
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 1 }
1.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "mock", "numpy>=1.16.0", "pandas>=1.0.0" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
exceptiongroup==1.2.2 future==1.0.0 iniconfig==2.1.0 -e git+https://github.com/google/mobly.git@c86a37b358b2a04f5838bb8721ce136db1ba356d#egg=mobly mock==5.2.0 numpy==2.0.2 packaging==24.2 pandas==2.2.3 pluggy==1.5.0 portpicker==1.6.0 psutil==7.0.0 pyserial==3.5 pytest==8.3.5 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==6.0.2 six==1.17.0 timeout-decorator==0.5.0 tomli==2.2.1 tzdata==2025.2
name: mobly channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - exceptiongroup==1.2.2 - future==1.0.0 - iniconfig==2.1.0 - mock==5.2.0 - numpy==2.0.2 - packaging==24.2 - pandas==2.2.3 - pluggy==1.5.0 - portpicker==1.6.0 - psutil==7.0.0 - pyserial==3.5 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==6.0.2 - six==1.17.0 - timeout-decorator==0.5.0 - tomli==2.2.1 - tzdata==2025.2 prefix: /opt/conda/envs/mobly
[ "tests/mobly/base_test_test.py::BaseTestTest::test_record_controller_info", "tests/mobly/base_test_test.py::BaseTestTest::test_register_controller_no_get_info", "tests/mobly/records_test.py::RecordsTest::test_add_controller_info", "tests/mobly/records_test.py::RecordsTest::test_add_controller_info_not_serializable", "tests/mobly/records_test.py::RecordsTest::test_result_add_operator_success", "tests/mobly/test_runner_test.py::TestRunnerTest::test_run_twice", "tests/mobly/test_runner_test.py::TestRunnerTest::test_run_two_test_classes", "tests/mobly/test_runner_test.py::TestRunnerTest::test_run_two_test_classes_different_configs_and_aliases" ]
[ "tests/mobly/base_test_test.py::BaseTestTest::test_write_user_data", "tests/mobly/records_test.py::RecordsTest::test_summary_user_data", "tests/mobly/records_test.py::RecordsTest::test_summary_write_dump", "tests/mobly/records_test.py::RecordsTest::test_summary_write_dump_with_unicode", "tests/mobly/test_runner_test.py::TestRunnerTest::test_summary_file_entries" ]
[ "tests/mobly/base_test_test.py::BaseTestTest::test_abort_all_in_on_fail", "tests/mobly/base_test_test.py::BaseTestTest::test_abort_all_in_on_fail_from_setup_class", "tests/mobly/base_test_test.py::BaseTestTest::test_abort_all_in_setup_class", "tests/mobly/base_test_test.py::BaseTestTest::test_abort_all_in_setup_test", "tests/mobly/base_test_test.py::BaseTestTest::test_abort_all_in_teardown_class", "tests/mobly/base_test_test.py::BaseTestTest::test_abort_all_in_test", "tests/mobly/base_test_test.py::BaseTestTest::test_abort_class_in_on_fail", "tests/mobly/base_test_test.py::BaseTestTest::test_abort_class_in_setup_test", "tests/mobly/base_test_test.py::BaseTestTest::test_abort_class_in_test", "tests/mobly/base_test_test.py::BaseTestTest::test_abort_class_setup_class", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_fail", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_fail_with_msg", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_pass", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_noop", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_wrong_error", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_wrong_regex", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_pass", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_fail_with_noop", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_fail_with_wrong_error", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_pass", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_true", "tests/mobly/base_test_test.py::BaseTestTest::test_both_teardown_and_test_body_raise_exceptions", "tests/mobly/base_test_test.py::BaseTestTest::test_cli_test_selection_fail_by_convention", "tests/mobly/base_test_test.py::BaseTestTest::test_cli_test_selection_override_self_tests_list", "tests/mobly/base_test_test.py::BaseTestTest::test_current_test_info", "tests/mobly/base_test_test.py::BaseTestTest::test_current_test_info_in_setup_class", "tests/mobly/base_test_test.py::BaseTestTest::test_current_test_name", "tests/mobly/base_test_test.py::BaseTestTest::test_default_execution_of_all_tests", "tests/mobly/base_test_test.py::BaseTestTest::test_exception_objects_in_record", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_equal", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_false", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_in_teardown_test", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_multiple_fails", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_no_op", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_no_raises_custom_msg", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_no_raises_default_msg", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_true", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_true_and_assert_true", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_two_tests", "tests/mobly/base_test_test.py::BaseTestTest::test_explicit_pass", "tests/mobly/base_test_test.py::BaseTestTest::test_explicit_pass_but_teardown_test_raises_an_exception", "tests/mobly/base_test_test.py::BaseTestTest::test_fail", "tests/mobly/base_test_test.py::BaseTestTest::test_failure_in_procedure_functions_is_recorded", "tests/mobly/base_test_test.py::BaseTestTest::test_failure_to_call_procedure_function_is_recorded", 
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_call_outside_of_setup_generated_tests", "tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_dup_test_name", "tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_run", "tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_selected_run", "tests/mobly/base_test_test.py::BaseTestTest::test_implicit_pass", "tests/mobly/base_test_test.py::BaseTestTest::test_missing_requested_test_func", "tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_cannot_modify_original_record", "tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_both_test_and_teardown_test_fails", "tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_setup_class_fails_by_exception", "tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_setup_test_fails_by_exception", "tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_teardown_test_fails", "tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_test_fails", "tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_raise_exception", "tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_triggered_by_setup_class_failure_then_fail_too", "tests/mobly/base_test_test.py::BaseTestTest::test_on_pass_cannot_modify_original_record", "tests/mobly/base_test_test.py::BaseTestTest::test_on_pass_raise_exception", "tests/mobly/base_test_test.py::BaseTestTest::test_procedure_function_gets_correct_record", "tests/mobly/base_test_test.py::BaseTestTest::test_promote_extra_errors_to_termination_signal", "tests/mobly/base_test_test.py::BaseTestTest::test_register_controller_change_return_value", "tests/mobly/base_test_test.py::BaseTestTest::test_register_controller_dup_register", "tests/mobly/base_test_test.py::BaseTestTest::test_register_controller_less_than_min_number", "tests/mobly/base_test_test.py::BaseTestTest::test_register_controller_no_config", "tests/mobly/base_test_test.py::BaseTestTest::test_register_controller_no_config_for_not_required", "tests/mobly/base_test_test.py::BaseTestTest::test_register_controller_return_value", "tests/mobly/base_test_test.py::BaseTestTest::test_self_tests_list", "tests/mobly/base_test_test.py::BaseTestTest::test_self_tests_list_fail_by_convention", "tests/mobly/base_test_test.py::BaseTestTest::test_setup_and_teardown_execution_count", "tests/mobly/base_test_test.py::BaseTestTest::test_setup_class_fail_by_exception", "tests/mobly/base_test_test.py::BaseTestTest::test_setup_generated_tests_failure", "tests/mobly/base_test_test.py::BaseTestTest::test_setup_test_fail_by_exception", "tests/mobly/base_test_test.py::BaseTestTest::test_setup_test_fail_by_test_signal", "tests/mobly/base_test_test.py::BaseTestTest::test_skip", "tests/mobly/base_test_test.py::BaseTestTest::test_skip_if", "tests/mobly/base_test_test.py::BaseTestTest::test_skip_in_setup_test", "tests/mobly/base_test_test.py::BaseTestTest::test_teardown_class_fail_by_exception", "tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_assert_fail", "tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_setup_test_fails", "tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_test_fails", "tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_test_pass", "tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_raise_exception", "tests/mobly/base_test_test.py::BaseTestTest::test_uncaught_exception", 
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_basic", "tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_None", "tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite", "tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite_by_optional_param_list", "tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite_by_required_param_list", "tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional", "tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional_missing", "tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional_with_default", "tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_required", "tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_required_missing", "tests/mobly/base_test_test.py::BaseTestTest::test_verify_controller_module", "tests/mobly/base_test_test.py::BaseTestTest::test_verify_controller_module_missing_attr", "tests/mobly/base_test_test.py::BaseTestTest::test_verify_controller_module_null_attr", "tests/mobly/records_test.py::RecordsTest::test_exception_record_deepcopy", "tests/mobly/records_test.py::RecordsTest::test_is_all_pass", "tests/mobly/records_test.py::RecordsTest::test_is_all_pass_negative", "tests/mobly/records_test.py::RecordsTest::test_is_all_pass_with_add_class_error", "tests/mobly/records_test.py::RecordsTest::test_is_test_executed", "tests/mobly/records_test.py::RecordsTest::test_result_add_class_error_with_special_error", "tests/mobly/records_test.py::RecordsTest::test_result_add_class_error_with_test_signal", "tests/mobly/records_test.py::RecordsTest::test_result_add_operator_type_mismatch", "tests/mobly/records_test.py::RecordsTest::test_result_record_fail_none", "tests/mobly/records_test.py::RecordsTest::test_result_record_fail_stacktrace", "tests/mobly/records_test.py::RecordsTest::test_result_record_fail_with_float_extra", "tests/mobly/records_test.py::RecordsTest::test_result_record_fail_with_json_extra", "tests/mobly/records_test.py::RecordsTest::test_result_record_fail_with_unicode_exception", "tests/mobly/records_test.py::RecordsTest::test_result_record_fail_with_unicode_test_signal", "tests/mobly/records_test.py::RecordsTest::test_result_record_pass_none", "tests/mobly/records_test.py::RecordsTest::test_result_record_pass_with_float_extra", "tests/mobly/records_test.py::RecordsTest::test_result_record_pass_with_json_extra", "tests/mobly/records_test.py::RecordsTest::test_result_record_skip_none", "tests/mobly/records_test.py::RecordsTest::test_result_record_skip_with_float_extra", "tests/mobly/records_test.py::RecordsTest::test_result_record_skip_with_json_extra", "tests/mobly/test_runner_test.py::TestRunnerTest::test_add_test_class_mismatched_log_path", "tests/mobly/test_runner_test.py::TestRunnerTest::test_add_test_class_mismatched_test_bed_name", "tests/mobly/test_runner_test.py::TestRunnerTest::test_main_parse_args", "tests/mobly/test_runner_test.py::TestRunnerTest::test_run_no_tests", "tests/mobly/test_runner_test.py::TestRunnerTest::test_run_with_abort_all", "tests/mobly/test_runner_test.py::TestRunnerTest::test_teardown_logger_before_setup_logger" ]
[]
Apache License 2.0
3,000
[ "mobly/records.py" ]
[ "mobly/records.py" ]
kivy__kivy-5926
5cfa5bf6edeaaa6b3fc3948d76bb6d356e3d0150
2018-08-31 00:21:12
315b7425657d002cf9c35cf2e8b67fb00f9cb030
matham: I wonder if `has_animation_properties` is a better name? And maybe it's better if it's a `@property`? matham: Oh, never mind, the function already exists in master. I'll let someone with animation usage review it. gottadiveintopython: yep it overrides `Animation.have_prop...()` Thanks for the review. tshirtman: I meant for the Sequence and Parallel to have a common method (but i'm not sure which meaning of "like" you used here, if you meant "in the same way as", then ok). i.e: ``` class CompoundAnimation(Animation): def have_property_to_animate(self, widget): … class Sequence(CompoundAnimation): … class Parallel(CompoundAnimation): … ``` i think it is slightly better than having the exact same method in two classes. gottadiveintopython: ah I understand what you mean. `stop()`, `stop_property()`, `cancel()`, `have_properties_to_animate()` are completely the same. So moving them to `CompoundAnimation`, the base class. I feel that doing that loses some flexibility. But anyways I'm doing that now. gottadiveintopython: `Sequence` and `Parallel` are really buggy while `Animation` is not. If I continue fixing those bugs, this PR will become unrelated to the issue (#5443). So I'm probably going to make another PR since the issue is fixed. tshirtman: Yes, there are certainly bugs left in them, thank you for looking into it. gottadiveintopython: np :) well I updated the PR. Can you review it? gottadiveintopython: thanks gottadiveintopython: When I ran [this code](https://gist.github.com/gottadiveintopython/ac5e4e1a33a93c31bc53d6875e2b035c) on the master branch, `Parallel`s were never released. I assume that's the reason why `Parallel` doesn't override `cancel_property()`. That means I should revert 3608092fcefb6ce949a5ae92a6c347569b95f7f8. gottadiveintopython: This PR contains this hacky code: ```python old_max_fps = None def setUpModule(): global old_max_fps old_max_fps = Clock._max_fps Clock._max_fps = 0.0 def tearDownModule(): global old_max_fps Clock._max_fps = old_max_fps ``` Does anyone know a proper way of changing `maxfps` temporarily? gottadiveintopython: ok I'll make this PR simpler. um, should I reset commits? I mean `git reset --hard/soft xxxx`. Since you already spent time reading this PR, should I keep the current commits? tshirtman: It's ok to leave the commits, we can always squash & merge in the interface, if we decide the history is not important in the end. Thank you, just let us know when you are happy about the state, so i can review again :). gottadiveintopython: ok I got it. I might reset commits to make this pr easier to understand though. gottadiveintopython: This should be ok. Can you review it? > it also contains tests that are expected to fail, with comments about things that are currently unclear, so i'm not sure what to make of it sorry for that unreadable test code. I created simple code: [this](https://gist.github.com/gottadiveintopython/cc23a1ecaa6aa5ffd8a2f60445478eaf) and [this](https://gist.github.com/gottadiveintopython/7605ac5e2d0eaebbc68f124619717087) Those are not fixed by this PR and I'm not even sure whether they should be fixed or not. tshirtman: Hm, I'm really sorry this lingered for so long, but the code looks good, and it does still feel worth merging, especially as people still hit the bugs with reusing composed animations from time to time. 
gottadiveintopython: @tshirtman Hi, is it better to `git rebase master` and rewrite the unit tests in pytest style, or better to keep the current state? tshirtman: Hey, if you are willing to, rebasing/updating would be great. gottadiveintopython: ok, I'll do it then. gottadiveintopython: `Parallel` never fires `on_progress`, and I think it's correct behavior. But it might confuse the user, so we may need to document it. tshirtman: > `Parallel` never fires `on_progress`, and I think it's correct behavior. But it might confuse the user, so we may need to document it. > > Or maybe `Parallel` should fire `on_progress` only when `Parallel.anim1` fires `on_progress`. hm, if we can't then it's better not to document, i don't see how only firing for anim1 would be correct, ideally it would fire for both, and have the progress value take both durations into account (so if anim 1 is 3s and anim 2 is 2s, 1s into the first animation, we'd get progress at 0.2, 1/5th of the total), but i didn't check how hard it would be to implement that. gottadiveintopython: > we'd get progress at 0.2, 1/5th of the total Sounds like you are confusing `Sequence` with `Parallel`. But anyways it may be better to keep the current implementation. If we change it to fire `on_progress`, it would probably impact the performance, because `on_progress` is usually fired very frequently. > if we can't then it's better not to document Ok, got it. tshirtman: > > we'd get progress at 0.2, 1/5th of the total > > Sounds like you are confusing `Sequence` with `Parallel`. But anyways it may be better to keep the current implementation. If we change it to fire `on_progress`, it would probably impact the performance, because `on_progress` is usually fired very frequently. > :facepalm: you are right, maybe we could report based on the longest, but yeah, in any case, no need to think about that in this PR, it would be a new feature imho, to be evaluated independently. > > if we can't then it's better not to document > > Ok, got it. :+1: gottadiveintopython: `EventCounter.assert_()` is my concern. gottadiveintopython: @tshirtman I believe the PR is mergeable. Could you check if it's ok or not? > we could report based on the longest Yeah, I agree with that approach if we implement it.
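As an aside to the `Clock._max_fps` question quoted above: since the tests were later rewritten in pytest style, the same temporary override could be expressed as a fixture. This is only a hedged sketch, not part of the PR; it assumes `Clock._max_fps` can be poked directly from tests (exactly what the quoted `setUpModule`/`tearDownModule` snippet already does), and the fixture name `unlimited_fps` is made up.

```python
import pytest

from kivy.clock import Clock


@pytest.fixture
def unlimited_fps():
    # Lift the frame-rate cap for one test, then restore the old value.
    # pytest runs the code after `yield` even if the test body fails.
    old_max_fps = Clock._max_fps
    Clock._max_fps = 0.0
    yield
    Clock._max_fps = old_max_fps
```

A test that needs unthrottled clock ticks would then simply declare `unlimited_fps` as a parameter instead of relying on module-level setup/teardown.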
diff --git a/doc/sources/guide/packaging-ios-prerequisites.rst b/doc/sources/guide/packaging-ios-prerequisites.rst index 577238392..7cfcf2d7e 100644 --- a/doc/sources/guide/packaging-ios-prerequisites.rst +++ b/doc/sources/guide/packaging-ios-prerequisites.rst @@ -40,11 +40,9 @@ The first thing is to ensure you have run the following commands: .. parsed-literal:: - brew install autoconf automake libtool pkg-config mercurial - brew link libtool - brew link mercurial - sudo easy_install pip - sudo pip install |cython_install| + $ brew install autoconf automake libtool pkg-config + $ brew link libtool + $ pip install |cython_install| If you still receive build errors, check your Homebrew is in a healthy state:: diff --git a/doc/sources/guide/packaging-ios.rst b/doc/sources/guide/packaging-ios.rst index dc6a21cad..b767bccdc 100644 --- a/doc/sources/guide/packaging-ios.rst +++ b/doc/sources/guide/packaging-ios.rst @@ -5,7 +5,7 @@ Create a package for IOS .. note:: - Currently, kivy-iOS builds packages with Python 2.7 and 3.7. + Currently, kivy-iOS builds packages with Python 3.7. The overall process for creating a package for IOS can be explained in 4 steps: @@ -23,10 +23,9 @@ those dependencies: .. parsed-literal:: - brew install autoconf automake libtool pkg-config - brew link libtool - sudo easy_install pip - sudo pip install |cython_install| + $ brew install autoconf automake libtool pkg-config + $ brew link libtool + $ pip install |cython_install| For more detail, see :ref:`IOS Prerequisites <packaging_ios_prerequisites>`. Just ensure that everything is ok before starting the second step! @@ -38,12 +37,10 @@ Compile the distribution Open a terminal, and type:: - $ git clone git://github.com/kivy/kivy-ios - $ cd kivy-ios - $ ./toolchain.py build kivy + $ pip install kivy-ios + $ toolchain build kivy -Most of the python distribution is packed into `python27.zip`. If you -experience any issues, please refer to our +If you experience any issues, please refer to our `user group <https://groups.google.com/forum/#!forum/kivy-users>`_ or the `kivy-ios project page <https://github.com/kivy/kivy-ios>`_. @@ -56,11 +53,11 @@ Before proceeding to the next step, ensure your application entry point is a fil named `main.py`. We provide a script that creates an initial Xcode project to start with. In the -command line below, replace `test` with your project name. It must be a +command line below, replace `title` with your project name. It must be a name without any spaces or illegal characters:: - $ ./toolchain.py create <title> <app_directory> - $ ./toolchain.py create Touchtracer ~/code/kivy/examples/demo/touchtracer + $ toolchain create <title> <app_directory> + $ toolchain create Touchtracer ~/code/kivy/examples/demo/touchtracer .. Note:: You must use a fully qualified path to your application directory. @@ -86,11 +83,11 @@ Update the Xcode project Let's say you want to add numpy to your project but you did not compile it prior to creating your XCode project. First, ensure it is built:: - $ ./toolchain.py build numpy + $ toolchain build numpy Then, update your Xcode project:: - $ ./toolchain.py update touchtracer-ios + $ toolchain update touchtracer-ios All the libraries / frameworks necessary to run all the compiled recipes will be added to your Xcode project. @@ -135,15 +132,15 @@ commenting out this line in `main.m`:: Then you should see all the Kivy logging on the Xcode console. -How can Apple accept a python app ? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +How can Apple accept a python app? 
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We managed to merge the app binary with all the libraries into a single binary, called libpython. This means all binary modules are loaded beforehand, so nothing is dynamically loaded. -Have you already submited a Kivy application to the App store ? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Have you already submited a Kivy application to the App store? +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Yes, check: diff --git a/kivy/animation.py b/kivy/animation.py index 83123a594..fa64b187a 100644 --- a/kivy/animation.py +++ b/kivy/animation.py @@ -84,6 +84,7 @@ already in place in the animation module. __all__ = ('Animation', 'AnimationTransition') from math import sqrt, cos, sin, pi +from collections import ChainMap from kivy.event import EventDispatcher from kivy.clock import Clock from kivy.compat import string_types, iterkeys @@ -128,7 +129,7 @@ class Animation(EventDispatcher): __events__ = ('on_start', 'on_progress', 'on_complete') def __init__(self, **kw): - super(Animation, self).__init__() + super().__init__() # Initialize self._clock_installed = False self._duration = kw.pop('d', kw.pop('duration', 1.)) @@ -391,41 +392,7 @@ class Animation(EventDispatcher): return Parallel(self, animation) -class Sequence(Animation): - - def __init__(self, anim1, anim2): - super(Sequence, self).__init__() - - #: Repeat the sequence. See 'Repeating animation' in the header - #: documentation. - self.repeat = False - - self.anim1 = anim1 - self.anim2 = anim2 - - self.anim1.bind(on_start=self.on_anim1_start, - on_progress=self.on_anim1_progress) - self.anim2.bind(on_complete=self.on_anim2_complete, - on_progress=self.on_anim2_progress) - - @property - def duration(self): - return self.anim1.duration + self.anim2.duration - - def start(self, widget): - self.stop(widget) - self._widgets[widget.uid] = True - self._register() - self.anim1.start(widget) - self.anim1.bind(on_complete=self.on_anim1_complete) - - def stop(self, widget): - self.anim1.stop(widget) - self.anim2.stop(widget) - props = self._widgets.pop(widget.uid, None) - if props: - self.dispatch('on_complete', widget) - super(Sequence, self).cancel(widget) +class CompoundAnimation(Animation): def stop_property(self, widget, prop): self.anim1.stop_property(widget, prop) @@ -437,7 +404,7 @@ class Sequence(Animation): def cancel(self, widget): self.anim1.cancel(widget) self.anim2.cancel(widget) - super(Sequence, self).cancel(widget) + super().cancel(widget) def cancel_property(self, widget, prop): '''Even if an animation is running, remove a property. It will not be @@ -455,11 +422,62 @@ class Sequence(Animation): not self.anim2.have_properties_to_animate(widget)): self.cancel(widget) - def on_anim1_start(self, instance, widget): + def have_properties_to_animate(self, widget): + return (self.anim1.have_properties_to_animate(widget) or + self.anim2.have_properties_to_animate(widget)) + + @property + def animated_properties(self): + return ChainMap({}, + self.anim2.animated_properties, + self.anim1.animated_properties) + + @property + def transition(self): + # This property is impossible to implement + raise AttributeError( + "Can't lookup transition attribute of a CompoundAnimation") + + +class Sequence(CompoundAnimation): + + def __init__(self, anim1, anim2): + super().__init__() + + #: Repeat the sequence. See 'Repeating animation' in the header + #: documentation. 
+ self.repeat = False + + self.anim1 = anim1 + self.anim2 = anim2 + + self.anim1.bind(on_complete=self.on_anim1_complete, + on_progress=self.on_anim1_progress) + self.anim2.bind(on_complete=self.on_anim2_complete, + on_progress=self.on_anim2_progress) + + @property + def duration(self): + return self.anim1.duration + self.anim2.duration + + def stop(self, widget): + props = self._widgets.pop(widget.uid, None) + self.anim1.stop(widget) + self.anim2.stop(widget) + if props: + self.dispatch('on_complete', widget) + super().cancel(widget) + + def start(self, widget): + self.stop(widget) + self._widgets[widget.uid] = True + self._register() self.dispatch('on_start', widget) + self.anim1.start(widget) def on_anim1_complete(self, instance, widget): - self.anim1.unbind(on_complete=self.on_anim1_complete) + if widget.uid not in self._widgets: + return self.anim2.start(widget) def on_anim1_progress(self, instance, widget, progress): @@ -470,9 +488,10 @@ class Sequence(Animation): .. versionadded:: 1.7.1 ''' + if widget.uid not in self._widgets: + return if self.repeat: self.anim1.start(widget) - self.anim1.bind(on_complete=self.on_anim1_complete) else: self.dispatch('on_complete', widget) self.cancel(widget) @@ -481,10 +500,10 @@ class Sequence(Animation): self.dispatch('on_progress', widget, .5 + progress / 2.) -class Parallel(Animation): +class Parallel(CompoundAnimation): def __init__(self, anim1, anim2): - super(Parallel, self).__init__() + super().__init__() self.anim1 = anim1 self.anim2 = anim2 @@ -495,6 +514,13 @@ class Parallel(Animation): def duration(self): return max(self.anim1.duration, self.anim2.duration) + def stop(self, widget): + self.anim1.stop(widget) + self.anim2.stop(widget) + if self._widgets.pop(widget.uid, None): + self.dispatch('on_complete', widget) + super().cancel(widget) + def start(self, widget): self.stop(widget) self.anim1.start(widget) @@ -503,33 +529,13 @@ class Parallel(Animation): self._register() self.dispatch('on_start', widget) - def stop(self, widget): - self.anim1.stop(widget) - self.anim2.stop(widget) - props = self._widgets.pop(widget.uid, None) - if props: - self.dispatch('on_complete', widget) - super(Parallel, self).cancel(widget) - - def stop_property(self, widget, prop): - self.anim1.stop_property(widget, prop) - self.anim2.stop_property(widget, prop) - if (not self.anim1.have_properties_to_animate(widget) and - not self.anim2.have_properties_to_animate(widget)): - self.stop(widget) - - def cancel(self, widget): - self.anim1.cancel(widget) - self.anim2.cancel(widget) - super(Parallel, self).cancel(widget) - def on_anim_complete(self, instance, widget): self._widgets[widget.uid]['complete'] += 1 if self._widgets[widget.uid]['complete'] == 2: self.stop(widget) -class AnimationTransition(object): +class AnimationTransition: '''Collection of animation functions to be used with the Animation object. 
Easing Functions ported to Kivy from the Clutter Project https://developer.gnome.org/clutter/stable/ClutterAlpha.html diff --git a/kivy/input/providers/hidinput.py b/kivy/input/providers/hidinput.py index 7259d4d9f..42435e27c 100644 --- a/kivy/input/providers/hidinput.py +++ b/kivy/input/providers/hidinput.py @@ -21,10 +21,14 @@ To fix that, you can add these options to the argument line: * invert_x : 1 to invert X axis * invert_y : 1 to invert Y axis -* min_position_x : X minimum -* max_position_x : X maximum -* min_position_y : Y minimum -* max_position_y : Y maximum +* min_position_x : X relative minimum +* max_position_x : X relative maximum +* min_position_y : Y relative minimum +* max_position_y : Y relative maximum +* min_abs_x : X absolute minimum +* min_abs_y : Y absolute minimum +* max_abs_x : X absolute maximum +* max_abs_y : Y absolute maximum * min_pressure : pressure minimum * max_pressure : pressure maximum * rotation : rotate the input coordinate (0, 90, 180, 270)
kivy.animation.Sequence sends "on_complete" event while animation is still in progress ### Versions * Python: 2.7 * OS: Linux Ubuntu 16.04 * Kivy: 1.10.1.dev0 * Kivy installation method: git clone and python setup.py install ### Description The program uses Animation to pulse a Widget color from black to red and back again. When the animation completes, the Widget is supposed to be black, but it is red. Could be a canvas update problem, could be an animation bug. ### Code and Logs ```python from kivy.app import App from kivy.uix.widget import Widget from kivy.uix.button import Button from kivy.uix.boxlayout import BoxLayout from kivy.graphics import Color, Line, Rectangle from kivy.animation import Animation class Blinker(Widget): def __init__(self, *args, **kwargs): super(Blinker, self).__init__(*args, **kwargs) self.color = None self.animation = None self.draw() self.bind(size=self.draw, pos=self.draw, parent=self.draw) def draw(self, *args): self.canvas.clear() with self.canvas: self.color = Color(0, 0, 0, 1) Rectangle(pos=self.pos, size=self.size) def repaint(self, *args): if self.color is not None: self.color.rgba = (0, 0, 0, 1) self.canvas.ask_update() print 'color.rgba is', self.color.rgba def start(self, *args): if self.color is not None: self.animation = Animation(r=1, duration=0.5) + Animation(r=0, duration=0.5) self.animation.repeat = True self.animation.bind(on_complete=self.repaint) self.animation.start(self.color) def stop(self, *args): if self.animation is not None: self.animation.stop(self.color) self.animation = None class A(App): def stop_start_animation(self, *args): if self.blinker.animation is not None: self.blinker.stop() self.button.text = "Start" else: self.blinker.start() self.button.text = "Stop" def build(self): top = BoxLayout(orientation="horizontal") self.button = (Button(on_press=self.stop_start_animation, text="Start")) top.add_widget(self.button) self.blinker = Blinker() top.add_widget(self.blinker) return top if __name__ == "__main__": A().run() ``` Logs: ``` (testkivy) wjanssen@archie:/local/kivy-1.10$ python ~/projects/cbmx/platform/t.py [INFO ] [Logger ] Record log in /home/wjanssen/.kivy/logs/kivy_17-05-31_34.txt [WARNING] [Modules ] Module <clipboard> not found [WARNING] [Modules ] Module <cutbuffer> not found [INFO ] [Factory ] 194 symbols loaded [INFO ] [Image ] Providers: img_tex, img_dds, img_sdl2, img_pil, img_gif (img_ffpyplayer ignored) [INFO ] [Text ] Provider: sdl2 [INFO ] [Kivy ] v1.10.1.dev0, git-b7906e7, 20170531 [INFO ] [Python ] v2.7.13 |Continuum Analytics, Inc.| (default, Dec 20 2016, 23:09:15) [GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] [INFO ] [OSC ] using <multiprocessing> for socket [INFO ] [Window ] Provider: sdl2(['window_egl_rpi'] ignored) [INFO ] [GL ] Using the "OpenGL" graphics system [INFO ] [GL ] Backend used <gl> [INFO ] [GL ] OpenGL version <3.0 Mesa 12.0.6> [INFO ] [GL ] OpenGL vendor <Intel Open Source Technology Center> [INFO ] [GL ] OpenGL renderer <Mesa DRI Intel(R) HD Graphics 5500 (Broadwell GT2) > [INFO ] [GL ] OpenGL parsed version: 3, 0 [INFO ] [GL ] Shading version <1.30> [INFO ] [GL ] Texture max size <8192> [INFO ] [GL ] Texture max units <32> [INFO ] [Window ] auto add sdl2 input provider [WARNING] [Modules ] Module <clipboard> not found [WARNING] [Modules ] Module <cutbuffer> not found [INFO ] [Window ] virtual keyboard not allowed, single mode, not docked [INFO ] [Base ] Start application main loop [INFO ] [GL ] NPOT texture support is available color.rgba is [0.0, 0.0, 0.0, 1.0] color.rgba is [0.0, 0.0, 0.0, 
1.0] color.rgba is [0.0, 0.0, 0.0, 1.0] [INFO ] [Base ] Leaving application in progress... ```
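One possible workaround for the report above, offered only as a sketch independent of the patch in this row: do the final repaint after the repeating Sequence has been stopped, rather than from its `on_complete` handler, which fires while the sequence may still be driving the colour. The helper below assumes a `Blinker` instance as defined in the report.

```python
def stop_blinking(blinker):
    """Stop the repeating sequence, then restore the resting colour.

    ``blinker`` is assumed to be an instance of the Blinker class from the
    report above. Resetting the colour here avoids racing an animation
    that is still running when on_complete is dispatched.
    """
    if blinker.animation is not None:
        blinker.animation.stop(blinker.color)
        blinker.animation = None
    if blinker.color is not None:
        blinker.color.rgba = (0, 0, 0, 1)
        blinker.canvas.ask_update()
```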
kivy/kivy
diff --git a/kivy/tests/test_animations.py b/kivy/tests/test_animations.py index 222297cc1..e6e09d48b 100644 --- a/kivy/tests/test_animations.py +++ b/kivy/tests/test_animations.py @@ -2,139 +2,384 @@ Animations tests ================ ''' +import pytest -import unittest -import gc -from time import time, sleep -from kivy.animation import Animation, AnimationTransition -from kivy.uix.widget import Widget -from kivy.clock import Clock -from kivy.graphics import Scale -from kivy.weakproxy import WeakProxy [email protected](scope='module') +def ec_cls(): + class EventCounter: + def __init__(self, anim): + self.n_start = 0 + self.n_progress = 0 + self.n_complete = 0 + anim.bind(on_start=self.on_start, + on_progress=self.on_progress, + on_complete=self.on_complete) -class AnimationTestCase(unittest.TestCase): - def sleep(self, t): - start = time() - while time() < start + t: - sleep(.01) - Clock.tick() + def on_start(self, anim, widget): + self.n_start += 1 - def setUp(self): - self.assertEqual(len(Animation._instances), 0) - self.a = Animation(x=100, d=1, t='out_bounce') - self.w = Widget() + def on_progress(self, anim, widget, progress): + self.n_progress += 1 - def tearDown(self): - self.assertEqual(len(Animation._instances), 0) + def on_complete(self, anim, widget): + self.n_complete += 1 + + def assert_(self, n_start, n_progress_greater_than_zero, n_complete): + assert self.n_start == n_start + if n_progress_greater_than_zero: + assert self.n_progress > 0 + else: + assert self.n_progress == 0 + assert self.n_complete == n_complete + return EventCounter + + [email protected](autouse=True) +def cleanup(): + from kivy.animation import Animation + for anim in Animation._instances.copy(): + anim.cancel() + Animation._instances.clear() + + +def no_animations_being_played(): + from kivy.animation import Animation + return len(Animation._instances) == 0 + + +def sleep(t): + from time import time, sleep + from kivy.clock import Clock + tick = Clock.tick + start = time() + while time() < start + t: + sleep(.01) + tick() + + +class TestAnimation: def test_start_animation(self): - self.a.start(self.w) - self.sleep(1.5) - self.assertAlmostEqual(self.w.x, 100) + from kivy.animation import Animation + from kivy.uix.widget import Widget + a = Animation(x=100, d=1) + w = Widget() + a.start(w) + sleep(1.5) + assert w.x == pytest.approx(100) + assert no_animations_being_played() def test_animation_duration_0(self): + from kivy.animation import Animation + from kivy.uix.widget import Widget a = Animation(x=100, d=0) - a.start(self.w) - self.sleep(.5) + w = Widget() + a.start(w) + sleep(.5) + assert no_animations_being_played() def test_stop_animation(self): - self.a.start(self.w) - self.sleep(.5) - self.a.stop(self.w) - self.assertNotAlmostEqual(self.w.x, 100) - self.assertNotAlmostEqual(self.w.x, 0) + from kivy.animation import Animation + from kivy.uix.widget import Widget + a = Animation(x=100, d=1) + w = Widget() + a.start(w) + sleep(.5) + a.stop(w) + assert w.x != pytest.approx(100) + assert w.x != pytest.approx(0) + assert no_animations_being_played() def test_stop_all(self): - self.a.start(self.w) - self.sleep(.5) - Animation.stop_all(self.w) + from kivy.animation import Animation + from kivy.uix.widget import Widget + a = Animation(x=100, d=1) + w = Widget() + a.start(w) + sleep(.5) + Animation.stop_all(w) + assert no_animations_being_played() def test_stop_all_2(self): - self.a.start(self.w) - self.sleep(.5) - Animation.stop_all(self.w, 'x') + from kivy.animation import Animation + from 
kivy.uix.widget import Widget + a = Animation(x=100, d=1) + w = Widget() + a.start(w) + sleep(.5) + Animation.stop_all(w, 'x') + assert no_animations_being_played() def test_duration(self): - self.assertEqual(self.a.duration, 1) + from kivy.animation import Animation + a = Animation(x=100, d=1) + assert a.duration == 1 def test_transition(self): - self.assertEqual(self.a.transition, AnimationTransition.out_bounce) + from kivy.animation import Animation, AnimationTransition + a = Animation(x=100, t='out_bounce') + assert a.transition is AnimationTransition.out_bounce def test_animated_properties(self): - self.assertEqual(self.a.animated_properties['x'], 100) + from kivy.animation import Animation + a = Animation(x=100) + assert a.animated_properties == {'x': 100, } def test_animated_instruction(self): + from kivy.graphics import Scale + from kivy.animation import Animation + a = Animation(x=100, d=1) instruction = Scale(3) - self.a.start(instruction) - self.assertEqual(self.a.animated_properties['x'], 100) - self.assertAlmostEqual(instruction.x, 3) - self.sleep(1.5) - self.assertAlmostEqual(instruction.x, 100) + a.start(instruction) + assert a.animated_properties == {'x': 100, } + assert instruction.x == pytest.approx(3) + sleep(1.5) + assert instruction.x == pytest.approx(100) + assert no_animations_being_played() def test_weakref(self): - widget = Widget() - anim = Animation(x=100) - anim.start(widget.proxy_ref) - del widget + import gc + from kivy.animation import Animation + from kivy.uix.widget import Widget + w = Widget() + a = Animation(x=100) + a.start(w.proxy_ref) + del w gc.collect() try: - self.sleep(1.) + sleep(1.) except ReferenceError: pass + assert no_animations_being_played() -class SequentialAnimationTestCase(unittest.TestCase): - - def sleep(self, t): - start = time() - while time() < start + t: - sleep(.01) - Clock.tick() - - def setUp(self): - self.assertEqual(len(Animation._instances), 0) - self.a = Animation(x=100, d=1, t='out_bounce') - self.a += Animation(x=0, d=1, t='out_bounce') - self.w = Widget() - - def tearDown(self): - self.assertEqual(len(Animation._instances), 0) +class TestSequence: def test_cancel_all(self): - self.a.start(self.w) - self.sleep(.5) - Animation.cancel_all(self.w) + from kivy.animation import Animation + from kivy.uix.widget import Widget + a = Animation(x=100) + Animation(x=0) + w = Widget() + a.start(w) + sleep(.5) + Animation.cancel_all(w) + assert no_animations_being_played() def test_cancel_all_2(self): - self.a.start(self.w) - self.sleep(.5) - Animation.cancel_all(self.w, 'x') + from kivy.animation import Animation + from kivy.uix.widget import Widget + a = Animation(x=100) + Animation(x=0) + w = Widget() + a.start(w) + sleep(.5) + Animation.cancel_all(w, 'x') + assert no_animations_being_played() def test_stop_all(self): - self.a.start(self.w) - self.sleep(.5) - Animation.stop_all(self.w) + from kivy.animation import Animation + from kivy.uix.widget import Widget + a = Animation(x=100) + Animation(x=0) + w = Widget() + a.start(w) + sleep(.5) + Animation.stop_all(w) + assert no_animations_being_played() def test_stop_all_2(self): - self.a.start(self.w) - self.sleep(.5) - Animation.stop_all(self.w, 'x') - - def _test_on_progress(self, anim, widget, progress): - self._on_progress_called = True - - def _test_on_complete(self, anim, widget): - self._on_complete_called = True - - def test_events(self): - self._on_progress_called = False - self._on_complete_called = False - self.a.bind(on_progress=self._test_on_progress, - 
on_complete=self._test_on_complete) - self.a.start(self.w) - self.sleep(.5) - self.assertTrue(self._on_progress_called) - self.sleep(2) - self.assertTrue(self._on_progress_called) - self.assertTrue(self._on_complete_called) + from kivy.animation import Animation + from kivy.uix.widget import Widget + a = Animation(x=100) + Animation(x=0) + w = Widget() + a.start(w) + sleep(.5) + Animation.stop_all(w, 'x') + assert no_animations_being_played() + + def test_count_events(self, ec_cls): + from kivy.animation import Animation + from kivy.uix.widget import Widget + a = Animation(x=100, d=.5) + Animation(x=0, d=.5) + w = Widget() + ec = ec_cls(a) + ec1 = ec_cls(a.anim1) + ec2 = ec_cls(a.anim2) + a.start(w) + + # right after the animation starts + ec.assert_(1, False, 0) + ec1.assert_(1, False, 0) + ec2.assert_(0, False, 0) + sleep(.2) + + # during the first half of the animation + ec.assert_(1, True, 0) + ec1.assert_(1, True, 0) + ec2.assert_(0, False, 0) + sleep(.5) + + # during the second half of the animation + ec.assert_(1, True, 0) + ec1.assert_(1, True, 1) + ec2.assert_(1, True, 0) + sleep(.5) + + # after the animation completed + ec.assert_(1, True, 1) + ec1.assert_(1, True, 1) + ec2.assert_(1, True, 1) + assert no_animations_being_played() + + def test_have_properties_to_animate(self): + from kivy.animation import Animation + from kivy.uix.widget import Widget + a = Animation(x=100) + Animation(x=0) + w = Widget() + assert not a.have_properties_to_animate(w) + a.start(w) + assert a.have_properties_to_animate(w) + a.stop(w) + assert not a.have_properties_to_animate(w) + assert no_animations_being_played() + + def test_animated_properties(self): + from kivy.animation import Animation + a = Animation(x=100, y=200) + Animation(x=0) + assert a.animated_properties == {'x': 0, 'y': 200, } + + def test_transition(self): + from kivy.animation import Animation + a = Animation(x=100) + Animation(x=0) + with pytest.raises(AttributeError): + a.transition + + +class TestRepetitiveSequence: + + def test_stop(self): + from kivy.animation import Animation + from kivy.uix.widget import Widget + a = Animation(x=100) + Animation(x=0) + a.repeat = True + w = Widget() + a.start(w) + a.stop(w) + assert no_animations_being_played() + + def test_count_events(self, ec_cls): + from kivy.animation import Animation + from kivy.uix.widget import Widget + a = Animation(x=100, d=.5) + Animation(x=0, d=.5) + a.repeat = True + w = Widget() + ec = ec_cls(a) + ec1 = ec_cls(a.anim1) + ec2 = ec_cls(a.anim2) + a.start(w) + + # right after the animation starts + ec.assert_(1, False, 0) + ec1.assert_(1, False, 0) + ec2.assert_(0, False, 0) + sleep(.2) + + # during the first half of the first round of the animation + ec.assert_(1, True, 0) + ec1.assert_(1, True, 0) + ec2.assert_(0, False, 0) + sleep(.5) + + # during the second half of the first round of the animation + ec.assert_(1, True, 0) + ec1.assert_(1, True, 1) + ec2.assert_(1, True, 0) + sleep(.5) + + # during the first half of the second round of the animation + ec.assert_(1, True, 0) + ec1.assert_(2, True, 1) + ec2.assert_(1, True, 1) + sleep(.5) + + # during the second half of the second round of the animation + ec.assert_(1, True, 0) + ec1.assert_(2, True, 2) + ec2.assert_(2, True, 1) + a.stop(w) + + # after the animation stopped + ec.assert_(1, True, 1) + ec1.assert_(2, True, 2) + ec2.assert_(2, True, 2) + assert no_animations_being_played() + + +class TestParallel: + + def test_have_properties_to_animate(self): + from kivy.animation import Animation + from 
kivy.uix.widget import Widget + a = Animation(x=100) & Animation(y=100) + w = Widget() + assert not a.have_properties_to_animate(w) + a.start(w) + assert a.have_properties_to_animate(w) + a.stop(w) + assert not a.have_properties_to_animate(w) + assert no_animations_being_played() + + def test_cancel_property(self): + from kivy.animation import Animation + from kivy.uix.widget import Widget + a = Animation(x=100) & Animation(y=100) + w = Widget() + a.start(w) + a.cancel_property(w, 'x') + assert not no_animations_being_played() + a.stop(w) + assert no_animations_being_played() + + def test_animated_properties(self): + from kivy.animation import Animation + a = Animation(x=100) & Animation(y=100) + assert a.animated_properties == {'x': 100, 'y': 100, } + + def test_transition(self): + from kivy.animation import Animation + a = Animation(x=100) & Animation(y=100) + with pytest.raises(AttributeError): + a.transition + + def test_count_events(self, ec_cls): + from kivy.animation import Animation + from kivy.uix.widget import Widget + a = Animation(x=100) & Animation(y=100, d=.5) + w = Widget() + ec = ec_cls(a) + ec1 = ec_cls(a.anim1) + ec2 = ec_cls(a.anim2) + a.start(w) + + # right after the animation started + ec.assert_(1, False, 0) + ec1.assert_(1, False, 0) + ec2.assert_(1, False, 0) + sleep(.2) + + # during the first half of the animation + ec.assert_(1, False, 0) # n_progress is still 0 !! + ec1.assert_(1, True, 0) + ec2.assert_(1, True, 0) + sleep(.5) + + # during the second half of the animation + ec.assert_(1, False, 0) # n_progress is still 0 !! + ec1.assert_(1, True, 0) + ec2.assert_(1, True, 1) + sleep(.5) + + # after the animation compeleted + ec.assert_(1, False, 1) # n_progress is still 0 ! + ec1.assert_(1, True, 1) + ec2.assert_(1, True, 1) + assert no_animations_being_played()
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 4 }
2.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev,full]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio" ], "pre_install": [ "apt-get update", "apt-get install -y libsdl2-dev libsdl2-ttf-dev libsdl2-image-dev libsdl2-mixer-dev libgstreamer1.0-dev gstreamer1.0-alsa gstreamer1.0-plugins-base libsmpeg-dev libswscale-dev libavformat-dev libavcodec-dev libjpeg-dev libtiff5-dev libx11-dev libmtdev-dev build-essential libgl1-mesa-dev libgles2-mesa-dev xvfb pulseaudio xsel" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
actdiag==3.0.0 alabaster==0.7.16 altgraph==0.17.4 babel==2.17.0 blockdiag==3.0.0 certifi==2025.1.31 charset-normalizer==3.4.1 coverage==7.8.0 docutils==0.21.2 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work execnet==2.1.1 flake8==7.2.0 funcparserlib==2.0.0a0 idna==3.10 imagesize==1.4.1 importlib_metadata==8.6.1 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work Jinja2==3.1.6 -e git+https://github.com/kivy/kivy.git@5cfa5bf6edeaaa6b3fc3948d76bb6d356e3d0150#egg=Kivy Kivy-Garden==0.1.5 MarkupSafe==3.0.2 mccabe==0.7.0 nwdiag==3.0.0 packaging @ file:///croot/packaging_1734472117206/work pillow==11.1.0 pluggy @ file:///croot/pluggy_1733169602837/work pycodestyle==2.13.0 pyflakes==3.3.2 Pygments==2.19.1 pyinstaller==6.12.0 pyinstaller-hooks-contrib==2025.2 pytest @ file:///croot/pytest_1738938843180/work pytest-asyncio==0.26.0 pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-timeout==2.3.1 pytest-xdist==3.6.1 requests==2.32.3 seqdiag==3.0.0 snowballstemmer==2.2.0 Sphinx==7.4.7 sphinxcontrib-actdiag==3.0.0 sphinxcontrib-applehelp==2.0.0 sphinxcontrib-blockdiag==3.0.0 sphinxcontrib-devhelp==2.0.0 sphinxcontrib-htmlhelp==2.1.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-nwdiag==2.0.0 sphinxcontrib-qthelp==2.0.0 sphinxcontrib-seqdiag==3.0.0 sphinxcontrib-serializinghtml==2.0.0 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work typing_extensions==4.13.0 urllib3==2.3.0 webcolors==24.11.1 zipp==3.21.0
name: kivy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - actdiag==3.0.0 - alabaster==0.7.16 - altgraph==0.17.4 - babel==2.17.0 - blockdiag==3.0.0 - certifi==2025.1.31 - charset-normalizer==3.4.1 - coverage==7.8.0 - docutils==0.21.2 - execnet==2.1.1 - flake8==7.2.0 - funcparserlib==2.0.0a0 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==8.6.1 - jinja2==3.1.6 - kivy==2.0.0rc2 - kivy-garden==0.1.5 - markupsafe==3.0.2 - mccabe==0.7.0 - nwdiag==3.0.0 - pillow==11.1.0 - pycodestyle==2.13.0 - pyflakes==3.3.2 - pygments==2.19.1 - pyinstaller==6.12.0 - pyinstaller-hooks-contrib==2025.2 - pytest-asyncio==0.26.0 - pytest-cov==6.0.0 - pytest-mock==3.14.0 - pytest-timeout==2.3.1 - pytest-xdist==3.6.1 - requests==2.32.3 - seqdiag==3.0.0 - snowballstemmer==2.2.0 - sphinx==7.4.7 - sphinxcontrib-actdiag==3.0.0 - sphinxcontrib-applehelp==2.0.0 - sphinxcontrib-blockdiag==3.0.0 - sphinxcontrib-devhelp==2.0.0 - sphinxcontrib-htmlhelp==2.1.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-nwdiag==2.0.0 - sphinxcontrib-qthelp==2.0.0 - sphinxcontrib-seqdiag==3.0.0 - sphinxcontrib-serializinghtml==2.0.0 - typing-extensions==4.13.0 - urllib3==2.3.0 - webcolors==24.11.1 - zipp==3.21.0 prefix: /opt/conda/envs/kivy
[ "kivy/tests/test_animations.py::TestSequence::test_animated_properties", "kivy/tests/test_animations.py::TestSequence::test_transition", "kivy/tests/test_animations.py::TestParallel::test_animated_properties", "kivy/tests/test_animations.py::TestParallel::test_transition" ]
[ "kivy/tests/test_animations.py::TestAnimation::test_start_animation", "kivy/tests/test_animations.py::TestAnimation::test_animation_duration_0", "kivy/tests/test_animations.py::TestAnimation::test_stop_animation", "kivy/tests/test_animations.py::TestAnimation::test_stop_all", "kivy/tests/test_animations.py::TestAnimation::test_stop_all_2", "kivy/tests/test_animations.py::TestAnimation::test_weakref", "kivy/tests/test_animations.py::TestSequence::test_cancel_all", "kivy/tests/test_animations.py::TestSequence::test_cancel_all_2", "kivy/tests/test_animations.py::TestSequence::test_stop_all", "kivy/tests/test_animations.py::TestSequence::test_stop_all_2", "kivy/tests/test_animations.py::TestSequence::test_count_events", "kivy/tests/test_animations.py::TestSequence::test_have_properties_to_animate", "kivy/tests/test_animations.py::TestRepetitiveSequence::test_stop", "kivy/tests/test_animations.py::TestRepetitiveSequence::test_count_events", "kivy/tests/test_animations.py::TestParallel::test_have_properties_to_animate", "kivy/tests/test_animations.py::TestParallel::test_cancel_property", "kivy/tests/test_animations.py::TestParallel::test_count_events" ]
[ "kivy/tests/test_animations.py::TestAnimation::test_duration", "kivy/tests/test_animations.py::TestAnimation::test_transition", "kivy/tests/test_animations.py::TestAnimation::test_animated_properties", "kivy/tests/test_animations.py::TestAnimation::test_animated_instruction" ]
[]
MIT License
3,001
[ "doc/sources/guide/packaging-ios-prerequisites.rst", "kivy/animation.py", "doc/sources/guide/packaging-ios.rst", "kivy/input/providers/hidinput.py" ]
[ "doc/sources/guide/packaging-ios-prerequisites.rst", "kivy/animation.py", "doc/sources/guide/packaging-ios.rst", "kivy/input/providers/hidinput.py" ]
zopefoundation__zope.schema-51
b6625652d6bdd0d3c6a6568bd1c9f592a2013ec6
2018-08-31 13:12:48
0a719f2ded189630a0a77e9292a66a3662c6512c
diff --git a/CHANGES.rst b/CHANGES.rst index d83b78b..500e178 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -30,6 +30,16 @@ compatibility problem. See `issue 36 <https://github.com/zopefoundation/zope.schema/issues/36>`_. +- ``Field`` instances are only equal when their ``.interface`` is + equal. In practice, this means that two otherwise identical fields + of separate schemas are not equal, do not hash the same, and can + both be members of the same ``dict`` or ``set``. Prior to this + release, when hashing was identity based but only worked on Python + 2, that was the typical behaviour. (Field objects that are *not* + members of a schema continue to compare and hash equal if they have + the same attributes and interfaces.) See `issue 40 + <https://github.com/zopefoundation/zope.schema/issues/40>`_. + - Orderable fields, including ``Int``, ``Float``, ``Decimal``, ``Timedelta``, ``Date`` and ``Time``, can now have a ``missing_value`` without needing to specify concrete ``min`` and diff --git a/src/zope/schema/_bootstrapfields.py b/src/zope/schema/_bootstrapfields.py index 6fee839..6d613b4 100644 --- a/src/zope/schema/_bootstrapfields.py +++ b/src/zope/schema/_bootstrapfields.py @@ -119,7 +119,7 @@ class Field(Attribute): default = DefaultProperty('default') # These were declared as slots in zope.interface, we override them here to - # get rid of the dedcriptors so they don't break .bind() + # get rid of the descriptors so they don't break .bind() __name__ = None interface = None _Element__tagged_values = None @@ -206,21 +206,24 @@ class Field(Attribute): names.update(getFields(interface)) # order will be different always, don't compare it - if 'order' in names: - del names['order'] + names.pop('order', None) return names def __hash__(self): # Equal objects should have equal hashes; # equal hashes does not imply equal objects. - value = (type(self),) + tuple(self.__get_property_names_to_compare()) + value = (type(self), self.interface) + tuple(self.__get_property_names_to_compare()) return hash(value) def __eq__(self, other): - # should be the same type - if type(self) != type(other): + # should be the same type and in the same interface (or no interface at all) + if self is other: + return True + + if type(self) != type(other) or self.interface != other.interface: return False + # should have the same properties names = self.__get_property_names_to_compare() # XXX: What about the property names of the other object? Even
Should Field objects include their owning `interface` in equality/hashing? Currently they don't include that, leaving open the possibility that two different Field objects from two different interfaces are equal, which may or may not be the intent. ```pycon >>> from zope.interface import Interface >>> from zope.schema import Int >>> class IOne(Interface): ... one = Int(title=u'title') ... >>> class ITwo(Interface): ... two = Int(title=u'title') ... >>> IOne['one'] == ITwo['two'] True ```
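For illustration, a small sketch of the behaviour the patch and tests above aim for; the assertions mirror `test_instances_in_different_interfaces_not_equal` and the existing same-attributes equality test.

```python
from zope.interface import Interface
from zope.schema import Int


class IOne(Interface):
    one = Int(title=u'title')


class ITwo(Interface):
    two = Int(title=u'title')


# Once equality and hashing take the owning interface into account,
# otherwise identical fields of two different schemas are no longer equal:
assert IOne['one'] != ITwo['two']
assert hash(IOne['one']) != hash(ITwo['two'])

# Fields that are not part of any schema still compare equal when their
# attributes match, as before:
assert Int(title=u'title') == Int(title=u'title')
```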
zopefoundation/zope.schema
diff --git a/src/zope/schema/tests/test__bootstrapfields.py b/src/zope/schema/tests/test__bootstrapfields.py index 3355a1e..3e6365a 100644 --- a/src/zope/schema/tests/test__bootstrapfields.py +++ b/src/zope/schema/tests/test__bootstrapfields.py @@ -142,7 +142,88 @@ class DefaultPropertyTests(unittest.TestCase): self.assertEqual(_called_with, [inst.context]) -class FieldTests(unittest.TestCase): +class EqualityTestsMixin(object): + + def _getTargetClass(self): + raise NotImplementedError + + def _makeOne(self, *args, **kw): + return self._getTargetClass()(*args, **kw) + + def test_is_hashable(self): + field = self._makeOne() + hash(field) # doesn't raise + + def test_equal_instances_have_same_hash(self): + # Equal objects should have equal hashes + field1 = self._makeOne() + field2 = self._makeOne() + self.assertIsNot(field1, field2) + self.assertEqual(field1, field2) + self.assertEqual(hash(field1), hash(field2)) + + def test_instances_in_different_interfaces_not_equal(self): + from zope import interface + + field1 = self._makeOne() + field2 = self._makeOne() + self.assertEqual(field1, field2) + self.assertEqual(hash(field1), hash(field2)) + + class IOne(interface.Interface): + one = field1 + + class ITwo(interface.Interface): + two = field2 + + self.assertEqual(field1, field1) + self.assertEqual(field2, field2) + self.assertNotEqual(field1, field2) + self.assertNotEqual(hash(field1), hash(field2)) + + def test_hash_across_unequal_instances(self): + # Hash equality does not imply equal objects. + # Our implementation only considers property names, + # not values. That's OK, a dict still does the right thing. + field1 = self._makeOne(title=u'foo') + field2 = self._makeOne(title=u'bar') + self.assertIsNot(field1, field2) + self.assertNotEqual(field1, field2) + self.assertEqual(hash(field1), hash(field2)) + + d = {field1: 42} + self.assertIn(field1, d) + self.assertEqual(42, d[field1]) + self.assertNotIn(field2, d) + with self.assertRaises(KeyError): + d.__getitem__(field2) + + def test___eq___different_type(self): + left = self._makeOne() + + class Derived(self._getTargetClass()): + pass + right = Derived() + self.assertNotEqual(left, right) + self.assertTrue(left != right) + + def test___eq___same_type_different_attrs(self): + left = self._makeOne(required=True) + right = self._makeOne(required=False) + self.assertNotEqual(left, right) + self.assertTrue(left != right) + + def test___eq___same_type_same_attrs(self): + left = self._makeOne() + self.assertEqual(left, left) + + right = self._makeOne() + self.assertEqual(left, right) + self.assertFalse(left != right) + + +class FieldTests(EqualityTestsMixin, + unittest.TestCase): def _getTargetClass(self): from zope.schema._bootstrapfields import Field @@ -304,27 +385,6 @@ class FieldTests(unittest.TestCase): field._type = int field.validate(1) # doesn't raise - def test___eq___different_type(self): - left = self._makeOne() - - class Derived(self._getTargetClass()): - pass - right = Derived() - self.assertEqual(left == right, False) - self.assertEqual(left != right, True) - - def test___eq___same_type_different_attrs(self): - left = self._makeOne(required=True) - right = self._makeOne(required=False) - self.assertEqual(left == right, False) - self.assertEqual(left != right, True) - - def test___eq___same_type_same_attrs(self): - left = self._makeOne() - right = self._makeOne() - self.assertEqual(left == right, True) - self.assertEqual(left != right, False) - def test_get_miss(self): field = self._makeOne(__name__='nonesuch') inst = DummyInst() 
@@ -364,44 +424,14 @@ class FieldTests(unittest.TestCase): field.set(inst, 'AFTER') self.assertEqual(inst.extant, 'AFTER') - def test_is_hashable(self): - field = self._makeOne() - hash(field) # doesn't raise - def test_equal_instances_have_same_hash(self): - # Equal objects should have equal hashes - field1 = self._makeOne() - field2 = self._makeOne() - self.assertIsNot(field1, field2) - self.assertEqual(field1, field2) - self.assertEqual(hash(field1), hash(field2)) - - def test_hash_across_unequal_instances(self): - # Hash equality does not imply equal objects. - # Our implementation only considers property names, - # not values. That's OK, a dict still does the right thing. - field1 = self._makeOne(title=u'foo') - field2 = self._makeOne(title=u'bar') - self.assertIsNot(field1, field2) - self.assertNotEqual(field1, field2) - self.assertEqual(hash(field1), hash(field2)) - - d = {field1: 42} - self.assertIn(field1, d) - self.assertEqual(42, d[field1]) - self.assertNotIn(field2, d) - with self.assertRaises(KeyError): - d[field2] - -class ContainerTests(unittest.TestCase): +class ContainerTests(EqualityTestsMixin, + unittest.TestCase): def _getTargetClass(self): from zope.schema._bootstrapfields import Container return Container - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - def test_validate_not_required(self): field = self._makeOne(required=False) field.validate(None) @@ -529,15 +559,13 @@ class MinMaxLenTests(unittest.TestCase): self.assertRaises(TooLong, mml._validate, (0, 1, 2)) -class TextTests(unittest.TestCase): +class TextTests(EqualityTestsMixin, + unittest.TestCase): def _getTargetClass(self): from zope.schema._bootstrapfields import Text return Text - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - def test_ctor_defaults(self): from zope.schema._compat import text_type txt = self._makeOne() @@ -593,15 +621,13 @@ class TextTests(unittest.TestCase): self.assertEqual(txt.fromUnicode(deadbeef), deadbeef) -class TextLineTests(unittest.TestCase): +class TextLineTests(EqualityTestsMixin, + unittest.TestCase): def _getTargetClass(self): from zope.schema._field import TextLine return TextLine - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - def test_class_conforms_to_ITextLine(self): from zope.interface.verify import verifyClass from zope.schema.interfaces import ITextLine @@ -711,15 +737,13 @@ class PasswordTests(unittest.TestCase): self.assertEqual(field.constraint(u'abc\ndef'), False) -class BoolTests(unittest.TestCase): +class BoolTests(EqualityTestsMixin, + unittest.TestCase): def _getTargetClass(self): from zope.schema._bootstrapfields import Bool return Bool - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - def test_ctor_defaults(self): txt = self._makeOne() self.assertEqual(txt._type, bool) @@ -774,16 +798,14 @@ class OrderableMissingValueMixin(object): self.assertEqual(self.mvm_missing_value, field.missing_value) -class IntTests(OrderableMissingValueMixin, +class IntTests(EqualityTestsMixin, + OrderableMissingValueMixin, unittest.TestCase): def _getTargetClass(self): from zope.schema._bootstrapfields import Int return Int - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - def test_ctor_defaults(self): from zope.schema._compat import integer_types txt = self._makeOne()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 2 }
4.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[test]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 importlib-metadata==4.8.3 iniconfig==1.1.1 packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 six==1.17.0 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0 zope.event==4.6 zope.exceptions==4.6 zope.interface==5.5.2 -e git+https://github.com/zopefoundation/zope.schema.git@b6625652d6bdd0d3c6a6568bd1c9f592a2013ec6#egg=zope.schema zope.testing==5.0.1 zope.testrunner==5.6
name: zope.schema channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - six==1.17.0 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 - zope-event==4.6 - zope-exceptions==4.6 - zope-interface==5.5.2 - zope-testing==5.0.1 - zope-testrunner==5.6 prefix: /opt/conda/envs/zope.schema
[ "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_instances_in_different_interfaces_not_equal" ]
[]
[ "src/zope/schema/tests/test__bootstrapfields.py::ValidatedPropertyTests::test___get__", "src/zope/schema/tests/test__bootstrapfields.py::ValidatedPropertyTests::test___set___not_missing_w_check", "src/zope/schema/tests/test__bootstrapfields.py::ValidatedPropertyTests::test___set___not_missing_wo_check", "src/zope/schema/tests/test__bootstrapfields.py::ValidatedPropertyTests::test___set___w_missing_wo_check", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test___get___w_defaultFactory_not_ICAF_no_check", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test___get___w_defaultFactory_w_ICAF_w_check", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test___get___wo_defaultFactory_hit", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test___get___wo_defaultFactory_miss", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test__get___wo_defaultFactory_in_dict", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_bind", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_order_madness", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_w_both_title_and_description", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_w_title_wo_description", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_wo_title_w_description", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_explicit_constraint_default", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_explicit_defaultFactory", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_explicit_defaultFactory_returning_missing_value", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_explicit_required_readonly_missingValue", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_get_hit", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_get_miss", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_query_hit", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_query_miss_no_default", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_query_miss_w_default", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_set_hit", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_set_readonly", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_constraint_fails", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_constraint_raises_StopValidation", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_missing_and_required", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_missing_not_required", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_wrong_type", 
"src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test__validate_collection_but_not_iterable", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test__validate_not_collection_but_iterable", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test__validate_not_collection_not_iterable", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test__validate_w_collections", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test__validate_collection_but_not_iterable", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test__validate_not_collection_but_iterable", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test__validate_not_collection_not_iterable", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test__validate_w_collections", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::OrderableTests::test_ctor_default_too_large", "src/zope/schema/tests/test__bootstrapfields.py::OrderableTests::test_ctor_default_too_small", "src/zope/schema/tests/test__bootstrapfields.py::OrderableTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::MinMaxLenTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::MinMaxLenTests::test_validate_too_long", "src/zope/schema/tests/test__bootstrapfields.py::MinMaxLenTests::test_validate_too_short", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_fromUnicode_hit", 
"src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_fromUnicode_miss", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_validate_w_invalid_default", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_validate_wrong_types", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_class_conforms_to_ITextLine", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_constraint", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_instance_conforms_to_ITextLine", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_validate_wrong_types", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_constraint", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_set_normal", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_set_unchanged", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_validate_unchanged_already_set", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_validate_unchanged_not_already_set", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test__validate_w_int", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_fromUnicode_hit", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_fromUnicode_miss", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_set_w_int", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test___eq___same_type_different_attrs", 
"src/zope/schema/tests/test__bootstrapfields.py::IntTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_fromUnicode_hit", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_fromUnicode_miss", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_max", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_min", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_min_and_max", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_required" ]
[]
Zope Public License 2.1
3,002
[ "src/zope/schema/_bootstrapfields.py", "CHANGES.rst" ]
[ "src/zope/schema/_bootstrapfields.py", "CHANGES.rst" ]
python-pillow__Pillow-3324
5af867df4a7a13a924590af49d24030225b5c93e
2018-08-31 22:27:25
78c8b1f341919a4f7e19e29056713d8f738c9c88
diff --git a/src/PIL/GifImagePlugin.py b/src/PIL/GifImagePlugin.py index fec2f7663..645bb26bf 100644 --- a/src/PIL/GifImagePlugin.py +++ b/src/PIL/GifImagePlugin.py @@ -166,6 +166,7 @@ class GifImageFile(ImageFile.ImageFile): from copy import copy self.palette = copy(self.global_palette) + info = {} while True: s = self.fp.read(1) @@ -184,8 +185,8 @@ class GifImageFile(ImageFile.ImageFile): # flags = i8(block[0]) if flags & 1: - self.info["transparency"] = i8(block[3]) - self.info["duration"] = i16(block[1:3]) * 10 + info["transparency"] = i8(block[3]) + info["duration"] = i16(block[1:3]) * 10 # disposal method - find the value of bits 4 - 6 dispose_bits = 0b00011100 & flags @@ -200,16 +201,16 @@ class GifImageFile(ImageFile.ImageFile): # # comment extension # - self.info["comment"] = block + info["comment"] = block elif i8(s) == 255: # # application extension # - self.info["extension"] = block, self.fp.tell() + info["extension"] = block, self.fp.tell() if block[:11] == b"NETSCAPE2.0": block = self.data() if len(block) >= 3 and i8(block[0]) == 1: - self.info["loop"] = i16(block[1:3]) + info["loop"] = i16(block[1:3]) while self.data(): pass @@ -268,6 +269,12 @@ class GifImageFile(ImageFile.ImageFile): # self.__fp = None raise EOFError + for k in ["transparency", "duration", "comment", "extension", "loop"]: + if k in info: + self.info[k] = info[k] + elif k in self.info: + del self.info[k] + self.mode = "L" if self.palette: self.mode = "P"
TypeError: object of type 'NoneType' has no len() ### TypeError: object of type 'NoneType' has no len() This image has a "comment" value that is None, which raises that error. This is my PR https://github.com/python-pillow/Pillow/pull/3314 https://github.com/python-pillow/Pillow/blob/master/src/PIL/GifImagePlugin.py#L527 ```python > if "comment" in im.encoderinfo and 1 <= len(im.encoderinfo["comment"]) <= 255: E TypeError: object of type 'NoneType' has no len() ``` https://github.com/python-pillow/Pillow/blob/master/src/PIL/GifImagePlugin.py#L697 ```python def _get_global_header(im, info): """Return a list of strings representing a GIF header""" # Header Block # http://www.matthewflickinger.com/lab/whatsinagif/bits_and_bytes.asp version = b"87a" for extensionKey in ["transparency", "duration", "loop", "comment"]: if info and extensionKey in info: if ((extensionKey == "duration" and info[extensionKey] == 0) or > (extensionKey == "comment" and not (1 <= len(info[extensionKey]) <= 255))): E TypeError: object of type 'NoneType' has no len() ``` ```python {'background': 160, 'comment': None, 'duration': 500, 'optimize': True, 'version': b'GIF89a'} ``` This code reproduces the issue: ```python from PIL import Image from io import BytesIO filename = 'test.gif' with open(filename, "rb") as fp: img = Image.open(fp) file_obj = BytesIO() print(img.info) getattr(img, "is_animated", False) print(img.info) img.save(file_obj, format="GIF") ``` ![test2](https://user-images.githubusercontent.com/9821654/44784628-f9408780-ab96-11e8-9e9b-038467b6058a.gif)
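Until the loader is fixed, a defensive workaround is to drop `None` entries from `Image.info` before saving; the writer inherits values from `info` (which is why `im.encoderinfo` in the traceback already contains the `comment` key) and then calls `len()` on the comment. This is only a sketch built from the reproduction above, where `test.gif` is the attached file.

```python
from io import BytesIO

from PIL import Image

img = Image.open("test.gif")           # the animated GIF attached to the report
getattr(img, "is_animated", False)     # seeking frames is what pollutes img.info here

# Remove inherited entries the GIF writer cannot handle, such as a None
# comment, and leave everything else untouched.
for key in [k for k, v in img.info.items() if v is None]:
    del img.info[key]

buf = BytesIO()
img.save(buf, format="GIF")            # should no longer raise the TypeError
```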
python-pillow/Pillow
diff --git a/Tests/test_file_gif.py b/Tests/test_file_gif.py index 086a0f5d0..917b36905 100644 --- a/Tests/test_file_gif.py +++ b/Tests/test_file_gif.py @@ -134,13 +134,15 @@ class TestFileGif(PillowTestCase): # Multiframe image im = Image.open("Tests/images/dispose_bgnd.gif") + info = im.info.copy() + out = self.tempfile('temp.gif') im.save(out, save_all=True) reread = Image.open(out) for header in important_headers: self.assertEqual( - im.info[header], + info[header], reread.info[header] ) @@ -207,6 +209,15 @@ class TestFileGif(PillowTestCase): except EOFError: self.assertEqual(framecount, 5) + def test_seek_info(self): + im = Image.open("Tests/images/iss634.gif") + info = im.info.copy() + + im.seek(1) + im.seek(0) + + self.assertEqual(im.info, info) + def test_n_frames(self): for path, n_frames in [ [TEST_GIF, 1],
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_media", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 3 }, "num_modified_files": 1 }
5.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": null, "pre_install": [ "apt-get update", "apt-get install -y gcc libjpeg-dev zlib1g-dev libtiff5-dev libfreetype6-dev liblcms2-dev libwebp-dev tcl8.6-dev tk8.6-dev libharfbuzz-dev libfribidi-dev libxcb1-dev" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.16 babel==2.17.0 blessed==1.20.0 build==1.2.2.post1 certifi==2025.1.31 charset-normalizer==3.4.1 check-manifest==0.50 cov-core==1.15.0 coverage==7.8.0 coveralls==4.0.1 docopt==0.6.2 docutils==0.21.2 exceptiongroup==1.2.2 idna==3.10 imagesize==1.4.1 importlib_metadata==8.6.1 iniconfig==2.1.0 jarn.viewdoc==2.7 Jinja2==3.1.6 MarkupSafe==3.0.2 olefile==0.47 packaging==24.2 -e git+https://github.com/python-pillow/Pillow.git@5af867df4a7a13a924590af49d24030225b5c93e#egg=Pillow pluggy==1.5.0 pycodestyle==2.13.0 pyflakes==3.3.1 Pygments==2.19.1 pyproject_hooks==1.2.0 pyroma==4.2 pytest==8.3.5 pytest-cov==6.0.0 pytz==2025.2 requests==2.32.3 six==1.17.0 snowballstemmer==2.2.0 Sphinx==7.4.7 sphinx-rtd-theme==3.0.2 sphinxcontrib-applehelp==2.0.0 sphinxcontrib-devhelp==2.0.0 sphinxcontrib-htmlhelp==2.1.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==2.0.0 sphinxcontrib-serializinghtml==2.0.0 tomli==2.2.1 trove-classifiers==2025.3.19.19 urllib3==2.3.0 wcwidth==0.2.13 zipp==3.21.0
name: Pillow channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.16 - babel==2.17.0 - blessed==1.20.0 - build==1.2.2.post1 - certifi==2025.1.31 - charset-normalizer==3.4.1 - check-manifest==0.50 - cov-core==1.15.0 - coverage==7.8.0 - coveralls==4.0.1 - docopt==0.6.2 - docutils==0.21.2 - exceptiongroup==1.2.2 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - jarn-viewdoc==2.7 - jinja2==3.1.6 - markupsafe==3.0.2 - olefile==0.47 - packaging==24.2 - pluggy==1.5.0 - pycodestyle==2.13.0 - pyflakes==3.3.1 - pygments==2.19.1 - pyproject-hooks==1.2.0 - pyroma==4.2 - pytest==8.3.5 - pytest-cov==6.0.0 - pytz==2025.2 - requests==2.32.3 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==7.4.7 - sphinx-rtd-theme==3.0.2 - sphinxcontrib-applehelp==2.0.0 - sphinxcontrib-devhelp==2.0.0 - sphinxcontrib-htmlhelp==2.1.0 - sphinxcontrib-jquery==4.1 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==2.0.0 - sphinxcontrib-serializinghtml==2.0.0 - tomli==2.2.1 - trove-classifiers==2025.3.19.19 - urllib3==2.3.0 - wcwidth==0.2.13 - zipp==3.21.0 prefix: /opt/conda/envs/Pillow
[ "Tests/test_file_gif.py::TestFileGif::test_seek_info", "Tests/test_file_gif.py::TestFileGif::test_transparent_optimize", "Tests/test_file_gif.py::TestFileGif::test_version" ]
[]
[ "Tests/test_file_gif.py::TestFileGif::test_append_images", "Tests/test_file_gif.py::TestFileGif::test_background", "Tests/test_file_gif.py::TestFileGif::test_bbox", "Tests/test_file_gif.py::TestFileGif::test_comment", "Tests/test_file_gif.py::TestFileGif::test_dispose_background", "Tests/test_file_gif.py::TestFileGif::test_dispose_none", "Tests/test_file_gif.py::TestFileGif::test_dispose_previous", "Tests/test_file_gif.py::TestFileGif::test_duration", "Tests/test_file_gif.py::TestFileGif::test_eoferror", "Tests/test_file_gif.py::TestFileGif::test_getdata", "Tests/test_file_gif.py::TestFileGif::test_headers_saving_for_animated_gifs", "Tests/test_file_gif.py::TestFileGif::test_identical_frames", "Tests/test_file_gif.py::TestFileGif::test_invalid_file", "Tests/test_file_gif.py::TestFileGif::test_iss634", "Tests/test_file_gif.py::TestFileGif::test_lzw_bits", "Tests/test_file_gif.py::TestFileGif::test_multiple_duration", "Tests/test_file_gif.py::TestFileGif::test_n_frames", "Tests/test_file_gif.py::TestFileGif::test_number_of_loops", "Tests/test_file_gif.py::TestFileGif::test_optimize", "Tests/test_file_gif.py::TestFileGif::test_optimize_correctness", "Tests/test_file_gif.py::TestFileGif::test_optimize_full_l", "Tests/test_file_gif.py::TestFileGif::test_palette_434", "Tests/test_file_gif.py::TestFileGif::test_palette_handling", "Tests/test_file_gif.py::TestFileGif::test_palette_save_ImagePalette", "Tests/test_file_gif.py::TestFileGif::test_palette_save_L", "Tests/test_file_gif.py::TestFileGif::test_palette_save_P", "Tests/test_file_gif.py::TestFileGif::test_roundtrip", "Tests/test_file_gif.py::TestFileGif::test_roundtrip2", "Tests/test_file_gif.py::TestFileGif::test_roundtrip_save_all", "Tests/test_file_gif.py::TestFileGif::test_sanity", "Tests/test_file_gif.py::TestFileGif::test_save_I", "Tests/test_file_gif.py::TestFileGif::test_save_dispose", "Tests/test_file_gif.py::TestFileGif::test_seek" ]
[]
MIT-CMU License
3,003
[ "src/PIL/GifImagePlugin.py" ]
[ "src/PIL/GifImagePlugin.py" ]
python-pillow__Pillow-3327
41954f244705b247667f1ea228e932ca6390bcd6
2018-09-01 13:24:45
78c8b1f341919a4f7e19e29056713d8f738c9c88
diff --git a/src/PIL/TgaImagePlugin.py b/src/PIL/TgaImagePlugin.py index 57b6ae2c8..02893e837 100644 --- a/src/PIL/TgaImagePlugin.py +++ b/src/PIL/TgaImagePlugin.py @@ -20,6 +20,8 @@ from . import Image, ImageFile, ImagePalette from ._binary import i8, i16le as i16, o8, o16le as o16 +import warnings + __version__ = "0.3" @@ -53,7 +55,7 @@ class TgaImageFile(ImageFile.ImageFile): # process header s = self.fp.read(18) - idlen = i8(s[0]) + id_len = i8(s[0]) colormaptype = i8(s[1]) imagetype = i8(s[2]) @@ -100,8 +102,8 @@ class TgaImageFile(ImageFile.ImageFile): if imagetype & 8: self.info["compression"] = "tga_rle" - if idlen: - self.info["id_section"] = self.fp.read(idlen) + if id_len: + self.info["id_section"] = self.fp.read(id_len) if colormaptype: # read palette @@ -151,11 +153,23 @@ def _save(im, fp, filename): except KeyError: raise IOError("cannot write mode %s as TGA" % im.mode) - rle = im.encoderinfo.get("rle", False) - + if "rle" in im.encoderinfo: + rle = im.encoderinfo["rle"] + else: + compression = im.encoderinfo.get("compression", + im.info.get("compression")) + rle = compression == "tga_rle" if rle: imagetype += 8 + id_section = im.encoderinfo.get("id_section", + im.info.get("id_section", "")) + id_len = len(id_section) + if id_len > 255: + id_len = 255 + id_section = id_section[:255] + warnings.warn("id_section has been trimmed to 255 characters") + if colormaptype: colormapfirst, colormaplength, colormapentry = 0, 256, 24 else: @@ -166,11 +180,12 @@ def _save(im, fp, filename): else: flags = 0 - orientation = im.info.get("orientation", -1) + orientation = im.encoderinfo.get("orientation", + im.info.get("orientation", -1)) if orientation > 0: flags = flags | 0x20 - fp.write(b"\000" + + fp.write(o8(id_len) + o8(colormaptype) + o8(imagetype) + o16(colormapfirst) + @@ -183,6 +198,9 @@ def _save(im, fp, filename): o8(bits) + o8(flags)) + if id_section: + fp.write(id_section) + if colormaptype: fp.write(im.im.getpalette("RGB", "BGR"))
Sanitize TGA Image.info Currently, the TGA image plugin sets `Image.info` "compression" to "tga_rle" on `load()`, while `save()` takes an "rle" (bool) option. Both methods should use the same option for consistency. It probably makes sense to keep "rle", as the TGA format doesn't support other compression methods, but "compression" may be more consistent with the BMP and TIFF plugins. Neither of the options is documented, so there is no danger of breaking backward compatibility. Also, it's not very clear whether the `save()` method should "inherit" info the way the TIFF and PNG plugins do: https://github.com/python-pillow/Pillow/blob/4407cb65079a7d1150277e3b9a144996f56357c9/src/PIL/TiffImagePlugin.py#L1399-L1400 Currently, the TGA plugin only inherits "orientation" but doesn't allow specifying it as a keyword to `save()`, and "id_section" is ignored altogether.
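For reference, a sketch of what saving looks like once the writer accepts the same keys the reader reports; the option names and the sample file come from the patch and tests above, while `out.tga` is just a hypothetical output path.

```python
from PIL import Image

im = Image.open("Tests/images/tga_id_field.tga")

im.save(
    "out.tga",
    compression="tga_rle",       # same effect as the old rle=True flag
    id_section=b"Test content",  # written to the TGA id field, trimmed to 255 bytes
    orientation=1,               # a positive value sets the screen-origin flag (0x20)
)

reread = Image.open("out.tga")
assert reread.info["compression"] == "tga_rle"
assert reread.info["id_section"] == b"Test content"
assert reread.info["orientation"] == 1
```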
python-pillow/Pillow
diff --git a/Tests/test_file_tga.py b/Tests/test_file_tga.py index 226b899dc..77695f2d1 100644 --- a/Tests/test_file_tga.py +++ b/Tests/test_file_tga.py @@ -53,10 +53,10 @@ class TestFileTga(PillowTestCase): # Generate a new test name every time so the # test will not fail with permission error # on Windows. - test_file = self.tempfile("temp.tga") + out = self.tempfile("temp.tga") - original_im.save(test_file, rle=rle) - saved_im = Image.open(test_file) + original_im.save(out, rle=rle) + saved_im = Image.open(out) if rle: self.assertEqual( saved_im.info["compression"], @@ -95,34 +95,93 @@ class TestFileTga(PillowTestCase): test_file = "Tests/images/tga_id_field.tga" im = Image.open(test_file) - test_file = self.tempfile("temp.tga") + out = self.tempfile("temp.tga") # Save - im.save(test_file) - test_im = Image.open(test_file) + im.save(out) + test_im = Image.open(out) self.assertEqual(test_im.size, (100, 100)) + self.assertEqual(test_im.info["id_section"], im.info["id_section"]) # RGBA save - im.convert("RGBA").save(test_file) - test_im = Image.open(test_file) + im.convert("RGBA").save(out) + test_im = Image.open(out) self.assertEqual(test_im.size, (100, 100)) + def test_save_id_section(self): + test_file = "Tests/images/rgb32rle.tga" + im = Image.open(test_file) + + out = self.tempfile("temp.tga") + + # Check there is no id section + im.save(out) + test_im = Image.open(out) + self.assertNotIn("id_section", test_im.info) + + # Save with custom id section + im.save(out, id_section=b"Test content") + test_im = Image.open(out) + self.assertEqual(test_im.info["id_section"], b"Test content") + + # Save with custom id section greater than 255 characters + id_section = b"Test content" * 25 + self.assert_warning(UserWarning, + lambda: im.save(out, id_section=id_section)) + test_im = Image.open(out) + self.assertEqual(test_im.info["id_section"], id_section[:255]) + + test_file = "Tests/images/tga_id_field.tga" + im = Image.open(test_file) + + # Save with no id section + im.save(out, id_section="") + test_im = Image.open(out) + self.assertNotIn("id_section", test_im.info) + + def test_save_orientation(self): + test_file = "Tests/images/rgb32rle.tga" + im = Image.open(test_file) + self.assertEqual(im.info["orientation"], -1) + + out = self.tempfile("temp.tga") + + im.save(out, orientation=1) + test_im = Image.open(out) + self.assertEqual(test_im.info["orientation"], 1) + def test_save_rle(self): test_file = "Tests/images/rgb32rle.tga" im = Image.open(test_file) + self.assertEqual(im.info["compression"], "tga_rle") - test_file = self.tempfile("temp.tga") + out = self.tempfile("temp.tga") # Save - im.save(test_file) - test_im = Image.open(test_file) + im.save(out) + test_im = Image.open(out) self.assertEqual(test_im.size, (199, 199)) + self.assertEqual(test_im.info["compression"], "tga_rle") + + # Save without compression + im.save(out, compression=None) + test_im = Image.open(out) + self.assertNotIn("compression", test_im.info) # RGBA save - im.convert("RGBA").save(test_file) - test_im = Image.open(test_file) + im.convert("RGBA").save(out) + test_im = Image.open(out) self.assertEqual(test_im.size, (199, 199)) + test_file = "Tests/images/tga_id_field.tga" + im = Image.open(test_file) + self.assertNotIn("compression", im.info) + + # Save with compression + im.save(out, compression="tga_rle") + test_im = Image.open(out) + self.assertEqual(test_im.info["compression"], "tga_rle") + def test_save_l_transparency(self): # There are 559 transparent pixels in la.tga. 
num_transparent = 559 @@ -133,10 +192,10 @@ class TestFileTga(PillowTestCase): self.assertEqual( im.getchannel("A").getcolors()[0][0], num_transparent) - test_file = self.tempfile("temp.tga") - im.save(test_file) + out = self.tempfile("temp.tga") + im.save(out) - test_im = Image.open(test_file) + test_im = Image.open(out) self.assertEqual(test_im.mode, "LA") self.assertEqual( test_im.getchannel("A").getcolors()[0][0], num_transparent)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 1 }
5.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": null, "pre_install": [ "apt-get update", "apt-get install -y gcc libjpeg-dev zlib1g-dev libtiff5-dev libfreetype6-dev liblcms2-dev libwebp-dev tcl8.6-dev tk8.6-dev libharfbuzz-dev libfribidi-dev libxcb1-dev" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.16 babel==2.17.0 blessed==1.20.0 build==1.2.2.post1 certifi==2025.1.31 charset-normalizer==3.4.1 check-manifest==0.50 cov-core==1.15.0 coverage==7.8.0 coveralls==4.0.1 docopt==0.6.2 docutils==0.21.2 exceptiongroup==1.2.2 idna==3.10 imagesize==1.4.1 importlib_metadata==8.6.1 iniconfig==2.1.0 jarn.viewdoc==2.7 Jinja2==3.1.6 MarkupSafe==3.0.2 olefile==0.47 packaging==24.2 -e git+https://github.com/python-pillow/Pillow.git@41954f244705b247667f1ea228e932ca6390bcd6#egg=Pillow pluggy==1.5.0 pycodestyle==2.13.0 pyflakes==3.3.1 Pygments==2.19.1 pyproject_hooks==1.2.0 pyroma==4.2 pytest==8.3.5 pytest-cov==6.0.0 pytz==2025.2 requests==2.32.3 six==1.17.0 snowballstemmer==2.2.0 Sphinx==7.4.7 sphinx-rtd-theme==3.0.2 sphinxcontrib-applehelp==2.0.0 sphinxcontrib-devhelp==2.0.0 sphinxcontrib-htmlhelp==2.1.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==2.0.0 sphinxcontrib-serializinghtml==2.0.0 tomli==2.2.1 trove-classifiers==2025.3.19.19 urllib3==2.3.0 wcwidth==0.2.13 zipp==3.21.0
name: Pillow channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.16 - babel==2.17.0 - blessed==1.20.0 - build==1.2.2.post1 - certifi==2025.1.31 - charset-normalizer==3.4.1 - check-manifest==0.50 - cov-core==1.15.0 - coverage==7.8.0 - coveralls==4.0.1 - docopt==0.6.2 - docutils==0.21.2 - exceptiongroup==1.2.2 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - jarn-viewdoc==2.7 - jinja2==3.1.6 - markupsafe==3.0.2 - olefile==0.47 - packaging==24.2 - pluggy==1.5.0 - pycodestyle==2.13.0 - pyflakes==3.3.1 - pygments==2.19.1 - pyproject-hooks==1.2.0 - pyroma==4.2 - pytest==8.3.5 - pytest-cov==6.0.0 - pytz==2025.2 - requests==2.32.3 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==7.4.7 - sphinx-rtd-theme==3.0.2 - sphinxcontrib-applehelp==2.0.0 - sphinxcontrib-devhelp==2.0.0 - sphinxcontrib-htmlhelp==2.1.0 - sphinxcontrib-jquery==4.1 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==2.0.0 - sphinxcontrib-serializinghtml==2.0.0 - tomli==2.2.1 - trove-classifiers==2025.3.19.19 - urllib3==2.3.0 - wcwidth==0.2.13 - zipp==3.21.0 prefix: /opt/conda/envs/Pillow
[ "Tests/test_file_tga.py::TestFileTga::test_save", "Tests/test_file_tga.py::TestFileTga::test_save_id_section", "Tests/test_file_tga.py::TestFileTga::test_save_l_transparency", "Tests/test_file_tga.py::TestFileTga::test_save_orientation", "Tests/test_file_tga.py::TestFileTga::test_save_rle" ]
[]
[ "Tests/test_file_tga.py::TestFileTga::test_id_field", "Tests/test_file_tga.py::TestFileTga::test_id_field_rle", "Tests/test_file_tga.py::TestFileTga::test_sanity" ]
[]
MIT-CMU License
3,004
[ "src/PIL/TgaImagePlugin.py" ]
[ "src/PIL/TgaImagePlugin.py" ]
airspeed-velocity__asv-726
049f4fa74d981a4a1996b2c109b912ea79fd462f
2018-09-01 15:22:20
d069dc4a375a60402acc0bbe1b7df938768b0c16
diff --git a/asv/commands/compare.py b/asv/commands/compare.py index 1096249..164c2ed 100644 --- a/asv/commands/compare.py +++ b/asv/commands/compare.py @@ -7,6 +7,7 @@ from __future__ import (absolute_import, division, print_function, import itertools from . import Command +from ..benchmarks import Benchmarks from ..machine import iter_machine_files from ..results import iter_results_for_machine_and_hash from ..repo import get_repo, NoSuchNameError @@ -181,6 +182,9 @@ class Compare(Command): stats_2 = {} versions_1 = {} versions_2 = {} + units = {} + + benchmarks = Benchmarks.load(conf) if commit_names is None: commit_names = {} @@ -210,6 +214,7 @@ class Compare(Command): machine_env_name = "{}/{}".format(machine, env_name) machine_env_names.add(machine_env_name) for name, value, stats in unroll_result(key, params, value, stats): + units[(name, machine_env_name)] = benchmarks.get(key, {}).get('unit') results_1[(name, machine_env_name)] = value stats_1[(name, machine_env_name)] = stats versions_1[(name, machine_env_name)] = version @@ -218,6 +223,7 @@ class Compare(Command): machine_env_name = "{}/{}".format(machine, env_name) machine_env_names.add(machine_env_name) for name, value, stats in unroll_result(key, params, value, stats): + units[(name, machine_env_name)] = benchmarks.get(key, {}).get('unit') results_2[(name, machine_env_name)] = value stats_2[(name, machine_env_name)] = stats versions_2[(name, machine_env_name)] = version @@ -300,7 +306,7 @@ class Compare(Command): improved = True elif time_1 is None and time_2 is None: # both failed - color = 'red' + color = 'default' mark = ' ' elif _isna(time_1) or _isna(time_2): # either one was skipped @@ -330,10 +336,12 @@ class Compare(Command): if only_changed and mark in (' ', 'x'): continue + unit = units[benchmark] + details = "{0:1s} {1:>15s} {2:>15s} {3:>8s} ".format( mark, - human_value(time_1, "seconds", err=err_1), - human_value(time_2, "seconds", err=err_2), + human_value(time_1, unit, err=err_1), + human_value(time_2, unit, err=err_2), ratio) if split:
asv compare should use the unit attribute for displaying results. I have a benchmark that counts the number of objects via a `track_` method, but `asv run` formats the result as minutes, hours, and days.
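For context, a benchmark of the kind described might look like the sketch below. The class and method names are invented, and it assumes asv's convention that a `unit` attribute declared on a benchmark is recorded in benchmarks.json (as in the test fixture in this record) and should drive how compare output formats the value:

import gc

class TrackObjectCount:
    # Benchmark attribute picked up by asv and stored in benchmarks.json;
    # compare/show output should format values with this unit, not seconds.
    unit = "objects"

    def track_live_objects(self):
        # track_ benchmarks return an arbitrary number rather than a timing
        return len(gc.get_objects())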
airspeed-velocity/asv
diff --git a/test/example_results/benchmarks.json b/test/example_results/benchmarks.json index 3c7bfbb..03a5ab3 100644 --- a/test/example_results/benchmarks.json +++ b/test/example_results/benchmarks.json @@ -203,7 +203,7 @@ "param_names": [], "params": [], "timeout": 60.0, - "type": "time", + "type": "track", "unit": "unit" }, "time_AAA_failure": { @@ -213,7 +213,7 @@ "params": [], "timeout": 60.0, "type": "time", - "unit": "unit" + "unit": "seconds" }, "time_AAA_skip": { "code": "foo\n", @@ -222,7 +222,7 @@ "params": [], "timeout": 60.0, "type": "time", - "unit": "unit" + "unit": "seconds" }, "time_coordinates.time_latitude": { "code": "foo\n", @@ -231,7 +231,7 @@ "params": [], "timeout": 60.0, "type": "time", - "unit": "unit" + "unit": "seconds" }, "time_quantity.time_quantity_array_conversion": { "code": "foo\n", @@ -240,7 +240,7 @@ "params": [], "timeout": 60.0, "type": "time", - "unit": "unit" + "unit": "seconds" }, "time_quantity.time_quantity_init_array": { "code": "foo\n", @@ -249,7 +249,7 @@ "params": [], "timeout": 60.0, "type": "time", - "unit": "unit" + "unit": "seconds" }, "time_quantity.time_quantity_init_scalar": { "code": "foo\n", @@ -258,7 +258,7 @@ "params": [], "timeout": 60.0, "type": "time", - "unit": "unit" + "unit": "seconds" }, "time_quantity.time_quantity_scalar_conversion": { "code": "foo\n", @@ -267,7 +267,7 @@ "params": [], "timeout": 60.0, "type": "time", - "unit": "unit" + "unit": "seconds" }, "time_quantity.time_quantity_ufunc_sin": { "code": "foo\n", @@ -276,7 +276,7 @@ "params": [], "timeout": 60.0, "type": "time", - "unit": "unit" + "unit": "seconds" }, "time_units.mem_unit": { "code": "foo\n", @@ -285,7 +285,7 @@ "params": [], "timeout": 60.0, "type": "time", - "unit": "unit" + "unit": "seconds" }, "time_units.time_simple_unit_parse": { "code": "foo\n", @@ -294,7 +294,7 @@ "params": [], "timeout": 60.0, "type": "time", - "unit": "unit" + "unit": "seconds" }, "time_units.time_unit_compose": { "code": "foo\n", @@ -303,7 +303,7 @@ "params": [], "timeout": 60.0, "type": "time", - "unit": "unit" + "unit": "seconds" }, "time_units.time_unit_parse": { "code": "foo\n", @@ -312,7 +312,7 @@ "params": [], "timeout": 60.0, "type": "time", - "unit": "unit" + "unit": "seconds" }, "time_units.time_unit_to": { "code": "foo\n", @@ -321,7 +321,7 @@ "params": [], "timeout": 60.0, "type": "time", - "unit": "unit" + "unit": "seconds" }, "time_units.time_very_simple_unit_parse": { "code": "foo\n", @@ -330,7 +330,7 @@ "params": [], "timeout": 60.0, "type": "time", - "unit": "unit" + "unit": "seconds" }, "time_other.time_parameterized": { "code": "foo\n", @@ -339,7 +339,7 @@ "params": [], "timeout": 60.0, "type": "time", - "unit": "unit" + "unit": "seconds" }, "time_ci_small": { "code": "foo\n", @@ -348,7 +348,7 @@ "params": [], "timeout": 60.0, "type": "time", - "unit": "unit" + "unit": "seconds" }, "time_ci_big": { "code": "foo\n", @@ -357,16 +357,16 @@ "params": [], "timeout": 60.0, "type": "time", - "unit": "unit" + "unit": "seconds" }, "time_with_version_match": { "code": "foo\n", - "name": "time_with_version", + "name": "time_with_version_match", "param_names": [], "params": [], "timeout": 60.0, "type": "time", - "unit": "unit", + "unit": "seconds", "version": "1" }, "time_with_version_mismatch_bench": { @@ -376,7 +376,7 @@ "params": [], "timeout": 60.0, "type": "time", - "unit": "unit", + "unit": "seconds", "version": "2" }, "time_with_version_mismatch_other": { @@ -386,7 +386,7 @@ "params": [], "timeout": 60.0, "type": "time", - "unit": "unit", + "unit": "seconds", 
"version": "1" }, "version": 2 diff --git a/test/example_results/cheetah/22b920c6-py2.7-Cython-numpy1.8.json b/test/example_results/cheetah/22b920c6-py2.7-Cython-numpy1.8.json index ab06818..de9ed30 100644 --- a/test/example_results/cheetah/22b920c6-py2.7-Cython-numpy1.8.json +++ b/test/example_results/cheetah/22b920c6-py2.7-Cython-numpy1.8.json @@ -37,7 +37,8 @@ "time_ci_big": {"result": 1, "stats": [{"ci_99": [0.5, 2.5], "q_25": 0.5, "q_75": 2.5}]}, "time_with_version_match": 1, "time_with_version_mismatch_bench": 1, - "time_with_version_mismatch_other": 1 + "time_with_version_mismatch_other": 1, + "time_secondary.track_value": 42 }, "benchmark_version": { "time_with_version_match": "1", diff --git a/test/example_results/cheetah/fcf8c079-py2.7-Cython-numpy1.8.json b/test/example_results/cheetah/fcf8c079-py2.7-Cython-numpy1.8.json index 8319525..2dad502 100644 --- a/test/example_results/cheetah/fcf8c079-py2.7-Cython-numpy1.8.json +++ b/test/example_results/cheetah/fcf8c079-py2.7-Cython-numpy1.8.json @@ -34,11 +34,12 @@ "result": [1, 4, null] }, "params_examples.ParamSuite.track_value": null, - "time_ci_small": {"result": 3, "stats": [{"ci_99": [3.9, 3.1], "q_25": 3, "q_75": 3}]}, + "time_ci_small": {"result": 3, "stats": [{"ci_99": [3.1, 3.9], "q_25": 3, "q_75": 3}]}, "time_ci_big": {"result": 3, "stats": [{"ci_99": [1.5, 3.5], "q_25": 1.5, "q_75": 3.5}]}, "time_with_version_match": 3, "time_with_version_mismatch_bench": 3, - "time_with_version_mismatch_other": 3 + "time_with_version_mismatch_other": 3, + "time_secondary.track_value": 42 }, "benchmark_version": { "time_with_version_match": "1", diff --git a/test/test_compare.py b/test/test_compare.py index 0584855..00beca4 100644 --- a/test/test_compare.py +++ b/test/test_compare.py @@ -46,6 +46,7 @@ All benchmarks: 83.6μs 55.4μs 0.66 time_quantity.time_quantity_init_scalar 282μs 147μs 0.52 time_quantity.time_quantity_scalar_conversion + 1.31ms 7.75ms 5.91 time_quantity.time_quantity_ufunc_sin + 42 42 1.00 time_secondary.track_value 5.73m 5.73m 1.00 time_units.mem_unit + 125μs 3.81ms 30.42 time_units.time_simple_unit_parse 1.64ms 1.53ms 0.93 time_units.time_unit_compose @@ -68,12 +69,14 @@ Benchmarks that have stayed the same: before after ratio [22b920c6] [fcf8c079] + failed failed n/a time_AAA_failure n/a n/a n/a time_AAA_skip 1.00±1s 3.00±1s ~3.00 time_ci_big 1.00s 1.00s 1.00 time_other.time_parameterized(1) 2.00s 4.00s 2.00 time_other.time_parameterized(2) 83.6μs 55.4μs 0.66 time_quantity.time_quantity_init_scalar 282μs 147μs 0.52 time_quantity.time_quantity_scalar_conversion + 42 42 1.00 time_secondary.track_value 5.73m 5.73m 1.00 time_units.mem_unit 1.64ms 1.53ms 0.93 time_units.time_unit_compose 11.9μs 13.1μs 1.10 time_units.time_very_simple_unit_parse @@ -83,7 +86,6 @@ Benchmarks that have got worse: before after ratio [22b920c6] [fcf8c079] ! n/a failed n/a params_examples.ParamSuite.track_value - failed failed n/a time_AAA_failure + 1.00±0s 3.00±0s 3.00 time_ci_small ! 454μs failed n/a time_coordinates.time_latitude ! 
3.00s failed n/a time_other.time_parameterized(3) @@ -198,6 +200,9 @@ def test_compare_name_lookup(dvcs_type, capsys, tmpdir): for fn in ['feea15ca-py2.7-Cython-numpy1.8.json', 'machine.json']: shutil.copyfile(os.path.join(src, fn), os.path.join(dst, fn)) + shutil.copyfile(os.path.join(RESULT_DIR, 'benchmarks.json'), + os.path.join(result_dir, 'benchmarks.json')) + # Copy to different commit fn_1 = os.path.join(dst, 'feea15ca-py2.7-Cython-numpy1.8.json') fn_2 = os.path.join(dst, commit_hash[:8] + '-py2.7-Cython-numpy1.8.json') diff --git a/test/test_show.py b/test/test_show.py index 3eaec04..08e74cc 100644 --- a/test/test_show.py +++ b/test/test_show.py @@ -39,7 +39,7 @@ def test_show(capsys, tmpdir): tools.run_asv_with_conf(conf, 'show', 'fcf8c079') text, err = capsys.readouterr() - assert "time_ci_small [cheetah/py2.7-numpy1.8]\n 3±0\n\n" in text + assert "time_ci_small [cheetah/py2.7-numpy1.8]\n 3.00±0s\n\n" in text tools.run_asv_with_conf(conf, 'show', 'fcf8c079', '--machine=cheetah', '--bench=time_ci', '--details') @@ -48,11 +48,11 @@ def test_show(capsys, tmpdir): Commit: fcf8c079 time_ci_big [cheetah/py2.7-numpy1.8] - 3±1 - ci_99: (1.5, 3.5) + 3.00±1s + ci_99: (1.50s, 3.50s) time_ci_small [cheetah/py2.7-numpy1.8] - 3±0 - ci_99: (3.9, 3.1) + 3.00±0s + ci_99: (3.10s, 3.90s) """) assert text.strip() == expected.strip() diff --git a/test/test_workflow.py b/test/test_workflow.py index d3c35e7..df75b43 100644 --- a/test/test_workflow.py +++ b/test/test_workflow.py @@ -189,7 +189,7 @@ def test_continuous(capfd, basic_conf): text, err = capfd.readouterr() assert "SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY" in text - assert "+ 1.00s 6.00s 6.00 params_examples.track_find_test(2)" in text + assert "+ 1 6 6.00 params_examples.track_find_test(2)" in text assert "params_examples.ClassOne" in text # Check processes were interleaved (timing benchmark was run twice)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 1 }
0.31
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-xdist", "pytest-rerunfailures", "feedparser", "python-hglib" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
-e git+https://github.com/airspeed-velocity/asv.git@049f4fa74d981a4a1996b2c109b912ea79fd462f#egg=asv exceptiongroup==1.2.2 execnet==2.1.1 feedparser==6.0.11 iniconfig==2.1.0 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 pytest-rerunfailures==15.0 pytest-xdist==3.6.1 python-hglib==2.6.2 sgmllib3k==1.0.0 six==1.17.0 tomli==2.2.1
name: asv channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - exceptiongroup==1.2.2 - execnet==2.1.1 - feedparser==6.0.11 - iniconfig==2.1.0 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - pytest-rerunfailures==15.0 - pytest-xdist==3.6.1 - python-hglib==2.6.2 - sgmllib3k==1.0.0 - six==1.17.0 - tomli==2.2.1 prefix: /opt/conda/envs/asv
[ "test/test_compare.py::test_compare" ]
[ "test/test_compare.py::test_compare_name_lookup[dvcs_type1]" ]
[ "test/test_compare.py::test_compare_name_lookup[git]", "test/test_show.py::test_show" ]
[]
BSD 3-Clause "New" or "Revised" License
3,005
[ "asv/commands/compare.py" ]
[ "asv/commands/compare.py" ]
zopefoundation__zope.schema-52
665e2e73f639d07620f421540100cc29fa3498f7
2018-09-01 21:01:24
0a719f2ded189630a0a77e9292a66a3662c6512c
diff --git a/CHANGES.rst b/CHANGES.rst index 624d87f..5eada5d 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -97,6 +97,16 @@ - Make ``Iterable`` and ``Container`` properly implement ``IIterable`` and ``IContainer``, respectively. +- Make ``SimpleVocabulary.fromItems`` accept triples to allow + specifying the title of terms. See `issue 18 + <https://github.com/zopefoundation/zope.schema/issues/18>`_. + +- Make ``TreeVocabulary.fromDict`` only create + ``ITitledTokenizedTerms`` when a title is actually provided. + +- Make ``SimpleVocabulary`` and ``SimpleTerm`` have value-based + equality and hashing methods. + 4.5.0 (2017-07-10) ================== diff --git a/src/zope/schema/vocabulary.py b/src/zope/schema/vocabulary.py index 9649547..a1211e4 100644 --- a/src/zope/schema/vocabulary.py +++ b/src/zope/schema/vocabulary.py @@ -17,6 +17,7 @@ from collections import OrderedDict from zope.interface import directlyProvides from zope.interface import implementer +from zope.interface import providedBy from zope.schema._compat import text_type from zope.schema.interfaces import ITitledTokenizedTerm @@ -32,14 +33,20 @@ _marker = object() @implementer(ITokenizedTerm) class SimpleTerm(object): - """Simple tokenized term used by SimpleVocabulary.""" + """ + Simple tokenized term used by SimpleVocabulary. + + .. versionchanged:: 4.6.0 + Implement equality and hashing based on the value, token and title. + """ def __init__(self, value, token=None, title=None): """Create a term for *value* and *token*. If *token* is omitted, str(value) is used for the token, escaping any non-ASCII characters. - If *title* is provided, term implements `ITitledTokenizedTerm`. + If *title* is provided, term implements + :class:`zope.schema.interfaces.ITitledTokenizedTerm`. """ self.value = value if token is None: @@ -64,10 +71,35 @@ class SimpleTerm(object): if title is not None: directlyProvides(self, ITitledTokenizedTerm) + def __eq__(self, other): + if other is self: + return True + + if not isinstance(other, SimpleTerm): + return False + + return ( + self.value == other.value + and self.token == other.token + and self.title == other.title + ) + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return hash((self.value, self.token, self.title)) + @implementer(IVocabularyTokenized) class SimpleVocabulary(object): - """Vocabulary that works from a sequence of terms.""" + """ + Vocabulary that works from a sequence of terms. + + .. versionchanged:: 4.6.0 + Implement equality and hashing based on the terms list + and interfaces implemented by this object. + """ def __init__(self, terms, *interfaces, **kwargs): """Initialize the vocabulary given a list of terms. @@ -80,14 +112,14 @@ class SimpleVocabulary(object): By default, ValueErrors are thrown if duplicate values or tokens are passed in. If you want to swallow these exceptions, pass - in swallow_duplicates=True. In this case, the values will + in ``swallow_duplicates=True``. In this case, the values will override themselves. """ self.by_value = {} self.by_token = {} self._terms = terms + swallow_dupes = kwargs.get('swallow_duplicates', False) for term in self._terms: - swallow_dupes = kwargs.get('swallow_duplicates', False) if not swallow_dupes: if term.value in self.by_value: raise ValueError( @@ -102,16 +134,23 @@ class SimpleVocabulary(object): @classmethod def fromItems(cls, items, *interfaces): - """Construct a vocabulary from a list of (token, value) pairs. 
+ """ + Construct a vocabulary from a list of (token, value) pairs or + (token, value, title) triples. The list does not have to be + homogeneous. The order of the items is preserved as the order of the terms - in the vocabulary. Terms are created by calling the class - method createTerm() with the pair (value, token). + in the vocabulary. Terms are created by calling the class + method :meth:`createTerm`` with the pair or triple. One or more interfaces may also be provided so that alternate widgets may be bound without subclassing. + + .. versionchanged:: 4.6.0 + Allow passing in triples to set item titles. """ - terms = [cls.createTerm(value, token) for (token, value) in items] + terms = [cls.createTerm(item[1], item[0], *item[2:]) + for item in items] return cls(terms, *interfaces) @classmethod @@ -119,10 +158,10 @@ class SimpleVocabulary(object): """Construct a vocabulary from a simple list. Values of the list become both the tokens and values of the - terms in the vocabulary. The order of the values is preserved - as the order of the terms in the vocabulary. Tokens are - created by calling the class method createTerm() with the - value as the only parameter. + terms in the vocabulary. The order of the values is preserved + as the order of the terms in the vocabulary. Tokens are + created by calling the class method :meth:`createTerm()` with + the value as the only parameter. One or more interfaces may also be provided so that alternate widgets may be bound without subclassing. @@ -169,6 +208,21 @@ class SimpleVocabulary(object): """See zope.schema.interfaces.IIterableVocabulary""" return len(self.by_value) + def __eq__(self, other): + if other is self: + return True + + if not isinstance(other, SimpleVocabulary): + return False + + return self._terms == other._terms and providedBy(self) == providedBy(other) + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return hash(tuple(self._terms)) + def _createTermTree(ttree, dict_): """ Helper method that creates a tree-like dict with ITokenizedTerm @@ -177,7 +231,7 @@ def _createTermTree(ttree, dict_): See fromDict for more details. """ for key in sorted(dict_.keys()): - term = SimpleTerm(key[1], key[0], key[-1]) + term = SimpleTerm(key[1], key[0], *key[2:]) ttree[term] = TreeVocabulary.terms_factory() _createTermTree(ttree[term], dict_[key]) return ttree @@ -272,7 +326,8 @@ class TreeVocabulary(object): OrderedDict), that has tuples for keys. The tuples should have either 2 or 3 values, i.e: - (token, value, title) or (token, value) + (token, value, title) or (token, value). Only tuples that have + three values will create a :class:`zope.schema.interfaces.ITitledTokenizedTerm`. For example, a dict with 2-valued tuples: @@ -290,6 +345,10 @@ class TreeVocabulary(object): } One or more interfaces may also be provided so that alternate widgets may be bound without subclassing. + + .. versionchanged:: 4.6.0 + Only create ``ITitledTokenizedTerm`` when a title is actually + provided. """ return cls(_createTermTree(cls.terms_factory(), dict_), *interfaces)
Creating vocabularies. There are a few helper methods on SimpleVocabulary that create vocabularies for you. None of them appears to allow passing a title through to the SimpleTerm objects that get created. I quite often find myself writing a helper that takes titles and builds a simple vocabulary. It would be nice to have this added to the SimpleVocabulary class.
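A minimal sketch of the kind of helper the issue describes, using only the public SimpleTerm and SimpleVocabulary constructors; the titled_vocabulary name and the example values are invented for illustration:

from zope.schema.vocabulary import SimpleTerm, SimpleVocabulary

def titled_vocabulary(pairs):
    """Build a vocabulary from (value, title) pairs; tokens default to str(value)."""
    return SimpleVocabulary(
        [SimpleTerm(value, title=title) for value, title in pairs])

colors = titled_vocabulary([(u"r", u"Red"), (u"g", u"Green"), (u"b", u"Blue")])
print([term.title for term in colors])   # ['Red', 'Green', 'Blue']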
zopefoundation/zope.schema
diff --git a/src/zope/schema/tests/test__field.py b/src/zope/schema/tests/test__field.py index cb0e127..0d12ad7 100644 --- a/src/zope/schema/tests/test__field.py +++ b/src/zope/schema/tests/test__field.py @@ -705,16 +705,13 @@ class ChoiceTests(EqualityTestsMixin, from zope.schema._field import Choice return Choice - from zope.schema.vocabulary import SimpleVocabulary - # SimpleVocabulary uses identity semantics for equality - _default_vocabulary = SimpleVocabulary.fromValues([1, 2, 3]) - def _makeOneFromClass(self, cls, *args, **kwargs): if (not args and 'vocabulary' not in kwargs and 'values' not in kwargs and 'source' not in kwargs): - kwargs['vocabulary'] = self._default_vocabulary + from zope.schema.vocabulary import SimpleVocabulary + kwargs['vocabulary'] = SimpleVocabulary.fromValues([1, 2, 3]) return super(ChoiceTests, self)._makeOneFromClass(cls, *args, **kwargs) def _getTargetInterface(self): diff --git a/src/zope/schema/tests/test_vocabulary.py b/src/zope/schema/tests/test_vocabulary.py index 86e8164..63904b6 100644 --- a/src/zope/schema/tests/test_vocabulary.py +++ b/src/zope/schema/tests/test_vocabulary.py @@ -66,6 +66,41 @@ class SimpleTermTests(unittest.TestCase): self.assertFalse(ITitledTokenizedTerm.providedBy(term)) + def test__eq__and__hash__(self): + from zope import interface + + term = self._makeOne('value') + # Equal to itself + self.assertEqual(term, term) + # Not equal to a different class + self.assertNotEqual(term, object()) + self.assertNotEqual(object(), term) + + term2 = self._makeOne('value') + # Equal to another with the same value + self.assertEqual(term, term2) + # equal objects hash the same + self.assertEqual(hash(term), hash(term2)) + + # Providing tokens or titles that differ + # changes equality + term = self._makeOne('value', 'token') + self.assertNotEqual(term, term2) + self.assertNotEqual(hash(term), hash(term2)) + + term2 = self._makeOne('value', 'token') + self.assertEqual(term, term2) + self.assertEqual(hash(term), hash(term2)) + + term = self._makeOne('value', 'token', 'title') + self.assertNotEqual(term, term2) + self.assertNotEqual(hash(term), hash(term2)) + + term2 = self._makeOne('value', 'token', 'title') + self.assertEqual(term, term2) + self.assertEqual(hash(term), hash(term2)) + + class SimpleVocabularyTests(unittest.TestCase): def _getTargetClass(self): @@ -102,8 +137,8 @@ class SimpleVocabularyTests(unittest.TestCase): self.assertTrue(value in vocabulary) self.assertFalse('ABC' in vocabulary) for term in vocabulary: - self.assertTrue(vocabulary.getTerm(term.value) is term) - self.assertTrue(vocabulary.getTermByToken(term.token) is term) + self.assertIs(vocabulary.getTerm(term.value), term) + self.assertIs(vocabulary.getTermByToken(term.token), term) def test_fromValues(self): from zope.interface import Interface @@ -119,7 +154,7 @@ class SimpleVocabularyTests(unittest.TestCase): self.assertTrue(ITokenizedTerm.providedBy(term)) self.assertEqual(term.value, value) for value in VALUES: - self.assertTrue(value in vocabulary) + self.assertIn(value, vocabulary) def test_fromItems(self): from zope.interface import Interface @@ -136,7 +171,30 @@ class SimpleVocabularyTests(unittest.TestCase): self.assertEqual(term.token, item[0]) self.assertEqual(term.value, item[1]) for item in ITEMS: - self.assertTrue(item[1] in vocabulary) + self.assertIn(item[1], vocabulary) + + def test_fromItems_triples(self): + from zope.interface import Interface + from zope.schema.interfaces import ITitledTokenizedTerm + + class IStupid(Interface): + pass + + ITEMS = 
[ + ('one', 1, 'title 1'), + ('two', 2, 'title 2'), + ('three', 3, 'title 3'), + ('fore!', 4, 'title four') + ] + vocabulary = self._getTargetClass().fromItems(ITEMS) + self.assertEqual(len(vocabulary), len(ITEMS)) + for item, term in zip(ITEMS, vocabulary): + self.assertTrue(ITitledTokenizedTerm.providedBy(term)) + self.assertEqual(term.token, item[0]) + self.assertEqual(term.value, item[1]) + self.assertEqual(term.title, item[2]) + for item in ITEMS: + self.assertIn(item[1], vocabulary) def test_createTerm(self): from zope.schema.vocabulary import SimpleTerm @@ -204,6 +262,38 @@ class SimpleVocabularyTests(unittest.TestCase): for term in vocab: self.assertEqual(term.value + 1, term.nextvalue) + def test__eq__and__hash__(self): + from zope import interface + + values = [1, 4, 2, 9] + vocabulary = self._getTargetClass().fromValues(values) + + # Equal to itself + self.assertEqual(vocabulary, vocabulary) + # Not to other classes + self.assertNotEqual(vocabulary, object()) + self.assertNotEqual(object(), vocabulary) + + # Equal to another object with the same values + vocabulary2 = self._getTargetClass().fromValues(values) + self.assertEqual(vocabulary, vocabulary2) + self.assertEqual(hash(vocabulary), hash(vocabulary2)) + + # Changing the values or the interfaces changes + # equality + class IFoo(interface.Interface): + "an interface" + + vocabulary = self._getTargetClass().fromValues(values, IFoo) + self.assertNotEqual(vocabulary, vocabulary2) + # Interfaces are not taken into account in the hash; that's + # OK: equal hashes do not imply equal objects + self.assertEqual(hash(vocabulary), hash(vocabulary2)) + + vocabulary2 = self._getTargetClass().fromValues(values, IFoo) + self.assertEqual(vocabulary, vocabulary2) + self.assertEqual(hash(vocabulary), hash(vocabulary2)) + # Test _createTermTree via TreeVocabulary.fromDict @@ -256,6 +346,18 @@ class TreeVocabularyTests(unittest.TestCase): def tree_vocab_3(self): return self._getTargetClass().fromDict(self.business_tree()) + def test_only_titled_if_triples(self): + from zope.schema.interfaces import ITitledTokenizedTerm + no_titles = self.tree_vocab_2() + for term in no_titles: + self.assertIsNone(term.title) + self.assertFalse(ITitledTokenizedTerm.providedBy(term)) + + all_titles = self.tree_vocab_3() + for term in all_titles: + self.assertIsNotNone(term.title) + self.assertTrue(ITitledTokenizedTerm.providedBy(term)) + def test_implementation(self): from zope.interface.verify import verifyObject from zope.interface.common.mapping import IEnumerableMapping
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 2 }
4.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[test]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "coverage", "sphinx", "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work Babel==2.11.0 certifi==2021.5.30 charset-normalizer==2.0.12 coverage==6.2 docutils==0.18.1 idna==3.10 imagesize==1.4.1 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work Jinja2==3.0.3 MarkupSafe==2.0.1 more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work Pygments==2.14.0 pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 pytz==2025.2 requests==2.27.1 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work urllib3==1.26.20 zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work zope.event==4.6 zope.exceptions==4.6 zope.interface==5.5.2 -e git+https://github.com/zopefoundation/zope.schema.git@665e2e73f639d07620f421540100cc29fa3498f7#egg=zope.schema zope.testing==5.0.1 zope.testrunner==5.6
name: zope.schema channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - babel==2.11.0 - charset-normalizer==2.0.12 - coverage==6.2 - docutils==0.18.1 - idna==3.10 - imagesize==1.4.1 - jinja2==3.0.3 - markupsafe==2.0.1 - pygments==2.14.0 - pytz==2025.2 - requests==2.27.1 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - urllib3==1.26.20 - zope-event==4.6 - zope-exceptions==4.6 - zope-interface==5.5.2 - zope-testing==5.0.1 - zope-testrunner==5.6 prefix: /opt/conda/envs/zope.schema
[ "src/zope/schema/tests/test__field.py::ChoiceTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::ChoiceTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::ChoiceTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test_vocabulary.py::SimpleTermTests::test__eq__and__hash__", "src/zope/schema/tests/test_vocabulary.py::SimpleVocabularyTests::test__eq__and__hash__", "src/zope/schema/tests/test_vocabulary.py::SimpleVocabularyTests::test_fromItems_triples", "src/zope/schema/tests/test_vocabulary.py::TreeVocabularyTests::test_only_titled_if_triples" ]
[]
[ "src/zope/schema/tests/test__field.py::BytesTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::BytesTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::BytesTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::BytesTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::BytesTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::BytesTests::test_fromUnicode_hit", "src/zope/schema/tests/test__field.py::BytesTests::test_fromUnicode_miss", "src/zope/schema/tests/test__field.py::BytesTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::BytesTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::BytesTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::BytesTests::test_is_hashable", "src/zope/schema/tests/test__field.py::BytesTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::BytesTests::test_validate_required", "src/zope/schema/tests/test__field.py::BytesTests::test_validate_w_invalid_default", "src/zope/schema/tests/test__field.py::BytesTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::ASCIITests::test___eq___different_type", "src/zope/schema/tests/test__field.py::ASCIITests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::ASCIITests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::ASCIITests::test__validate_empty", "src/zope/schema/tests/test__field.py::ASCIITests::test__validate_non_empty_hit", "src/zope/schema/tests/test__field.py::ASCIITests::test__validate_non_empty_miss", "src/zope/schema/tests/test__field.py::ASCIITests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::ASCIITests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::ASCIITests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::ASCIITests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::ASCIITests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::ASCIITests::test_is_hashable", "src/zope/schema/tests/test__field.py::ASCIITests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::BytesLineTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::BytesLineTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::BytesLineTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::BytesLineTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::BytesLineTests::test_constraint", "src/zope/schema/tests/test__field.py::BytesLineTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::BytesLineTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::BytesLineTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::BytesLineTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::BytesLineTests::test_is_hashable", "src/zope/schema/tests/test__field.py::BytesLineTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::BytesLineTests::test_validate_required", "src/zope/schema/tests/test__field.py::BytesLineTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::ASCIILineTests::test___eq___different_type", 
"src/zope/schema/tests/test__field.py::ASCIILineTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::ASCIILineTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_constraint", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_is_hashable", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_validate_required", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::FloatTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::FloatTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::FloatTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::FloatTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::FloatTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::FloatTests::test_fromUnicode_hit", "src/zope/schema/tests/test__field.py::FloatTests::test_fromUnicode_miss", "src/zope/schema/tests/test__field.py::FloatTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::FloatTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::FloatTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::FloatTests::test_is_hashable", "src/zope/schema/tests/test__field.py::FloatTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::FloatTests::test_validate_max", "src/zope/schema/tests/test__field.py::FloatTests::test_validate_min", "src/zope/schema/tests/test__field.py::FloatTests::test_validate_min_and_max", "src/zope/schema/tests/test__field.py::FloatTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::FloatTests::test_validate_required", "src/zope/schema/tests/test__field.py::DecimalTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::DecimalTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::DecimalTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::DecimalTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::DecimalTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::DecimalTests::test_fromUnicode_hit", "src/zope/schema/tests/test__field.py::DecimalTests::test_fromUnicode_miss", "src/zope/schema/tests/test__field.py::DecimalTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::DecimalTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::DecimalTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::DecimalTests::test_is_hashable", "src/zope/schema/tests/test__field.py::DecimalTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::DecimalTests::test_validate_max", 
"src/zope/schema/tests/test__field.py::DecimalTests::test_validate_min", "src/zope/schema/tests/test__field.py::DecimalTests::test_validate_min_and_max", "src/zope/schema/tests/test__field.py::DecimalTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::DecimalTests::test_validate_required", "src/zope/schema/tests/test__field.py::DatetimeTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::DatetimeTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::DatetimeTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::DatetimeTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::DatetimeTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::DatetimeTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::DatetimeTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::DatetimeTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::DatetimeTests::test_is_hashable", "src/zope/schema/tests/test__field.py::DatetimeTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_required", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_w_max", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_w_min", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_w_min_and_max", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::DateTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::DateTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::DateTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::DateTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::DateTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::DateTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::DateTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::DateTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::DateTests::test_is_hashable", "src/zope/schema/tests/test__field.py::DateTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::DateTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::DateTests::test_validate_required", "src/zope/schema/tests/test__field.py::DateTests::test_validate_w_max", "src/zope/schema/tests/test__field.py::DateTests::test_validate_w_min", "src/zope/schema/tests/test__field.py::DateTests::test_validate_w_min_and_max", "src/zope/schema/tests/test__field.py::DateTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::TimedeltaTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::TimedeltaTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::TimedeltaTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_hash_across_unequal_instances", 
"src/zope/schema/tests/test__field.py::TimedeltaTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_is_hashable", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_validate_max", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_validate_min", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_validate_min_and_max", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_validate_required", "src/zope/schema/tests/test__field.py::TimeTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::TimeTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::TimeTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::TimeTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::TimeTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::TimeTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::TimeTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::TimeTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::TimeTests::test_is_hashable", "src/zope/schema/tests/test__field.py::TimeTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::TimeTests::test_validate_max", "src/zope/schema/tests/test__field.py::TimeTests::test_validate_min", "src/zope/schema/tests/test__field.py::TimeTests::test_validate_min_and_max", "src/zope/schema/tests/test__field.py::TimeTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::TimeTests::test_validate_required", "src/zope/schema/tests/test__field.py::ChoiceTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::ChoiceTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_int", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_mixed", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_source_is_ICSB_bound", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_source_is_ICSB_unbound", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_string", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_tuple", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_w_named_vocabulary", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_w_named_vocabulary_invalid", "src/zope/schema/tests/test__field.py::ChoiceTests::test_bind_w_preconstructed_vocabulary", "src/zope/schema/tests/test__field.py::ChoiceTests::test_bind_w_voc_is_ICSB", "src/zope/schema/tests/test__field.py::ChoiceTests::test_bind_w_voc_is_ICSB_but_not_ISource", "src/zope/schema/tests/test__field.py::ChoiceTests::test_bind_w_voc_not_ICSB", "src/zope/schema/tests/test__field.py::ChoiceTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_both_vocabulary_and_source", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_both_vocabulary_and_values", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_invalid_source", 
"src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_invalid_vocabulary", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_w_named_vocabulary", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_w_preconstructed_vocabulary", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_w_unicode_non_ascii_values", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_w_values", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_wo_values_vocabulary_or_source", "src/zope/schema/tests/test__field.py::ChoiceTests::test_fromUnicode_hit", "src/zope/schema/tests/test__field.py::ChoiceTests::test_fromUnicode_miss", "src/zope/schema/tests/test__field.py::ChoiceTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::ChoiceTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::ChoiceTests::test_is_hashable", "src/zope/schema/tests/test__field.py::URITests::test___eq___different_type", "src/zope/schema/tests/test__field.py::URITests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::URITests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::URITests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::URITests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::URITests::test_fromUnicode_invalid", "src/zope/schema/tests/test__field.py::URITests::test_fromUnicode_ok", "src/zope/schema/tests/test__field.py::URITests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::URITests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::URITests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::URITests::test_is_hashable", "src/zope/schema/tests/test__field.py::URITests::test_validate_not_a_uri", "src/zope/schema/tests/test__field.py::URITests::test_validate_not_required", "src/zope/schema/tests/test__field.py::URITests::test_validate_required", "src/zope/schema/tests/test__field.py::URITests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::DottedNameTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::DottedNameTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::DottedNameTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::DottedNameTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::DottedNameTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::DottedNameTests::test_ctor_max_dots_invalid", "src/zope/schema/tests/test__field.py::DottedNameTests::test_ctor_max_dots_valid", "src/zope/schema/tests/test__field.py::DottedNameTests::test_ctor_min_dots_invalid", "src/zope/schema/tests/test__field.py::DottedNameTests::test_ctor_min_dots_valid", "src/zope/schema/tests/test__field.py::DottedNameTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::DottedNameTests::test_fromUnicode_dotted_name_ok", "src/zope/schema/tests/test__field.py::DottedNameTests::test_fromUnicode_invalid", "src/zope/schema/tests/test__field.py::DottedNameTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::DottedNameTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::DottedNameTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::DottedNameTests::test_is_hashable", 
"src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_not_a_dotted_name", "src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_required", "src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_w_max_dots", "src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_w_min_dots", "src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::IdTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::IdTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::IdTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::IdTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::IdTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::IdTests::test_fromUnicode_dotted_name_ok", "src/zope/schema/tests/test__field.py::IdTests::test_fromUnicode_invalid", "src/zope/schema/tests/test__field.py::IdTests::test_fromUnicode_url_ok", "src/zope/schema/tests/test__field.py::IdTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::IdTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::IdTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::IdTests::test_is_hashable", "src/zope/schema/tests/test__field.py::IdTests::test_validate_not_a_uri", "src/zope/schema/tests/test__field.py::IdTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::IdTests::test_validate_required", "src/zope/schema/tests/test__field.py::IdTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_is_hashable", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_validate_required", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::CollectionTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::CollectionTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::CollectionTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::CollectionTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::CollectionTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::CollectionTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::CollectionTests::test_bind_wo_value_Type", 
"src/zope/schema/tests/test__field.py::CollectionTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::CollectionTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::CollectionTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::CollectionTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::CollectionTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::CollectionTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::CollectionTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::CollectionTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::CollectionTests::test_is_hashable", "src/zope/schema/tests/test__field.py::CollectionTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::CollectionTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::CollectionTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::CollectionTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::CollectionTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::CollectionTests::test_validate_required", "src/zope/schema/tests/test__field.py::SequenceTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::SequenceTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::SequenceTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::SequenceTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::SequenceTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::SequenceTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::SequenceTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::SequenceTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::SequenceTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::SequenceTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::SequenceTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::SequenceTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::SequenceTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::SequenceTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::SequenceTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::SequenceTests::test_is_hashable", "src/zope/schema/tests/test__field.py::SequenceTests::test_mutable_sequence", "src/zope/schema/tests/test__field.py::SequenceTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::SequenceTests::test_sequence", "src/zope/schema/tests/test__field.py::SequenceTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::SequenceTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::SequenceTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::SequenceTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::SequenceTests::test_validate_required", "src/zope/schema/tests/test__field.py::SequenceTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::TupleTests::test___eq___different_type", 
"src/zope/schema/tests/test__field.py::TupleTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::TupleTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::TupleTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::TupleTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::TupleTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::TupleTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::TupleTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::TupleTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::TupleTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::TupleTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::TupleTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::TupleTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::TupleTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::TupleTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::TupleTests::test_is_hashable", "src/zope/schema/tests/test__field.py::TupleTests::test_mutable_sequence", "src/zope/schema/tests/test__field.py::TupleTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::TupleTests::test_sequence", "src/zope/schema/tests/test__field.py::TupleTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::TupleTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::TupleTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::TupleTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::TupleTests::test_validate_required", "src/zope/schema/tests/test__field.py::TupleTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_is_hashable", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_mutable_sequence", 
"src/zope/schema/tests/test__field.py::MutableSequenceTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_sequence", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_required", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::ListTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::ListTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::ListTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::ListTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::ListTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::ListTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::ListTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::ListTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::ListTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::ListTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::ListTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::ListTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::ListTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::ListTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::ListTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::ListTests::test_is_hashable", "src/zope/schema/tests/test__field.py::ListTests::test_mutable_sequence", "src/zope/schema/tests/test__field.py::ListTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::ListTests::test_sequence", "src/zope/schema/tests/test__field.py::ListTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::ListTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::ListTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::ListTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::ListTests::test_validate_required", "src/zope/schema/tests/test__field.py::ListTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::SetTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::SetTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::SetTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::SetTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::SetTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::SetTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::SetTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::SetTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::SetTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::SetTests::test_ctor_disallows_unique", 
"src/zope/schema/tests/test__field.py::SetTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::SetTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::SetTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::SetTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::SetTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::SetTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::SetTests::test_is_hashable", "src/zope/schema/tests/test__field.py::SetTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::SetTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::SetTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::SetTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::SetTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::SetTests::test_validate_required", "src/zope/schema/tests/test__field.py::SetTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::FrozenSetTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::FrozenSetTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::FrozenSetTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::FrozenSetTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::FrozenSetTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_ctor_disallows_unique", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_is_hashable", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_required", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::ObjectTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::ObjectTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::ObjectTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::ObjectTests::test__validate_w_empty_schema", 
"src/zope/schema/tests/test__field.py::ObjectTests::test__validate_w_value_not_providing_schema", "src/zope/schema/tests/test__field.py::ObjectTests::test__validate_w_value_providing_schema", "src/zope/schema/tests/test__field.py::ObjectTests::test__validate_w_value_providing_schema_but_invalid_fields", "src/zope/schema/tests/test__field.py::ObjectTests::test__validate_w_value_providing_schema_but_missing_fields", "src/zope/schema/tests/test__field.py::ObjectTests::test_class_conforms_to_IObject", "src/zope/schema/tests/test__field.py::ObjectTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::ObjectTests::test_ctor_w_bad_schema", "src/zope/schema/tests/test__field.py::ObjectTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::ObjectTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::ObjectTests::test_instance_conforms_to_IObject", "src/zope/schema/tests/test__field.py::ObjectTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::ObjectTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::ObjectTests::test_is_hashable", "src/zope/schema/tests/test__field.py::ObjectTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::ObjectTests::test_set_allows_IBOAE_subscr_to_replace_value", "src/zope/schema/tests/test__field.py::ObjectTests::test_set_emits_IBOAE", "src/zope/schema/tests/test__field.py::ObjectTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::ObjectTests::test_validate_required", "src/zope/schema/tests/test__field.py::ObjectTests::test_validate_w_cycles", "src/zope/schema/tests/test__field.py::ObjectTests::test_validate_w_cycles_collection_not_valid", "src/zope/schema/tests/test__field.py::ObjectTests::test_validate_w_cycles_object_not_valid", "src/zope/schema/tests/test__field.py::ObjectTests::test_validates_invariants_by_default", "src/zope/schema/tests/test__field.py::MappingTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::MappingTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::MappingTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::MappingTests::test_bind_binds_key_and_value_types", "src/zope/schema/tests/test__field.py::MappingTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::MappingTests::test_ctor_key_type_not_IField", "src/zope/schema/tests/test__field.py::MappingTests::test_ctor_value_type_not_IField", "src/zope/schema/tests/test__field.py::MappingTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::MappingTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::MappingTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::MappingTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::MappingTests::test_is_hashable", "src/zope/schema/tests/test__field.py::MappingTests::test_mapping", "src/zope/schema/tests/test__field.py::MappingTests::test_mutable_mapping", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_invalid_key_type", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_invalid_value_type", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_min_length", 
"src/zope/schema/tests/test__field.py::MappingTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_required", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::MutableMappingTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::MutableMappingTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::MutableMappingTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_bind_binds_key_and_value_types", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_ctor_key_type_not_IField", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_ctor_value_type_not_IField", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_is_hashable", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_mapping", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_mutable_mapping", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_invalid_key_type", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_invalid_value_type", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_required", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::DictTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::DictTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::DictTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::DictTests::test_bind_binds_key_and_value_types", "src/zope/schema/tests/test__field.py::DictTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::DictTests::test_ctor_key_type_not_IField", "src/zope/schema/tests/test__field.py::DictTests::test_ctor_value_type_not_IField", "src/zope/schema/tests/test__field.py::DictTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::DictTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::DictTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::DictTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::DictTests::test_is_hashable", "src/zope/schema/tests/test__field.py::DictTests::test_mapping", "src/zope/schema/tests/test__field.py::DictTests::test_mutable_mapping", 
"src/zope/schema/tests/test__field.py::DictTests::test_validate_invalid_key_type", "src/zope/schema/tests/test__field.py::DictTests::test_validate_invalid_value_type", "src/zope/schema/tests/test__field.py::DictTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::DictTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::DictTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::DictTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::DictTests::test_validate_required", "src/zope/schema/tests/test__field.py::DictTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::test_suite", "src/zope/schema/tests/test_vocabulary.py::SimpleTermTests::test_bytes_value", "src/zope/schema/tests/test_vocabulary.py::SimpleTermTests::test_class_conforms_to_ITokenizedTerm", "src/zope/schema/tests/test_vocabulary.py::SimpleTermTests::test_ctor_defaults", "src/zope/schema/tests/test_vocabulary.py::SimpleTermTests::test_ctor_explicit", "src/zope/schema/tests/test_vocabulary.py::SimpleTermTests::test_instance_conforms_to_ITokenizedTerm", "src/zope/schema/tests/test_vocabulary.py::SimpleTermTests::test_unicode_non_ascii_value", "src/zope/schema/tests/test_vocabulary.py::SimpleVocabularyTests::test_class_conforms_to_IVocabularyTokenized", "src/zope/schema/tests/test_vocabulary.py::SimpleVocabularyTests::test_createTerm", "src/zope/schema/tests/test_vocabulary.py::SimpleVocabularyTests::test_ctor_additional_interfaces", "src/zope/schema/tests/test_vocabulary.py::SimpleVocabularyTests::test_fromItems", "src/zope/schema/tests/test_vocabulary.py::SimpleVocabularyTests::test_fromValues", "src/zope/schema/tests/test_vocabulary.py::SimpleVocabularyTests::test_getTermByToken_miss", "src/zope/schema/tests/test_vocabulary.py::SimpleVocabularyTests::test_getTerm_miss", "src/zope/schema/tests/test_vocabulary.py::SimpleVocabularyTests::test_instance_conforms_to_IVocabularyTokenized", "src/zope/schema/tests/test_vocabulary.py::SimpleVocabularyTests::test_nonunique_token_message", "src/zope/schema/tests/test_vocabulary.py::SimpleVocabularyTests::test_nonunique_token_messages", "src/zope/schema/tests/test_vocabulary.py::SimpleVocabularyTests::test_nonunique_tokens", "src/zope/schema/tests/test_vocabulary.py::SimpleVocabularyTests::test_nonunique_tokens_swallow", "src/zope/schema/tests/test_vocabulary.py::SimpleVocabularyTests::test_overriding_createTerm", "src/zope/schema/tests/test_vocabulary.py::TreeVocabularyTests::test_additional_interfaces", "src/zope/schema/tests/test_vocabulary.py::TreeVocabularyTests::test_contains", "src/zope/schema/tests/test_vocabulary.py::TreeVocabularyTests::test_get", "src/zope/schema/tests/test_vocabulary.py::TreeVocabularyTests::test_get_term", "src/zope/schema/tests/test_vocabulary.py::TreeVocabularyTests::test_implementation", "src/zope/schema/tests/test_vocabulary.py::TreeVocabularyTests::test_indexes", "src/zope/schema/tests/test_vocabulary.py::TreeVocabularyTests::test_len", "src/zope/schema/tests/test_vocabulary.py::TreeVocabularyTests::test_nonunique_token_message", "src/zope/schema/tests/test_vocabulary.py::TreeVocabularyTests::test_nonunique_value_message", "src/zope/schema/tests/test_vocabulary.py::TreeVocabularyTests::test_nonunique_values_and_tokens", "src/zope/schema/tests/test_vocabulary.py::TreeVocabularyTests::test_ordering", "src/zope/schema/tests/test_vocabulary.py::TreeVocabularyTests::test_recursive_methods", 
"src/zope/schema/tests/test_vocabulary.py::TreeVocabularyTests::test_termpath", "src/zope/schema/tests/test_vocabulary.py::TreeVocabularyTests::test_values_and_items", "src/zope/schema/tests/test_vocabulary.py::RegistryTests::test_getVocabularyRegistry", "src/zope/schema/tests/test_vocabulary.py::RegistryTests::test_setVocabularyRegistry" ]
[]
Zope Public License 2.1
3,007
[ "src/zope/schema/vocabulary.py", "CHANGES.rst" ]
[ "src/zope/schema/vocabulary.py", "CHANGES.rst" ]
LordGolias__sqf-47
a9a1de31e439742cc5391c6ca7e4cd7f2c2bdf15
2018-09-02 14:59:49
7e5aa26dd89b60e1f2ab3c96d6a31c850b0e5ef1
coveralls: [![Coverage Status](https://coveralls.io/builds/18793148/badge)](https://coveralls.io/builds/18793148) Coverage increased (+0.001%) to 98.92% when pulling **85dd0013e90f63127e2afd18cc4f3631b3c03a1a on gruppe-adler:scopefix** into **a9a1de31e439742cc5391c6ca7e4cd7f2c2bdf15 on LordGolias:master**.

LordGolias: Thanks a lot for this PR. I like the idea. I think that we can do better than this though: when spawn is called, `_this` has the type passed by its argument. Therefore, we know the type of `_this`. For example,

```
"foo" spawn {sleep _this}
```

should return a type error as `_this` is of type String instead of Number. What do you think? Do you see any reason to not have this altogether?

Fusselwurm: @LordGolias works, I guess :)
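A rough sketch of the check LordGolias describes, driven the same way as the project's test suite (`analyze(parse(code))`); the `sqf.parser`/`sqf.analyzer` import paths are assumptions, and since the merged tests exercise `hint` rather than `sleep`, the exact diagnostic for this particular snippet is not confirmed here.

```python
# Illustrative sketch only -- import paths are assumed, not confirmed by this row.
from sqf.parser import parse      # assumed location of parse()
from sqf.analyzer import analyze  # analyzer module patched in this PR

# Per the discussion: the String "foo" becomes `_this` inside the spawned
# block, so `sleep _this` should be reported as a type error once `_this`
# is typed from the spawn argument.
result = analyze(parse('"foo" spawn {sleep _this}'))
print(result.exceptions)  # expected to contain a type error after the fix
```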
diff --git a/sqf/analyzer.py b/sqf/analyzer.py
index 1c55885..b4ceea7 100644
--- a/sqf/analyzer.py
+++ b/sqf/analyzer.py
@@ -426,7 +426,7 @@ class Analyzer(BaseInterpreter):
             elif case_found.keyword == Keyword('catch'):
                 extra_scope = {'_exception': Anything()}
             elif case_found.keyword == Keyword('spawn'):
-                extra_scope = {'_thisScript': Script()}
+                extra_scope = {'_thisScript': Script(), '_this': values[0]}
             elif case_found.keyword == Keyword('do') and type(values[0]) == ForType:
                 extra_scope = {values[0].variable.value: Number()}
             for value, t_or_v in zip(values, case_found.types_or_values):
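A minimal usage sketch of the patched behavior, mirroring the two new tests in the test patch further down; the import paths are again assumptions rather than something stated in this row.

```python
# Mirrors the new tests (test_spawn_this_typing_*); import paths assumed.
from sqf.parser import parse
from sqf.analyzer import analyze

# After the fix, `_this` carries the type of the value passed to `spawn`
# (values[0] in the patched branch above).
ok = analyze(parse('"" spawn {hint _this}'))   # hint receives a String
bad = analyze(parse('[] spawn {hint _this}'))  # hint receives an Array

print(len(ok.exceptions))   # 0 expected, per test_spawn_this_typing_correct
print(len(bad.exceptions))  # 1 expected, per test_spawn_this_typing_error
```

The key point of the one-line change is that `values[0]`, the left-hand argument of `spawn`, now seeds the type of `_this` in the spawned block's scope.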
scope warning when using _this in spawned code block

`[2,13]:warning:Local variable "_this" is not from this scope (not private)`

when running sqflint over

```sqf
_this spawn {
    hint str _this;
};
```

it does *not* complain when the code block is *call*ed, though.
LordGolias/sqf
diff --git a/tests/test_analyzer.py b/tests/test_analyzer.py
index b12b769..91bc548 100644
--- a/tests/test_analyzer.py
+++ b/tests/test_analyzer.py
@@ -1052,11 +1052,21 @@ class SpecialContext(TestCase):
         analyzer = analyze(parse(code))
         self.assertEqual(analyzer.exceptions, [])
 
-    def test_spawn(self):
+    def test_spawn_thisScript(self):
         code = '[] spawn {x = _thisScript}'
         analyzer = analyze(parse(code))
         self.assertEqual(len(analyzer.exceptions), 0)
 
+    def test_spawn_this_typing_correct(self):
+        code = '"" spawn {hint _this}'
+        analyzer = analyze(parse(code))
+        self.assertEqual(len(analyzer.exceptions), 0)
+
+    def test_spawn_this_typing_error(self):
+        code = '[] spawn {hint _this}'
+        analyzer = analyze(parse(code))
+        self.assertEqual(len(analyzer.exceptions), 1)
+
     def test_spawn_local_vars(self):
         # spawn should not use local variables because they may become undefined (error 1). Likewise,
         # the local variable becomes unused (error 2).
{ "commit_name": "merge_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 0, "issue_text_score": 2, "test_score": 1 }, "num_modified_files": 1 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
-e git+https://github.com/LordGolias/sqf.git@a9a1de31e439742cc5391c6ca7e4cd7f2c2bdf15#egg=sqflint
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
name: sqf
channels:
  - defaults
  - https://repo.anaconda.com/pkgs/main
  - https://repo.anaconda.com/pkgs/r
  - conda-forge
dependencies:
  - _libgcc_mutex=0.1=main
  - _openmp_mutex=5.1=1_gnu
  - ca-certificates=2025.2.25=h06a4308_0
  - exceptiongroup=1.2.0=py39h06a4308_0
  - iniconfig=1.1.1=pyhd3eb1b0_0
  - ld_impl_linux-64=2.40=h12ee557_0
  - libffi=3.4.4=h6a678d5_1
  - libgcc-ng=11.2.0=h1234567_1
  - libgomp=11.2.0=h1234567_1
  - libstdcxx-ng=11.2.0=h1234567_1
  - ncurses=6.4=h6a678d5_0
  - openssl=3.0.16=h5eee18b_0
  - packaging=24.2=py39h06a4308_0
  - pip=25.0=py39h06a4308_0
  - pluggy=1.5.0=py39h06a4308_0
  - pytest=8.3.4=py39h06a4308_0
  - python=3.9.21=he870216_1
  - readline=8.2=h5eee18b_0
  - setuptools=75.8.0=py39h06a4308_0
  - sqlite=3.45.3=h5eee18b_0
  - tk=8.6.14=h39e8969_0
  - tomli=2.0.1=py39h06a4308_0
  - tzdata=2025a=h04d1e81_0
  - wheel=0.45.1=py39h06a4308_0
  - xz=5.6.4=h5eee18b_1
  - zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/sqf
[ "tests/test_analyzer.py::SpecialContext::test_spawn_this_typing_correct" ]
[]
[ "tests/test_analyzer.py::GeneralTestCase::test__this", "tests/test_analyzer.py::GeneralTestCase::test_assign_wrong", "tests/test_analyzer.py::GeneralTestCase::test_call", "tests/test_analyzer.py::GeneralTestCase::test_call_recursive", "tests/test_analyzer.py::GeneralTestCase::test_change_types", "tests/test_analyzer.py::GeneralTestCase::test_count_with_config_and_minus", "tests/test_analyzer.py::GeneralTestCase::test_custom_function", "tests/test_analyzer.py::GeneralTestCase::test_error_bla", "tests/test_analyzer.py::GeneralTestCase::test_error_double", "tests/test_analyzer.py::GeneralTestCase::test_error_in", "tests/test_analyzer.py::GeneralTestCase::test_error_message_binary", "tests/test_analyzer.py::GeneralTestCase::test_error_message_unary", "tests/test_analyzer.py::GeneralTestCase::test_evaluate", "tests/test_analyzer.py::GeneralTestCase::test_for_code", "tests/test_analyzer.py::GeneralTestCase::test_for_missing_do", "tests/test_analyzer.py::GeneralTestCase::test_for_specs", "tests/test_analyzer.py::GeneralTestCase::test_foreach", "tests/test_analyzer.py::GeneralTestCase::test_foreach_error", "tests/test_analyzer.py::GeneralTestCase::test_foreach_no_error", "tests/test_analyzer.py::GeneralTestCase::test_foreach_no_errors", "tests/test_analyzer.py::GeneralTestCase::test_foreach_variable", "tests/test_analyzer.py::GeneralTestCase::test_getConfig", "tests/test_analyzer.py::GeneralTestCase::test_get_variable_unknown_first_element", "tests/test_analyzer.py::GeneralTestCase::test_getset_variable_undefined", "tests/test_analyzer.py::GeneralTestCase::test_global", "tests/test_analyzer.py::GeneralTestCase::test_if", "tests/test_analyzer.py::GeneralTestCase::test_if_missing_then", "tests/test_analyzer.py::GeneralTestCase::test_if_then", "tests/test_analyzer.py::GeneralTestCase::test_if_then_else", "tests/test_analyzer.py::GeneralTestCase::test_if_then_specs", "tests/test_analyzer.py::GeneralTestCase::test_insensitive_variables", "tests/test_analyzer.py::GeneralTestCase::test_logical_with_nothing", "tests/test_analyzer.py::GeneralTestCase::test_lowercase", "tests/test_analyzer.py::GeneralTestCase::test_missing_op", "tests/test_analyzer.py::GeneralTestCase::test_missing_semi_colon", "tests/test_analyzer.py::GeneralTestCase::test_missing_while_bracket", "tests/test_analyzer.py::GeneralTestCase::test_multiple_returns_is_nothing", "tests/test_analyzer.py::GeneralTestCase::test_negation_priority", "tests/test_analyzer.py::GeneralTestCase::test_precedence_fail", "tests/test_analyzer.py::GeneralTestCase::test_precedence_nullary", "tests/test_analyzer.py::GeneralTestCase::test_precedence_various", "tests/test_analyzer.py::GeneralTestCase::test_priority", "tests/test_analyzer.py::GeneralTestCase::test_private_empty", "tests/test_analyzer.py::GeneralTestCase::test_private_eq1", "tests/test_analyzer.py::GeneralTestCase::test_private_global", "tests/test_analyzer.py::GeneralTestCase::test_private_many", "tests/test_analyzer.py::GeneralTestCase::test_private_no_errors", "tests/test_analyzer.py::GeneralTestCase::test_private_single", "tests/test_analyzer.py::GeneralTestCase::test_private_var_error", "tests/test_analyzer.py::GeneralTestCase::test_private_wrong", "tests/test_analyzer.py::GeneralTestCase::test_private_wrong_exp", "tests/test_analyzer.py::GeneralTestCase::test_recursive_assign", "tests/test_analyzer.py::GeneralTestCase::test_set_and_get_together", "tests/test_analyzer.py::GeneralTestCase::test_statement", "tests/test_analyzer.py::GeneralTestCase::test_throw", 
"tests/test_analyzer.py::GeneralTestCase::test_undefined_sum", "tests/test_analyzer.py::GeneralTestCase::test_warn_not_in_scope", "tests/test_analyzer.py::GeneralTestCase::test_while", "tests/test_analyzer.py::GeneralTestCase::test_while_no_errors", "tests/test_analyzer.py::GeneralTestCase::test_with_namespace", "tests/test_analyzer.py::GeneralTestCase::test_with_namespace_simple", "tests/test_analyzer.py::GeneralTestCase::test_wrong_if", "tests/test_analyzer.py::GeneralTestCase::test_wrong_semi_colon", "tests/test_analyzer.py::GeneralTestCase::test_wrong_sum", "tests/test_analyzer.py::PreprocessorDefine::test_define_array", "tests/test_analyzer.py::PreprocessorDefine::test_define_complex", "tests/test_analyzer.py::PreprocessorDefine::test_define_correct", "tests/test_analyzer.py::PreprocessorDefine::test_define_expression", "tests/test_analyzer.py::PreprocessorDefine::test_define_fnc", "tests/test_analyzer.py::PreprocessorDefine::test_define_no_error", "tests/test_analyzer.py::PreprocessorDefine::test_define_of_define", "tests/test_analyzer.py::PreprocessorDefine::test_define_replace_in_statement", "tests/test_analyzer.py::PreprocessorDefine::test_define_unused", "tests/test_analyzer.py::PreprocessorDefine::test_define_with_args_usage", "tests/test_analyzer.py::PreprocessorDefine::test_define_with_define", "tests/test_analyzer.py::PreprocessorDefine::test_defines", "tests/test_analyzer.py::PreprocessorDefine::test_defines_in_array", "tests/test_analyzer.py::PreprocessorDefine::test_defines_in_array2", "tests/test_analyzer.py::PreprocessorDefine::test_defines_in_unexecuted_code", "tests/test_analyzer.py::PreprocessorDefine::test_defines_underscored", "tests/test_analyzer.py::PreprocessorDefine::test_simple", "tests/test_analyzer.py::Preprocessor::test_assign_to_global", "tests/test_analyzer.py::Preprocessor::test_assign_to_global_after_space", "tests/test_analyzer.py::Preprocessor::test_double_ifdef", "tests/test_analyzer.py::Preprocessor::test_ifdef_endif", "tests/test_analyzer.py::Preprocessor::test_ifdef_endif_with_defines", "tests/test_analyzer.py::Preprocessor::test_ifdef_with_error_in_code", "tests/test_analyzer.py::Preprocessor::test_ifdef_with_error_pos_in", "tests/test_analyzer.py::Preprocessor::test_ifdef_with_error_pos_out", "tests/test_analyzer.py::Preprocessor::test_include", "tests/test_analyzer.py::Preprocessor::test_include_error", "tests/test_analyzer.py::Preprocessor::test_include_error_len", "tests/test_analyzer.py::Preprocessor::test_include_with_semi_colon", "tests/test_analyzer.py::Preprocessor::test_macros", "tests/test_analyzer.py::Preprocessor::test_nested", "tests/test_analyzer.py::Preprocessor::test_same_name", "tests/test_analyzer.py::Preprocessor::test_sequential", "tests/test_analyzer.py::Preprocessor::test_some_cases", "tests/test_analyzer.py::Preprocessor::test_undef_else", "tests/test_analyzer.py::Preprocessor::test_undef_else_2", "tests/test_analyzer.py::Preprocessor::test_undefined_define_with_space", "tests/test_analyzer.py::Preprocessor::test_upper_cased_keywords", "tests/test_analyzer.py::Arrays::test_array_from_function", "tests/test_analyzer.py::Arrays::test_basic", "tests/test_analyzer.py::Arrays::test_error_inside_array", "tests/test_analyzer.py::Arrays::test_no_space", "tests/test_analyzer.py::Arrays::test_position_of_array", "tests/test_analyzer.py::Arrays::test_strange", "tests/test_analyzer.py::Switch::test_case_by_variable", "tests/test_analyzer.py::Switch::test_case_not_code", "tests/test_analyzer.py::Switch::test_case_with_variable_code", 
"tests/test_analyzer.py::Switch::test_default", "tests/test_analyzer.py::Switch::test_default_error", "tests/test_analyzer.py::Switch::test_error_in_case", "tests/test_analyzer.py::Switch::test_incomplete_case", "tests/test_analyzer.py::Switch::test_missing_do", "tests/test_analyzer.py::Switch::test_not_statement", "tests/test_analyzer.py::Switch::test_switch_alone", "tests/test_analyzer.py::Switch::test_switch_statement_without_parenthesis", "tests/test_analyzer.py::NestedCode::test_array", "tests/test_analyzer.py::NestedCode::test_array_after_then", "tests/test_analyzer.py::NestedCode::test_call_within", "tests/test_analyzer.py::NestedCode::test_call_within_this", "tests/test_analyzer.py::NestedCode::test_code", "tests/test_analyzer.py::NestedCode::test_code_with_expression", "tests/test_analyzer.py::NestedCode::test_code_with_if", "tests/test_analyzer.py::NestedCode::test_code_with_private", "tests/test_analyzer.py::NestedCode::test_private", "tests/test_analyzer.py::NestedCode::test_private_in_exitwith", "tests/test_analyzer.py::Params::test_array_wrong", "tests/test_analyzer.py::Params::test_lhs_rhs_single_var", "tests/test_analyzer.py::Params::test_many", "tests/test_analyzer.py::Params::test_many_call", "tests/test_analyzer.py::Params::test_missing_arg", "tests/test_analyzer.py::Params::test_missing_default_arg", "tests/test_analyzer.py::Params::test_other", "tests/test_analyzer.py::Params::test_params_select", "tests/test_analyzer.py::Params::test_rhs_larger_lhs", "tests/test_analyzer.py::Params::test_single", "tests/test_analyzer.py::Params::test_single_array", "tests/test_analyzer.py::Params::test_undefined_array", "tests/test_analyzer.py::Params::test_with_default", "tests/test_analyzer.py::Params::test_with_default_unused", "tests/test_analyzer.py::Params::test_with_empty_string", "tests/test_analyzer.py::Params::test_wrong_argument", "tests/test_analyzer.py::Params::test_wrong_array_element", "tests/test_analyzer.py::SpecialContext::test_code_not_executed_in_loop", "tests/test_analyzer.py::SpecialContext::test_double_code", "tests/test_analyzer.py::SpecialContext::test_for_scope", "tests/test_analyzer.py::SpecialContext::test_for_scope_new", "tests/test_analyzer.py::SpecialContext::test_foreach", "tests/test_analyzer.py::SpecialContext::test_insensitive__foreachindex", "tests/test_analyzer.py::SpecialContext::test_issue5", "tests/test_analyzer.py::SpecialContext::test_select_count_apply", "tests/test_analyzer.py::SpecialContext::test_spawn_local_vars", "tests/test_analyzer.py::SpecialContext::test_spawn_thisScript", "tests/test_analyzer.py::SpecialContext::test_spawn_this_typing_error", "tests/test_analyzer.py::SpecialContext::test_try_catch", "tests/test_analyzer.py::UndefinedValues::test_anything", "tests/test_analyzer.py::UndefinedValues::test_anything_for_undecided", "tests/test_analyzer.py::UndefinedValues::test_array", "tests/test_analyzer.py::UndefinedValues::test_array2", "tests/test_analyzer.py::UndefinedValues::test_assign_if", "tests/test_analyzer.py::UndefinedValues::test_boolean", "tests/test_analyzer.py::UndefinedValues::test_code_in_namespace", "tests/test_analyzer.py::UndefinedValues::test_delayed_execution", "tests/test_analyzer.py::UndefinedValues::test_for", "tests/test_analyzer.py::UndefinedValues::test_for1", "tests/test_analyzer.py::UndefinedValues::test_forspecs", "tests/test_analyzer.py::UndefinedValues::test_if", "tests/test_analyzer.py::UndefinedValues::test_if1", "tests/test_analyzer.py::UndefinedValues::test_if_else_private", 
"tests/test_analyzer.py::UndefinedValues::test_if_else_with_global", "tests/test_analyzer.py::UndefinedValues::test_if_then", "tests/test_analyzer.py::UndefinedValues::test_if_then_else", "tests/test_analyzer.py::UndefinedValues::test_if_then_private", "tests/test_analyzer.py::UndefinedValues::test_if_with_global", "tests/test_analyzer.py::UndefinedValues::test_if_with_private_with_scoping", "tests/test_analyzer.py::UndefinedValues::test_many_scopes", "tests/test_analyzer.py::UndefinedValues::test_many_scopes2", "tests/test_analyzer.py::UndefinedValues::test_nothing_is_not_anything", "tests/test_analyzer.py::UndefinedValues::test_number", "tests/test_analyzer.py::UndefinedValues::test_private_is_nothing", "tests/test_analyzer.py::UndefinedValues::test_private_undefined", "tests/test_analyzer.py::UndefinedValues::test_string", "tests/test_analyzer.py::UndefinedValues::test_two_scopes", "tests/test_analyzer.py::UndefinedValues::test_unexecuted_code_in_namespace", "tests/test_analyzer.py::UndefinedValues::test_while", "tests/test_analyzer.py::SpecialComment::test_string1", "tests/test_analyzer.py::SpecialComment::test_string2", "tests/test_analyzer.py::SpecialComment::test_string2_fail", "tests/test_analyzer.py::SpecialComment::test_var_type", "tests/test_analyzer.py::SpecialComment::test_with_space", "tests/test_analyzer.py::UnusedVariables::test_nested", "tests/test_analyzer.py::UnusedVariables::test_simple", "tests/test_analyzer.py::UnusedVariables::test_with_globals", "tests/test_analyzer.py::StringAsCodeFunctions::test_configClass", "tests/test_analyzer.py::StringAsCodeFunctions::test_isNil", "tests/test_analyzer.py::StringAsCodeFunctions::test_isNil_error", "tests/test_analyzer.py::StringAsCodeFunctions::test_isNil_function", "tests/test_analyzer.py::StringAsCodeFunctions::test_isNil_undefined" ]
[]
BSD 3-Clause "New" or "Revised" License
3,009
[ "sqf/analyzer.py" ]
[ "sqf/analyzer.py" ]
pybel__pybel-338
6b0eb5dcb19400f3a64ac4830747bfe8dcbe8141
2018-09-02 15:00:08
6b0eb5dcb19400f3a64ac4830747bfe8dcbe8141
codecov[bot]: # [Codecov](https://codecov.io/gh/pybel/pybel/pull/338?src=pr&el=h1) Report > Merging [#338](https://codecov.io/gh/pybel/pybel/pull/338?src=pr&el=desc) into [develop](https://codecov.io/gh/pybel/pybel/commit/6b0eb5dcb19400f3a64ac4830747bfe8dcbe8141?src=pr&el=desc) will **increase** coverage by `0.43%`. > The diff coverage is `73.45%`. [![Impacted file tree graph](https://codecov.io/gh/pybel/pybel/pull/338/graphs/tree.svg?width=650&token=J7joRTRygG&height=150&src=pr)](https://codecov.io/gh/pybel/pybel/pull/338?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## develop #338 +/- ## ========================================== + Coverage 85.66% 86.1% +0.43% ========================================== Files 132 139 +7 Lines 6209 6405 +196 Branches 902 925 +23 ========================================== + Hits 5319 5515 +196 + Misses 690 687 -3 - Partials 200 203 +3 ``` | [Impacted Files](https://codecov.io/gh/pybel/pybel/pull/338?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [src/pybel/dsl/namespaces.py](https://codecov.io/gh/pybel/pybel/pull/338/diff?src=pr&el=tree#diff-c3JjL3B5YmVsL2RzbC9uYW1lc3BhY2VzLnB5) | `100% <ø> (ø)` | :arrow_up: | | [src/pybel/tokens.py](https://codecov.io/gh/pybel/pybel/pull/338/diff?src=pr&el=tree#diff-c3JjL3B5YmVsL3Rva2Vucy5weQ==) | `86.66% <ø> (ø)` | :arrow_up: | | [src/pybel/struct/filters/edge\_filters.py](https://codecov.io/gh/pybel/pybel/pull/338/diff?src=pr&el=tree#diff-c3JjL3B5YmVsL3N0cnVjdC9maWx0ZXJzL2VkZ2VfZmlsdGVycy5weQ==) | `100% <ø> (ø)` | :arrow_up: | | [src/pybel/struct/utils.py](https://codecov.io/gh/pybel/pybel/pull/338/diff?src=pr&el=tree#diff-c3JjL3B5YmVsL3N0cnVjdC91dGlscy5weQ==) | `59.09% <0%> (+32%)` | :arrow_up: | | [src/pybel/manager/query\_manager.py](https://codecov.io/gh/pybel/pybel/pull/338/diff?src=pr&el=tree#diff-c3JjL3B5YmVsL21hbmFnZXIvcXVlcnlfbWFuYWdlci5weQ==) | `68.85% <0%> (ø)` | :arrow_up: | | [src/pybel/struct/query/constants.py](https://codecov.io/gh/pybel/pybel/pull/338/diff?src=pr&el=tree#diff-c3JjL3B5YmVsL3N0cnVjdC9xdWVyeS9jb25zdGFudHMucHk=) | `100% <100%> (ø)` | | | [src/pybel/struct/mutation/induction/annotations.py](https://codecov.io/gh/pybel/pybel/pull/338/diff?src=pr&el=tree#diff-c3JjL3B5YmVsL3N0cnVjdC9tdXRhdGlvbi9pbmR1Y3Rpb24vYW5ub3RhdGlvbnMucHk=) | `100% <100%> (ø)` | :arrow_up: | | [src/pybel/struct/pipeline/pipeline.py](https://codecov.io/gh/pybel/pybel/pull/338/diff?src=pr&el=tree#diff-c3JjL3B5YmVsL3N0cnVjdC9waXBlbGluZS9waXBlbGluZS5weQ==) | `87.37% <100%> (+9.28%)` | :arrow_up: | | [src/pybel/struct/query/exc.py](https://codecov.io/gh/pybel/pybel/pull/338/diff?src=pr&el=tree#diff-c3JjL3B5YmVsL3N0cnVjdC9xdWVyeS9leGMucHk=) | `100% <100%> (ø)` | | | [src/pybel/struct/mutation/induction/paths.py](https://codecov.io/gh/pybel/pybel/pull/338/diff?src=pr&el=tree#diff-c3JjL3B5YmVsL3N0cnVjdC9tdXRhdGlvbi9pbmR1Y3Rpb24vcGF0aHMucHk=) | `66.66% <100%> (-0.58%)` | :arrow_down: | | ... and [21 more](https://codecov.io/gh/pybel/pybel/pull/338/diff?src=pr&el=tree-more) | | ------ [Continue to review full report at Codecov](https://codecov.io/gh/pybel/pybel/pull/338?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/pybel/pybel/pull/338?src=pr&el=footer). Last update [6b0eb5d...a54b926](https://codecov.io/gh/pybel/pybel/pull/338?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
diff --git a/src/pybel/canonicalize.py b/src/pybel/canonicalize.py index 1cccbea3..d207a23c 100644 --- a/src/pybel/canonicalize.py +++ b/src/pybel/canonicalize.py @@ -286,10 +286,10 @@ def _to_bel_lines_footer(graph): yield 'SET SupportingText = "{}"'.format(PYBEL_AUTOEVIDENCE) for u, v, data in unqualified_edges_to_serialize: - yield '{} {} {}'.format(graph.node_to_bel(u), data[RELATION], graph.node_to_bel(v)) + yield '{} {} {}'.format(u.as_bel(), data[RELATION], v.as_bel()) for node in isolated_nodes_to_serialize: - yield graph.node_to_bel(node) + yield node.as_bel() yield 'UNSET SupportingText' yield 'UNSET Citation' diff --git a/src/pybel/dsl/namespaces.py b/src/pybel/dsl/namespaces.py index c4d0cf57..3698eadc 100644 --- a/src/pybel/dsl/namespaces.py +++ b/src/pybel/dsl/namespaces.py @@ -11,10 +11,16 @@ __all__ = [ def chebi(name=None, identifier=None): - """Build a ChEBI abundance node.""" + """Build a ChEBI abundance node. + + :rtype: Abundance + """ return Abundance(namespace='CHEBI', name=name, identifier=identifier) def hgnc(name=None, identifier=None): - """Build an HGNC protein node.""" + """Build an HGNC protein node. + + :rtype: Protein + """ return Protein(namespace='HGNC', name=name, identifier=identifier) diff --git a/src/pybel/manager/models.py b/src/pybel/manager/models.py index c49ef096..6f188542 100644 --- a/src/pybel/manager/models.py +++ b/src/pybel/manager/models.py @@ -270,7 +270,7 @@ class Network(Base): id = Column(Integer, primary_key=True) name = Column(String(255), nullable=False, index=True, doc='Name of the given Network (from the BEL file)') - version = Column(String(16), nullable=False, doc='Release version of the given Network (from the BEL file)') + version = Column(String(255), nullable=False, doc='Release version of the given Network (from the BEL file)') authors = Column(Text, nullable=True, doc='Authors of the underlying BEL file') contact = Column(String(255), nullable=True, doc='Contact email from the underlying BEL file') @@ -608,10 +608,11 @@ class Author(Base): id = Column(Integer, primary_key=True) name = Column(String(255), nullable=False, unique=True, index=True) - sha512 = Column(String(255), nullable=False, index=True, unique=True, ) + sha512 = Column(String(255), nullable=False, index=True, unique=True) @classmethod def from_name(cls, name): + """Create an author by name, automatically populating the hash.""" return Author(name=name, sha512=cls.hash_name(name)) @staticmethod @@ -633,9 +634,20 @@ class Author(Base): @classmethod def has_name(cls, name): - """Build a filter for if an author has a name.""" + """Build a filter for if an author has a name. 
+ + :type name: str + """ return cls.sha512 == cls.hash_name(name) + @classmethod + def has_name_in(cls, names): + """Build a filter if the author has any of the given names""" + return cls.sha512.in_({ + cls.hash_name(name) + for name in names + }) + def __str__(self): return self.name diff --git a/src/pybel/manager/query_manager.py b/src/pybel/manager/query_manager.py index 50730fe8..dc673f1c 100644 --- a/src/pybel/manager/query_manager.py +++ b/src/pybel/manager/query_manager.py @@ -211,7 +211,7 @@ class QueryManager(LookupManager): if isinstance(author, string_types): query = query.filter(Author.name.like(author)) elif isinstance(author, Iterable): - query = query.filter(Author.name.in_(set(author))) + query = query.filter(Author.has_name_in(set(author))) else: raise TypeError diff --git a/src/pybel/struct/filters/edge_filters.py b/src/pybel/struct/filters/edge_filters.py index 1c58da6b..f34aff16 100644 --- a/src/pybel/struct/filters/edge_filters.py +++ b/src/pybel/struct/filters/edge_filters.py @@ -27,8 +27,8 @@ def invert_edge_predicate(edge_predicate): """Build an edge predicate that is the inverse of the given edge predicate. :param edge_predicate: An edge predicate - :type edge_predicate: (pybel.BELGraph, tuple, tuple, int) -> bool - :rtype: (pybel.BELGraph, tuple, tuple, int) -> bool + :type edge_predicate: (pybel.BELGraph, BaseEntity, BaseEntity, str) -> bool + :rtype: (pybel.BELGraph, BaseEntity, BaseEntity, str) -> bool """ def _inverse_filter(graph, u, v, k): @@ -41,9 +41,9 @@ def and_edge_predicates(edge_predicates=None): """Concatenate multiple edge predicates to a new predicate that requires all predicates to be met. :param edge_predicates: a list of predicates (graph, node, node, key, data) -> bool - :type edge_predicates: Optional[(pybel.BELGraph, tuple, tuple, int) -> bool or iter[(pybel.BELGraph, tuple, tuple, int) -> bool]] + :type edge_predicates: Optional[(pybel.BELGraph, BaseEntity, BaseEntity, str) -> bool or iter[(pybel.BELGraph, BaseEntity, BaseEntity, str) -> bool]] :return: A combine filter - :rtype: (pybel.BELGraph, tuple, tuple, int) -> bool + :rtype: (pybel.BELGraph, BaseEntity, BaseEntity, str) -> bool """ # If no filters are given, then return the trivially permissive filter @@ -64,9 +64,9 @@ def and_edge_predicates(edge_predicates=None): """Pass only for an edge that pass all enclosed predicates. 
:param BELGraph graph: A BEL Graph - :param tuple u: A BEL node - :param tuple v: A BEL node - :param int k: The edge key between the given nodes + :param BaseEntity u: A BEL node + :param BaseEntity v: A BEL node + :param str k: The edge key between the given nodes :return: If the edge passes all enclosed predicates :rtype: bool """ @@ -83,9 +83,9 @@ def filter_edges(graph, edge_predicates=None): :param BELGraph graph: A BEL graph :param edge_predicates: A predicate or list of predicates - :type edge_predicates: None or ((pybel.BELGraph, tuple, tuple, int) -> bool) or iter[(pybel.BELGraph, tuple, tuple, int) -> bool] + :type edge_predicates: None or ((pybel.BELGraph, BaseEntity, BaseEntity, str) -> bool) or iter[(pybel.BELGraph, BaseEntity, BaseEntity, str) -> bool] :return: An iterable of edges that pass all predicates - :rtype: iter[tuple,tuple,int] + :rtype: iter[BaseEntity, BaseEntity, str] """ # If no predicates are given, return the standard edge iterator @@ -104,7 +104,7 @@ def count_passed_edge_filter(graph, edge_predicates=None): :param pybel.BELGraph graph: A BEL graph :param edge_predicates: A predicate or list of predicates - :type edge_predicates: Optional[(pybel.BELGraph, tuple, tuple, int) -> bool or iter[(pybel.BELGraph, tuple, tuple, int) -> bool]] + :type edge_predicates: Optional[(pybel.BELGraph, BaseEntity, BaseEntity, str) -> bool or iter[(pybel.BELGraph, BaseEntity, BaseEntity, str) -> bool]] :return: The number of edges passing a given set of predicates :rtype: int """ diff --git a/src/pybel/struct/filters/edge_predicates.py b/src/pybel/struct/filters/edge_predicates.py index 89871232..9976fff4 100644 --- a/src/pybel/struct/filters/edge_predicates.py +++ b/src/pybel/struct/filters/edge_predicates.py @@ -11,6 +11,7 @@ from ...constants import ( CITATION_TYPE_PUBMED, DEGRADATION, DIRECT_CAUSAL_RELATIONS, EVIDENCE, OBJECT, POLAR_RELATIONS, RELATION, SUBJECT, TRANSLOCATION, ) +from ...dsl import BiologicalProcess, Pathology __all__ = [ 'edge_predicate', @@ -26,6 +27,7 @@ __all__ = [ 'edge_has_degradation', 'edge_has_translocation', 'edge_has_annotation', + 'has_pathology_causal', ] @@ -203,3 +205,20 @@ def edge_has_annotation(data, key): return return annotations.get(key) + + +def has_pathology_causal(graph, u, v, k): + """Check if the subject is a pathology and has a causal relationship with a non bioprocess/pathology. + + :param pybel.BELGraph graph: A BEL Graph + :param BaseEntity u: A BEL node + :param BaseEntity v: A BEL node + :param str k: The edge key between the given nodes + :return: If the subject of this edge is a pathology and it participates in a causal reaction. 
+ :rtype: bool + """ + return ( + isinstance(u, Pathology) and + is_causal_relation(graph, u, v, k) and + not isinstance(v, (Pathology, BiologicalProcess)) + ) diff --git a/src/pybel/struct/graph.py b/src/pybel/struct/graph.py index 2c3786bf..8f70cb60 100644 --- a/src/pybel/struct/graph.py +++ b/src/pybel/struct/graph.py @@ -16,9 +16,9 @@ from ..constants import ( ANNOTATIONS, ASSOCIATION, CITATION, CITATION_REFERENCE, CITATION_TYPE, CITATION_TYPE_PUBMED, DECREASES, DESCRIPTION, DIRECTLY_DECREASES, DIRECTLY_INCREASES, EQUIVALENT_TO, EVIDENCE, GRAPH_ANNOTATION_LIST, GRAPH_ANNOTATION_PATTERN, GRAPH_ANNOTATION_URL, GRAPH_METADATA, GRAPH_NAMESPACE_PATTERN, GRAPH_NAMESPACE_URL, GRAPH_PYBEL_VERSION, - GRAPH_UNCACHED_NAMESPACES, HAS_COMPONENT, HAS_MEMBER, HAS_PRODUCT, HAS_REACTANT, HAS_VARIANT, IDENTIFIER, - INCREASES, IS_A, MEMBERS, METADATA_AUTHORS, METADATA_CONTACT, METADATA_COPYRIGHT, METADATA_DESCRIPTION, - METADATA_DISCLAIMER, METADATA_LICENSES, METADATA_NAME, METADATA_VERSION, NAME, NAMESPACE, OBJECT, + GRAPH_UNCACHED_NAMESPACES, HAS_COMPONENT, HAS_MEMBER, HAS_PRODUCT, HAS_REACTANT, HAS_VARIANT, INCREASES, IS_A, + MEMBERS, METADATA_AUTHORS, METADATA_CONTACT, METADATA_COPYRIGHT, METADATA_DESCRIPTION, + METADATA_DISCLAIMER, METADATA_LICENSES, METADATA_NAME, METADATA_VERSION, NAMESPACE, OBJECT, ORTHOLOGOUS, PART_OF, PRODUCTS, REACTANTS, RELATION, SUBJECT, TRANSCRIBED_TO, TRANSLATED_TO, VARIANTS, ) from ..dsl import BaseEntity, activity @@ -362,11 +362,6 @@ class BELGraph(nx.MultiDiGraph): :type attr: dict :return: str """ - if not isinstance(u, BaseEntity): - raise TypeError('subject is not BaseEntity: {}'.format(u)) - if not isinstance(v, BaseEntity): - raise TypeError('object is not BaseEntity: {}'.format(v)) - self.add_node_from_data(u) self.add_node_from_data(v) @@ -587,17 +582,6 @@ class BELGraph(nx.MultiDiGraph): annotations=annotations, subject_modifier=subject_modifier, object_modifier=object_modifier, **attr) - def has_node(self, n): - """Check if the graph contains the given node tuple or BaseEntity. - - :param n: A node - :rtype: bool - """ - if not isinstance(n, BaseEntity): - raise TypeError('Not a base entity: {}'.format(n)) - - return super(BELGraph, self).has_node(n) - def copy(self, as_view=False): # TODO delete this so it uses base implementation """Copy this graph. 
@@ -626,8 +610,7 @@ class BELGraph(nx.MultiDiGraph): :param BaseEntity node: A PyBEL node data dictionary :rtype: BaseEntity """ - if not isinstance(node, BaseEntity): - raise TypeError('not BaseEntity: {}'.format(node)) + assert isinstance(node, BaseEntity) if node in self: return node @@ -729,15 +712,14 @@ class BELGraph(nx.MultiDiGraph): def _has_edge_attr(self, u, v, key, attr): """ - - :type u: BaseEntity or tuple - :type v: BaseEntity or tuple + :type u: BaseEntity + :type v: BaseEntity :type key: str :type attr: str :rtype: bool """ - if not isinstance(u, BaseEntity): - raise TypeError + assert isinstance(u, BaseEntity) + assert isinstance(v, BaseEntity) return attr in self[u][v][key] @@ -780,22 +762,17 @@ class BELGraph(nx.MultiDiGraph): return self._get_edge_attr(u, v, key, ANNOTATIONS) def _get_node_attr(self, node, attr): - if not isinstance(node, BaseEntity): - raise TypeError - + assert isinstance(node, BaseEntity) return self.nodes[node].get(attr) def _has_node_attr(self, node, attr): - if not isinstance(node, BaseEntity): - raise TypeError + assert isinstance(node, BaseEntity) return attr in self.nodes[node] def _set_node_attr(self, node, attr, value): - if not isinstance(node, BaseEntity): - raise TypeError + assert isinstance(node, BaseEntity) self.nodes[node][attr] = value - def get_node_description(self, node): """Get the description for a given node. @@ -926,23 +903,18 @@ class BELGraph(nx.MultiDiGraph): :type n: tuple or BaseEntity :rtype: str """ - if not isinstance(n, BaseEntity): - raise TypeError - return n.as_bel() def edge_to_bel(self, u, v, data, sep=None): """Serialize a pair of nodes and related edge data as a BEL relation. - :type u: BaseEntity or tuple - :type v: BaseEntity or tuple + :type u: BaseEntity + :type v: BaseEntity :param dict data: A PyBEL edge data dictionary :param Optional[str] sep: The separator between the source, relation, and target. Defaults to ' ' :rtype: str """ - source = u if isinstance(u, BaseEntity) else self.node[u] - target = v if isinstance(v, BaseEntity) else self.node[v] - return edge_to_bel(source, target, data=data, sep=sep) + return edge_to_bel(u, v, data=data, sep=sep) def _has_no_equivalent_edge(self, u, v): return not any( diff --git a/src/pybel/struct/mutation/induction/annotations.py b/src/pybel/struct/mutation/induction/annotations.py index a7679a55..3c4d8706 100644 --- a/src/pybel/struct/mutation/induction/annotations.py +++ b/src/pybel/struct/mutation/induction/annotations.py @@ -39,17 +39,17 @@ def get_subgraph_by_annotations(graph, annotations, or_=None): @transformation -def get_subgraph_by_annotation_value(graph, annotation, value): +def get_subgraph_by_annotation_value(graph, annotation, values): """Induce a sub-graph over all edges whose annotations match the given key and value. 
:param pybel.BELGraph graph: A BEL graph :param str annotation: The annotation to group by - :param value: The value(s) for the annotation - :type value: str or iter[str] + :param values: The value(s) for the annotation + :type values: str or iter[str] :return: A subgraph of the original BEL graph :rtype: pybel.BELGraph """ - if isinstance(value, string_types): - value = set(value) + if isinstance(values, string_types): + values = {values} - return get_subgraph_by_annotations(graph, {annotation: value}) + return get_subgraph_by_annotations(graph, {annotation: values}) diff --git a/src/pybel/struct/mutation/induction/neighborhood.py b/src/pybel/struct/mutation/induction/neighborhood.py index 08969c6a..6c6c6a70 100644 --- a/src/pybel/struct/mutation/induction/neighborhood.py +++ b/src/pybel/struct/mutation/induction/neighborhood.py @@ -27,18 +27,10 @@ def get_subgraph_by_neighborhood(graph, nodes): return rv = graph.fresh_copy() - - rv.add_edges_from( - ( - (u, v, k, d) - if k < 0 else - (u, v, d) - ) - for u, v, k, d in itt.chain( - graph.in_edges_iter(nodes, keys=True, data=True), - graph.out_edges_iter(nodes, keys=True, data=True) - ) - ) + rv.add_edges_from(itt.chain( + graph.in_edges(nodes, keys=True, data=True), + graph.out_edges(nodes, keys=True, data=True), + )) update_node_helper(graph, rv) update_metadata(graph, rv) diff --git a/src/pybel/struct/mutation/induction/paths.py b/src/pybel/struct/mutation/induction/paths.py index 12bca138..74f6fb76 100644 --- a/src/pybel/struct/mutation/induction/paths.py +++ b/src/pybel/struct/mutation/induction/paths.py @@ -128,9 +128,4 @@ def get_random_path(graph): if tries == sentinel_tries: return [source] - shortest_path = nx.shortest_path(wg, source=source, target=target) - - return [ - graph.nodes[node] - for node in shortest_path - ] + return nx.shortest_path(wg, source=source, target=target) diff --git a/src/pybel/struct/pipeline/pipeline.py b/src/pybel/struct/pipeline/pipeline.py index 84621c84..0bac2af6 100644 --- a/src/pybel/struct/pipeline/pipeline.py +++ b/src/pybel/struct/pipeline/pipeline.py @@ -11,6 +11,7 @@ from .decorators import get_transformation, in_place_map, mapped, universe_map from .exc import MetaValueError, MissingPipelineFunctionError, MissingUniverseError from ..operations import node_intersection, union + __all__ = [ 'Pipeline', ] @@ -30,7 +31,7 @@ def _get_protocol_tuple(data): return data['function'], data.get('args', []), data.get('kwargs', {}) -class Pipeline(object): +class Pipeline: """Builds and runs analytical pipelines on BEL graphs. Example usage: @@ -45,13 +46,12 @@ class Pipeline(object): >>> result = example.run(graph) """ - def __init__(self, protocol=None, universe=None): + def __init__(self, protocol=None): """ :param iter[dict] protocol: An iterable of dictionaries describing how to transform a network - :param pybel.BELGraph universe: The entire set of known knowledge to draw from """ - self.universe = universe - self.protocol = [] if protocol is None else protocol + self.universe = None + self.protocol = protocol or [] def __len__(self): return len(self.protocol) @@ -200,19 +200,7 @@ class Pipeline(object): return result - def _can_be_run_in_place(self): - """Checks if this pipeline can be run in place. - - Requirements: - - - All functions have the "in place" tag - - No splitting, unioning, or other exotic things happen. 
- - :rtype: bool - """ - raise NotImplementedError # TODO implement - - def run(self, graph, universe=None, in_place=True): + def run(self, graph, universe=None): """Run the contained protocol on a seed graph. :param pybel.BELGraph graph: The seed BEL graph @@ -222,13 +210,10 @@ class Pipeline(object): :return: The new graph is returned if not applied in-place :rtype: pybel.BELGraph """ - self.universe = graph.copy() if universe is None else universe + self.universe = universe or graph.copy() + return self._run_helper(graph.copy(), self.protocol) - result = graph if in_place else graph.copy() - result = self._run_helper(result, self.protocol) - return result - - def __call__(self, graph, universe=None, in_place=True): + def __call__(self, graph, universe=None): """Call :meth:`Pipeline.run`. :param pybel.BELGraph graph: The seed BEL graph @@ -247,7 +232,7 @@ class Pipeline(object): >>> graph = BELGraph() ... >>> new_graph = pipe(graph) """ - return self.run(graph=graph, universe=universe, in_place=in_place) + return self.run(graph=graph, universe=universe) def _wrap_universe(self, func): """Take a function that needs a universe graph as the first argument and returns a wrapped one.""" @@ -275,19 +260,26 @@ class Pipeline(object): return wrapper + def to_json(self): + """Return this pipeline as a JSON list. + + :rtype: list + """ + return self.protocol + def dumps(self, **kwargs): - """Give this pipeline as a JSON string. + """Dump this pipeline as a JSON string. :rtype: str """ - return json.dumps(self.protocol, **kwargs) + return json.dumps(self.to_json(), **kwargs) - def dump(self, file): + def dump(self, file, **kwargs): """Dump this protocol to a file in JSON. :param file: A file or file-like to pass to :func:`json.dump` """ - return json.dump(self.protocol, file) + return json.dump(self.to_json(), file, **kwargs) @staticmethod def load(file): @@ -298,7 +290,7 @@ class Pipeline(object): :rtype: Pipeline :raises MissingPipelineFunctionError: If any functions are not registered """ - return Pipeline(protocol=json.load(file)) + return Pipeline(json.load(file)) @staticmethod def loads(s): @@ -309,7 +301,7 @@ class Pipeline(object): :rtype: Pipeline :raises MissingPipelineFunctionError: If any functions are not registered """ - return Pipeline(protocol=json.loads(s)) + return Pipeline(json.loads(s)) def __str__(self): return json.dumps(self.protocol, indent=2) diff --git a/src/pybel/struct/query/__init__.py b/src/pybel/struct/query/__init__.py new file mode 100644 index 00000000..9bd40297 --- /dev/null +++ b/src/pybel/struct/query/__init__.py @@ -0,0 +1,8 @@ +# -*- coding: utf-8 -*- + +"""Query builder for PyBEL.""" + +from .exc import * +from .query import Query +from .seeding import SEED_DATA, SEED_METHOD, Seeding +from .selection import get_subgraph diff --git a/src/pybel/struct/query/constants.py b/src/pybel/struct/query/constants.py new file mode 100644 index 00000000..7c624097 --- /dev/null +++ b/src/pybel/struct/query/constants.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- + +"""Constants for the query builder.""" + +#: Induce a subgraph over the given nodes +SEED_TYPE_INDUCTION = 'induction' +#: Induce a subgraph over the given nodes and expand to their first neighbors +SEED_TYPE_NEIGHBORS = 'neighbors' +#: Induce a subgraph over the given nodes and expand to their second neighbors +SEED_TYPE_DOUBLE_NEIGHBORS = 'dneighbors' +#: Induce a subgraph over the nodes in all shortest paths between the given nodes +SEED_TYPE_PATHS = 'shortest_paths' +#: Induce a subgraph over the edges 
provided by the given authors and their neighboring nodes +SEED_TYPE_AUTHOR = 'authors' +#: Induce a subgraph over the edges provided by the given citations and their neighboring nodes +SEED_TYPE_PUBMED = 'pubmed' +#: Generate an upstream candidate mechanism +SEED_TYPE_UPSTREAM = 'upstream' +#: Generate a downstream candidate mechanism +SEED_TYPE_DOWNSTREAM = 'downstream' +#: Induce a subgraph over the edges matching the given annotations +SEED_TYPE_ANNOTATION = 'annotation' +#: Induce a subgraph over a random set of (hopefully) connected edges +SEED_TYPE_SAMPLE = 'sample' + +#: A set of the allowed seed type strings, as defined above +SEED_TYPES = { + SEED_TYPE_INDUCTION, + SEED_TYPE_NEIGHBORS, + SEED_TYPE_DOUBLE_NEIGHBORS, + SEED_TYPE_PATHS, + SEED_TYPE_UPSTREAM, + SEED_TYPE_DOWNSTREAM, + SEED_TYPE_PUBMED, + SEED_TYPE_AUTHOR, + SEED_TYPE_ANNOTATION, + SEED_TYPE_SAMPLE +} + +#: Seed types that don't take node lists as their arguments +NONNODE_SEED_TYPES = { + SEED_TYPE_ANNOTATION, + SEED_TYPE_AUTHOR, + SEED_TYPE_PUBMED, + SEED_TYPE_SAMPLE, +} diff --git a/src/pybel/struct/query/exc.py b/src/pybel/struct/query/exc.py new file mode 100644 index 00000000..a3d9d75c --- /dev/null +++ b/src/pybel/struct/query/exc.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- + +"""Exceptions for the query builder.""" + +__all__ = [ + 'QueryMissingNetworksError', + 'NodeDegreeIterError', +] + + +class QueryMissingNetworksError(KeyError): + """Raised if a query is created from json but doesn't have a listing of network identifiers.""" + + +class NodeDegreeIterError(ValueError): + """Raised when failing to iterate over node degrees.""" diff --git a/src/pybel/struct/query/query.py b/src/pybel/struct/query/query.py new file mode 100644 index 00000000..33731523 --- /dev/null +++ b/src/pybel/struct/query/query.py @@ -0,0 +1,214 @@ +# -*- coding: utf-8 -*- + +"""Query builder.""" + +import json +import logging +from collections import Iterable + +from .exc import QueryMissingNetworksError +from .seeding import Seeding +from ...manager.models import Node +from ...struct.pipeline import Pipeline + +__all__ = [ + 'Query', +] + +log = logging.getLogger(__name__) + + +class Query: + """Represents a query over a network store.""" + + def __init__(self, network_ids=None, seeding=None, pipeline=None): + """Build a query. + + :param iter[int] network_ids: Database network identifiers identifiers + :type network_ids: None or int or iter[int] + :type seeding: Optional[Seeding] + :type pipeline: Optional[Pipeline] + """ + if not network_ids: + self.network_ids = [] + elif isinstance(network_ids, int): + self.network_ids = [network_ids] + elif isinstance(network_ids, Iterable): + network_ids = list(network_ids) + + for network_id in network_ids: + if not isinstance(network_id, int): + raise TypeError(network_ids) + + self.network_ids = network_ids + else: + raise TypeError(network_ids) + + if seeding is not None and not isinstance(seeding, Seeding): + raise TypeError('Not a Seeding: {}'.format(seeding)) + self.seeding = seeding or Seeding() + + if pipeline is not None and not isinstance(pipeline, Pipeline): + raise TypeError('Not a pipeline: {}'.format(pipeline)) + self.pipeline = pipeline or Pipeline() + + def append_network(self, network_id): + """Add a network to this query. + + :param int network_id: The database identifier of the network + :returns: self for fluid API + :rtype: Query + """ + self.network_ids.append(network_id) + return self + + def append_seeding_induction(self, nodes): + """Add a seed induction method. 
+ + :param list[tuple or Node or BaseEntity] nodes: A list of PyBEL node tuples + :returns: seeding container for fluid API + :rtype: Seeding + """ + return self.seeding.append_induction(nodes) + + def append_seeding_neighbors(self, nodes): + """Add a seed by neighbors. + + :param nodes: A list of PyBEL node tuples + :type nodes: BaseEntity or iter[BaseEntity] + """ + return self.seeding.append_neighbors(nodes) + + def append_seeding_annotation(self, annotation, values): + """Add a seed induction method for single annotation's values. + + :param str annotation: The annotation to filter by + :param set[str] values: The values of the annotation to keep + """ + return self.seeding.append_annotation(annotation, values) + + def append_seeding_sample(self, **kwargs): + """Add seed induction methods. + + Kwargs can have ``number_edges`` or ``number_seed_nodes``. + """ + return self.seeding.append_sample(**kwargs) + + def append_pipeline(self, name, *args, **kwargs): + """Add an entry to the pipeline. Defers to :meth:`pybel_tools.pipeline.Pipeline.append`. + + :param name: The name of the function + :type name: str or types.FunctionType + :return: This pipeline for fluid query building + :rtype: Pipeline + """ + return self.pipeline.append(name, *args, **kwargs) + + def __call__(self, manager): + """Run this query and returns the resulting BEL graph with :meth:`Query.run`. + + :param pybel.manager.Manager manager: A cache manager + :rtype: Optional[pybel.BELGraph] + """ + return self.run(manager) + + def run(self, manager): + """Run this query and returns the resulting BEL graph. + + :param manager: A cache manager + :rtype: Optional[pybel.BELGraph] + """ + universe = self._get_universe(manager) + graph = self.seeding.run(universe) + return self.pipeline.run(graph, universe=universe) + + def _get_universe(self, manager): + if not self.network_ids: + raise QueryMissingNetworksError('can not run query without network identifiers') + + log.debug('query universe consists of networks: %s', self.network_ids) + + universe = manager.get_graph_by_ids(self.network_ids) + log.debug('query universe has %d nodes/%d edges', universe.number_of_nodes(), universe.number_of_edges()) + + return universe + + def to_json(self): + """Return this query as a JSON object. + + :rtype: dict + """ + rv = { + 'network_ids': self.network_ids, + } + + if self.seeding: + rv['seeding'] = self.seeding.to_json() + + if self.pipeline: + rv['pipeline'] = self.pipeline.to_json() + + return rv + + def dump(self, file, **kwargs): + """Dump this query to a file as JSON.""" + json.dump(self.to_json(), file, **kwargs) + + def dumps(self, **kwargs): + """Dump this query to a string as JSON + + :rtype: str + """ + return json.dumps(self.to_json(), **kwargs) + + @staticmethod + def from_json(data): + """Load a query from a JSON dictionary. + + :param dict data: A JSON dictionary + :rtype: Query + :raises: QueryMissingNetworksError + """ + network_ids = data.get('network_ids') + if network_ids is None: + raise QueryMissingNetworksError('query JSON did not have key "network_ids"') + + seeding_data = data.get('seeding') + seeding = ( + Seeding(seeding_data) + if seeding_data is not None else + None + ) + + pipeline_data = data.get('pipeline') + pipeline = ( + Pipeline(pipeline_data) + if pipeline_data is not None else + None + ) + + return Query( + network_ids=network_ids, + seeding=seeding, + pipeline=pipeline, + ) + + @staticmethod + def load(file): + """Load a query from a JSON file. 
+ + :param file: A file or file-like + :rtype: Query + :raises: QueryMissingNetworksError + """ + return Query.from_json(json.load(file)) + + @staticmethod + def loads(s): + """Load a query from a JSON string + + :param str s: A stringified JSON query + :rtype: Query + :raises: QueryMissingNetworksError + """ + return Query.from_json(json.loads(s)) diff --git a/src/pybel/struct/query/seeding.py b/src/pybel/struct/query/seeding.py new file mode 100644 index 00000000..ce26f50d --- /dev/null +++ b/src/pybel/struct/query/seeding.py @@ -0,0 +1,180 @@ +# -*- coding: utf-8 -*- + +"""Query builder.""" + +import json +import logging +import random + +from six.moves import UserList + +from .constants import ( + SEED_TYPE_ANNOTATION, SEED_TYPE_INDUCTION, SEED_TYPE_NEIGHBORS, SEED_TYPE_SAMPLE, +) +from .selection import get_subgraph +from ...dsl import BaseEntity +from ...manager.models import Node +from ...struct import union +from ...tokens import parse_result_to_dsl + +log = logging.getLogger(__name__) + +SEED_METHOD = 'type' +SEED_DATA = 'data' + + +class Seeding(UserList): + """Represents a container of seeding methods to apply to a network.""" + + def append_induction(self, nodes): + """Add a seed induction method. + + :param list[tuple or Node or BaseEntity] nodes: A list of PyBEL node tuples + :returns: self for fluid API + :rtype: Seeding + """ + return self._append_seed(SEED_TYPE_INDUCTION, _handle_nodes(nodes)) + + def append_neighbors(self, nodes): + """Add a seed by neighbors. + + :param nodes: A list of PyBEL node tuples + :type nodes: BaseEntity or iter[BaseEntity] + :returns: self for fluid API + :rtype: Seeding + """ + return self._append_seed(SEED_TYPE_NEIGHBORS, _handle_nodes(nodes)) + + def append_annotation(self, annotation, values): + """Add a seed induction method for single annotation's values. + + :param str annotation: The annotation to filter by + :param set[str] values: The values of the annotation to keep + :returns: self for fluid API + :rtype: Seeding + """ + return self._append_seed(SEED_TYPE_ANNOTATION, { + 'annotations': { + annotation: values + } + }) + + def append_sample(self, **kwargs): + """Add seed induction methods. + + Kwargs can have ``number_edges`` or ``number_seed_nodes``. + :returns: self for fluid API + :rtype: Seeding + """ + data = { + 'seed': random.randint(0, 1000000) + } + data.update(kwargs) + + return self._append_seed(SEED_TYPE_SAMPLE, data) + + def _append_seed(self, seed_type, data): + """Add a seeding method. + + :param str seed_type: + :param data: + :returns: self for fluid API + :rtype: Seeding + """ + self.append({ + SEED_METHOD: seed_type, + SEED_DATA: data, + }) + return self + + def run(self, graph): + """Seed the graph or return none if not possible. + + :type graph: pybel.BELGraph + :rtype: Optional[pybel.BELGraph] + """ + if not self: + log.debug('no seeding, returning graph: %s', graph) + return graph + + subgraphs = [] + + for seed in self: + seed_method, seed_data = seed[SEED_METHOD], seed[SEED_DATA] + + log.debug('seeding with %s: %s', seed_method, seed_data) + subgraph = get_subgraph(graph, seed_method=seed_method, seed_data=seed_data) + + if subgraph is None: + log.debug('seed returned empty graph: %s', seed) + continue + + subgraphs.append(subgraph) + + if not subgraphs: + log.debug('no subgraphs returned') + return + + return union(subgraphs) + + def to_json(self): + """Serialize this seeding container to a JSON object. 
+ + :rtype: list + """ + return list(self) + + def dump(self, file, sort_keys=True, **kwargs): + """Dump this seeding container to a file as JSON.""" + json.dump(self.to_json(), file, sort_keys=sort_keys, **kwargs) + + def dumps(self, sort_keys=True, **kwargs): + """Dump this query to a string as JSON. + + :rtype: str + """ + return json.dumps(self.to_json(), sort_keys=sort_keys, **kwargs) + + @staticmethod + def from_json(data): + """Build a seeding container from a JSON list. + + :param dict data: + :rtype: Seeding + """ + return Seeding(data) + + @staticmethod + def load(file): + """Load a seeding container from a JSON file. + + :rtype: Seeding + """ + return Seeding.from_json(json.load(file)) + + @staticmethod + def loads(s): + """Load a seeding container from a JSON string. + + :rtype: Seeding + """ + return Seeding.from_json(json.loads(s)) + + +def _handle_nodes(nodes): + """Handle nodes that might be dictionaries. + + :type nodes: BaseEntity or list[dict] or list[BaseEntity] + :rtype: list[BaseEntity] + """ + if isinstance(nodes, BaseEntity): + return [nodes] + + return [ + ( + parse_result_to_dsl(node) + if isinstance(node, dict) else + node + ) + for node in nodes + ] diff --git a/src/pybel/struct/query/selection.py b/src/pybel/struct/query/selection.py new file mode 100644 index 00000000..fa5c4537 --- /dev/null +++ b/src/pybel/struct/query/selection.py @@ -0,0 +1,105 @@ +# -*- coding: utf-8 -*- + +import logging + +from .constants import * +from ..mutation import ( + expand_nodes_neighborhoods, get_multi_causal_downstream, get_multi_causal_upstream, + get_random_subgraph, get_subgraph_by_all_shortest_paths, get_subgraph_by_annotations, get_subgraph_by_authors, + get_subgraph_by_induction, get_subgraph_by_neighborhood, get_subgraph_by_pubmed, get_subgraph_by_second_neighbors, +) + +log = logging.getLogger(__name__) + +__all__ = [ + 'get_subgraph', +] + + +def get_subgraph(graph, seed_method=None, seed_data=None, expand_nodes=None, remove_nodes=None): + """Run a pipeline query on graph with multiple sub-graph filters and expanders. + + Order of Operations: + + 1. Seeding by given function name and data + 2. Add nodes + 3. 
Remove nodes + + :param pybel.BELGraph graph: A BEL graph + :param str seed_method: The name of the get_subgraph_by_* function to use + :param seed_data: The argument to pass to the get_subgraph function + :param list[tuple] expand_nodes: Add the neighborhoods around all of these nodes + :param list[tuple] remove_nodes: Remove these nodes and all of their in/out edges + :rtype: Optional[pybel.BELGraph] + """ + # Seed by the given function + if seed_method == SEED_TYPE_INDUCTION: + result = get_subgraph_by_induction(graph, seed_data) + + elif seed_method == SEED_TYPE_PATHS: + result = get_subgraph_by_all_shortest_paths(graph, seed_data) + + elif seed_method == SEED_TYPE_NEIGHBORS: + result = get_subgraph_by_neighborhood(graph, seed_data) + + elif seed_method == SEED_TYPE_DOUBLE_NEIGHBORS: + result = get_subgraph_by_second_neighbors(graph, seed_data) + + elif seed_method == SEED_TYPE_UPSTREAM: + result = get_multi_causal_upstream(graph, seed_data) + + elif seed_method == SEED_TYPE_DOWNSTREAM: + result = get_multi_causal_downstream(graph, seed_data) + + elif seed_method == SEED_TYPE_PUBMED: + result = get_subgraph_by_pubmed(graph, seed_data) + + elif seed_method == SEED_TYPE_AUTHOR: + result = get_subgraph_by_authors(graph, seed_data) + + elif seed_method == SEED_TYPE_ANNOTATION: + result = get_subgraph_by_annotations(graph, seed_data['annotations'], or_=seed_data.get('or')) + + elif seed_method == SEED_TYPE_SAMPLE: + result = get_random_subgraph( + graph, + number_edges=seed_data.get('number_edges'), + seed=seed_data.get('seed') + ) + + elif not seed_method: # Otherwise, don't seed a sub-graph + result = graph.copy() + log.debug('no seed function - using full network: %s', result.name) + + else: + raise ValueError('Invalid seed method: {}'.format(seed_method)) + + if result is None: + log.debug('query returned no results') + return + + log.debug('original graph has (%s nodes / %s edges)', result.number_of_nodes(), result.number_of_edges()) + + # Expand around the given nodes + if expand_nodes: + expand_nodes_neighborhoods(graph, result, expand_nodes) + log.debug('graph expanded to (%s nodes / %s edges)', result.number_of_nodes(), result.number_of_edges()) + + # Delete the given nodes + if remove_nodes: + for node in remove_nodes: + if node not in result: + log.debug('%s is not in graph %s', node, graph.name) + continue + result.remove_node(node) + log.debug('graph contracted to (%s nodes / %s edges)', result.number_of_nodes(), result.number_of_edges()) + + log.debug( + 'Subgraph coming from %s (seed type) %s (data) contains %d nodes and %d edges', + seed_method, + seed_data, + result.number_of_nodes(), + result.number_of_edges() + ) + + return result diff --git a/src/pybel/struct/utils.py b/src/pybel/struct/utils.py index f00dfc2b..e15b1f51 100644 --- a/src/pybel/struct/utils.py +++ b/src/pybel/struct/utils.py @@ -54,41 +54,4 @@ def ensure_node_from_universe(source, target, node): def relabel_inplace(G, mapping): # borrowed from NX - old_labels = set(mapping.keys()) - new_labels = set(mapping.values()) - if len(old_labels & new_labels) > 0: - # labels sets overlap - # can we topological sort and still do the relabeling? - D = nx.DiGraph(list(mapping.items())) - D.remove_edges_from(nx.selfloop_edges(D)) - try: - nodes = reversed(list(nx.topological_sort(D))) - except nx.NetworkXUnfeasible: - raise nx.NetworkXUnfeasible('The node label sets are overlapping ' - 'and no ordering can resolve the ' - 'mapping. 
Use copy=True.') - else: - # non-overlapping label sets - nodes = old_labels - - for old in nodes: - try: - new = mapping[old] - except KeyError: - continue - if new == old: - continue - try: - G.add_node(new) - G._node[new] = G.nodes[old] # THIS WAS CHANGED - except KeyError: - raise KeyError("Node %s is not in the graph" % old) - new_edges = [(new, new if old == target else target, key, data) - for (_, target, key, data) - in G.edges(old, data=True, keys=True)] - new_edges += [(new if old == source else source, new, key, data) - for (source, _, key, data) - in G.in_edges(old, data=True, keys=True)] - G.remove_node(old) - G.add_edges_from(new_edges) - return G + return nx.relabel_nodes(G, mapping, copy=False) diff --git a/src/pybel/tokens.py b/src/pybel/tokens.py index d8ca404b..60838c61 100644 --- a/src/pybel/tokens.py +++ b/src/pybel/tokens.py @@ -20,9 +20,9 @@ __all__ = [ def parse_result_to_dsl(tokens): - """Convert a ParseResult to a PyBEL DSL object + """Convert a ParseResult to a PyBEL DSL object. - :type tokens: pyparsing.ParseResults + :type tokens: dict or pyparsing.ParseResults :rtype: BaseEntity """ if MODIFIER in tokens:
Re-implement "get_subgraph" Move from PyBEL-Tools and implement tests
pybel/pybel
diff --git a/src/pybel/testing/mock_manager.py b/src/pybel/testing/mock_manager.py new file mode 100644 index 00000000..188ac564 --- /dev/null +++ b/src/pybel/testing/mock_manager.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- + +"""Mocks for PyBEL testing.""" + +from ..manager.models import Network +from ..struct import union + + +class MockQueryManager: + """A mock manager.""" + + def __init__(self, graphs=None): + """Build a mock manager appropriate for testing the pipeline and query builders. + + :param Optional[list[pybel.BELGraph]] graphs: A list of BEL graphs to index + """ + self.graphs = [] + + #: A lookup for nodes from the node hash (string) to the node tuple + self.hash_to_node = {} + + #: A lookup from network identifier to graph + self.id_graph = {} + + if graphs is not None: + for graph in graphs: + self.insert_graph(graph) + + def count_networks(self): + """Count networks in the manager. + + :rtype: int + """ + return len(self.graphs) + + def insert_graph(self, graph): + """Insert a graph and ensure its nodes are cached. + + :param pybel.BELGraph graph: + :rtype: Network + """ + network_id = len(self.graphs) + self.graphs.append(graph) + self.id_graph[network_id] = graph + + for node in graph: + self.hash_to_node[node.sha512] = node + + return Network(id=network_id) + + def get_graph_by_ids(self, network_ids): + """Get a graph from the union of multiple networks. + + :param iter[int] network_ids: The identifiers of networks in the database + :rtype: pybel.BELGraph + """ + network_ids = list(network_ids) + + if len(network_ids) == 1: + return self.id_graph[network_ids[0]] + + graphs = [ + self.id_graph[graph_id] + for graph_id in network_ids + ] + + return union(graphs) + + def get_dsl_by_hash(self, sha512): + """Get a DSL by its hash. + + :param str sha512: + :rtype: Optional[BaseEntity] + """ + return self.hash_to_node.get(sha512) diff --git a/tests/constants.py b/tests/constants.py index b873ef66..770e4477 100644 --- a/tests/constants.py +++ b/tests/constants.py @@ -74,10 +74,10 @@ def assert_has_node(self, node, graph, **kwargs): self.assertIn( node, graph, - msg='{} not found in graph. Other nodes:\n{}'.format(graph.node_to_bel(node), '\n'.join( - graph.node_to_bel(node) - for node in graph - )) + msg='{} not found in graph. Other nodes:\n{}'.format(node.as_bel(), '\n'.join( + n.as_bel() + for n in graph + )), ) if kwargs: @@ -132,7 +132,7 @@ def assert_has_edge(self, u, v, graph, permissive=True, **kwargs): self.assertTrue( graph.has_edge(u, v), msg='Edge ({}, {}) not in graph. 
Other edges:\n{}'.format(u, v, '\n'.join( - '{} {} {}'.format(graph.node_to_bel(u), d[RELATION], graph.node_to_bel(v)) + '{} {} {}'.format(u.as_bel(), d[RELATION], v.as_bel()) for u, v, d in graph.edges(data=True) )) ) diff --git a/tests/test_struct/test_filters/test_edge_predicates.py b/tests/test_struct/test_filters/test_edge_predicates.py new file mode 100644 index 00000000..fa76cd20 --- /dev/null +++ b/tests/test_struct/test_filters/test_edge_predicates.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- + +"""Tests for edge predicates""" + +import unittest + +from pybel import BELGraph +from pybel.dsl import pathology, protein +from pybel.struct.filters.edge_predicates import has_pathology_causal +from pybel.testing.utils import n + + +class TestEdgePredicates(unittest.TestCase): + """Tests for edge predicates.""" + + def test_has_pathology(self): + """Test for checking edges that have a causal pathology.""" + graph = BELGraph() + + a, b, c = protein(n(), n()), pathology(n(), n()), pathology(n(), n()) + + key = graph.add_increases(a, b, n(), n()) + self.assertFalse(has_pathology_causal(graph, a, b, key)) + + key = graph.add_increases(b, a, n(), n()) + self.assertTrue(has_pathology_causal(graph, b, a, key)) + + key = graph.add_association(b, a, n(), n()) + self.assertFalse(has_pathology_causal(graph, b, a, key)) + + key = graph.add_increases(a, c, n(), n()) + self.assertFalse(has_pathology_causal(graph, a, c, key)) diff --git a/tests/test_struct/test_query/__init__.py b/tests/test_struct/test_query/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/test_struct/test_query/test_mocks.py b/tests/test_struct/test_query/test_mocks.py new file mode 100644 index 00000000..3317546e --- /dev/null +++ b/tests/test_struct/test_query/test_mocks.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- + +"""Tests for the mocks for the query builder.""" + +import unittest + +from pybel.examples import egf_graph +from pybel.testing.mock_manager import MockQueryManager + + +class TestMockManager(unittest.TestCase): + """Tests for the mock query manager.""" + + def test_make(self): + """Test instantiating the mock query manager.""" + manager = MockQueryManager() + self.assertEqual(0, manager.count_networks()) + + def test_make_with_graph(self): + """Test counting networks in the mock query manager.""" + manager = MockQueryManager(graphs=[egf_graph]) + self.assertEqual(1, manager.count_networks()) + + def test_add_graph(self): + """Test adding a graph with insert_graph.""" + manager = MockQueryManager() + graph = egf_graph.copy() + manager.insert_graph(graph) + self.assertEqual(1, manager.count_networks()) diff --git a/tests/test_struct/test_query/test_query.py b/tests/test_struct/test_query/test_query.py new file mode 100644 index 00000000..4f320459 --- /dev/null +++ b/tests/test_struct/test_query/test_query.py @@ -0,0 +1,307 @@ +# -*- coding: utf-8 -*- + +"""Tests for the query builder.""" + +import logging +import unittest + +from pybel import BELGraph, Pipeline +from pybel.dsl import Protein +from pybel.examples.egf_example import egf_graph, vcp +from pybel.examples.homology_example import ( + homology_graph, mouse_csf1_protein, mouse_csf1_rna, mouse_mapk1_protein, mouse_mapk1_rna, +) +from pybel.examples.sialic_acid_example import (cd33_phosphorylated, dap12, shp1, shp2, sialic_acid_graph, syk, trem2) +from pybel.struct import expand_node_neighborhood, expand_nodes_neighborhoods, get_subgraph_by_annotation_value +from pybel.struct.mutation import collapse_to_genes, 
enrich_protein_and_rna_origins +from pybel.struct.query import Query, QueryMissingNetworksError, Seeding +from pybel.testing.generate import generate_random_graph +from pybel.testing.mock_manager import MockQueryManager +from pybel.testing.utils import n + +log = logging.getLogger(__name__) + + +def add(query, manager, graph): + network = manager.insert_graph(graph) + query.append_network(network.id) + + +class TestSeedingConstructor(unittest.TestCase): + + def test_none(self): + """Test construction of a seeding container.""" + seeding = Seeding() + self.assertEqual(0, len(seeding)) + self.assertEqual('[]', seeding.dumps()) + + def test_append_sample(self): + seeding = Seeding() + seeding.append_sample() + self.assertEqual(1, len(seeding)) + + s = seeding.dumps() + self.assertIsInstance(s, str) + + +class TestQueryConstructor(unittest.TestCase): + """Test the construction of a Query.""" + + def test_network_ids_none(self): + query = Query() + self.assertIsInstance(query.network_ids, list) + self.assertIsInstance(query.seeding, Seeding) + self.assertIsInstance(query.pipeline, Pipeline) + self.assertEqual(0, len(query.network_ids)) + + def test_network_ids_single(self): + query = Query(network_ids=1) + self.assertIsInstance(query.network_ids, list) + self.assertEqual(1, len(query.network_ids)) + + def test_network_ids_multiple(self): + query = Query(network_ids=[1, 2, 3]) + self.assertIsInstance(query.network_ids, list) + self.assertEqual(3, len(query.network_ids)) + + def test_network_ids_type_error(self): + with self.assertRaises(TypeError): + Query(network_ids='a') + + def test_seeding(self): + query = Query(seeding=Seeding()) + self.assertEqual(0, len(query.seeding)) + + def test_pipeline(self): + query = Query(pipeline=Pipeline()) + self.assertEqual(0, len(query.pipeline)) + + +class QueryTestEgf(unittest.TestCase): + """Test querying the EGF subgraph""" + + def setUp(self): + """Set up each test with a mock query manager.""" + self.manager = MockQueryManager() + self.query = Query() + + def add_query(self, graph): + add(self.query, self.manager, graph) + return self.query + + def run_query(self): + return self.query.run(self.manager) + + def test_fail_run_with_no_networks(self): + with self.assertRaises(QueryMissingNetworksError): + self.run_query() + + def test_no_seeding_no_pipeline(self): + graph = egf_graph.copy() + + self.add_query(graph) + result = self.run_query() + + self.assertEqual(graph.number_of_nodes(), result.number_of_nodes()) + self.assertEqual(graph.number_of_edges(), result.number_of_edges()) + + def test_seed_by_neighbor(self): + graph = BELGraph() + a, b, c, d = (Protein(namespace=n(), name=str(i)) for i in range(4)) + + graph.add_increases(a, b, n(), n()) + graph.add_increases(b, c, n(), n()) + graph.add_increases(c, d, n(), n()) + + self.add_query(graph).append_seeding_neighbors(b) + result = self.run_query() + self.assertIsInstance(result, BELGraph) + # test nodes + self.assertIn(a, result) + self.assertIn(b, result) + self.assertIn(c, result) + self.assertNotIn(d, result) + # test edges + self.assertIn(b, result[a]) + self.assertIn(c, result[b]) + self.assertNotIn(d, result[c]) + + def test_seed_by_neighbors(self): + graph = BELGraph() + a, b, c, d, e = (Protein(namespace=n(), name=str(i)) for i in range(5)) + + graph.add_increases(a, b, n(), n()) + graph.add_increases(b, c, n(), n()) + graph.add_increases(c, d, n(), n()) + graph.add_increases(d, e, n(), n()) + + self.add_query(graph).append_seeding_neighbors([b, c]) + + result = self.run_query() + 
self.assertIsInstance(result, BELGraph) + # test nodes + self.assertIn(a, result) + self.assertIn(b, result) + self.assertIn(c, result) + self.assertIn(d, result) + self.assertNotIn(e, result) + # test edges + self.assertIn(b, result[a]) + self.assertIn(c, result[b]) + self.assertIn(d, result[c]) + self.assertNotIn(e, result[d]) + + def test_random_sample(self): # TODO this will fail randomly some times lol + graph = generate_random_graph(50, 1000) + + query = self.add_query(graph) + query.append_seeding_sample(number_edges=10) + query.append_seeding_sample(number_edges=10) + + result = self.run_query() + + self.assertEqual(20, result.number_of_edges()) + + +class QueryTest(unittest.TestCase): + """Test the query""" + + def setUp(self): + """Setup each test with an empty mock query manager.""" + self.manager = MockQueryManager() + + def test_pipeline(self): + graph = egf_graph.copy() + enrich_protein_and_rna_origins(graph) + + self.assertEqual( + 32, # 10 protein nodes already there + complex + bp + 2*10 (genes and rnas) + graph.number_of_nodes() + ) + + # 6 already there + 5 complex hasComponent edges + new 2*10 edges + self.assertEqual(31, graph.number_of_edges()) + + network = self.manager.insert_graph(graph) + + pipeline = Pipeline() + pipeline.append(collapse_to_genes) + + query = Query( + network_ids=[network.id], + pipeline=pipeline + ) + result_graph = query.run(self.manager) + + self.assertEqual(12, result_graph.number_of_nodes()) # same number of nodes than there were + self.assertEqual(11, result_graph.number_of_edges()) # same number of edges than there were + + def test_pipeline_2(self): + graph = egf_graph.copy() + + network = self.manager.insert_graph(graph) + network_id = network.id + + query = Query(network_ids=[network_id]) + query.append_seeding_neighbors(vcp) + query.append_pipeline(get_subgraph_by_annotation_value, 'Species', '9606') + + result = query.run(self.manager) + self.assertIsNotNone(result, msg='Query returned none') + + self.assertEqual(3, result.number_of_nodes()) + + def test_query_multiple_networks(self): + sialic_acid_graph_id = self.manager.insert_graph(sialic_acid_graph.copy()).id + egf_graph_id = self.manager.insert_graph(egf_graph.copy()).id + + query = Query() + query.append_network(sialic_acid_graph_id) + query.append_network(egf_graph_id) + query.append_seeding_neighbors([syk]) + query.append_pipeline(enrich_protein_and_rna_origins) + + result = query.run(self.manager) + self.assertIsNotNone(result, msg='Query returned none') + + self.assertIn(shp1, result) + self.assertIn(shp2, result) + self.assertIn(trem2, result) + self.assertIn(dap12, result) + + self.assertEqual(15, result.number_of_nodes()) + self.assertEqual(14, result.number_of_edges()) + + def test_get_subgraph_by_annotation_value(self): + graph = homology_graph.copy() + + result = get_subgraph_by_annotation_value(graph, 'Species', '10090') + + self.assertIsNotNone(result, msg='Query returned none') + self.assertIsInstance(result, BELGraph) + self.assertLess(0, result.number_of_nodes()) + + self.assertIn(mouse_mapk1_protein, result, msg='nodes:\n{}'.format(list(map(repr, graph)))) + self.assertIn(mouse_csf1_protein, result) + + self.assertEqual(2, result.number_of_nodes()) + self.assertEqual(1, result.number_of_edges()) + + def test_seeding_1(self): + test_network_1 = self.manager.insert_graph(homology_graph.copy()) + + query = Query(network_ids=[test_network_1.id]) + query.append_seeding_neighbors([mouse_csf1_rna, mouse_mapk1_rna]) + + result = query.run(self.manager) + 
self.assertIsNotNone(result, msg='Query returned none') + self.assertIsInstance(result, BELGraph) + + self.assertIn(mouse_mapk1_rna, result) + self.assertIn(mouse_csf1_rna, result) + self.assertIn(mouse_mapk1_protein, result) + self.assertIn(mouse_csf1_protein, result) + + self.assertEqual(6, result.number_of_nodes()) + self.assertEqual(4, result.number_of_edges()) + + def test_seeding_with_pipeline(self): + test_network_1 = self.manager.insert_graph(sialic_acid_graph.copy()) + + query = Query(network_ids=[test_network_1.id]) + query.append_seeding_neighbors([trem2, dap12, shp2]) + query.append_pipeline(expand_nodes_neighborhoods, [trem2, dap12, shp2]) + result = query.run(self.manager) + self.assertIsNotNone(result, msg='Query returned none') + self.assertIsInstance(result, BELGraph) + + self.assertIn(trem2, result) + self.assertIn(dap12, result) + self.assertIn(shp2, result) + self.assertIn(syk, result) + self.assertIn(cd33_phosphorylated, result) + + self.assertEqual(5, result.number_of_nodes()) + self.assertEqual(4, result.number_of_edges()) + + def test_query_multiple_networks_with_api(self): + test_network_1 = self.manager.insert_graph(homology_graph.copy()) + + pipeline = Pipeline() + pipeline.append(expand_node_neighborhood, mouse_mapk1_protein) + + query = Query( + network_ids=[test_network_1.id], + pipeline=pipeline + ) + query.append_seeding_annotation('Species', {'10090'}) + + result = query.run(self.manager) + + self.assertIsNotNone(result, msg='Query returned none') + + self.assertEqual(3, result.number_of_nodes()) + self.assertIn(mouse_mapk1_protein, result) + self.assertIn(mouse_csf1_protein, result) + + self.assertEqual(2, result.number_of_edges()) diff --git a/tests/test_struct/test_query/test_seeding.py b/tests/test_struct/test_query/test_seeding.py new file mode 100644 index 00000000..324cbaa0 --- /dev/null +++ b/tests/test_struct/test_query/test_seeding.py @@ -0,0 +1,98 @@ +# -*- coding: utf-8 -*- + +"""Tests for the query builder.""" + +import logging +import unittest + +from pybel import BELGraph +from pybel.dsl import Protein +from pybel.examples.egf_example import egf_graph +from pybel.struct.query import Seeding +from pybel.testing.generate import generate_random_graph +from pybel.testing.utils import n + +log = logging.getLogger(__name__) + + +class TestSeedingConstructor(unittest.TestCase): + + def test_none(self): + seeding = Seeding() + self.assertEqual(0, len(seeding)) + self.assertEqual('[]', seeding.dumps()) + + def test_append_sample(self): + seeding = Seeding() + seeding.append_sample() + self.assertEqual(1, len(seeding)) + + s = seeding.dumps() + self.assertIsInstance(s, str) + + def test_no_seeding(self): + graph = egf_graph.copy() + + seeding = Seeding() + result = seeding.run(graph) + + self.assertEqual(graph.number_of_nodes(), result.number_of_nodes()) + self.assertEqual(graph.number_of_edges(), result.number_of_edges()) + + def test_seed_by_neighbor(self): + graph = BELGraph() + a, b, c, d = (Protein(namespace=n(), name=str(i)) for i in range(4)) + graph.add_increases(a, b, n(), n()) + graph.add_increases(b, c, n(), n()) + graph.add_increases(c, d, n(), n()) + + seeding = Seeding() + seeding.append_neighbors(b) + result = seeding.run(graph) + + self.assertIsInstance(result, BELGraph) + # test nodes + self.assertIn(a, result) + self.assertIn(b, result) + self.assertIn(c, result) + self.assertNotIn(d, result) + # test edges + self.assertIn(b, result[a]) + self.assertIn(c, result[b]) + self.assertNotIn(d, result[c]) + + def 
test_seed_by_neighbors(self): + graph = BELGraph() + a, b, c, d, e = (Protein(namespace=n(), name=str(i)) for i in range(5)) + graph.add_increases(a, b, n(), n()) + graph.add_increases(b, c, n(), n()) + graph.add_increases(c, d, n(), n()) + graph.add_increases(d, e, n(), n()) + + seeding = Seeding() + seeding.append_neighbors([b, c]) + result = seeding.run(graph) + + self.assertIsInstance(result, BELGraph) + # test nodes + self.assertIn(a, result) + self.assertIn(b, result) + self.assertIn(c, result) + self.assertIn(d, result) + self.assertNotIn(e, result) + # test edges + self.assertIn(b, result[a]) + self.assertIn(c, result[b]) + self.assertIn(d, result[c]) + self.assertNotIn(e, result[d]) + + def test_random_sample(self): + graph = generate_random_graph(50, 1000) + + seeding = Seeding() + seeding.append_sample(number_edges=10) + seeding.append_sample(number_edges=10) + result = seeding.run(graph) + + # TODO this will fail randomly some times lol, so make allowed to be sort of wrong + self.assertIn(result.number_of_edges(), {18, 19, 20}) diff --git a/tests/test_struct/test_struct_pipeline.py b/tests/test_struct/test_query/test_struct_pipeline.py similarity index 95% rename from tests/test_struct/test_struct_pipeline.py rename to tests/test_struct/test_query/test_struct_pipeline.py index b5af0aed..29550544 100644 --- a/tests/test_struct/test_struct_pipeline.py +++ b/tests/test_struct/test_query/test_struct_pipeline.py @@ -114,7 +114,7 @@ class TestPipeline(TestEgfExample): pipeline = Pipeline.from_functions([ 'infer_central_dogma', ]) - result = pipeline(self.graph, in_place=False) + result = pipeline(self.graph) self.assertEqual(32, result.number_of_nodes()) @@ -127,7 +127,7 @@ class TestPipeline(TestEgfExample): pipeline = Pipeline.from_functions([ enrich_protein_and_rna_origins, ]) - result = pipeline(self.graph, in_place=False) + result = pipeline(self.graph) self.assertEqual(32, result.number_of_nodes()) @@ -144,7 +144,7 @@ class TestDeprecation(unittest.TestCase): @transformation def test_function_1(): - pass + """Test doing nothing.""" self.assertNotIn('test_function_1', deprecated) self.assertIn('test_function_1', mapped) @@ -155,7 +155,7 @@ class TestDeprecation(unittest.TestCase): @register_deprecated('test_function_1') @transformation def test_function_1_new(): - pass + """Test bad uage of register_deprecated.""" self.assertNotIn('test_function_1', deprecated) @@ -165,7 +165,7 @@ class TestDeprecation(unittest.TestCase): @register_deprecated('test_function_2_old') @transformation def test_function_2(): - pass + """Test usage of register_deprecated.""" self.assertNotIn('test_function_2', deprecated) self.assertIn('test_function_2', mapped) @@ -184,7 +184,7 @@ class TestDeprecation(unittest.TestCase): with self.assertRaises(MissingPipelineFunctionError): @register_deprecated('test_function_3_old') def test_function_3(): - pass + """Test bad usage of register_deprecated that throws a MissingPipelineFunctionError.""" self.assertNotIn('test_function_3', mapped) self.assertNotIn('test_function_3', universe_map) diff --git a/tests/test_struct/test_transformations/test_induction.py b/tests/test_struct/test_transformations/test_induction.py index 8a331111..7e8ff5c0 100644 --- a/tests/test_struct/test_transformations/test_induction.py +++ b/tests/test_struct/test_transformations/test_induction.py @@ -7,8 +7,7 @@ import unittest from pybel import BELGraph from pybel.constants import ( - ASSOCIATION, CITATION_AUTHORS, CITATION_REFERENCE, CITATION_TYPE, CITATION_TYPE_PUBMED, - DECREASES, 
FUNCTION, INCREASES, PROTEIN, + ASSOCIATION, CITATION_AUTHORS, CITATION_REFERENCE, CITATION_TYPE, CITATION_TYPE_PUBMED, DECREASES, INCREASES, ) from pybel.dsl import BaseEntity, gene, protein, rna from pybel.struct.mutation.expansion import expand_upstream_causal
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_added_files", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 3, "test_score": 3 }, "num_modified_files": 13 }
0.11
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio" ], "pre_install": null, "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///croot/attrs_1668696182826/work certifi @ file:///croot/certifi_1671487769961/work/certifi charset-normalizer==3.4.1 click==8.1.8 click-plugins==1.1.1 coverage==7.2.7 execnet==2.0.2 flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core greenlet==3.1.1 idna==3.10 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work networkx==2.6.3 packaging @ file:///croot/packaging_1671697413597/work pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work py @ file:///opt/conda/conda-bld/py_1644396412707/work -e git+https://github.com/pybel/pybel.git@6b0eb5dcb19400f3a64ac4830747bfe8dcbe8141#egg=PyBEL pyparsing==3.1.4 pytest==7.1.2 pytest-asyncio==0.21.2 pytest-cov==4.1.0 pytest-mock==3.11.1 pytest-xdist==3.5.0 requests==2.31.0 requests-file==2.1.0 six==1.17.0 SQLAlchemy==2.0.40 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work tqdm==4.67.1 typing_extensions==4.7.1 urllib3==2.0.7 zipp @ file:///croot/zipp_1672387121353/work
name: pybel channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=22.1.0=py37h06a4308_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - flit-core=3.6.0=pyhd3eb1b0_0 - importlib-metadata=4.11.3=py37h06a4308_0 - importlib_metadata=4.11.3=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=22.0=py37h06a4308_0 - pip=22.3.1=py37h06a4308_0 - pluggy=1.0.0=py37h06a4308_1 - py=1.11.0=pyhd3eb1b0_0 - pytest=7.1.2=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py37h06a4308_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zipp=3.11.0=py37h06a4308_0 - zlib=1.2.13=h5eee18b_1 - pip: - charset-normalizer==3.4.1 - click==8.1.8 - click-plugins==1.1.1 - coverage==7.2.7 - execnet==2.0.2 - greenlet==3.1.1 - idna==3.10 - networkx==2.6.3 - pyparsing==3.1.4 - pytest-asyncio==0.21.2 - pytest-cov==4.1.0 - pytest-mock==3.11.1 - pytest-xdist==3.5.0 - requests==2.31.0 - requests-file==2.1.0 - six==1.17.0 - sqlalchemy==2.0.40 - tqdm==4.67.1 - typing-extensions==4.7.1 - urllib3==2.0.7 prefix: /opt/conda/envs/pybel
[ "tests/test_struct/test_filters/test_edge_predicates.py::TestEdgePredicates::test_has_pathology", "tests/test_struct/test_query/test_mocks.py::TestMockManager::test_add_graph", "tests/test_struct/test_query/test_mocks.py::TestMockManager::test_make", "tests/test_struct/test_query/test_mocks.py::TestMockManager::test_make_with_graph", "tests/test_struct/test_query/test_query.py::TestSeedingConstructor::test_append_sample", "tests/test_struct/test_query/test_query.py::TestSeedingConstructor::test_none", "tests/test_struct/test_query/test_query.py::TestQueryConstructor::test_network_ids_multiple", "tests/test_struct/test_query/test_query.py::TestQueryConstructor::test_network_ids_none", "tests/test_struct/test_query/test_query.py::TestQueryConstructor::test_network_ids_single", "tests/test_struct/test_query/test_query.py::TestQueryConstructor::test_network_ids_type_error", "tests/test_struct/test_query/test_query.py::TestQueryConstructor::test_pipeline", "tests/test_struct/test_query/test_query.py::TestQueryConstructor::test_seeding", "tests/test_struct/test_query/test_query.py::QueryTestEgf::test_fail_run_with_no_networks", "tests/test_struct/test_query/test_query.py::QueryTestEgf::test_no_seeding_no_pipeline", "tests/test_struct/test_query/test_query.py::QueryTestEgf::test_random_sample", "tests/test_struct/test_query/test_query.py::QueryTestEgf::test_seed_by_neighbor", "tests/test_struct/test_query/test_query.py::QueryTestEgf::test_seed_by_neighbors", "tests/test_struct/test_query/test_query.py::QueryTest::test_get_subgraph_by_annotation_value", "tests/test_struct/test_query/test_query.py::QueryTest::test_pipeline", "tests/test_struct/test_query/test_query.py::QueryTest::test_pipeline_2", "tests/test_struct/test_query/test_query.py::QueryTest::test_query_multiple_networks", "tests/test_struct/test_query/test_query.py::QueryTest::test_seeding_1", "tests/test_struct/test_query/test_query.py::QueryTest::test_seeding_with_pipeline", "tests/test_struct/test_query/test_seeding.py::TestSeedingConstructor::test_append_sample", "tests/test_struct/test_query/test_seeding.py::TestSeedingConstructor::test_no_seeding", "tests/test_struct/test_query/test_seeding.py::TestSeedingConstructor::test_none", "tests/test_struct/test_query/test_seeding.py::TestSeedingConstructor::test_random_sample", "tests/test_struct/test_query/test_seeding.py::TestSeedingConstructor::test_seed_by_neighbor", "tests/test_struct/test_query/test_seeding.py::TestSeedingConstructor::test_seed_by_neighbors", "tests/test_struct/test_query/test_struct_pipeline.py::TestPipelineFailures::test_append_invalid", "tests/test_struct/test_query/test_struct_pipeline.py::TestPipelineFailures::test_assert_failure", "tests/test_struct/test_query/test_struct_pipeline.py::TestPipelineFailures::test_assert_success", "tests/test_struct/test_query/test_struct_pipeline.py::TestPipelineFailures::test_build_meta_failure", "tests/test_struct/test_query/test_struct_pipeline.py::TestPipelineFailures::test_fail_add", "tests/test_struct/test_query/test_struct_pipeline.py::TestPipelineFailures::test_get_function_failure", "tests/test_struct/test_query/test_struct_pipeline.py::TestPipeline::test_append", "tests/test_struct/test_query/test_struct_pipeline.py::TestPipeline::test_deprecated_central_dogma_is_registered", "tests/test_struct/test_query/test_struct_pipeline.py::TestPipeline::test_extend", "tests/test_struct/test_query/test_struct_pipeline.py::TestPipeline::test_pipeline_by_function", 
"tests/test_struct/test_query/test_struct_pipeline.py::TestPipeline::test_pipeline_by_string", "tests/test_struct/test_query/test_struct_pipeline.py::TestPipeline::test_serialize_file", "tests/test_struct/test_query/test_struct_pipeline.py::TestPipeline::test_serialize_string", "tests/test_struct/test_query/test_struct_pipeline.py::TestDeprecation::test_register_deprecated", "tests/test_struct/test_query/test_struct_pipeline.py::TestDeprecation::test_register_deprecation_remapping_error", "tests/test_struct/test_query/test_struct_pipeline.py::TestDeprecation::test_register_missing", "tests/test_struct/test_transformations/test_induction.py::TestInduction::test_expand_upstream_causal_subgraph", "tests/test_struct/test_transformations/test_induction.py::TestInduction::test_get_subgraph_by_all_shortest_paths", "tests/test_struct/test_transformations/test_induction.py::TestInduction::test_get_subgraph_by_induction", "tests/test_struct/test_transformations/test_induction.py::TestInduction::test_get_upstream_causal_subgraph", "tests/test_struct/test_transformations/test_induction.py::TestEdgePredicateBuilders::test_build_author_inclusion_filter", "tests/test_struct/test_transformations/test_induction.py::TestEdgePredicateBuilders::test_build_author_set_inclusion_filter", "tests/test_struct/test_transformations/test_induction.py::TestEdgePredicateBuilders::test_build_pmid_inclusion_filter", "tests/test_struct/test_transformations/test_induction.py::TestEdgePredicateBuilders::test_build_pmid_set_inclusion_filter", "tests/test_struct/test_transformations/test_induction.py::TestEdgeInduction::test_get_subgraph_by_annotation_value", "tests/test_struct/test_transformations/test_induction.py::TestEdgeInduction::test_get_subgraph_by_annotation_values" ]
[ "tests/test_struct/test_query/test_query.py::QueryTest::test_query_multiple_networks_with_api" ]
[]
[]
MIT License
3,010
[ "src/pybel/struct/query/selection.py", "src/pybel/struct/query/constants.py", "src/pybel/struct/filters/edge_filters.py", "src/pybel/struct/mutation/induction/neighborhood.py", "src/pybel/struct/utils.py", "src/pybel/struct/filters/edge_predicates.py", "src/pybel/struct/graph.py", "src/pybel/dsl/namespaces.py", "src/pybel/struct/mutation/induction/paths.py", "src/pybel/struct/query/seeding.py", "src/pybel/manager/query_manager.py", "src/pybel/struct/pipeline/pipeline.py", "src/pybel/struct/query/query.py", "src/pybel/tokens.py", "src/pybel/canonicalize.py", "src/pybel/struct/mutation/induction/annotations.py", "src/pybel/struct/query/__init__.py", "src/pybel/struct/query/exc.py", "src/pybel/manager/models.py" ]
[ "src/pybel/struct/query/selection.py", "src/pybel/struct/query/constants.py", "src/pybel/struct/filters/edge_filters.py", "src/pybel/struct/mutation/induction/neighborhood.py", "src/pybel/struct/utils.py", "src/pybel/struct/filters/edge_predicates.py", "src/pybel/struct/graph.py", "src/pybel/dsl/namespaces.py", "src/pybel/struct/mutation/induction/paths.py", "src/pybel/struct/query/seeding.py", "src/pybel/manager/query_manager.py", "src/pybel/struct/pipeline/pipeline.py", "src/pybel/struct/query/query.py", "src/pybel/tokens.py", "src/pybel/canonicalize.py", "src/pybel/struct/mutation/induction/annotations.py", "src/pybel/struct/query/__init__.py", "src/pybel/struct/query/exc.py", "src/pybel/manager/models.py" ]
miyuchina__mistletoe-57
8cca7a53626e8c25039423dc9cd8db843447f813
2018-09-02 15:07:48
be8f01c85c40b0c6c442e9387be708c6f606d555
coveralls: [![Coverage Status](https://coveralls.io/builds/18793185/badge)](https://coveralls.io/builds/18793185) Coverage increased (+1.3%) to 93.163% when pulling **ba97a0b43331d67a50205adef6ee757ad8650b88 on Rogdham:fix-54** into **8cca7a53626e8c25039423dc9cd8db843447f813 on miyuchina:master**.
diff --git a/mistletoe/block_token.py b/mistletoe/block_token.py index 0a4c2c0..7eeb3f1 100644 --- a/mistletoe/block_token.py +++ b/mistletoe/block_token.py @@ -574,6 +574,9 @@ class ListItem(BlockToken): if not cls.in_continuation(next_line, prepend): # directly followed by another token if cls.other_token(next_line): + if newline: + lines.backstep() + del line_buffer[-newline:] break # next_line is a new list item marker_info = cls.parse_marker(next_line)
lists should not contain <p> Why do you use <p></p> in lists? This is not the way CommonMark does it. Other strange behavior: in the same document, some lists get the paragraphs, some others don't. Could you explain how it is supposed to work?
miyuchina/mistletoe
diff --git a/test/test_block_token.py b/test/test_block_token.py index b3c94b1..78fb0b2 100644 --- a/test/test_block_token.py +++ b/test/test_block_token.py @@ -191,6 +191,14 @@ class TestListItem(unittest.TestCase): list_item = block_token.tokenize(lines)[0].children[0] self.assertEqual(list_item.loose, False) + def test_tight_list(self): + lines = ['- foo\n', + '\n', + '# bar\n'] + f = FileWrapper(lines) + list_item = block_token.tokenize(lines)[0].children[0] + self.assertEqual(list_item.loose, False) + class TestList(unittest.TestCase): def test_different_markers(self):
{ "commit_name": "merge_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 0, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 1 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work -e git+https://github.com/miyuchina/mistletoe.git@8cca7a53626e8c25039423dc9cd8db843447f813#egg=mistletoe packaging @ file:///croot/packaging_1734472117206/work pluggy @ file:///croot/pluggy_1733169602837/work pytest @ file:///croot/pytest_1738938843180/work tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
name: mistletoe channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 prefix: /opt/conda/envs/mistletoe
[ "test/test_block_token.py::TestListItem::test_tight_list" ]
[]
[ "test/test_block_token.py::TestATXHeading::test_children_with_enclosing_hashes", "test/test_block_token.py::TestATXHeading::test_heading_in_paragraph", "test/test_block_token.py::TestATXHeading::test_match", "test/test_block_token.py::TestATXHeading::test_not_heading", "test/test_block_token.py::TestSetextHeading::test_match", "test/test_block_token.py::TestSetextHeading::test_next", "test/test_block_token.py::TestQuote::test_lazy_continuation", "test/test_block_token.py::TestQuote::test_match", "test/test_block_token.py::TestCodeFence::test_fence_code_lazy_continuation", "test/test_block_token.py::TestCodeFence::test_match_fenced_code", "test/test_block_token.py::TestCodeFence::test_match_fenced_code_with_tilda", "test/test_block_token.py::TestCodeFence::test_mixed_code_fence", "test/test_block_token.py::TestCodeFence::test_no_wrapping_newlines_code_fence", "test/test_block_token.py::TestCodeFence::test_unclosed_code_fence", "test/test_block_token.py::TestBlockCode::test_parse_indented_code", "test/test_block_token.py::TestParagraph::test_parse", "test/test_block_token.py::TestParagraph::test_read", "test/test_block_token.py::TestListItem::test_deep_list", "test/test_block_token.py::TestListItem::test_loose_list", "test/test_block_token.py::TestListItem::test_parse_marker", "test/test_block_token.py::TestListItem::test_sublist", "test/test_block_token.py::TestListItem::test_tokenize", "test/test_block_token.py::TestList::test_different_markers", "test/test_block_token.py::TestList::test_sublist", "test/test_block_token.py::TestTable::test_easy_table", "test/test_block_token.py::TestTable::test_match", "test/test_block_token.py::TestTable::test_not_easy_table", "test/test_block_token.py::TestTable::test_parse_align", "test/test_block_token.py::TestTable::test_parse_delimiter", "test/test_block_token.py::TestTableRow::test_easy_table_row", "test/test_block_token.py::TestTableRow::test_match", "test/test_block_token.py::TestTableCell::test_match", "test/test_block_token.py::TestFootnote::test_store", "test/test_block_token.py::TestDocument::test_auto_splitlines", "test/test_block_token.py::TestDocument::test_store_footnote", "test/test_block_token.py::TestThematicBreak::test_match", "test/test_block_token.py::TestContains::test_contains" ]
[]
MIT License
3,011
[ "mistletoe/block_token.py" ]
[ "mistletoe/block_token.py" ]
faucetsdn__chewie-24
1dfd612e1d03f11f7d3e5d091342bdc879eac4db
2018-09-02 23:07:30
84338da6a8fac3ad64fe5f69b1264805d98e986b
diff --git a/chewie/chewie.py b/chewie/chewie.py index eff9b87..60c79f2 100644 --- a/chewie/chewie.py +++ b/chewie/chewie.py @@ -14,6 +14,7 @@ from chewie.radius_attributes import EAPMessage, State, CalledStationId, NASPort from chewie.message_parser import MessageParser, MessagePacker from chewie.mac_address import MacAddress from chewie.event import EventMessageReceived, EventRadiusMessageReceived +from chewie.utils import get_logger def unpack_byte_string(byte_string): @@ -34,7 +35,7 @@ class Chewie(object): auth_handler=None, failure_handler=None, logoff_handler=None, radius_server_ip=None): self.interface_name = interface_name - self.logger = logger + self.logger = get_logger(logger.name + "." + Chewie.__name__) self.auth_handler = auth_handler self.failure_handler = failure_handler self.logoff_handler = logoff_handler @@ -226,7 +227,7 @@ class Chewie(object): if not sm: sm = FullEAPStateMachine(self.eap_output_messages, self.radius_output_messages, src_mac, self.timer_scheduler, self.auth_success, - self.auth_failure, self.auth_logoff) + self.auth_failure, self.auth_logoff, self.logger.name) sm.eapRestart = True # TODO what if port is not actually enabled, but then how did they auth? sm.portEnabled = True diff --git a/chewie/eap_state_machine.py b/chewie/eap_state_machine.py index 3bf3155..c6c3736 100644 --- a/chewie/eap_state_machine.py +++ b/chewie/eap_state_machine.py @@ -192,7 +192,7 @@ class FullEAPStateMachine: eapLogoff = None # bool def __init__(self, eap_output_queue, radius_output_queue, src_mac, timer_scheduler, - auth_handler, failure_handler, logoff_handler): + auth_handler, failure_handler, logoff_handler, log_prefix): """ Args: @@ -219,7 +219,8 @@ class FullEAPStateMachine: # if we want to deal with each method locally. self.m = MPassthrough() - self.logger = utils.get_logger("SM - %s" % self.src_mac) + logname = ".SM - %s" % self.src_mac + self.logger = utils.get_logger(log_prefix + logname) def getId(self): """Determines the identifier value chosen by the AAA server for the current EAP request. 
diff --git a/chewie/utils.py b/chewie/utils.py index 002f71c..886950a 100644 --- a/chewie/utils.py +++ b/chewie/utils.py @@ -1,17 +1,10 @@ """Utility Functions""" import logging -import sys -def get_logger(name, log_level=logging.DEBUG): - logger = logging.getLogger(name) - if not logger.handlers: - logger.setLevel(log_level) - ch = logging.StreamHandler(sys.stdout) - ch.setLevel(log_level) - formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') - ch.setFormatter(formatter) - logger.addHandler(ch) +def get_logger(logname): + """Create and return a logger object.""" + logger = logging.getLogger(logname) return logger diff --git a/run.py b/run.py index 4755bb0..bf43012 100644 --- a/run.py +++ b/run.py @@ -1,18 +1,35 @@ +import logging +import sys + from chewie.chewie import Chewie -import chewie.utils as utils -credentials = { - "[email protected]": "microphone" -} +def get_logger(name, log_level=logging.DEBUG): + logger = logging.getLogger(name) + if not logger.handlers: + logger.setLevel(log_level) + ch = logging.StreamHandler(sys.stdout) + ch.setLevel(log_level) + formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') + ch.setFormatter(formatter) + logger.addHandler(ch) + return logger def auth_handler(address, group_address): print("Authed address %s on port %s" % (str(address), str(group_address))) -logger = utils.get_logger("CHEWIE") +def failure_handler(address, group_address): + print("failure of address %s on port %s" % (str(address), str(group_address))) + + +def logoff_handler(address, group_address): + print("logoff of address %s on port %s" % (str(address), str(group_address))) + + +logger = get_logger("CHEWIE") logger.info('starting chewieeeee.') -chewie = Chewie("eth1", credentials, logger, auth_handler, radius_server_ip="172.24.0.113") +chewie = Chewie("eth1", logger, auth_handler, failure_handler, logoff_handler, radius_server_ip="172.24.0.113") chewie.run()
Statemachine logging isn't working with Faucet

Investigate. It is working when running standalone.
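A minimal, hypothetical sketch (not part of this dataset row) of the standard-library behaviour the patch above leans on: a logger whose name is prefixed with the parent logger's dotted name propagates its records up to the parent's handlers, which is why `get_logger(logger.name + "." + Chewie.__name__)` and the new `log_prefix` argument make the state machine's messages reach Faucet's configured handler. All names below are illustrative.

```python
import logging
import sys

# Parent logger with a handler attached, standing in for the logger that
# Faucet (or run.py) configures and passes into Chewie.
parent = logging.getLogger("CHEWIE")
parent.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter("%(name)s - %(levelname)s - %(message)s"))
parent.addHandler(handler)

# Child logger created only by name, as the patched get_logger() does.
# It has no handler of its own, but its records propagate up the dotted
# hierarchy to the parent's handler.
child = logging.getLogger(parent.name + ".SM - 00:12:34:56:78:90")
child.info("visible via the parent's handler")
```

Running the snippet prints the child's message through the handler that is attached only to the parent logger, which is the effect the patch restores when running under Faucet.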
faucetsdn/chewie
diff --git a/test/test_full_state_machine.py b/test/test_full_state_machine.py index 5d94bb8..f49cec5 100644 --- a/test/test_full_state_machine.py +++ b/test/test_full_state_machine.py @@ -27,7 +27,7 @@ class FullStateMachineStartTestCase(unittest.TestCase): self.src_mac = MacAddress.from_string("00:12:34:56:78:90") self.sm = FullEAPStateMachine(self.eap_output_queue, self.radius_output_queue, self.src_mac, self.timer_scheduler, - self.auth_handler, self.failure_handler, self.logoff_handler) + self.auth_handler, self.failure_handler, self.logoff_handler, 'Chewie') self.MAX_RETRANSMITS = 3 self.sm.MAX_RETRANS = self.MAX_RETRANSMITS self.sm.DEFAULT_TIMEOUT = 0.1
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 3, "test_score": 2 }, "num_modified_files": 4 }
0.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest-cov", "coverage", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y python3 python3-pip" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
-e git+https://github.com/faucetsdn/chewie.git@1dfd612e1d03f11f7d3e5d091342bdc879eac4db#egg=chewie coverage==7.8.0 dnspython==2.7.0 eventlet==0.39.1 exceptiongroup==1.2.2 greenlet==3.1.1 iniconfig==2.1.0 netils==0.0.1 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 pytest-cov==6.0.0 tomli==2.2.1
name: chewie channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.8.0 - dnspython==2.7.0 - eventlet==0.39.1 - exceptiongroup==1.2.2 - greenlet==3.1.1 - iniconfig==2.1.0 - netils==0.0.1 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - pytest-cov==6.0.0 - tomli==2.2.1 prefix: /opt/conda/envs/chewie
[ "test/test_full_state_machine.py::FullStateMachineStartTestCase::test_disabled_state", "test/test_full_state_machine.py::FullStateMachineStartTestCase::test_discard", "test/test_full_state_machine.py::FullStateMachineStartTestCase::test_discard2", "test/test_full_state_machine.py::FullStateMachineStartTestCase::test_eap_restart", "test/test_full_state_machine.py::FullStateMachineStartTestCase::test_eap_start", "test/test_full_state_machine.py::FullStateMachineStartTestCase::test_failure2", "test/test_full_state_machine.py::FullStateMachineStartTestCase::test_identity_response", "test/test_full_state_machine.py::FullStateMachineStartTestCase::test_logoff2", "test/test_full_state_machine.py::FullStateMachineStartTestCase::test_logoff_from_idle2", "test/test_full_state_machine.py::FullStateMachineStartTestCase::test_md5_challenge_request", "test/test_full_state_machine.py::FullStateMachineStartTestCase::test_md5_challenge_response", "test/test_full_state_machine.py::FullStateMachineStartTestCase::test_success2", "test/test_full_state_machine.py::FullStateMachineStartTestCase::test_timeout_failure2_from_aaa_timeout", "test/test_full_state_machine.py::FullStateMachineStartTestCase::test_timeout_failure2_from_max_retransmits", "test/test_full_state_machine.py::FullStateMachineStartTestCase::test_timeout_failure_from_max_retransmits", "test/test_full_state_machine.py::FullStateMachineStartTestCase::test_ttls_request" ]
[]
[]
[]
Apache License 2.0
3,012
[ "chewie/eap_state_machine.py", "chewie/chewie.py", "chewie/utils.py", "run.py" ]
[ "chewie/eap_state_machine.py", "chewie/chewie.py", "chewie/utils.py", "run.py" ]
asottile__add-trailing-comma-53
35ce9905dc9422e354c37fe5a941dc8198de4d56
2018-09-02 23:09:39
35ce9905dc9422e354c37fe5a941dc8198de4d56
diff --git a/README.md b/README.md index 8b8e86d..995826f 100644 --- a/README.md +++ b/README.md @@ -148,6 +148,16 @@ following change: Note that this would cause a **`SyntaxError`** in earlier python versions. +### trailing commas for `from` imports + +```diff + from os import ( + path, +- makedirs ++ makedirs, + ) +``` + ### unhug trailing paren ```diff diff --git a/add_trailing_comma.py b/add_trailing_comma.py index a80c492..b13c968 100644 --- a/add_trailing_comma.py +++ b/add_trailing_comma.py @@ -64,6 +64,7 @@ class FindNodes(ast.NodeVisitor): self.funcs = {} self.literals = {} self.tuples = {} + self.imports = set() def _visit_literal(self, node, key='elts'): if getattr(node, key): @@ -143,6 +144,10 @@ class FindNodes(ast.NodeVisitor): self.generic_visit(node) + def visit_ImportFrom(self, node): + self.imports.add(Offset(node.lineno, node.col_offset)) + self.generic_visit(node) + def _find_simple(first_brace, tokens): brace_stack = [first_brace] @@ -227,6 +232,18 @@ def _find_tuple(i, tokens): return _find_simple(i, tokens) +def _find_import(i, tokens): + # progress forwards until we find either a `(` or a newline + for i in range(i, len(tokens)): + token = tokens[i] + if token.name == 'NEWLINE': + return + elif token.name == 'OP' and token.src == '(': + return _find_simple(i, tokens) + else: + raise AssertionError('Past end?') + + def _fix_brace(fix_data, add_comma, tokens): first_brace, last_brace = fix_data.braces @@ -346,6 +363,11 @@ def _fix_src(contents_text, py35_plus, py36_plus): # Handle parenthesized things, unhug of tuples, and comprehensions elif token.src in START_BRACES: fixes.append((False, _find_simple(i, tokens))) + elif key in visitor.imports: + # some imports do not have parens + fix = _find_import(i, tokens) + if fix: + fixes.append((True, fix)) for add_comma, fix_data in fixes: if fix_data is not None:
Doesn't add trailing commas to imports

I would expect this to add trailing commas to multiple imports:

```python
from foo import (
    bar,
    baz,
)
```
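A small illustrative sketch, separate from the project's actual implementation, of the AST pass the patch above introduces: `visit_ImportFrom` records the offset of each `from ... import` statement so a later token scan can locate the opening parenthesis (if any) and the matching closing one, and add the trailing comma. The source string and class name here are made up.

```python
import ast

SRC = (
    "from os import (\n"
    "    makedirs,\n"
    "    path\n"
    ")\n"
)

class ImportFinder(ast.NodeVisitor):
    def __init__(self):
        self.offsets = []

    def visit_ImportFrom(self, node):
        # Record (line, column) of the `from` keyword; the real tool then
        # walks tokens forward from this offset to find the parenthesized
        # import list, skipping imports that have no parentheses.
        self.offsets.append((node.lineno, node.col_offset))
        self.generic_visit(node)

finder = ImportFinder()
finder.visit(ast.parse(SRC))
print(finder.offsets)  # [(1, 0)]
```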
asottile/add-trailing-comma
diff --git a/tests/add_trailing_comma_test.py b/tests/add_trailing_comma_test.py index bc1152b..d02d9d9 100644 --- a/tests/add_trailing_comma_test.py +++ b/tests/add_trailing_comma_test.py @@ -684,6 +684,72 @@ def test_fix_trailing_brace(src, expected): assert _fix_src(src, py35_plus=False, py36_plus=False) == expected [email protected]( + 'src', + ( + 'from os import path, makedirs\n', + 'from os import (path, makedirs)\n', + 'from os import (\n' + ' path,\n' + ' makedirs,\n' + ')', + ), +) +def test_fix_from_import_noop(src): + assert _fix_src(src, py35_plus=False, py36_plus=False) == src + + [email protected]( + ('src', 'expected'), + ( + ( + 'from os import (\n' + ' makedirs,\n' + ' path\n' + ')', + 'from os import (\n' + ' makedirs,\n' + ' path,\n' + ')', + ), + ( + 'from os import \\\n' + ' (\n' + ' path,\n' + ' makedirs\n' + ' )\n', + 'from os import \\\n' + ' (\n' + ' path,\n' + ' makedirs,\n' + ' )\n', + ), + ( + 'from os import (\n' + ' makedirs,\n' + ' path,\n' + ' )', + 'from os import (\n' + ' makedirs,\n' + ' path,\n' + ')', + ), + ( + 'if True:\n' + ' from os import (\n' + ' makedirs\n' + ' )', + 'if True:\n' + ' from os import (\n' + ' makedirs,\n' + ' )', + ), + ), +) +def test_fix_from_import(src, expected): + assert _fix_src(src, py35_plus=False, py36_plus=False) == expected + + def test_main_trivial(): assert main(()) == 0
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 2 }
0.6
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "numpy>=1.16.0", "pandas>=1.0.0" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
-e git+https://github.com/asottile/add-trailing-comma.git@35ce9905dc9422e354c37fe5a941dc8198de4d56#egg=add_trailing_comma exceptiongroup==1.2.2 iniconfig==2.1.0 numpy==2.0.2 packaging==24.2 pandas==2.2.3 pluggy==1.5.0 pytest==8.3.5 python-dateutil==2.9.0.post0 pytz==2025.2 six==1.17.0 tokenize_rt==6.1.0 tomli==2.2.1 tzdata==2025.2
name: add-trailing-comma channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - exceptiongroup==1.2.2 - iniconfig==2.1.0 - numpy==2.0.2 - packaging==24.2 - pandas==2.2.3 - pluggy==1.5.0 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - pytz==2025.2 - six==1.17.0 - tokenize-rt==6.1.0 - tomli==2.2.1 - tzdata==2025.2 prefix: /opt/conda/envs/add-trailing-comma
[ "tests/add_trailing_comma_test.py::test_fix_from_import[from", "tests/add_trailing_comma_test.py::test_fix_from_import[if" ]
[ "tests/add_trailing_comma_test.py::test_fixes_literals[x", "tests/add_trailing_comma_test.py::test_fixes_py35_plus_literals[x", "tests/add_trailing_comma_test.py::test_fix_unhugs[x", "tests/add_trailing_comma_test.py::test_fixes_calls[(\\n" ]
[ "tests/add_trailing_comma_test.py::test_fix_calls_noops[x", "tests/add_trailing_comma_test.py::test_fix_calls_noops[x(1)]", "tests/add_trailing_comma_test.py::test_fix_calls_noops[tuple(\\n", "tests/add_trailing_comma_test.py::test_fix_calls_noops[x(\\n", "tests/add_trailing_comma_test.py::test_fix_calls_noops[x((\\n", "tests/add_trailing_comma_test.py::test_fix_calls_noops[(\\n", "tests/add_trailing_comma_test.py::test_py35_plus_rewrite", "tests/add_trailing_comma_test.py::test_fixes_calls[x(\\n", "tests/add_trailing_comma_test.py::test_fixes_calls[foo()(\\n", "tests/add_trailing_comma_test.py::test_fixes_calls[x({}).y(\\n", "tests/add_trailing_comma_test.py::test_noop_literals[(1,", "tests/add_trailing_comma_test.py::test_noop_literals[[1,", "tests/add_trailing_comma_test.py::test_noop_literals[{1,", "tests/add_trailing_comma_test.py::test_noop_literals[{1:", "tests/add_trailing_comma_test.py::test_noop_literals[if", "tests/add_trailing_comma_test.py::test_noop_tuple_literal_without_braces", "tests/add_trailing_comma_test.py::test_noop_function_defs[def", "tests/add_trailing_comma_test.py::test_fixes_defs[def", "tests/add_trailing_comma_test.py::test_fixes_defs_py36_plus[def", "tests/add_trailing_comma_test.py::test_noop_unhugs[f(x,", "tests/add_trailing_comma_test.py::test_noop_unhugs[f(\\n", "tests/add_trailing_comma_test.py::test_noop_unhugs[f((\\n", "tests/add_trailing_comma_test.py::test_noop_unhugs[f([\\n", "tests/add_trailing_comma_test.py::test_noop_unhugs[textwrap.dedent(\"\"\"\\n", "tests/add_trailing_comma_test.py::test_fix_unhugs[f(\\n", "tests/add_trailing_comma_test.py::test_fix_unhugs[f(a,\\n", "tests/add_trailing_comma_test.py::test_fix_unhugs[def", "tests/add_trailing_comma_test.py::test_fix_unhugs[with", "tests/add_trailing_comma_test.py::test_fix_unhugs[if", "tests/add_trailing_comma_test.py::test_fix_unhugs[{'foo':", "tests/add_trailing_comma_test.py::test_fix_unhugs[f(g(\\n", "tests/add_trailing_comma_test.py::test_fix_unhugs[{\"foo\":", "tests/add_trailing_comma_test.py::test_fix_unhugs[x(\"foo\",", "tests/add_trailing_comma_test.py::test_fix_unhugs[x(\"foo\"\\n", "tests/add_trailing_comma_test.py::test_fix_unhugs[[a()\\n", "tests/add_trailing_comma_test.py::test_fix_unhugs_py3_only[def", "tests/add_trailing_comma_test.py::test_noop_trailing_brace[[]]", "tests/add_trailing_comma_test.py::test_noop_trailing_brace[x", "tests/add_trailing_comma_test.py::test_noop_trailing_brace[y", "tests/add_trailing_comma_test.py::test_noop_trailing_brace[foo.\\\\\\n", "tests/add_trailing_comma_test.py::test_fix_trailing_brace[x", "tests/add_trailing_comma_test.py::test_fix_from_import_noop[from", "tests/add_trailing_comma_test.py::test_main_trivial", "tests/add_trailing_comma_test.py::test_main_noop", "tests/add_trailing_comma_test.py::test_main_changes_a_file", "tests/add_trailing_comma_test.py::test_main_preserves_line_endings", "tests/add_trailing_comma_test.py::test_main_syntax_error", "tests/add_trailing_comma_test.py::test_main_non_utf8_bytes", "tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_args", "tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_star_kwargs", "tests/add_trailing_comma_test.py::test_main_py36_plus_implies_py35_plus", "tests/add_trailing_comma_test.py::test_main_py36_plus_function_trailing_commas" ]
[]
MIT License
3,013
[ "add_trailing_comma.py", "README.md" ]
[ "add_trailing_comma.py", "README.md" ]
pydoit__doit-267
50f0c7eaa8084e3a54c88bfec02e477e419f4c12
2018-09-03 08:39:02
50f0c7eaa8084e3a54c88bfec02e477e419f4c12
coveralls: [![Coverage Status](https://coveralls.io/builds/18799556/badge)](https://coveralls.io/builds/18799556) Coverage increased (+0.0008%) to 99.744% when pulling **ce1be80ae54f53a780066da0592aa8f587cfdf22 on wmvanvliet:sorting** into **50f0c7eaa8084e3a54c88bfec02e477e419f4c12 on pydoit:master**.

schettino72: Oh, and it seems the docs also need to be updated: `cmd-other.rst` currently has the wording:

```
Tasks are listed in alphabetical order, not by order of execution.
```

Instead it should mention that both ways are possible.
diff --git a/AUTHORS b/AUTHORS index 89e6941..045960b 100644 --- a/AUTHORS +++ b/AUTHORS @@ -24,3 +24,4 @@ * Simon Mutch - https://github.com/smutch * Michael Milton - https://github.com/tmiguelt * Mike Pagel - https://github.com/moltob + * Marijn van Vliet - https://github.com/wmvanvliet diff --git a/CHANGES b/CHANGES index da0f984..42ffcbd 100644 --- a/CHANGES +++ b/CHANGES @@ -16,6 +16,7 @@ Changes - Fix #113: `tools.config_changed` deals with nested dictionaries. Using json instead of repr. - Fix #261: help organize command options in sections, and improve formatting. + - Fix #267: `doit list` now has a `--sort` parameter to determine the order in which the tasks are listed. 0.31.1 (*2018-03-18*) diff --git a/doc/cmd_other.rst b/doc/cmd_other.rst index ff8d069..b9a2a3f 100644 --- a/doc/cmd_other.rst +++ b/doc/cmd_other.rst @@ -34,7 +34,8 @@ list ------ *list* is used to show all tasks available in a *dodo* file. -Tasks are listed in alphabetical order, not by order of execution. +Tasks are listed in alphabetical order by default, but *--sort=definition* can +be speficied to sort them in the order in which they appear in the `dodo` file. .. code-block:: console diff --git a/doit/cmd_list.py b/doit/cmd_list.py index 3473231..4652152 100644 --- a/doit/cmd_list.py +++ b/doit/cmd_list.py @@ -55,6 +55,18 @@ opt_template = { 'help': "display entries with template" } +opt_sort = { + 'name': 'sort', + 'short': '', + 'long': 'sort', + 'type': str, + 'choices': [('name', 'sort by task name'), + ('definition', 'list tasks in the order they were defined')], + 'default': 'name', + 'help': ("choose the manner in which the task list is sorted. " + "[default: %(default)s]") + } + class List(DoitCmdBase): doc_purpose = "list tasks from dodo file" @@ -62,7 +74,8 @@ class List(DoitCmdBase): doc_description = None cmd_options = (opt_listall, opt_list_quiet, opt_list_status, - opt_list_private, opt_list_dependencies, opt_template) + opt_list_private, opt_list_dependencies, opt_template, + opt_sort) STATUS_MAP = {'ignore': 'I', 'up-to-date': 'U', 'run': 'R', 'error': 'E'} @@ -113,10 +126,9 @@ class List(DoitCmdBase): return print_list - def _execute(self, subtasks=False, quiet=True, status=False, - private=False, list_deps=False, template=None, pos_args=None): - """List task generators, in the order they were defined. - """ + def _execute(self, subtasks=False, quiet=True, status=False, private=False, + list_deps=False, template=None, sort='name', pos_args=None): + """List task generators""" filter_tasks = pos_args # dict of all tasks tasks = dict([(t.name, t) for t in self.task_list]) @@ -144,7 +156,13 @@ class List(DoitCmdBase): template = '{status} ' + template template += '\n' + # sort list of tasks + if sort == 'name': + print_list = sorted(print_list) + elif sort == 'definition': + pass # task list is already sorted in order of definition + # print list of tasks - for task in sorted(print_list): + for task in print_list: self._print_task(template, task, status, list_deps, tasks) return 0
Use natural order of task names when listed

```bash
$ doit list
buildhtml   Build HTML documentation.
buildpdf    Build PDF documentation.
publish     Publish documentation to FTP.
spell       Spell document source files.
```

Tasks are currently listed in alphabetical order. As seen above, this can look quite confusing, and users would appreciate the natural order:

```bash
$ doit list
spell       Spell document source files.
buildhtml   Build HTML documentation.
buildpdf    Build PDF documentation.
publish     Publish documentation to FTP.
```

Changing the order of listed items is not likely to break anything, and I would prefer to make it the default. The author has good control over the order of task definitions in the dodo file. If alphabetical order is (sometimes) required, we could add a key "listorder" to `DOIT_CONFIG`. I had a similar issue with Click (https://github.com/pallets/click/issues/513) and managed to resolve it.
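A tiny sketch, using invented task names, of the behaviour the patch above settles on: `doit list` stays alphabetical by default, and the new `--sort=definition` option preserves the order in which the dodo file defined the tasks.

```python
# Tasks in the order a dodo.py might define them.
defined = ["spell", "buildhtml", "buildpdf", "publish"]

def ordered(tasks, sort="name"):
    # Mirrors the new `sort` option: 'name' alphabetizes, 'definition'
    # keeps the list exactly as collected from the dodo file.
    if sort == "name":
        return sorted(tasks)
    elif sort == "definition":
        return list(tasks)
    raise ValueError("sort must be 'name' or 'definition'")

print(ordered(defined))                     # ['buildhtml', 'buildpdf', 'publish', 'spell']
print(ordered(defined, sort="definition"))  # ['spell', 'buildhtml', 'buildpdf', 'publish']
```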
pydoit/doit
diff --git a/tests/test_cmd_list.py b/tests/test_cmd_list.py index 6b35eba..9df6942 100644 --- a/tests/test_cmd_list.py +++ b/tests/test_cmd_list.py @@ -163,3 +163,23 @@ class TestCmdList(object): cmd_list._execute() got = [line.strip() for line in output.getvalue().split('\n') if line] assert 't做' == got[0] + + def testSortByName(self): + # by default, the task list should be ordered by name + task_list = list(tasks_sample()) + output = StringIO() + cmd_list = CmdFactory(List, outstream=output, task_list=task_list) + cmd_list._execute() + got = [line.strip() for line in output.getvalue().split('\n') if line] + expected = ['g1', 't1', 't2', 't3'] + assert expected == got + + def testSortByDefinition(self): + # test sorting task list by order of definition + task_list = list(tasks_sample()) + output = StringIO() + cmd_list = CmdFactory(List, outstream=output, task_list=task_list) + cmd_list._execute(sort='definition') + got = [line.strip() for line in output.getvalue().split('\n') if line] + expected = ['t1', 't2', 'g1', 't3'] + assert expected == got
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 4 }
0.31
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-ignore-flaky" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///croot/attrs_1668696182826/work certifi @ file:///croot/certifi_1671487769961/work/certifi cloudpickle==2.2.1 -e git+https://github.com/pydoit/doit.git@50f0c7eaa8084e3a54c88bfec02e477e419f4c12#egg=doit flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work packaging @ file:///croot/packaging_1671697413597/work pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyinotify==0.9.6 pytest==7.1.2 pytest-ignore-flaky==2.1.0 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work typing_extensions @ file:///croot/typing_extensions_1669924550328/work zipp @ file:///croot/zipp_1672387121353/work
name: doit channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=22.1.0=py37h06a4308_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - flit-core=3.6.0=pyhd3eb1b0_0 - importlib-metadata=4.11.3=py37h06a4308_0 - importlib_metadata=4.11.3=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=22.0=py37h06a4308_0 - pip=22.3.1=py37h06a4308_0 - pluggy=1.0.0=py37h06a4308_1 - py=1.11.0=pyhd3eb1b0_0 - pytest=7.1.2=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py37h06a4308_0 - typing_extensions=4.4.0=py37h06a4308_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zipp=3.11.0=py37h06a4308_0 - zlib=1.2.13=h5eee18b_1 - pip: - cloudpickle==2.2.1 - pyinotify==0.9.6 - pytest-ignore-flaky==2.1.0 prefix: /opt/conda/envs/doit
[ "tests/test_cmd_list.py::TestCmdList::testSortByDefinition" ]
[]
[ "tests/test_cmd_list.py::TestCmdList::testQuiet", "tests/test_cmd_list.py::TestCmdList::testDoc", "tests/test_cmd_list.py::TestCmdList::testCustomTemplate", "tests/test_cmd_list.py::TestCmdList::testDependencies", "tests/test_cmd_list.py::TestCmdList::testSubTask", "tests/test_cmd_list.py::TestCmdList::testFilter", "tests/test_cmd_list.py::TestCmdList::testFilterSubtask", "tests/test_cmd_list.py::TestCmdList::testFilterAll", "tests/test_cmd_list.py::TestCmdList::testStatus", "tests/test_cmd_list.py::TestCmdList::testErrorStatus", "tests/test_cmd_list.py::TestCmdList::testStatus_result_dep_bug_gh44", "tests/test_cmd_list.py::TestCmdList::testNoPrivate", "tests/test_cmd_list.py::TestCmdList::testWithPrivate", "tests/test_cmd_list.py::TestCmdList::testListInvalidTask", "tests/test_cmd_list.py::TestCmdList::test_unicode_name", "tests/test_cmd_list.py::TestCmdList::testSortByName" ]
[]
MIT License
3,015
[ "doc/cmd_other.rst", "CHANGES", "doit/cmd_list.py", "AUTHORS" ]
[ "doc/cmd_other.rst", "CHANGES", "doit/cmd_list.py", "AUTHORS" ]
cloudtools__stacker-657
cd379d01f089c226e347c256abe75b90268ae144
2018-09-03 18:15:04
cd379d01f089c226e347c256abe75b90268ae144
diff --git a/examples/cross-account/stacker.yaml b/examples/cross-account/stacker.yaml index 123a509..6c39662 100644 --- a/examples/cross-account/stacker.yaml +++ b/examples/cross-account/stacker.yaml @@ -14,7 +14,5 @@ stacks: profile: master template_path: templates/stacker-bucket.yaml variables: - Roles: - # Change these to the correct AWS account IDs - - arn:aws:iam::<prod account id>:role/Stacker - - arn:aws:iam::<stage account id>:role/Stacker + # Change these to the correct AWS account IDs, must be comma seperated list + Roles: arn:aws:iam::<prod account id>:role/Stacker, arn:aws:iam::<stage account id>:role/Stacker diff --git a/stacker/lookups/handlers/kms.py b/stacker/lookups/handlers/kms.py index 9ff6a52..b5f654d 100644 --- a/stacker/lookups/handlers/kms.py +++ b/stacker/lookups/handlers/kms.py @@ -52,5 +52,12 @@ def handler(value, **kwargs): region, value = value.split("@", 1) kms = get_session(region).client('kms') - decoded = codecs.decode(value.encode(), 'base64').decode() + + # encode str value as an utf-8 bytestring for use with codecs.decode. + value = value.encode('utf-8') + + # get raw but still encrypted value from base64 version. + decoded = codecs.decode(value, 'base64') + + # decrypt and return the plain text raw value. return kms.decrypt(CiphertextBlob=decoded)["Plaintext"] diff --git a/stacker/providers/aws/default.py b/stacker/providers/aws/default.py index a8ad679..885f744 100644 --- a/stacker/providers/aws/default.py +++ b/stacker/providers/aws/default.py @@ -397,6 +397,8 @@ def generate_cloudformation_args(stack_name, parameters, tags, template, with create_change_set. service_role (str, optional): An optional service role to use when interacting with Cloudformation. + stack_policy (:class:`stacker.providers.base.Template`): A template + object representing a stack policy. change_set_name (str, optional): An optional change set name to use with create_change_set. @@ -434,6 +436,16 @@ def generate_cloudformation_args(stack_name, parameters, tags, template, def generate_stack_policy_args(stack_policy=None): + """ Converts a stack policy object into keyword args. + + Args: + stack_policy (:class:`stacker.providers.base.Template`): A template + object representing a stack policy. + + Returns: + dict: A dictionary of keyword arguments to be used elsewhere. + """ + args = {} if stack_policy: logger.debug("Stack has a stack policy") @@ -666,6 +678,8 @@ class Provider(BaseProvider): tags (list): A list of dictionaries that defines the tags that should be applied to the Cloudformation stack. force_change_set (bool): Whether or not to force change set use. + stack_policy (:class:`stacker.providers.base.Template`): A template + object representing a stack policy. """ logger.debug("Attempting to create stack %s:.", fqn) @@ -813,6 +827,8 @@ class Provider(BaseProvider): not. False will follow the behavior of the provider. force_change_set (bool): A flag that indicates whether the update must be executed with a change set. + stack_policy (:class:`stacker.providers.base.Template`): A template + object representing a stack policy. 
""" logger.debug("Attempting to update stack %s:", fqn) logger.debug(" parameters: %s", parameters) @@ -824,11 +840,28 @@ class Provider(BaseProvider): update_method = self.select_update_method(force_interactive, force_change_set) - return update_method(fqn, template, old_parameters, parameters, tags, - stack_policy=stack_policy, **kwargs) + return update_method(fqn, template, old_parameters, parameters, + stack_policy=stack_policy, tags=tags, **kwargs) + + def deal_with_changeset_stack_policy(self, fqn, stack_policy): + """ Set a stack policy when using changesets. + + ChangeSets don't allow you to set stack policies in the same call to + update them. This sets it before executing the changeset if the + stack policy is passed in. + + Args: + stack_policy (:class:`stacker.providers.base.Template`): A template + object representing a stack policy. + """ + if stack_policy: + kwargs = generate_stack_policy_args(stack_policy) + kwargs["StackName"] = fqn + logger.debug("Setting stack policy on %s.", fqn) + self.cloudformation.set_stack_policy(**kwargs) def interactive_update_stack(self, fqn, template, old_parameters, - parameters, tags, stack_policy=None, + parameters, stack_policy, tags, **kwargs): """Update a Cloudformation stack in interactive mode. @@ -840,6 +873,8 @@ class Provider(BaseProvider): parameter list on the existing Cloudformation stack. parameters (list): A list of dictionaries that defines the parameter list to be applied to the Cloudformation stack. + stack_policy (:class:`stacker.providers.base.Template`): A template + object representing a stack policy. tags (list): A list of dictionaries that defines the tags that should be applied to the Cloudformation stack. """ @@ -878,19 +913,15 @@ class Provider(BaseProvider): finally: ui.unlock() - # ChangeSets don't support specifying a stack policy inline, like - # CreateStack/UpdateStack, so we just SetStackPolicy if there is one. - if stack_policy: - kwargs = generate_stack_policy_args(stack_policy) - kwargs["StackName"] = fqn - self.cloudformation.set_stack_policy(**kwargs) + self.deal_with_changeset_stack_policy(fqn, stack_policy) self.cloudformation.execute_change_set( ChangeSetName=change_set_id, ) def noninteractive_changeset_update(self, fqn, template, old_parameters, - parameters, tags, **kwargs): + parameters, stack_policy, tags, + **kwargs): """Update a Cloudformation stack using a change set. This is required for stacks with a defined Transform (i.e. SAM), as the @@ -904,6 +935,8 @@ class Provider(BaseProvider): parameter list on the existing Cloudformation stack. parameters (list): A list of dictionaries that defines the parameter list to be applied to the Cloudformation stack. + stack_policy (:class:`stacker.providers.base.Template`): A template + object representing a stack policy. tags (list): A list of dictionaries that defines the tags that should be applied to the Cloudformation stack. """ @@ -914,6 +947,8 @@ class Provider(BaseProvider): 'UPDATE', service_role=self.service_role, **kwargs ) + self.deal_with_changeset_stack_policy(fqn, stack_policy) + self.cloudformation.execute_change_set( ChangeSetName=change_set_id, ) @@ -932,6 +967,8 @@ class Provider(BaseProvider): parameter list to be applied to the Cloudformation stack. tags (list): A list of dictionaries that defines the tags that should be applied to the Cloudformation stack. + stack_policy (:class:`stacker.providers.base.Template`): A template + object representing a stack policy. """ logger.debug("Using default provider mode for %s.", fqn)
Update of a Raw Blueprint with Transform fails

Hi, I believe I have discovered a bug in Stacker. The bug seems to be triggered by trying to update a raw template which uses Transforms. I'm using master at the time of this report (d1353b3a942731ada1532f0e1422d64673988c1a) with python 3.6.5 on macOS 10.13.6. Stacker is installed in a virtualenv.

Config to reproduce:

config.yaml:

```yaml
---
namespace: 'whatever'
stacks:
  - name: 'dummy'
    template_path: dummy.json
```

dummy.json:

```json
{
  "Transform": "AWS::Serverless-2016-10-31",
  "Resources": {
    "DummyParameter": {
      "Type": "AWS::SSM::Parameter",
      "Properties": {
        "Type": "String",
        "Value": "foobar"
      }
    }
  }
}
```

Steps to reproduce: run `build` twice:

```
% stacker build config.yml
[2018-08-28T15:56:44] Using default AWS provider mode
[2018-08-28T15:56:50] dummy: submitted (creating new stack)
[2018-08-28T15:57:20] dummy: complete (creating new stack)
```

```
% stacker build config.yml
[2018-08-28T16:00:56] Using default AWS provider mode
[2018-08-28T16:00:57] create_change_set() got an unexpected keyword argument 'stack_policy'
Traceback (most recent call last):
  File "<stripped>/stacker/venv/lib/python3.6/site-packages/stacker/plan.py", line 93, in _run_once
    status = self.fn(self.stack, status=self.status)
  File "<stripped>/stacker/venv/lib/python3.6/site-packages/stacker/actions/build.py", line 353, in _launch_stack
    stack_policy=stack_policy,
  File "<stripped>/stacker/venv/lib/python3.6/site-packages/stacker/providers/aws/default.py", line 805, in update_stack
    stack_policy=stack_policy, **kwargs)
  File "<stripped>/stacker/venv/lib/python3.6/site-packages/stacker/providers/aws/default.py", line 891, in noninteractive_changeset_update
    'UPDATE', service_role=self.service_role, **kwargs
TypeError: create_change_set() got an unexpected keyword argument 'stack_policy'
[2018-08-28T16:00:57] dummy: failed (create_change_set() got an unexpected keyword argument 'stack_policy')
[2018-08-28T16:00:57] The following steps failed: dummy
```
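A stripped-down illustration, with hypothetical function names, of the failure shown in the traceback above: forwarding `stack_policy` inside `**kwargs` to a callee that has no such parameter raises `TypeError`, which is why the patch makes `stack_policy` an explicit argument and handles it separately (via `set_stack_policy`) instead of passing it along to the change-set call.

```python
def create_change_set(fqn, template, parameters, tags):
    # Stand-in for the provider call that does not accept `stack_policy`.
    return {"StackName": fqn}

def update_before(fqn, **kwargs):
    # Pre-patch shape: everything, including stack_policy, is forwarded.
    return create_change_set(fqn, **kwargs)

def update_after(fqn, stack_policy=None, **kwargs):
    # Post-patch shape: stack_policy is consumed here and never forwarded.
    return create_change_set(fqn, **kwargs)

common = {"template": "t", "parameters": [], "tags": []}

try:
    update_before("dummy", stack_policy="{}", **common)
except TypeError as exc:
    print(exc)  # create_change_set() got an unexpected keyword argument 'stack_policy'

print(update_after("dummy", stack_policy="{}", **common))  # {'StackName': 'dummy'}
```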
cloudtools/stacker
diff --git a/stacker/tests/providers/aws/test_default.py b/stacker/tests/providers/aws/test_default.py index d1de9a7..fcc1404 100644 --- a/stacker/tests/providers/aws/test_default.py +++ b/stacker/tests/providers/aws/test_default.py @@ -500,6 +500,64 @@ class TestProviderDefaultMode(unittest.TestCase): self.assertFalse( self.provider.prepare_stack_for_update(stack, [])) + def test_noninteractive_changeset_update_no_stack_policy(self): + stack_name = "MockStack" + + self.stubber.add_response( + "create_change_set", + {'Id': 'CHANGESETID', 'StackId': 'STACKID'} + ) + changes = [] + changes.append(generate_change()) + + self.stubber.add_response( + "describe_change_set", + generate_change_set_response( + status="CREATE_COMPLETE", execution_status="AVAILABLE", + changes=changes, + ) + ) + + self.stubber.add_response("execute_change_set", {}) + + with self.stubber: + self.provider.noninteractive_changeset_update( + fqn=stack_name, + template=Template(url="http://fake.template.url.com/"), + old_parameters=[], + parameters=[], stack_policy=None, tags=[], + ) + + def test_noninteractive_changeset_update_with_stack_policy(self): + stack_name = "MockStack" + + self.stubber.add_response( + "create_change_set", + {'Id': 'CHANGESETID', 'StackId': 'STACKID'} + ) + changes = [] + changes.append(generate_change()) + + self.stubber.add_response( + "describe_change_set", + generate_change_set_response( + status="CREATE_COMPLETE", execution_status="AVAILABLE", + changes=changes, + ) + ) + + self.stubber.add_response("set_stack_policy", {}) + + self.stubber.add_response("execute_change_set", {}) + + with self.stubber: + self.provider.noninteractive_changeset_update( + fqn=stack_name, + template=Template(url="http://fake.template.url.com/"), + old_parameters=[], + parameters=[], stack_policy=Template(body="{}"), tags=[], + ) + class TestProviderInteractiveMode(unittest.TestCase): def setUp(self): @@ -516,7 +574,8 @@ class TestProviderInteractiveMode(unittest.TestCase): self.assertEqual(p.replacements_only, replacements) @patch("stacker.providers.aws.default.ask_for_approval") - def test_update_stack_execute_success(self, patched_approval): + def test_update_stack_execute_success_no_stack_policy(self, + patched_approval): stack_name = "my-fake-stack" self.stubber.add_response( @@ -550,6 +609,45 @@ class TestProviderInteractiveMode(unittest.TestCase): self.assertEqual(patched_approval.call_count, 1) + @patch("stacker.providers.aws.default.ask_for_approval") + def test_update_stack_execute_success_with_stack_policy(self, + patched_approval): + stack_name = "my-fake-stack" + + self.stubber.add_response( + "create_change_set", + {'Id': 'CHANGESETID', 'StackId': 'STACKID'} + ) + changes = [] + changes.append(generate_change()) + + self.stubber.add_response( + "describe_change_set", + generate_change_set_response( + status="CREATE_COMPLETE", execution_status="AVAILABLE", + changes=changes, + ) + ) + + self.stubber.add_response("set_stack_policy", {}) + + self.stubber.add_response("execute_change_set", {}) + + with self.stubber: + self.provider.update_stack( + fqn=stack_name, + template=Template(url="http://fake.template.url.com/"), + old_parameters=[], + parameters=[], tags=[], + stack_policy=Template(body="{}"), + ) + + patched_approval.assert_called_with(full_changeset=changes, + params_diff=[], + include_verbose=True) + + self.assertEqual(patched_approval.call_count, 1) + def test_select_update_method(self): for i in [[{'force_interactive': False, 'force_change_set': False},
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_git_commit_hash", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 3 }
1.4
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "mock", "moto", "testfixtures", "flake8", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
awacs==2.5.0 boto3==1.7.84 botocore==1.10.84 certifi==2025.1.31 cffi==1.17.1 cfn-flip==1.3.0 charset-normalizer==3.4.1 click==8.1.8 cryptography==44.0.2 docutils==0.21.2 exceptiongroup==1.2.2 flake8==7.2.0 formic2==1.0.3 future==1.0.0 gitdb2==2.0.6 GitPython==2.1.15 idna==3.10 iniconfig==2.1.0 Jinja2==3.1.6 jmespath==0.10.0 MarkupSafe==3.0.2 mccabe==0.7.0 mock==5.2.0 moto==5.1.2 packaging==24.2 pluggy==1.5.0 pycodestyle==2.13.0 pycparser==2.22 pyflakes==3.3.2 pytest==8.3.5 python-dateutil==2.9.0.post0 PyYAML==6.0.2 requests==2.32.3 responses==0.25.7 s3transfer==0.1.13 schematics==2.0.1 six==1.17.0 smmap==5.0.2 smmap2==3.0.1 -e git+https://github.com/cloudtools/stacker.git@cd379d01f089c226e347c256abe75b90268ae144#egg=stacker testfixtures==8.3.0 tomli==2.2.1 troposphere==4.9.0 urllib3==1.26.20 Werkzeug==3.1.3 xmltodict==0.14.2
name: stacker channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - awacs==2.5.0 - boto3==1.7.84 - botocore==1.10.84 - certifi==2025.1.31 - cffi==1.17.1 - cfn-flip==1.3.0 - charset-normalizer==3.4.1 - click==8.1.8 - cryptography==44.0.2 - docutils==0.21.2 - exceptiongroup==1.2.2 - flake8==7.2.0 - formic2==1.0.3 - future==1.0.0 - gitdb2==2.0.6 - gitpython==2.1.15 - idna==3.10 - iniconfig==2.1.0 - jinja2==3.1.6 - jmespath==0.10.0 - markupsafe==3.0.2 - mccabe==0.7.0 - mock==5.2.0 - moto==5.1.2 - packaging==24.2 - pluggy==1.5.0 - pycodestyle==2.13.0 - pycparser==2.22 - pyflakes==3.3.2 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - pyyaml==6.0.2 - requests==2.32.3 - responses==0.25.7 - s3transfer==0.1.13 - schematics==2.0.1 - six==1.17.0 - smmap==5.0.2 - smmap2==3.0.1 - testfixtures==8.3.0 - tomli==2.2.1 - troposphere==4.9.0 - urllib3==1.26.20 - werkzeug==3.1.3 - xmltodict==0.14.2 prefix: /opt/conda/envs/stacker
[ "stacker/tests/providers/aws/test_default.py::TestProviderDefaultMode::test_noninteractive_changeset_update_no_stack_policy", "stacker/tests/providers/aws/test_default.py::TestProviderDefaultMode::test_noninteractive_changeset_update_with_stack_policy" ]
[ "stacker/tests/providers/aws/test_default.py::TestMethods::test_ask_for_approval", "stacker/tests/providers/aws/test_default.py::TestMethods::test_ask_for_approval_with_params_diff", "stacker/tests/providers/aws/test_default.py::TestMethods::test_create_change_set_bad_execution_status", "stacker/tests/providers/aws/test_default.py::TestMethods::test_create_change_set_stack_did_not_change", "stacker/tests/providers/aws/test_default.py::TestMethods::test_create_change_set_unhandled_failed_status", "stacker/tests/providers/aws/test_default.py::TestMethods::test_generate_cloudformation_args", "stacker/tests/providers/aws/test_default.py::TestMethods::test_requires_replacement", "stacker/tests/providers/aws/test_default.py::TestMethods::test_summarize_params_diff", "stacker/tests/providers/aws/test_default.py::TestMethods::test_wait_till_change_set_complete_failed", "stacker/tests/providers/aws/test_default.py::TestMethods::test_wait_till_change_set_complete_success" ]
[ "stacker/tests/providers/aws/test_default.py::TestProviderDefaultMode::test_get_stack_stack_does_not_exist", "stacker/tests/providers/aws/test_default.py::TestProviderDefaultMode::test_get_stack_stack_exists", "stacker/tests/providers/aws/test_default.py::TestProviderDefaultMode::test_prepare_stack_for_update_bad_tags", "stacker/tests/providers/aws/test_default.py::TestProviderDefaultMode::test_prepare_stack_for_update_completed", "stacker/tests/providers/aws/test_default.py::TestProviderDefaultMode::test_prepare_stack_for_update_disallowed", "stacker/tests/providers/aws/test_default.py::TestProviderDefaultMode::test_prepare_stack_for_update_in_progress", "stacker/tests/providers/aws/test_default.py::TestProviderDefaultMode::test_prepare_stack_for_update_non_recreatable", "stacker/tests/providers/aws/test_default.py::TestProviderDefaultMode::test_prepare_stack_for_update_recreate", "stacker/tests/providers/aws/test_default.py::TestProviderDefaultMode::test_select_update_method", "stacker/tests/providers/aws/test_default.py::TestProviderInteractiveMode::test_select_update_method", "stacker/tests/providers/aws/test_default.py::TestProviderInteractiveMode::test_successful_init", "stacker/tests/providers/aws/test_default.py::TestProviderInteractiveMode::test_update_stack_execute_success_no_stack_policy", "stacker/tests/providers/aws/test_default.py::TestProviderInteractiveMode::test_update_stack_execute_success_with_stack_policy" ]
[]
BSD 2-Clause "Simplified" License
3,016
[ "stacker/lookups/handlers/kms.py", "stacker/providers/aws/default.py", "examples/cross-account/stacker.yaml" ]
[ "stacker/lookups/handlers/kms.py", "stacker/providers/aws/default.py", "examples/cross-account/stacker.yaml" ]
Pylons__webob-372
d2b3a966f577918352a7d2abceebfe0fa7bf9dc8
2018-09-03 18:45:44
c0dda6a40b6fd5dd90f7d6c61556cb489ac2b4e8
diff --git a/CHANGES.txt b/CHANGES.txt index fd34d21..7ccc765 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -33,3 +33,10 @@ Bugfix MIMEAccept to behave more like the old version. See https://github.com/Pylons/webob/pull/356 +- ``acceptparse.AcceptValidHeader``, ``acceptparse.AcceptInvalidHeader``, and + ``acceptparse.AcceptNoHeader`` will now always ignore offers that do not + match the required media type grammar when calling ``.acceptable_offers()``. + Previous versions raised a ``ValueError`` for invalid offers in + ``AcceptValidHeader`` and returned them as acceptable in the others. + See https://github.com/Pylons/webob/pull/372 + diff --git a/src/webob/acceptparse.py b/src/webob/acceptparse.py index d1d9d6f..0000667 100644 --- a/src/webob/acceptparse.py +++ b/src/webob/acceptparse.py @@ -408,6 +408,26 @@ class Accept(object): ) return generator(value=value) + def _parse_and_normalize_offers(self, offers): + """ + Throw out any offers that do not match the media type ABNF. + + :return: A list of offers split into the format ``[offer_index, + offer_type_subtype, offer_media_type_params]``. + + """ + lowercased_offers_parsed = [] + for index, offer in enumerate(offers): + match = self.media_type_compiled_re.match(offer.lower()) + # we're willing to try to match any offer that matches the + # media type grammar can parse, but we'll throw out anything + # that doesn't fit the correct syntax - this is not saying that + # the media type is actually a real media type, just that it looks + # like one + if match: + lowercased_offers_parsed.append([index] + list(match.groups())) + return lowercased_offers_parsed + class AcceptValidHeader(Accept): """ @@ -771,6 +791,8 @@ class AcceptValidHeader(Accept): This uses the matching rules described in :rfc:`RFC 7231, section 5.3.2 <7231#section-5.3.2>`. + Any offers that do not match the media type grammar will be ignored. + :param offers: ``iterable`` of ``str`` media types (media types can include media type parameters) :return: A list of tuples of the form (media type, qvalue), in @@ -793,21 +815,12 @@ class AcceptValidHeader(Accept): for media_range, qvalue, media_type_params, extension_params in parsed ] - lowercased_offers = [offer.lower() for offer in offers] - - lowercased_offers_parsed = [] - for offer in lowercased_offers: - match = self.media_type_compiled_re.match(offer) - # The regex here is only used for parsing, and not intended to - # validate the offer - if not match: - raise ValueError(repr(offer) + ' is not a media type.') - lowercased_offers_parsed.append(match.groups()) + lowercased_offers_parsed = self._parse_and_normalize_offers(offers) acceptable_offers_n_quality_factors = {} for ( - offer_index, (offer_type_subtype, offer_media_type_params) - ) in enumerate(lowercased_offers_parsed): + offer_index, offer_type_subtype, offer_media_type_params + ) in lowercased_offers_parsed: offer_media_type_params = self._parse_media_type_params( media_type_params_segment=offer_media_type_params, ) @@ -1242,6 +1255,8 @@ class _AcceptInvalidOrNoHeader(Accept): """ Return the offers that are acceptable according to the header. + Any offers that do not match the media type grammar will be ignored. + :param offers: ``iterable`` of ``str`` media types (media types can include media type parameters) :return: When the header is invalid, or there is no ``Accept`` header @@ -1250,7 +1265,14 @@ class _AcceptInvalidOrNoHeader(Accept): where each offer in `offers` is paired with the qvalue of 1.0, in the same order as in `offers`. 
""" - return [(offer, 1.0) for offer in offers] + return [ + (offers[offer_index], 1.0) + for offer_index, _, _ + # avoid returning any offers that don't match the grammar so + # that the return values here are consistent with what would be + # returned in AcceptValidHeader + in self._parse_and_normalize_offers(offers) + ] def best_match(self, offers, default_match=None): """
validate media types consistently in acceptable_offers

```python
>>> create_accept_header('').acceptable_offers(['foo'])
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/Users/michael/work/oss/pyramid/env/lib/python3.6/site-packages/webob/acceptparse.py", line 804, in acceptable_offers
    raise ValueError(repr(offer) + ' is not a media type.')
ValueError: 'foo' is not a media type.
>>> create_accept_header('*/*').acceptable_offers(['foo'])
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/Users/michael/work/oss/pyramid/env/lib/python3.6/site-packages/webob/acceptparse.py", line 804, in acceptable_offers
    raise ValueError(repr(offer) + ' is not a media type.')
ValueError: 'foo' is not a media type.
>>> create_accept_header('invalid/x/y').acceptable_offers(['foo'])
[('foo', 1.0)]
>>> create_accept_header(None).acceptable_offers(['foo'])
[('foo', 1.0)]
```

It's not ok to raise here in some scenarios and not in others. The offer is server-side, and webob should handle it consistently by always validating those parameters the same way. I went backward and found out that webob < 1.5 actually did this consistently; after that, it stopped calling `_check_offer` in `MIMEAccept` but kept calling it in `MIMENilAccept`.
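An illustrative sketch, using a deliberately simplified media-type pattern rather than WebOb's real grammar, of the behaviour the patch above converges on: offers that do not parse as `type/subtype` are silently dropped on every code path, so the valid-header, invalid-header and no-header cases all agree instead of some of them raising `ValueError`.

```python
import re

# Simplified stand-in for the media-type grammar; WebOb's real regex is
# stricter (token characters, optional parameters, wildcards, and so on).
MEDIA_TYPE = re.compile(r"^[\w.+-]+/[\w.+*-]+$")

def acceptable_offers_fallback(offers):
    # Mirrors the no-header / invalid-header case after the fix: malformed
    # offers such as 'foo' are ignored instead of being returned or raising.
    return [(offer, 1.0) for offer in offers if MEDIA_TYPE.match(offer)]

print(acceptable_offers_fallback(["text/html", "foo"]))  # [('text/html', 1.0)]
```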
Pylons/webob
diff --git a/tests/test_acceptparse.py b/tests/test_acceptparse.py index e9f3935..b8c0620 100644 --- a/tests/test_acceptparse.py +++ b/tests/test_acceptparse.py @@ -909,16 +909,30 @@ class TestAcceptValidHeader(object): instance = AcceptValidHeader(header_value=header_value) assert instance.accepts_html is returned - @pytest.mark.parametrize('offers', [ - ['text/html;p=1;q=0.5'], - ['text/html;q=0.5'], - ['text/html;q=0.5;e=1'], - ['text/html', 'text/plain;p=1;q=0.5;e=1'], + @pytest.mark.parametrize('header, offers, expected_returned', [ + (AcceptValidHeader('text/html'), ['text/html;p=1;q=0.5'], []), + (AcceptValidHeader('text/html'), ['text/html;q=0.5'], []), + (AcceptValidHeader('text/html'), ['text/html;q=0.5;e=1'], []), + ( + AcceptValidHeader('text/html'), + ['text/html', 'text/plain;p=1;q=0.5;e=1', 'foo'], + [('text/html', 1.0)], + ), + ( + AcceptInvalidHeader('foo'), + ['text/html', 'text/plain;p=1;q=0.5;e=1', 'foo'], + [('text/html', 1.0)], + ), + ( + AcceptNoHeader(), + ['text/html', 'text/plain;p=1;q=0.5;e=1', 'foo'], + [('text/html', 1.0)], + ), ]) - def test_acceptable_offers__invalid_offers(self, offers): - instance = AcceptValidHeader(header_value='text/html') - with pytest.raises(ValueError): - instance.acceptable_offers(offers=offers) + def test_acceptable_offers__invalid_offers( + self, header, offers, expected_returned, + ): + assert header.acceptable_offers(offers=offers) == expected_returned @pytest.mark.parametrize('header_value, offers, expected_returned', [ # RFC 7231, section 5.3.2
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 2 }
1.8
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "coverage", "flake8" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
coverage==7.8.0 exceptiongroup==1.2.2 flake8==7.2.0 iniconfig==2.1.0 mccabe==0.7.0 packaging==24.2 pluggy==1.5.0 pycodestyle==2.13.0 pyflakes==3.3.2 pytest==8.3.5 tomli==2.2.1 -e git+https://github.com/Pylons/webob.git@d2b3a966f577918352a7d2abceebfe0fa7bf9dc8#egg=WebOb
name: webob channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.8.0 - exceptiongroup==1.2.2 - flake8==7.2.0 - iniconfig==2.1.0 - mccabe==0.7.0 - packaging==24.2 - pluggy==1.5.0 - pycodestyle==2.13.0 - pyflakes==3.3.2 - pytest==8.3.5 - tomli==2.2.1 prefix: /opt/conda/envs/webob
[ "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__invalid_offers[header0-offers0-expected_returned0]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__invalid_offers[header1-offers1-expected_returned1]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__invalid_offers[header2-offers2-expected_returned2]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__invalid_offers[header3-offers3-expected_returned3]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__invalid_offers[header4-offers4-expected_returned4]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__invalid_offers[header5-offers5-expected_returned5]" ]
[]
[ "tests/test_acceptparse.py::Test_ItemNWeightRe::test_invalid[q=]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_invalid[q=1]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_invalid[;q]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_invalid[;q=]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_invalid[;q=1]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_invalid[foo;]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_invalid[foo;q]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_invalid[foo;q1]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_invalid[foo;q=]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_invalid[foo;q=-1]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_invalid[foo;q=2]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_invalid[foo;q=1.001]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_invalid[foo;q=0.0001]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_invalid[foo;q=00]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_invalid[foo;q=01]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_invalid[foo;q=00.1]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_invalid[foo,q=0.1]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_invalid[foo;q", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_invalid[foo;q=", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_valid[foo-groups0]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_valid[foo;q=0-groups1]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_valid[foo;q=0.0-groups2]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_valid[foo;q=0.00-groups3]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_valid[foo;q=0.000-groups4]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_valid[foo;q=1-groups5]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_valid[foo;q=1.0-groups6]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_valid[foo;q=1.00-groups7]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_valid[foo;q=1.000-groups8]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_valid[foo;q=0.1-groups9]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_valid[foo;q=0.87-groups10]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_valid[foo;q=0.382-groups11]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_valid[foo;Q=0.382-groups12]", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_valid[foo", "tests/test_acceptparse.py::Test_ItemNWeightRe::test_valid[foo;", "tests/test_acceptparse.py::Test_List1OrMoreCompiledRe::test_invalid[,]", "tests/test_acceptparse.py::Test_List1OrMoreCompiledRe::test_invalid[,", "tests/test_acceptparse.py::Test_List1OrMoreCompiledRe::test_invalid[foo", "tests/test_acceptparse.py::Test_List1OrMoreCompiledRe::test_invalid[", "tests/test_acceptparse.py::Test_List1OrMoreCompiledRe::test_invalid[,foo", "tests/test_acceptparse.py::Test_List1OrMoreCompiledRe::test_invalid[\\tfoo", "tests/test_acceptparse.py::Test_List1OrMoreCompiledRe::test_invalid[\\t,foo", "tests/test_acceptparse.py::Test_List1OrMoreCompiledRe::test_valid[foo,bar]", "tests/test_acceptparse.py::Test_List1OrMoreCompiledRe::test_valid[foo,", "tests/test_acceptparse.py::Test_List1OrMoreCompiledRe::test_valid[foo", "tests/test_acceptparse.py::Test_List1OrMoreCompiledRe::test_valid[,foo", "tests/test_acceptparse.py::Test_List1OrMoreCompiledRe::test_valid[,\\t", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[,", 
"tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[noslash]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[/]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[/html]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;param]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;param=]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;param=val;]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;param=\\x19]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;param=\"]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;param=\\\\]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;param=\\x7f]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;param=\"\\\\\"]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;param=\"\\\\\\\\\\\\\"]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;param=\"\\\\\\\\\"\"]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;param=\"\\\\\\x19\"]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;param=\"\\\\\\x7f\"]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;q]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;q=]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;q=-1]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;q=2]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;q=1.001]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;q=0.0001]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;q=00]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;q=01]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;q=00.1]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html,q=0.1]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;q", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;q=", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;q=1;]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;param;q=1]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;q=1;extparam;]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;q=1;extparam=val;]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;q=1;extparam=\"val\";]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;q=1;extparam=\"0]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;q=1;extparam=\"val]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;q=1;extparam=val\"]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;q=1;extparam=\\x19]", 
"tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;q=1;extparam=\"1]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;q=1;extparam=\\\\]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;q=1;extparam=\\x7f]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;q=1;extparam=\"\\\\\"]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;q=1;extparam=\"\\\\\\\\\\\\\"]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;q=1;extparam=\"\\\\\\\\\"\"]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;q=1;extparam=\"\\\\\\x19\"]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;q=1;extparam=\"\\\\\\x7f\"]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;param=\\x19;q=1;extparam]", "tests/test_acceptparse.py::TestAccept::test_parse__invalid_header[text/html;param=val;q=1;extparam=\\x19]", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[audio/*;", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/plain;", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/*,", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/*;q=0.3,", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[-expected_list4]", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[,-expected_list5]", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[,", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[*/*,", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[*/html-expected_list8]", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;param=!#$%&'*+-.^_`|~09AZaz-expected_list10]", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;param=\"\"-expected_list11]", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;param=\"\\t", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;param=\"\\x80\\x81\\xfe\\xff\\\\\"\\\\\\\\\"-expected_list13]", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;param=\"\\\\\\t\\\\", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;param='val'-expected_list15]", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;q=0.9-expected_list16]", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;q=0-expected_list17]", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;q=0.0-expected_list18]", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;q=0.00-expected_list19]", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;q=0.000-expected_list20]", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;q=1-expected_list21]", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;q=1.0-expected_list22]", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;q=1.00-expected_list23]", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;q=1.000-expected_list24]", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;q=0.1-expected_list25]", 
"tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;q=0.87-expected_list26]", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;q=0.382-expected_list27]", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;Q=0.382-expected_list28]", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;q=0.9;q=0.8-expected_list32]", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;q=1;q=1;q=1-expected_list33]", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;q=0.9;extparam1;extparam2=val2;extparam3=\"val3\"-expected_list34]", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;q=1;extparam=!#$%&'*+-.^_`|~09AZaz-expected_list35]", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;q=1;extparam=\"\"-expected_list36]", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;q=1;extparam=\"\\t", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;q=1;extparam=\"\\x80\\x81\\xfe\\xff\\\\\"\\\\\\\\\"-expected_list38]", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;q=1;extparam=\"\\\\\\t\\\\", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;q=1;extparam='val'-expected_list40]", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[text/html;param1=\"val1\";param2=val2;q=0.9;extparam1=\"val1\";extparam2;extparam3=val3-expected_list41]", "tests/test_acceptparse.py::TestAccept::test_parse__valid_header[,\\t", "tests/test_acceptparse.py::TestAcceptValidHeader::test_parse__inherited", "tests/test_acceptparse.py::TestAcceptValidHeader::test___init___invalid_header[,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___init___invalid_header[text/html;param=val;q=1;extparam=\\x19]", "tests/test_acceptparse.py::TestAcceptValidHeader::test___init___valid_header", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___None", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___invalid_value[,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___invalid_value[right_operand1]", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___invalid_value[right_operand2]", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___invalid_value[right_operand3]", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___invalid_value[right_operand4]", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___invalid_value[a/b,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___invalid_value[right_operand6]", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___invalid_value[right_operand7]", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___invalid_value[right_operand8]", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___invalid_value[right_operand9]", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___other_type_with_invalid___str__[,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___other_type_with_invalid___str__[a/b,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___valid_empty_value[]", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___valid_empty_value[value1]", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___valid_empty_value[value2]", 
"tests/test_acceptparse.py::TestAcceptValidHeader::test___add___valid_empty_value[value3]", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___other_type_with_valid___str___empty", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___valid_value[a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___valid_value[value1-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___valid_value[value2-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___valid_value[value3-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___valid_value[value4-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___valid_value[value5-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___valid_value[value6-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___valid_value[value7-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___valid_value[value8-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___valid_value[value9-e/f;p1=1;q=1;e1=1;e2=2,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___other_type_with_valid___str___not_empty", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___AcceptValidHeader_header_value_empty", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___AcceptValidHeader_header_value_not_empty", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___AcceptNoHeader", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___AcceptInvalidHeader[,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___add___AcceptInvalidHeader[a/b;p1=1;p2=2;q=0.8;e1;e2=\"]", "tests/test_acceptparse.py::TestAcceptValidHeader::test___bool__", "tests/test_acceptparse.py::TestAcceptValidHeader::test___contains__", "tests/test_acceptparse.py::TestAcceptValidHeader::test___iter__", "tests/test_acceptparse.py::TestAcceptValidHeader::test___radd___None", "tests/test_acceptparse.py::TestAcceptValidHeader::test___radd___invalid_value[,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___radd___invalid_value[left_operand1]", "tests/test_acceptparse.py::TestAcceptValidHeader::test___radd___invalid_value[left_operand2]", "tests/test_acceptparse.py::TestAcceptValidHeader::test___radd___invalid_value[left_operand3]", "tests/test_acceptparse.py::TestAcceptValidHeader::test___radd___invalid_value[left_operand4]", "tests/test_acceptparse.py::TestAcceptValidHeader::test___radd___invalid_value[a/b,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___radd___invalid_value[left_operand6]", "tests/test_acceptparse.py::TestAcceptValidHeader::test___radd___invalid_value[left_operand7]", "tests/test_acceptparse.py::TestAcceptValidHeader::test___radd___invalid_value[left_operand8]", "tests/test_acceptparse.py::TestAcceptValidHeader::test___radd___invalid_value[left_operand9]", "tests/test_acceptparse.py::TestAcceptValidHeader::test___radd___other_type_with_invalid___str__[,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___radd___other_type_with_invalid___str__[a/b,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___radd___valid_empty_value[]", "tests/test_acceptparse.py::TestAcceptValidHeader::test___radd___valid_empty_value[value1]", "tests/test_acceptparse.py::TestAcceptValidHeader::test___radd___valid_empty_value[value2]", "tests/test_acceptparse.py::TestAcceptValidHeader::test___radd___valid_empty_value[value3]", 
"tests/test_acceptparse.py::TestAcceptValidHeader::test___radd___other_type_with_valid___str___empty", "tests/test_acceptparse.py::TestAcceptValidHeader::test___radd___valid_non_empty_value[a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___radd___valid_non_empty_value[value1-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___radd___valid_non_empty_value[value2-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___radd___valid_non_empty_value[value3-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___radd___valid_non_empty_value[value4-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___radd___valid_non_empty_value[value5-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___radd___valid_non_empty_value[value6-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___radd___valid_non_empty_value[value7-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___radd___valid_non_empty_value[value8-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___radd___valid_non_empty_value[value9-e/f;p1=1;q=1;e1=1;e2=2,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___radd___other_type_with_valid___str___not_empty", "tests/test_acceptparse.py::TestAcceptValidHeader::test___repr__[-<AcceptValidHeader", "tests/test_acceptparse.py::TestAcceptValidHeader::test___repr__[,,text/html", "tests/test_acceptparse.py::TestAcceptValidHeader::test___repr__[,\\t,", "tests/test_acceptparse.py::TestAcceptValidHeader::test___str__[-]", "tests/test_acceptparse.py::TestAcceptValidHeader::test___str__[,,text/html", "tests/test_acceptparse.py::TestAcceptValidHeader::test___str__[,\\t,", "tests/test_acceptparse.py::TestAcceptValidHeader::test__old_match", "tests/test_acceptparse.py::TestAcceptValidHeader::test__old_match_wildcard_matching", "tests/test_acceptparse.py::TestAcceptValidHeader::test_accept_html[tExt/HtMl-True]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_accept_html[APPlication/XHTML+xml-True]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_accept_html[appliCATION/xMl-True]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_accept_html[TeXt/XmL-True]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_accept_html[image/jpg-False]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_accept_html[TeXt/Plain-False]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_accept_html[image/jpg,", "tests/test_acceptparse.py::TestAcceptValidHeader::test_accepts_html[tExt/HtMl-True]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_accepts_html[APPlication/XHTML+xml-True]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_accepts_html[appliCATION/xMl-True]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_accepts_html[TeXt/XmL-True]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_accepts_html[image/jpg-False]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_accepts_html[TeXt/Plain-False]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_accepts_html[image/jpg,", "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__valid_offers[audio/*;", "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__valid_offers[text/plain;", "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__valid_offers[text/*;q=0.3,", "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__valid_offers[teXT/*;Q=0.5,", 
"tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__valid_offers[text/html,", "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__valid_offers[text/html", "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__valid_offers[-offers6-expected_returned6]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__valid_offers[text/html;p1=1;p2=2;p3=\"\\\\\"\"-offers8-expected_returned8]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__valid_offers[text/html;p1=1-offers9-expected_returned9]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__valid_offers[text/html-offers10-expected_returned10]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__valid_offers[text/html;p1=1-offers11-expected_returned11]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__valid_offers[text/html-offers12-expected_returned12]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__valid_offers[text/*-offers13-expected_returned13]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__valid_offers[*/*-offers14-expected_returned14]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__valid_offers[text/*-offers15-expected_returned15]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__valid_offers[*/*-offers16-expected_returned16]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__valid_offers[text/html;p1=1;q=0-offers17-expected_returned17]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__valid_offers[text/html;q=0-offers18-expected_returned18]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__valid_offers[text/*;q=0-offers19-expected_returned19]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__valid_offers[*/*;q=0-offers20-expected_returned20]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__valid_offers[*/*;q=0,", "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__valid_offers[text/html;p1=1,", "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__valid_offers[text/*,", "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__valid_offers[text/html;q=0,", "tests/test_acceptparse.py::TestAcceptValidHeader::test_acceptable_offers__valid_offers[text/html-offers26-expected_returned26]", "tests/test_acceptparse.py::TestAcceptValidHeader::test_best_match", "tests/test_acceptparse.py::TestAcceptValidHeader::test_best_match_with_one_lower_q", "tests/test_acceptparse.py::TestAcceptValidHeader::test_best_match_with_complex_q", "tests/test_acceptparse.py::TestAcceptValidHeader::test_best_match_json", "tests/test_acceptparse.py::TestAcceptValidHeader::test_best_match_mixedcase", "tests/test_acceptparse.py::TestAcceptValidHeader::test_best_match_zero_quality", "tests/test_acceptparse.py::TestAcceptValidHeader::test_quality", "tests/test_acceptparse.py::TestAcceptValidHeader::test_quality_not_found", "tests/test_acceptparse.py::TestAcceptNoHeader::test_parse__inherited", "tests/test_acceptparse.py::TestAcceptNoHeader::test___init__", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___None", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___invalid_value[,", 
"tests/test_acceptparse.py::TestAcceptNoHeader::test___add___invalid_value[right_operand1]", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___invalid_value[right_operand2]", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___invalid_value[right_operand3]", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___invalid_value[right_operand4]", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___invalid_value[a/b,", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___invalid_value[right_operand6]", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___invalid_value[right_operand7]", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___invalid_value[right_operand8]", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___invalid_value[right_operand9]", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___other_type_with_invalid___str__[,", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___other_type_with_invalid___str__[a/b,", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___valid_empty_value[]", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___valid_empty_value[value1]", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___valid_empty_value[value2]", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___valid_empty_value[value3]", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___other_type_with_valid___str___empty", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___valid_value[a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___valid_value[value1-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___valid_value[value2-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___valid_value[value3-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___valid_value[value4-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___valid_value[value5-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___valid_value[value6-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___valid_value[value7-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___valid_value[value8-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___valid_value[value9-e/f;p1=1;q=1;e1=1;e2=2,", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___other_type_with_valid___str___not_empty", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___AcceptValidHeader_header_value_empty", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___AcceptValidHeader_header_value_not_empty", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___AcceptNoHeader", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___AcceptInvalidHeader[,", "tests/test_acceptparse.py::TestAcceptNoHeader::test___add___AcceptInvalidHeader[a/b;p1=1;p2=2;q=0.8;e1;e2=\"]", "tests/test_acceptparse.py::TestAcceptNoHeader::test___bool__", "tests/test_acceptparse.py::TestAcceptNoHeader::test___contains__", "tests/test_acceptparse.py::TestAcceptNoHeader::test___iter__", "tests/test_acceptparse.py::TestAcceptNoHeader::test___radd___None", "tests/test_acceptparse.py::TestAcceptNoHeader::test___radd___invalid_value[,", "tests/test_acceptparse.py::TestAcceptNoHeader::test___radd___invalid_value[left_operand1]", "tests/test_acceptparse.py::TestAcceptNoHeader::test___radd___invalid_value[left_operand2]", 
"tests/test_acceptparse.py::TestAcceptNoHeader::test___radd___invalid_value[left_operand3]", "tests/test_acceptparse.py::TestAcceptNoHeader::test___radd___invalid_value[left_operand4]", "tests/test_acceptparse.py::TestAcceptNoHeader::test___radd___invalid_value[a/b,", "tests/test_acceptparse.py::TestAcceptNoHeader::test___radd___invalid_value[left_operand6]", "tests/test_acceptparse.py::TestAcceptNoHeader::test___radd___invalid_value[left_operand7]", "tests/test_acceptparse.py::TestAcceptNoHeader::test___radd___invalid_value[left_operand8]", "tests/test_acceptparse.py::TestAcceptNoHeader::test___radd___invalid_value[left_operand9]", "tests/test_acceptparse.py::TestAcceptNoHeader::test___radd___other_type_with_invalid___str__[,", "tests/test_acceptparse.py::TestAcceptNoHeader::test___radd___other_type_with_invalid___str__[a/b,", "tests/test_acceptparse.py::TestAcceptNoHeader::test___radd___valid_empty_value[]", "tests/test_acceptparse.py::TestAcceptNoHeader::test___radd___valid_empty_value[value1]", "tests/test_acceptparse.py::TestAcceptNoHeader::test___radd___valid_empty_value[value2]", "tests/test_acceptparse.py::TestAcceptNoHeader::test___radd___valid_empty_value[value3]", "tests/test_acceptparse.py::TestAcceptNoHeader::test___radd___other_type_with_valid___str___empty", "tests/test_acceptparse.py::TestAcceptNoHeader::test___radd___valid_non_empty_value[a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptNoHeader::test___radd___valid_non_empty_value[value1-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptNoHeader::test___radd___valid_non_empty_value[value2-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptNoHeader::test___radd___valid_non_empty_value[value3-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptNoHeader::test___radd___valid_non_empty_value[value4-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptNoHeader::test___radd___valid_non_empty_value[value5-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptNoHeader::test___radd___valid_non_empty_value[value6-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptNoHeader::test___radd___valid_non_empty_value[value7-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptNoHeader::test___radd___valid_non_empty_value[value8-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptNoHeader::test___radd___valid_non_empty_value[value9-e/f;p1=1;q=1;e1=1;e2=2,", "tests/test_acceptparse.py::TestAcceptNoHeader::test___radd___other_type_with_valid___str___not_empty", "tests/test_acceptparse.py::TestAcceptNoHeader::test___repr__", "tests/test_acceptparse.py::TestAcceptNoHeader::test___str__", "tests/test_acceptparse.py::TestAcceptNoHeader::test_accept_html", "tests/test_acceptparse.py::TestAcceptNoHeader::test_accepts_html", "tests/test_acceptparse.py::TestAcceptNoHeader::test_acceptable_offers", "tests/test_acceptparse.py::TestAcceptNoHeader::test_best_match", "tests/test_acceptparse.py::TestAcceptNoHeader::test_quality", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test_parse__inherited", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___init__", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___None", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___invalid_value[,", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___invalid_value[right_operand1]", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___invalid_value[right_operand2]", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___invalid_value[right_operand3]", 
"tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___invalid_value[right_operand4]", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___invalid_value[a/b,", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___invalid_value[right_operand6]", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___invalid_value[right_operand7]", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___invalid_value[right_operand8]", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___invalid_value[right_operand9]", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___other_type_with_invalid___str__[,", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___other_type_with_invalid___str__[a/b,", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___valid_empty_value[]", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___valid_empty_value[value1]", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___valid_empty_value[value2]", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___valid_empty_value[value3]", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___other_type_with_valid___str___empty", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___valid_value[a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___valid_value[value1-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___valid_value[value2-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___valid_value[value3-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___valid_value[value4-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___valid_value[value5-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___valid_value[value6-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___valid_value[value7-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___valid_value[value8-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___valid_value[value9-e/f;p1=1;q=1;e1=1;e2=2,", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___other_type_with_valid___str___not_empty", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___AcceptValidHeader_header_value_empty", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___AcceptValidHeader_header_value_not_empty", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___AcceptNoHeader", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___AcceptInvalidHeader[,", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___add___AcceptInvalidHeader[a/b;p1=1;p2=2;q=0.8;e1;e2=\"]", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___bool__", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___contains__", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___iter__", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___radd___None", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___radd___invalid_value[,", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___radd___invalid_value[left_operand1]", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___radd___invalid_value[left_operand2]", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___radd___invalid_value[left_operand3]", 
"tests/test_acceptparse.py::TestAcceptInvalidHeader::test___radd___invalid_value[left_operand4]", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___radd___invalid_value[a/b,", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___radd___invalid_value[left_operand6]", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___radd___invalid_value[left_operand7]", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___radd___invalid_value[left_operand8]", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___radd___invalid_value[left_operand9]", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___radd___other_type_with_invalid___str__[,", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___radd___other_type_with_invalid___str__[a/b,", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___radd___valid_empty_value[]", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___radd___valid_empty_value[value1]", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___radd___valid_empty_value[value2]", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___radd___valid_empty_value[value3]", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___radd___other_type_with_valid___str___empty", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___radd___valid_non_empty_value[a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___radd___valid_non_empty_value[value1-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___radd___valid_non_empty_value[value2-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___radd___valid_non_empty_value[value3-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___radd___valid_non_empty_value[value4-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___radd___valid_non_empty_value[value5-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___radd___valid_non_empty_value[value6-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___radd___valid_non_empty_value[value7-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___radd___valid_non_empty_value[value8-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___radd___valid_non_empty_value[value9-e/f;p1=1;q=1;e1=1;e2=2,", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___radd___other_type_with_valid___str___not_empty", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___repr__", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test___str__", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test_accept_html", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test_accepts_html", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test_acceptable_offers", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test_best_match", "tests/test_acceptparse.py::TestAcceptInvalidHeader::test_quality", "tests/test_acceptparse.py::TestCreateAcceptHeader::test_header_value_is_None", "tests/test_acceptparse.py::TestCreateAcceptHeader::test_header_value_is_valid", "tests/test_acceptparse.py::TestCreateAcceptHeader::test_header_value_is_invalid[,", "tests/test_acceptparse.py::TestCreateAcceptHeader::test_header_value_is_invalid[noslash]", "tests/test_acceptparse.py::TestAcceptProperty::test_fget_header_is_valid", "tests/test_acceptparse.py::TestAcceptProperty::test_fget_header_is_None", "tests/test_acceptparse.py::TestAcceptProperty::test_fget_header_is_invalid", 
"tests/test_acceptparse.py::TestAcceptProperty::test_fset_value_is_valid", "tests/test_acceptparse.py::TestAcceptProperty::test_fset_value_is_None", "tests/test_acceptparse.py::TestAcceptProperty::test_fset_value_is_invalid", "tests/test_acceptparse.py::TestAcceptProperty::test_fset_value_types[-]", "tests/test_acceptparse.py::TestAcceptProperty::test_fset_value_types[value1-]", "tests/test_acceptparse.py::TestAcceptProperty::test_fset_value_types[value2-]", "tests/test_acceptparse.py::TestAcceptProperty::test_fset_value_types[value3-]", "tests/test_acceptparse.py::TestAcceptProperty::test_fset_value_types[a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptProperty::test_fset_value_types[value5-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptProperty::test_fset_value_types[value6-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptProperty::test_fset_value_types[value7-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptProperty::test_fset_value_types[value8-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptProperty::test_fset_value_types[value9-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptProperty::test_fset_value_types[value10-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptProperty::test_fset_value_types[value11-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptProperty::test_fset_value_types[value12-a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptProperty::test_fset_value_types[value13-e/f;p1=1;q=1;e1=1;e2=2,", "tests/test_acceptparse.py::TestAcceptProperty::test_fset_other_type_with___str__[]", "tests/test_acceptparse.py::TestAcceptProperty::test_fset_other_type_with___str__[a/b;q=0.5,", "tests/test_acceptparse.py::TestAcceptProperty::test_fset_AcceptValidHeader", "tests/test_acceptparse.py::TestAcceptProperty::test_fset_AcceptNoHeader", "tests/test_acceptparse.py::TestAcceptProperty::test_fset_AcceptInvalidHeader", "tests/test_acceptparse.py::TestAcceptProperty::test_fdel_header_key_in_environ", "tests/test_acceptparse.py::TestAcceptProperty::test_fdel_header_key_not_in_environ", "tests/test_acceptparse.py::TestAcceptCharset::test_parse__invalid_header[]", "tests/test_acceptparse.py::TestAcceptCharset::test_parse__invalid_header[\"]", "tests/test_acceptparse.py::TestAcceptCharset::test_parse__invalid_header[(]", "tests/test_acceptparse.py::TestAcceptCharset::test_parse__invalid_header[)]", "tests/test_acceptparse.py::TestAcceptCharset::test_parse__invalid_header[/]", "tests/test_acceptparse.py::TestAcceptCharset::test_parse__invalid_header[:]", "tests/test_acceptparse.py::TestAcceptCharset::test_parse__invalid_header[;]", "tests/test_acceptparse.py::TestAcceptCharset::test_parse__invalid_header[<]", "tests/test_acceptparse.py::TestAcceptCharset::test_parse__invalid_header[=]", "tests/test_acceptparse.py::TestAcceptCharset::test_parse__invalid_header[>]", "tests/test_acceptparse.py::TestAcceptCharset::test_parse__invalid_header[?]", "tests/test_acceptparse.py::TestAcceptCharset::test_parse__invalid_header[@]", "tests/test_acceptparse.py::TestAcceptCharset::test_parse__invalid_header[[]", "tests/test_acceptparse.py::TestAcceptCharset::test_parse__invalid_header[\\\\]", "tests/test_acceptparse.py::TestAcceptCharset::test_parse__invalid_header[]]", "tests/test_acceptparse.py::TestAcceptCharset::test_parse__invalid_header[{]", "tests/test_acceptparse.py::TestAcceptCharset::test_parse__invalid_header[}]", "tests/test_acceptparse.py::TestAcceptCharset::test_parse__invalid_header[foo,", "tests/test_acceptparse.py::TestAcceptCharset::test_parse__invalid_header[foo", 
"tests/test_acceptparse.py::TestAcceptCharset::test_parse__valid_header[*-expected_list0]", "tests/test_acceptparse.py::TestAcceptCharset::test_parse__valid_header[!#$%&'*+-.^_`|~;q=0.5-expected_list1]", "tests/test_acceptparse.py::TestAcceptCharset::test_parse__valid_header[0123456789-expected_list2]", "tests/test_acceptparse.py::TestAcceptCharset::test_parse__valid_header[,\\t", "tests/test_acceptparse.py::TestAcceptCharset::test_parse__valid_header[iso-8859-5;q=0.372,unicode-1-1;q=0.977,UTF-8,", "tests/test_acceptparse.py::TestAcceptCharset::test_parse__valid_header[foo,bar-expected_list5]", "tests/test_acceptparse.py::TestAcceptCharset::test_parse__valid_header[foo,", "tests/test_acceptparse.py::TestAcceptCharset::test_parse__valid_header[foo", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test_parse__inherited", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___init___invalid_header[]", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___init___invalid_header[,", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___init___valid_header", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___add___None", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___add___invalid_value[]", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___add___invalid_value[right_operand1]", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___add___invalid_value[right_operand2]", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___add___invalid_value[right_operand3]", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___add___invalid_value[UTF/8]", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___add___invalid_value[right_operand5]", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___add___invalid_value[right_operand6]", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___add___invalid_value[right_operand7]", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___add___other_type_with_invalid___str__[]", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___add___other_type_with_invalid___str__[UTF/8]", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___add___valid_value[UTF-7;q=0.5,", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___add___valid_value[value1-UTF-7;q=0.5,", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___add___valid_value[value2-UTF-7;q=0.5,", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___add___valid_value[value3-UTF-8,", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___add___other_type_with_valid___str__", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___add___AcceptCharsetValidHeader", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___add___AcceptCharsetNoHeader", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___add___AcceptCharsetInvalidHeader[]", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___add___AcceptCharsetInvalidHeader[utf/8]", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___bool__", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___contains__", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___contains___not", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___contains___zero_quality", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___iter__", 
"tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___radd___None", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___radd___invalid_value[]", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___radd___invalid_value[left_operand1]", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___radd___invalid_value[left_operand2]", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___radd___invalid_value[left_operand3]", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___radd___invalid_value[UTF/8]", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___radd___invalid_value[left_operand5]", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___radd___invalid_value[left_operand6]", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___radd___invalid_value[left_operand7]", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___radd___other_type_with_invalid___str__[]", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___radd___other_type_with_invalid___str__[UTF/8]", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___radd___valid_value[UTF-7;q=0.5,", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___radd___valid_value[value1-UTF-7;q=0.5,", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___radd___valid_value[value2-UTF-7;q=0.5,", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___radd___valid_value[value3-UTF-8,", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___radd___other_type_with_valid___str__", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___repr__", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test___str__", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test_acceptable_offers[UTF-7,", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test_acceptable_offers[utf-8,", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test_acceptable_offers[utF-8;q=0.2,", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test_acceptable_offers[*-offers4-returned4]", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test_acceptable_offers[*;q=0.8-offers5-returned5]", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test_acceptable_offers[UTF-7;q=0.5,", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test_acceptable_offers[UTF-8,", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test_acceptable_offers[UTF-8;q=0,", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test_acceptable_offers[UTF-8;q=0.5,", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test_acceptable_offers[UTF-8;q=0.8,", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test_best_match", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test_best_match_with_one_lower_q", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test_best_match_with_complex_q", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test_best_match_mixedcase", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test_best_match_zero_quality", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test_quality", "tests/test_acceptparse.py::TestAcceptCharsetValidHeader::test_quality_not_found", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test_parse__inherited", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___init__", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___add___None", 
"tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___add___invalid_value[]", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___add___invalid_value[right_operand1]", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___add___invalid_value[right_operand2]", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___add___invalid_value[right_operand3]", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___add___invalid_value[UTF/8]", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___add___invalid_value[right_operand5]", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___add___invalid_value[right_operand6]", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___add___invalid_value[right_operand7]", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___add___other_type_with_invalid___str__[]", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___add___other_type_with_invalid___str__[UTF/8]", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___add___valid_value[UTF-7;q=0.5,", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___add___valid_value[value1-UTF-7;q=0.5,", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___add___valid_value[value2-UTF-7;q=0.5,", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___add___valid_value[value3-UTF-8,", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___add___other_type_with_valid___str__", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___add___AcceptCharsetValidHeader", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___add___AcceptCharsetNoHeader", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___add___AcceptCharsetInvalidHeader[]", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___add___AcceptCharsetInvalidHeader[utf/8]", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___bool__", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___contains__", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___iter__", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___radd___None", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___radd___invalid_value[]", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___radd___invalid_value[left_operand1]", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___radd___invalid_value[left_operand2]", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___radd___invalid_value[left_operand3]", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___radd___invalid_value[UTF/8]", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___radd___invalid_value[left_operand5]", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___radd___invalid_value[left_operand6]", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___radd___invalid_value[left_operand7]", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___radd___other_type_with_invalid___str__[]", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___radd___other_type_with_invalid___str__[UTF/8]", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___radd___valid_value[UTF-7;q=0.5,", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___radd___valid_value[value1-UTF-7;q=0.5,", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___radd___valid_value[value2-UTF-7;q=0.5,", 
"tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___radd___valid_value[value3-UTF-8,", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___radd___other_type_with_valid___str__", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___repr__", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test___str__", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test_acceptable_offers", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test_best_match", "tests/test_acceptparse.py::TestAcceptCharsetNoHeader::test_quality", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test_parse__inherited", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___init__", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___add___None", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___add___invalid_value[]", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___add___invalid_value[right_operand1]", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___add___invalid_value[right_operand2]", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___add___invalid_value[right_operand3]", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___add___invalid_value[UTF/8]", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___add___invalid_value[right_operand5]", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___add___invalid_value[right_operand6]", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___add___invalid_value[right_operand7]", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___add___other_type_with_invalid___str__[]", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___add___other_type_with_invalid___str__[UTF/8]", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___add___valid_header_value[UTF-7;q=0.5,", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___add___valid_header_value[value1-UTF-7;q=0.5,", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___add___valid_header_value[value2-UTF-7;q=0.5,", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___add___valid_header_value[value3-UTF-8,", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___add___other_type_valid_header_value", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___add___AcceptCharsetValidHeader", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___add___AcceptCharsetNoHeader", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___add___AcceptCharsetInvalidHeader", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___bool__", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___contains__", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___iter__", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___radd___None", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___radd___invalid_value[]", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___radd___invalid_value[left_operand1]", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___radd___invalid_value[left_operand2]", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___radd___invalid_value[left_operand3]", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___radd___invalid_value[UTF/8]", 
"tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___radd___invalid_value[left_operand5]", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___radd___invalid_value[left_operand6]", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___radd___invalid_value[left_operand7]", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___radd___other_type_with_invalid___str__[]", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___radd___other_type_with_invalid___str__[UTF/8]", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___radd___valid_header_value[UTF-7;q=0.5,", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___radd___valid_header_value[value1-UTF-7;q=0.5,", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___radd___valid_header_value[value2-UTF-7;q=0.5,", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___radd___valid_header_value[value3-UTF-8,", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___radd___other_type_valid_header_value", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___repr__", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test___str__", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test_acceptable_offers", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test_best_match", "tests/test_acceptparse.py::TestAcceptCharsetInvalidHeader::test_quality", "tests/test_acceptparse.py::TestCreateAcceptCharsetHeader::test_header_value_is_valid", "tests/test_acceptparse.py::TestCreateAcceptCharsetHeader::test_header_value_is_None", "tests/test_acceptparse.py::TestCreateAcceptCharsetHeader::test_header_value_is_invalid[]", "tests/test_acceptparse.py::TestCreateAcceptCharsetHeader::test_header_value_is_invalid[iso-8859-5,", "tests/test_acceptparse.py::TestAcceptCharsetProperty::test_fget_header_is_None", "tests/test_acceptparse.py::TestAcceptCharsetProperty::test_fget_header_is_valid", "tests/test_acceptparse.py::TestAcceptCharsetProperty::test_fget_header_is_invalid", "tests/test_acceptparse.py::TestAcceptCharsetProperty::test_fset_value_is_None", "tests/test_acceptparse.py::TestAcceptCharsetProperty::test_fset_value_is_invalid", "tests/test_acceptparse.py::TestAcceptCharsetProperty::test_fset_value_is_valid", "tests/test_acceptparse.py::TestAcceptCharsetProperty::test_fset_value_types[utf-8;q=0.5,", "tests/test_acceptparse.py::TestAcceptCharsetProperty::test_fset_value_types[value1-utf-8;q=0.5,", "tests/test_acceptparse.py::TestAcceptCharsetProperty::test_fset_value_types[value2-utf-8;q=0.5,", "tests/test_acceptparse.py::TestAcceptCharsetProperty::test_fset_value_types[value3-utf-7,", "tests/test_acceptparse.py::TestAcceptCharsetProperty::test_fset_other_type_with_valid___str__", "tests/test_acceptparse.py::TestAcceptCharsetProperty::test_fset_AcceptCharsetNoHeader", "tests/test_acceptparse.py::TestAcceptCharsetProperty::test_fset_AcceptCharsetValidHeader", "tests/test_acceptparse.py::TestAcceptCharsetProperty::test_fset_AcceptCharsetInvalidHeader", "tests/test_acceptparse.py::TestAcceptCharsetProperty::test_fdel_header_key_in_environ", "tests/test_acceptparse.py::TestAcceptCharsetProperty::test_fdel_header_key_not_in_environ", "tests/test_acceptparse.py::TestAcceptEncoding::test_parse__invalid_header[\"]", "tests/test_acceptparse.py::TestAcceptEncoding::test_parse__invalid_header[(]", "tests/test_acceptparse.py::TestAcceptEncoding::test_parse__invalid_header[)]", 
"tests/test_acceptparse.py::TestAcceptEncoding::test_parse__invalid_header[/]", "tests/test_acceptparse.py::TestAcceptEncoding::test_parse__invalid_header[:]", "tests/test_acceptparse.py::TestAcceptEncoding::test_parse__invalid_header[;]", "tests/test_acceptparse.py::TestAcceptEncoding::test_parse__invalid_header[<]", "tests/test_acceptparse.py::TestAcceptEncoding::test_parse__invalid_header[=]", "tests/test_acceptparse.py::TestAcceptEncoding::test_parse__invalid_header[>]", "tests/test_acceptparse.py::TestAcceptEncoding::test_parse__invalid_header[?]", "tests/test_acceptparse.py::TestAcceptEncoding::test_parse__invalid_header[@]", "tests/test_acceptparse.py::TestAcceptEncoding::test_parse__invalid_header[[]", "tests/test_acceptparse.py::TestAcceptEncoding::test_parse__invalid_header[\\\\]", "tests/test_acceptparse.py::TestAcceptEncoding::test_parse__invalid_header[]]", "tests/test_acceptparse.py::TestAcceptEncoding::test_parse__invalid_header[{]", "tests/test_acceptparse.py::TestAcceptEncoding::test_parse__invalid_header[}]", "tests/test_acceptparse.py::TestAcceptEncoding::test_parse__invalid_header[,", "tests/test_acceptparse.py::TestAcceptEncoding::test_parse__invalid_header[gzip;q=1.0,", "tests/test_acceptparse.py::TestAcceptEncoding::test_parse__valid_header[,-expected_list0]", "tests/test_acceptparse.py::TestAcceptEncoding::test_parse__valid_header[,", "tests/test_acceptparse.py::TestAcceptEncoding::test_parse__valid_header[*-expected_list2]", "tests/test_acceptparse.py::TestAcceptEncoding::test_parse__valid_header[!#$%&'*+-.^_`|~;q=0.5-expected_list3]", "tests/test_acceptparse.py::TestAcceptEncoding::test_parse__valid_header[0123456789-expected_list4]", "tests/test_acceptparse.py::TestAcceptEncoding::test_parse__valid_header[,,\\t", "tests/test_acceptparse.py::TestAcceptEncoding::test_parse__valid_header[compress,", "tests/test_acceptparse.py::TestAcceptEncoding::test_parse__valid_header[-expected_list7]", "tests/test_acceptparse.py::TestAcceptEncoding::test_parse__valid_header[*-expected_list8]", "tests/test_acceptparse.py::TestAcceptEncoding::test_parse__valid_header[compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncoding::test_parse__valid_header[gzip;q=1.0,", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test_parse__inherited", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___init___invalid_header[,", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___init___invalid_header[gzip;q=1.0,", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___init___valid_header", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___add___None", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___add___invalid_value[,", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___add___invalid_value[right_operand1]", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___add___invalid_value[right_operand2]", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___add___invalid_value[right_operand3]", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___add___other_type_with_invalid___str__", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___add___valid_empty_value[]", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___add___valid_empty_value[value1]", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___add___valid_empty_value[value2]", 
"tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___add___valid_empty_value[value3]", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___add___other_type_with_valid___str___empty", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___add___valid_value[compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___add___valid_value[value1-compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___add___valid_value[value2-compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___add___valid_value[value3-compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___add___valid_value[value4-compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___add___valid_value[value5-*,", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___add___other_type_with_valid___str___not_empty", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___add___AcceptEncodingValidHeader_header_value_empty", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___add___AcceptEncodingValidHeader_header_value_not_empty", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___add___AcceptEncodingNoHeader", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___add___AcceptEncodingInvalidHeader[,", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___add___AcceptEncodingInvalidHeader[compress;q=1.001]", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___bool__", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___contains__", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___iter__", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___radd___None", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___radd___invalid_value[,", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___radd___invalid_value[left_operand1]", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___radd___invalid_value[left_operand2]", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___radd___invalid_value[left_operand3]", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___radd___other_type_with_invalid___str__", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___radd___valid_empty_value[]", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___radd___valid_empty_value[value1]", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___radd___valid_empty_value[value2]", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___radd___valid_empty_value[value3]", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___radd___other_type_with_valid___str___empty", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___radd___valid_non_empty_value[compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___radd___valid_non_empty_value[value1-compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___radd___valid_non_empty_value[value2-compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___radd___valid_non_empty_value[value3-compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___radd___valid_non_empty_value[value4-compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___radd___valid_non_empty_value[value5-*,", 
"tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___radd___other_type_with_valid___str___not_empty", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___repr__[-<AcceptEncodingValidHeader", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___repr__[,\\t,", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___str__[-]", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test___str__[,\\t,", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test_acceptable_offers[-offers0-expected_returned0]", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test_acceptable_offers[gzip,", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test_acceptable_offers[-offers2-expected_returned2]", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test_acceptable_offers[-offers3-expected_returned3]", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test_acceptable_offers[compress,", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test_acceptable_offers[*;q=0-offers6-expected_returned6]", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test_acceptable_offers[*;q=0,", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test_acceptable_offers[IDentity;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test_acceptable_offers[compress;q=0,", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test_best_match", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test_best_match_with_one_lower_q", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test_best_match_with_complex_q", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test_best_match_mixedcase", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test_best_match_zero_quality", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test_quality", "tests/test_acceptparse.py::TestAcceptEncodingValidHeader::test_quality_not_found", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test_parse__inherited", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___init__", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___add___None", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___add___invalid_value[,", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___add___invalid_value[right_operand1]", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___add___invalid_value[right_operand2]", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___add___invalid_value[right_operand3]", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___add___other_type_with_invalid___str__", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___add___valid_empty_value[]", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___add___valid_empty_value[value1]", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___add___valid_empty_value[value2]", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___add___valid_empty_value[value3]", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___add___other_type_with_valid___str___empty", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___add___valid_value[compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___add___valid_value[value1-compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___add___valid_value[value2-compress;q=0.5,", 
"tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___add___valid_value[value3-compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___add___valid_value[value4-compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___add___valid_value[value5-*,", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___add___other_type_with_valid___str___not_empty", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___add___AcceptEncodingValidHeader_header_value_empty", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___add___AcceptEncodingValidHeader_header_value_not_empty", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___add___AcceptEncodingNoHeader", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___add___AcceptEncodingInvalidHeader[,", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___add___AcceptEncodingInvalidHeader[compress;q=1.001]", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___bool__", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___contains__", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___iter__", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___radd___None", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___radd___invalid_value[,", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___radd___invalid_value[left_operand1]", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___radd___invalid_value[left_operand2]", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___radd___invalid_value[left_operand3]", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___radd___other_type_with_invalid___str__", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___radd___valid_empty_value[]", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___radd___valid_empty_value[value1]", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___radd___valid_empty_value[value2]", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___radd___valid_empty_value[value3]", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___radd___other_type_with_valid___str___empty", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___radd___valid_non_empty_value[compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___radd___valid_non_empty_value[value1-compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___radd___valid_non_empty_value[value2-compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___radd___valid_non_empty_value[value3-compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___radd___valid_non_empty_value[value4-compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___radd___valid_non_empty_value[value5-*,", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___radd___other_type_with_valid___str___not_empty", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___repr__", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test___str__", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test_acceptable_offers", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test_best_match", "tests/test_acceptparse.py::TestAcceptEncodingNoHeader::test_quality", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test_parse__inherited", 
"tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___init__", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___add___None", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___add___invalid_value[,", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___add___invalid_value[right_operand1]", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___add___invalid_value[right_operand2]", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___add___invalid_value[right_operand3]", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___add___other_type_with_invalid___str__", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___add___valid_empty_value[]", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___add___valid_empty_value[value1]", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___add___valid_empty_value[value2]", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___add___valid_empty_value[value3]", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___add___other_type_with_valid___str___empty", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___add___valid_value[compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___add___valid_value[value1-compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___add___valid_value[value2-compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___add___valid_value[value3-compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___add___valid_value[value4-compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___add___valid_value[value5-*,", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___add___other_type_with_valid___str___not_empty", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___add___AcceptEncodingValidHeader_header_value_empty", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___add___AcceptEncodingValidHeader_header_value_not_empty", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___add___AcceptEncodingNoHeader", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___add___AcceptEncodingInvalidHeader[,", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___add___AcceptEncodingInvalidHeader[compress;q=1.001]", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___bool__", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___contains__", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___iter__", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___radd___None", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___radd___invalid_value[,", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___radd___invalid_value[left_operand1]", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___radd___invalid_value[left_operand2]", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___radd___invalid_value[left_operand3]", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___radd___other_type_with_invalid___str__", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___radd___valid_empty_value[]", 
"tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___radd___valid_empty_value[value1]", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___radd___valid_empty_value[value2]", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___radd___valid_empty_value[value3]", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___radd___other_type_with_valid___str___empty", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___radd___valid_non_empty_value[compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___radd___valid_non_empty_value[value1-compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___radd___valid_non_empty_value[value2-compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___radd___valid_non_empty_value[value3-compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___radd___valid_non_empty_value[value4-compress;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___radd___valid_non_empty_value[value5-*,", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___radd___other_type_with_valid___str___not_empty", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___repr__", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test___str__", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test_acceptable_offers", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test_best_match", "tests/test_acceptparse.py::TestAcceptEncodingInvalidHeader::test_quality", "tests/test_acceptparse.py::TestCreateAcceptEncodingHeader::test_header_value_is_None", "tests/test_acceptparse.py::TestCreateAcceptEncodingHeader::test_header_value_is_valid", "tests/test_acceptparse.py::TestCreateAcceptEncodingHeader::test_header_value_is_invalid[,", "tests/test_acceptparse.py::TestCreateAcceptEncodingHeader::test_header_value_is_invalid[gzip;q=", "tests/test_acceptparse.py::TestAcceptEncodingProperty::test_fget_header_is_None", "tests/test_acceptparse.py::TestAcceptEncodingProperty::test_fget_header_is_valid", "tests/test_acceptparse.py::TestAcceptEncodingProperty::test_fget_header_is_invalid", "tests/test_acceptparse.py::TestAcceptEncodingProperty::test_fset_value_is_None", "tests/test_acceptparse.py::TestAcceptEncodingProperty::test_fset_value_is_invalid", "tests/test_acceptparse.py::TestAcceptEncodingProperty::test_fset_value_is_valid", "tests/test_acceptparse.py::TestAcceptEncodingProperty::test_fset_value_types[gzip;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingProperty::test_fset_value_types[value1-gzip;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingProperty::test_fset_value_types[value2-gzip;q=0.5,", "tests/test_acceptparse.py::TestAcceptEncodingProperty::test_fset_value_types[value3-deflate,", "tests/test_acceptparse.py::TestAcceptEncodingProperty::test_fset_other_type_with_valid___str__", "tests/test_acceptparse.py::TestAcceptEncodingProperty::test_fset_AcceptEncodingNoHeader", "tests/test_acceptparse.py::TestAcceptEncodingProperty::test_fset_AcceptEncodingValidHeader", "tests/test_acceptparse.py::TestAcceptEncodingProperty::test_fset_AcceptEncodingInvalidHeader", "tests/test_acceptparse.py::TestAcceptEncodingProperty::test_fdel_header_key_in_environ", "tests/test_acceptparse.py::TestAcceptEncodingProperty::test_fdel_header_key_not_in_environ", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__invalid_header[]", 
"tests/test_acceptparse.py::TestAcceptLanguage::test_parse__invalid_header[*s]", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__invalid_header[*-a]", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__invalid_header[a-*]", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__invalid_header[aaaaaaaaa]", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__invalid_header[a-aaaaaaaaa]", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__invalid_header[a-a-aaaaaaaaa]", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__invalid_header[-]", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__invalid_header[a-]", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__invalid_header[-a]", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__invalid_header[---]", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__invalid_header[--a]", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__invalid_header[1-a]", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__invalid_header[1-a-a]", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__invalid_header[en_gb]", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__invalid_header[en/gb]", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__invalid_header[foo,", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__invalid_header[foo", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__valid_header[*-expected_list0]", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__valid_header[fR;q=0.5-expected_list1]", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__valid_header[zh-Hant;q=0.500-expected_list2]", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__valid_header[zh-Hans-CN;q=1-expected_list3]", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__valid_header[de-CH-x-phonebk;q=1.0-expected_list4]", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__valid_header[az-Arab-x-AZE-derbend;q=1.00-expected_list5]", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__valid_header[zh-CN-a-myExt-x-private;q=1.000-expected_list6]", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__valid_header[aaaaaaaa-expected_list7]", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__valid_header[aaaaaaaa-a-expected_list8]", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__valid_header[aaaaaaaa-aaaaaaaa-expected_list9]", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__valid_header[a-aaaaaaaa-aaaaaaaa-expected_list10]", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__valid_header[aaaaaaaa-a-aaaaaaaa-expected_list11]", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__valid_header[zh-Hant;q=0.372,zh-CN-a-myExt-x-private;q=0.977,de,*;q=0.000-expected_list12]", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__valid_header[,\\t", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__valid_header[foo,bar-expected_list14]", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__valid_header[foo,", "tests/test_acceptparse.py::TestAcceptLanguage::test_parse__valid_header[foo", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___init___invalid_header[]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___init___invalid_header[,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___init___valid_header", 
"tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___add___None", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___add___invalid_value[]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___add___invalid_value[right_operand1]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___add___invalid_value[right_operand2]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___add___invalid_value[right_operand3]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___add___invalid_value[en_gb]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___add___invalid_value[right_operand5]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___add___invalid_value[right_operand6]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___add___invalid_value[right_operand7]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___add___invalid_value[,]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___add___invalid_value[right_operand9]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___add___invalid_value[right_operand10]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___add___invalid_value[right_operand11]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___add___other_type_with_invalid___str__[]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___add___other_type_with_invalid___str__[en_gb]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___add___other_type_with_invalid___str__[,]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___add___valid_value[en-gb;q=0.5,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___add___valid_value[value1-en-gb;q=0.5,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___add___valid_value[value2-en-gb;q=0.5,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___add___valid_value[value3-es,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___add___other_type_with_valid___str__", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___add___AcceptLanguageValidHeader", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___add___AcceptLanguageNoHeader", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___add___AcceptLanguageInvalidHeader[]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___add___AcceptLanguageInvalidHeader[en_gb]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___add___AcceptLanguageInvalidHeader[,]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___bool__", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___contains___in[*-da]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___contains___in[da-DA]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___contains___in[en-en-gb]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___contains___in[en-gb-en-gb]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___contains___in[en-gb-en]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___contains___in[en-gb-en_GB]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___contains___not_in[en-gb-en-us]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___contains___not_in[en-gb-fr-fr]", 
"tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___contains___not_in[en-gb-fr]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___contains___not_in[en-fr-fr]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___iter__[fr;q=0,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___iter__[en-gb,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___iter__[en-gb;q=0.5,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___iter__[de;q=0.8,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___iter__[en-gb;q=0,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___iter__[de,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___radd___None", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___radd___invalid_value[]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___radd___invalid_value[left_operand1]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___radd___invalid_value[left_operand2]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___radd___invalid_value[left_operand3]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___radd___invalid_value[en_gb]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___radd___invalid_value[left_operand5]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___radd___invalid_value[left_operand6]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___radd___invalid_value[left_operand7]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___radd___invalid_value[,]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___radd___invalid_value[left_operand9]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___radd___invalid_value[left_operand10]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___radd___invalid_value[left_operand11]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___radd___other_type_with_invalid___str__[]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___radd___other_type_with_invalid___str__[en_gb]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___radd___other_type_with_invalid___str__[,]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___radd___valid_value[en-gb;q=0.5,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___radd___valid_value[value1-en-gb;q=0.5,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___radd___valid_value[value2-en-gb;q=0.5,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___radd___valid_value[value3-es,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___radd___other_type_with_valid___str__", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___repr__", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test___str__", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_basic_filtering[de-de-language_tags0-expected_returned0]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_basic_filtering[a-language_tags1-expected_returned1]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_basic_filtering[a,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_basic_filtering[a-b;q=0.9,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_basic_filtering[foO,", 
"tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_basic_filtering[b-c,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_basic_filtering[d-e-f-language_tags6-expected_returned6]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_basic_filtering[a-b-c-d,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_basic_filtering[*-language_tags8-expected_returned8]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_basic_filtering[*;q=0.2,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_basic_filtering[a;q=0.5,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_basic_filtering[a-b-c;q=0.7,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_basic_filtering[a;q=0.7,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_basic_filtering[a-language_tags16-expected_returned16]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_basic_filtering[a-b;q=0.5,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_best_match[bar,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_best_match[en-gb,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_best_match[en-gb;q=0.5,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup_default_tag_and_default_cannot_both_be_None", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup_default_range_cannot_be_asterisk", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[aA;q=0.3,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[bB-Cc;q=0.8,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[de-ch-language_tags2-None-default-tag-None-de-CH]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[de-ch-language_tags3-None-default-tag-None-de]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[zh-Hant-CN-x-private1-private2-language_tags4-None-default-tag-None-zh-Hant-CN-x-private1-private2]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[zh-Hant-CN-x-private1-private2-language_tags5-None-default-tag-None-zh-Hant-CN-x-private1]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[zh-Hant-CN-x-private1-private2-language_tags6-None-default-tag-None-zh-Hant-CN]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[zh-Hant-CN-x-private1-private2-language_tags7-None-default-tag-None-zh-Hant-CN]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[zh-Hant-CN-x-private1-private2-language_tags8-None-default-tag-None-zh-Hant]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[zh-Hant-CN-x-private1-private2-language_tags9-None-default-tag-None-zh]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[zh-Hant-CN-x-private1-private2-language_tags10-None-default-tag-None-default-tag]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[AA-T-subtag-language_tags11-None-default-tag-None-aA]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[AA-1-subtag-language_tags12-None-default-tag-None-aA]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[Aa-P-subtag-8-subtag-language_tags13-None-default-tag-None-aA]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[aA-3-subTag-C-subtag-language_tags14-None-default-tag-None-aA]", 
"tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[T-subtag-language_tags15-None-default-tag-None-t-SubTag]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[T-subtag-language_tags16-None-default-tag-None-default-tag]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[*,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[*-language_tags18-None-default-tag-None-default-tag]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[dd,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[aa,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[fr-FR,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[aa-bb,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[aa-language_tags42-None-None-0-0]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[Aa,", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[aa-language_tags44-None-None-<lambda>-callable", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[range-language_tags50-None-default-tag-None-default-tag]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[range-language_tags51--default-tag-None-default-tag]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[range-language_tags52--default-tag-None-default-tag]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[range-language_tags53---None-]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_lookup[range-language_tags54-default-range--None-]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_quality[en-gb-en-gb-1]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_quality[en-gb;q=0.5-en-gb-0.5]", "tests/test_acceptparse.py::TestAcceptLanguageValidHeader::test_quality[en-gb-sr-Cyrl-None]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___init__", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___add___None", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___add___invalid_value[]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___add___invalid_value[right_operand1]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___add___invalid_value[right_operand2]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___add___invalid_value[right_operand3]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___add___invalid_value[en_gb]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___add___invalid_value[right_operand5]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___add___invalid_value[right_operand6]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___add___invalid_value[right_operand7]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___add___other_type_with_invalid___str__[]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___add___other_type_with_invalid___str__[en_gb]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___add___valid_value[en-gb;q=0.5,", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___add___valid_value[value1-en-gb;q=0.5,", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___add___valid_value[value2-en-gb;q=0.5,", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___add___valid_value[value3-es,", 
"tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___add___other_type_with_valid___str__", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___add___AcceptLanguageValidHeader", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___add___AcceptLanguageNoHeader", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___add___AcceptLanguageInvalidHeader[]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___add___AcceptLanguageInvalidHeader[en_gb]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___bool__", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___contains__", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___iter__", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___radd___None", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___radd___invalid_value[]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___radd___invalid_value[left_operand1]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___radd___invalid_value[left_operand2]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___radd___invalid_value[left_operand3]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___radd___invalid_value[en_gb]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___radd___invalid_value[left_operand5]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___radd___invalid_value[left_operand6]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___radd___invalid_value[left_operand7]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___radd___other_type_with_invalid___str__[]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___radd___other_type_with_invalid___str__[en_gb]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___radd___other_type_with_invalid___str__[,]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___radd___valid_value[en-gb;q=0.5,", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___radd___valid_value[value1-en-gb;q=0.5,", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___radd___valid_value[value2-en-gb;q=0.5,", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___radd___valid_value[value3-es,", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___radd___other_type_with_valid___str__", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___repr__", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test___str__", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test_basic_filtering", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test_best_match[offers0-None-foo]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test_best_match[offers1-None-foo]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test_best_match[offers2-None-bar]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test_best_match[offers3-None-bar]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test_best_match[offers4-default_match4-bar]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test_best_match[offers5-fallback-fallback]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test_lookup_default_tag_and_default_cannot_both_be_None", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test_lookup[default-tag-default-default-tag]", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test_lookup[None-0-0]", 
"tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test_lookup[None-<lambda>-callable", "tests/test_acceptparse.py::TestAcceptLanguageNoHeader::test_quality", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___init__", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___add___None", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___add___invalid_value[]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___add___invalid_value[right_operand1]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___add___invalid_value[right_operand2]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___add___invalid_value[right_operand3]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___add___invalid_value[en_gb]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___add___invalid_value[right_operand5]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___add___invalid_value[right_operand6]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___add___invalid_value[right_operand7]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___add___other_type_with_invalid___str__[]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___add___other_type_with_invalid___str__[en_gb]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___add___valid_header_value[en]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___add___valid_header_value[value1]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___add___valid_header_value[value2]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___add___valid_header_value[value3]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___add___other_type_valid_header_value", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___add___AcceptLanguageValidHeader", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___add___AcceptLanguageNoHeader", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___add___AcceptLanguageInvalidHeader", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___bool__", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___contains__", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___iter__", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___radd___None", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___radd___invalid_value[]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___radd___invalid_value[left_operand1]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___radd___invalid_value[left_operand2]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___radd___invalid_value[left_operand3]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___radd___invalid_value[en_gb]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___radd___invalid_value[left_operand5]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___radd___invalid_value[left_operand6]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___radd___invalid_value[left_operand7]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___radd___other_type_with_invalid___str__[]", 
"tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___radd___other_type_with_invalid___str__[en_gb]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___radd___valid_header_value[en]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___radd___valid_header_value[value1]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___radd___valid_header_value[value2]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___radd___valid_header_value[value3]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___radd___other_type_valid_header_value", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___repr__", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test___str__", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test_basic_filtering", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test_best_match[offers0-None-foo]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test_best_match[offers1-None-foo]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test_best_match[offers2-None-bar]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test_best_match[offers3-None-bar]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test_best_match[offers4-default_match4-bar]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test_best_match[offers5-fallback-fallback]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test_lookup_default_tag_and_default_cannot_both_be_None", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test_lookup[default-tag-default-default-tag]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test_lookup[None-0-0]", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test_lookup[None-<lambda>-callable", "tests/test_acceptparse.py::TestAcceptLanguageInvalidHeader::test_quality", "tests/test_acceptparse.py::TestCreateAcceptLanguageHeader::test_header_value_is_None", "tests/test_acceptparse.py::TestCreateAcceptLanguageHeader::test_header_value_is_valid", "tests/test_acceptparse.py::TestCreateAcceptLanguageHeader::test_header_value_is_invalid[]", "tests/test_acceptparse.py::TestCreateAcceptLanguageHeader::test_header_value_is_invalid[en_gb]", "tests/test_acceptparse.py::TestAcceptLanguageProperty::test_fget_header_is_None", "tests/test_acceptparse.py::TestAcceptLanguageProperty::test_fget_header_is_valid", "tests/test_acceptparse.py::TestAcceptLanguageProperty::test_fget_header_is_invalid", "tests/test_acceptparse.py::TestAcceptLanguageProperty::test_fset_value_is_None", "tests/test_acceptparse.py::TestAcceptLanguageProperty::test_fset_value_is_invalid", "tests/test_acceptparse.py::TestAcceptLanguageProperty::test_fset_value_is_valid", "tests/test_acceptparse.py::TestAcceptLanguageProperty::test_fset_value_types[en-gb;q=0.5,", "tests/test_acceptparse.py::TestAcceptLanguageProperty::test_fset_value_types[value1-en-gb;q=0.5,", "tests/test_acceptparse.py::TestAcceptLanguageProperty::test_fset_value_types[value2-en-gb;q=0.5,", "tests/test_acceptparse.py::TestAcceptLanguageProperty::test_fset_value_types[value3-es,", "tests/test_acceptparse.py::TestAcceptLanguageProperty::test_fset_other_type_with_valid___str__", "tests/test_acceptparse.py::TestAcceptLanguageProperty::test_fset_AcceptLanguageNoHeader", "tests/test_acceptparse.py::TestAcceptLanguageProperty::test_fset_AcceptLanguageValidHeader", 
"tests/test_acceptparse.py::TestAcceptLanguageProperty::test_fset_AcceptLanguageInvalidHeader", "tests/test_acceptparse.py::TestAcceptLanguageProperty::test_fdel_header_key_in_environ", "tests/test_acceptparse.py::TestAcceptLanguageProperty::test_fdel_header_key_not_in_environ", "tests/test_acceptparse.py::test_MIMEAccept_init_warns", "tests/test_acceptparse.py::test_MIMEAccept_init", "tests/test_acceptparse.py::test_MIMEAccept_parse", "tests/test_acceptparse.py::test_MIMEAccept_accept_html", "tests/test_acceptparse.py::test_MIMEAccept_contains", "tests/test_acceptparse.py::test_MIMEAccept_json", "tests/test_acceptparse.py::test_MIMEAccept_no_raise_invalid", "tests/test_acceptparse.py::test_MIMEAccept_iter", "tests/test_acceptparse.py::test_MIMEAccept_str", "tests/test_acceptparse.py::test_MIMEAccept_add", "tests/test_acceptparse.py::test_MIMEAccept_radd", "tests/test_acceptparse.py::test_MIMEAccept_repr", "tests/test_acceptparse.py::test_MIMEAccept_quality" ]
[]
null
3,017
[ "CHANGES.txt", "src/webob/acceptparse.py" ]
[ "CHANGES.txt", "src/webob/acceptparse.py" ]
imageio__imageio-374
7df4f3afd0fb54845d28237f7c794c81e195684a
2018-09-04 11:34:26
7df4f3afd0fb54845d28237f7c794c81e195684a
diff --git a/imageio/plugins/freeimage.py b/imageio/plugins/freeimage.py
index 2e52c3d..7f93cb6 100644
--- a/imageio/plugins/freeimage.py
+++ b/imageio/plugins/freeimage.py
@@ -195,7 +195,7 @@ class FreeimagePngFormat(FreeimageFormat):
     Parameters for reading
     ----------------------
     ignoregamma : bool
-        Avoid gamma correction. Default False.
+        Avoid gamma correction. Default True.

     Parameters for saving
     ---------------------
@@ -212,7 +212,7 @@ class FreeimagePngFormat(FreeimageFormat):
     """

     class Reader(FreeimageFormat.Reader):
-        def _open(self, flags=0, ignoregamma=False):
+        def _open(self, flags=0, ignoregamma=True):
             # Build flags from kwargs
             flags = int(flags)
             if ignoregamma:
diff --git a/imageio/plugins/pillow.py b/imageio/plugins/pillow.py
index f863241..ef11c5a 100644
--- a/imageio/plugins/pillow.py
+++ b/imageio/plugins/pillow.py
@@ -212,7 +212,7 @@ class PNGFormat(PillowFormat):
     Parameters for reading
     ----------------------
     ignoregamma : bool
-        Avoid gamma correction. Default False.
+        Avoid gamma correction. Default True.

     pilmode : str
         From the Pillow documentation:
@@ -272,12 +272,16 @@ class PNGFormat(PillowFormat):
     """

     class Reader(PillowFormat.Reader):
-        def _open(self, pilmode=None, as_gray=False, ignoregamma=False):
+        def _open(self, pilmode=None, as_gray=False, ignoregamma=True):
             return PillowFormat.Reader._open(self, pilmode=pilmode, as_gray=as_gray)

         def _get_data(self, index):
             im, info = PillowFormat.Reader._get_data(self, index)
-            if not self.request.kwargs.get("ignoregamma", False):
+            if not self.request.kwargs.get("ignoregamma", True):
+                # The gamma value in the file represents the gamma factor for the
+                # hardware on the system where the file was created, and is meant
+                # to be able to match the colors with the system on which the
+                # image is shown. See also issue #366
                 try:
                     gamma = float(info["gamma"])
                 except (KeyError, ValueError):
When should the Pillow plugin apply gamma correction?

*original title: Reading PNG files differently than scipy.misc*

I've been using `scipy.misc.imread` for reading (tomographic) images for processing. After a recent upgrade of my system I'm told this is deprecated in favor of `imageio.imread`. Unfortunately my images are read differently depending on which I use, namely with a different gray-scale range, as can be seen below.

```
import scipy.misc
import imageio
import matplotlib.pyplot as plt
%matplotlib inline
import numpy

r = 'test.png'  # attached to this issue

img_scipy = scipy.misc.imread(r)
img_imageio = imageio.imread(r)

plt.subplot(121)
plt.title('scipy min: %s, max: %s' % (numpy.min(img_scipy), numpy.max(img_scipy)))
plt.imshow(img_scipy)
plt.subplot(122)
plt.imshow(img_imageio)
plt.title('imageio min: %s, max: %s' % (numpy.min(img_imageio), numpy.max(img_imageio)))
plt.show()
```

![output](https://user-images.githubusercontent.com/1651235/43787395-2bf373c8-9a6b-11e8-98d4-1c04190ce38c.png)

[ImageJ](http://fiji.sc/) reports a maximum gray value of 162 for the image, which is what I would have expected...

imageio.__version__ = '2.3.0'
scipy.__version__ = '1.1.0'

![test.png](https://user-images.githubusercontent.com/1651235/43787150-a6cf810a-9a6a-11e8-9a3d-71f06eb6e2db.png)

How can I get my correct gray value range back?
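For context, the `ignoregamma` keyword changed by the patch above controls this behaviour directly. A minimal sketch (not part of the original issue), assuming the reporter's `test.png` is available locally, of how the keyword affects the result:

```python
import imageio
import numpy

r = "test.png"  # the reporter's attachment (assumed to be available locally)

# Read the stored pixel values (what ImageJ and scipy.misc.imread report) and the
# gamma-corrected values; `ignoregamma` is the keyword whose default the patch
# flips to True for the PNG plugins.
img_raw = imageio.imread(r, ignoregamma=True)
img_gamma = imageio.imread(r, ignoregamma=False)

print("raw:   min %s, max %s" % (numpy.min(img_raw), numpy.max(img_raw)))
print("gamma: min %s, max %s" % (numpy.min(img_gamma), numpy.max(img_gamma)))
```

With `ignoregamma=True` the maximum should match the 162 that ImageJ reports for the stored data, which is the behaviour the patch makes the default.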
imageio/imageio
diff --git a/tests/test_freeimage.py b/tests/test_freeimage.py
index 93e0d74..537fe76 100644
--- a/tests/test_freeimage.py
+++ b/tests/test_freeimage.py
@@ -493,6 +493,30 @@ def test_other():
     raises(Exception, imageio.imsave, fnamebase + ".jng", im, "JNG")


+def test_gamma_correction():
+    need_internet()
+
+    fname = get_remote_file("images/kodim03.png")
+
+    # Load image three times
+    im1 = imageio.imread(fname, format="PNG-FI")
+    im2 = imageio.imread(fname, ignoregamma=True, format="PNG-FI")
+    im3 = imageio.imread(fname, ignoregamma=False, format="PNG-FI")
+
+    # Default is to ignore gamma
+    assert np.all(im1 == im2)
+
+    # Test result depending of application of gamma
+    assert im1.mean() == im2.mean()
+
+    # TODO: We have assert im2.mean() == im3.mean()
+    # But this is wrong, we want: assert im2.mean() < im3.mean()
+
+    # test_regression_302
+    for im in (im1, im2, im3):
+        assert im.shape == (512, 768, 3) and im.dtype == "uint8"
+
+
 if __name__ == "__main__":
     # test_animated_gif()
     run_tests_if_main()
diff --git a/tests/test_pillow.py b/tests/test_pillow.py
index 290246d..fed9041 100644
--- a/tests/test_pillow.py
+++ b/tests/test_pillow.py
@@ -332,13 +332,27 @@ def test_images_with_transparency():
     assert im.shape == (24, 30, 4)


-def test_regression_302():
-    # When using gamma correction, the result should keep the same dtype
+def test_gamma_correction():
     need_internet()

     fname = get_remote_file("images/kodim03.png")
-    im = imageio.imread(fname)
-    assert im.shape == (512, 768, 3) and im.dtype == "uint8"
+
+    # Load image three times
+    im1 = imageio.imread(fname)
+    im2 = imageio.imread(fname, ignoregamma=True)
+    im3 = imageio.imread(fname, ignoregamma=False)
+
+    # Default is to ignore gamma
+    assert np.all(im1 == im2)
+
+    # Test result depending of application of gamma
+    assert im1.meta["gamma"] < 1
+    assert im1.mean() == im2.mean()
+    assert im2.mean() < im3.mean()
+
+    # test_regression_302
+    for im in (im1, im2, im3):
+        assert im.shape == (512, 768, 3) and im.dtype == "uint8"


 def test_inside_zipfile():
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_media", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 2 }
2.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "black", "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y gcc libfreeimage3" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
black==25.1.0 click==8.1.8 coverage==7.8.0 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work -e git+https://github.com/imageio/imageio.git@7df4f3afd0fb54845d28237f7c794c81e195684a#egg=imageio iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work mypy-extensions==1.0.0 numpy==2.0.2 packaging @ file:///croot/packaging_1734472117206/work pathspec==0.12.1 pillow==11.1.0 platformdirs==4.3.7 pluggy @ file:///croot/pluggy_1733169602837/work pytest @ file:///croot/pytest_1738938843180/work pytest-cov==6.0.0 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work typing_extensions==4.13.0
name: imageio channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - black==25.1.0 - click==8.1.8 - coverage==7.8.0 - mypy-extensions==1.0.0 - numpy==2.0.2 - pathspec==0.12.1 - pillow==11.1.0 - platformdirs==4.3.7 - pytest-cov==6.0.0 - typing-extensions==4.13.0 prefix: /opt/conda/envs/imageio
[ "tests/test_pillow.py::test_gamma_correction" ]
[ "tests/test_pillow.py::test_png", "tests/test_pillow.py::test_gif", "tests/test_pillow.py::test_animated_gif", "tests/test_pillow.py::test_images_with_transparency" ]
[ "tests/test_freeimage.py::test_get_ref_im", "tests/test_freeimage.py::test_get_fi_lib", "tests/test_freeimage.py::test_freeimage_format", "tests/test_freeimage.py::test_freeimage_lib", "tests/test_freeimage.py::test_png", "tests/test_freeimage.py::test_png_dtypes", "tests/test_freeimage.py::test_jpg", "tests/test_freeimage.py::test_jpg_more", "tests/test_freeimage.py::test_bmp", "tests/test_freeimage.py::test_gif", "tests/test_freeimage.py::test_animated_gif", "tests/test_freeimage.py::test_ico", "tests/test_freeimage.py::test_mng", "tests/test_freeimage.py::test_other", "tests/test_freeimage.py::test_gamma_correction", "tests/test_pillow.py::test_pillow_format", "tests/test_pillow.py::test_png_remote", "tests/test_pillow.py::test_jpg", "tests/test_pillow.py::test_jpg_more", "tests/test_pillow.py::test_inside_zipfile", "tests/test_pillow.py::test_scipy_imread_compat" ]
[]
BSD 2-Clause "Simplified" License
3,018
[ "imageio/plugins/freeimage.py", "imageio/plugins/pillow.py" ]
[ "imageio/plugins/freeimage.py", "imageio/plugins/pillow.py" ]
zalando-stups__senza-535
935f4111323f6b98ff136ae44a0d57825ac763c7
2018-09-04 14:15:05
e9f84724628b4761f8d5da4d37a2993f11d6433b
lmineiro: :+1: jmcs: :+1:
diff --git a/senza/components/elastigroup.py b/senza/components/elastigroup.py
index 7d39d6b..301359a 100644
--- a/senza/components/elastigroup.py
+++ b/senza/components/elastigroup.py
@@ -64,7 +64,7 @@ def component_elastigroup(definition, configuration, args, info, force, account_
     extract_instance_profile(args, definition, configuration, elastigroup_config)
     # cfn definition
     access_token = _extract_spotinst_access_token(definition)
-    config_name = configuration["Name"] + "Config"
+    config_name = configuration["Name"]
     definition["Resources"][config_name] = {
         "Type": "Custom::elastigroup",
         "Properties": {
Spotinst elastigroup adds "Config" to the resource name. This invalidates many existing Senza templates. It should keep the original name.
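A minimal sketch (not part of the original report) of the naming change, using the `eg1` component name from the accompanying test; the dictionary is illustrative only:

```python
# The component registers the Spotinst elastigroup as a CloudFormation custom
# resource. Before the fix the resource key had "Config" appended; after the fix
# it keeps the name given in the Senza definition, so existing templates that
# reference the resource by name keep working.
configuration = {"Name": "eg1"}  # illustrative component configuration

old_key = configuration["Name"] + "Config"  # what the buggy code produced
new_key = configuration["Name"]             # what existing templates expect

assert old_key == "eg1Config"
assert new_key == "eg1"
```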
zalando-stups/senza
diff --git a/tests/test_elastigroup.py b/tests/test_elastigroup.py
index 339bf4e..b98621e 100644
--- a/tests/test_elastigroup.py
+++ b/tests/test_elastigroup.py
@@ -44,7 +44,7 @@ def test_component_elastigroup_defaults(monkeypatch):
     result = component_elastigroup(definition, configuration, args, info, False, mock_account_info)

-    properties = result["Resources"]["eg1Config"]["Properties"]
+    properties = result["Resources"]["eg1"]["Properties"]
     assert properties["accountId"] == 'act-12345abcdef'
     assert properties["group"]["capacity"] == {"target": 1, "minimum": 1, "maximum": 1}
     instance_types = properties["group"]["compute"]["instanceTypes"]
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 1 }
2.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "mock", "responses" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
arrow==1.3.0 boto3==1.37.23 botocore==1.37.23 certifi==2025.1.31 charset-normalizer==3.4.1 click==8.1.8 clickclick==20.10.2 coverage==7.8.0 dnspython==2.7.0 exceptiongroup==1.2.2 idna==3.10 importlib_metadata==8.6.1 iniconfig==2.1.0 jmespath==1.0.1 mock==5.2.0 packaging==24.2 pluggy==1.5.0 pystache==0.6.8 pytest==8.3.5 pytest-cov==6.0.0 python-dateutil==2.9.0.post0 PyYAML==6.0.2 raven==6.10.0 requests==2.32.3 responses==0.25.7 s3transfer==0.11.4 six==1.17.0 stups-cli-support==1.1.22 stups-pierone==1.1.56 -e git+https://github.com/zalando-stups/senza.git@935f4111323f6b98ff136ae44a0d57825ac763c7#egg=stups_senza stups-tokens==1.1.19 stups-zign==1.2 tomli==2.2.1 types-python-dateutil==2.9.0.20241206 typing==3.7.4.3 urllib3==1.26.20 zipp==3.21.0
name: senza channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - arrow==1.3.0 - boto3==1.37.23 - botocore==1.37.23 - certifi==2025.1.31 - charset-normalizer==3.4.1 - click==8.1.8 - clickclick==20.10.2 - coverage==7.8.0 - dnspython==2.7.0 - exceptiongroup==1.2.2 - idna==3.10 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - jmespath==1.0.1 - mock==5.2.0 - packaging==24.2 - pluggy==1.5.0 - pystache==0.6.8 - pytest==8.3.5 - pytest-cov==6.0.0 - python-dateutil==2.9.0.post0 - pyyaml==6.0.2 - raven==6.10.0 - requests==2.32.3 - responses==0.25.7 - s3transfer==0.11.4 - six==1.17.0 - stups-cli-support==1.1.22 - stups-pierone==1.1.56 - stups-tokens==1.1.19 - stups-zign==1.2 - tomli==2.2.1 - types-python-dateutil==2.9.0.20241206 - typing==3.7.4.3 - urllib3==1.26.20 - zipp==3.21.0 prefix: /opt/conda/envs/senza
[ "tests/test_elastigroup.py::test_component_elastigroup_defaults" ]
[]
[ "tests/test_elastigroup.py::test_missing_access_token", "tests/test_elastigroup.py::test_spotinst_account_resolution", "tests/test_elastigroup.py::test_spotinst_account_resolution_failure", "tests/test_elastigroup.py::test_block_mappings", "tests/test_elastigroup.py::test_auto_scaling_rules", "tests/test_elastigroup.py::test_detailed_monitoring", "tests/test_elastigroup.py::test_prediction_strategy", "tests/test_elastigroup.py::test_autoscaling_capacity", "tests/test_elastigroup.py::test_product", "tests/test_elastigroup.py::test_standard_tags", "tests/test_elastigroup.py::test_extract_subnets", "tests/test_elastigroup.py::test_load_balancers", "tests/test_elastigroup.py::test_public_ips", "tests/test_elastigroup.py::test_extract_image_id", "tests/test_elastigroup.py::test_extract_security_group_ids", "tests/test_elastigroup.py::test_missing_instance_type", "tests/test_elastigroup.py::test_extract_instance_types", "tests/test_elastigroup.py::test_extract_instance_profile" ]
[]
Apache License 2.0
3,019
[ "senza/components/elastigroup.py" ]
[ "senza/components/elastigroup.py" ]
conan-io__conan-3477
82631b05304f07dddfbd9f2cb0721e10fcd43d17
2018-09-04 15:41:29
b02cce4e78d5982e00b66f80a683465b3c679033
diff --git a/conans/model/conan_file.py b/conans/model/conan_file.py index f025e5367..c77ae3263 100644 --- a/conans/model/conan_file.py +++ b/conans/model/conan_file.py @@ -22,13 +22,13 @@ def create_options(conanfile): default_options = getattr(conanfile, "default_options", None) if default_options: - if isinstance(default_options, (list, tuple)): + if isinstance(default_options, (list, tuple, dict)): default_values = OptionsValues(default_options) elif isinstance(default_options, str): default_values = OptionsValues.loads(default_options) else: - raise ConanException("Please define your default_options as list or " - "multiline string") + raise ConanException("Please define your default_options as list, " + "multiline string or dictionary") options.values = default_values return options except Exception as e: diff --git a/conans/model/options.py b/conans/model/options.py index da7cde5b0..51feaa4fb 100644 --- a/conans/model/options.py +++ b/conans/model/options.py @@ -1,8 +1,11 @@ -from conans.util.sha import sha1 -from conans.errors import ConanException + import yaml import six import fnmatch +from collections import Counter + +from conans.util.sha import sha1 +from conans.errors import ConanException _falsey_options = ["false", "none", "0", "off", ""] @@ -162,19 +165,20 @@ class OptionsValues(object): # convert tuple "Pkg:option=value", "..." to list of tuples(name, value) if isinstance(values, tuple): - new_values = [] - for v in values: - option, value = v.split("=", 1) - new_values.append((option.strip(), value.strip())) - values = new_values + values = [item.split("=", 1) for item in values] + + # convert dict {"Pkg:option": "value", "..": "..", ...} to list of tuples (name, value) + if isinstance(values, dict): + values = [(k, v) for k, v in values.items()] # handle list of tuples (name, value) for (k, v) in values: + k = k.strip() + v = v.strip() if isinstance(v, six.string_types) else v tokens = k.split(":") if len(tokens) == 2: package, option = tokens - package_values = self._reqs_options.setdefault(package.strip(), - PackageOptionValues()) + package_values = self._reqs_options.setdefault(package.strip(), PackageOptionValues()) package_values.add_option(option, v) else: self._package_values.add_option(k, v) @@ -264,14 +268,8 @@ class OptionsValues(object): other_option=3 OtherPack:opt3=12.1 """ - result = [] - for line in text.splitlines(): - line = line.strip() - if not line: - continue - name, value = line.split("=", 1) - result.append((name.strip(), value.strip())) - return OptionsValues(result) + options = tuple(line.strip() for line in text.splitlines() if line.strip()) + return OptionsValues(options) @property def sha(self):
Allow specifying default options as a dictionary. To help us debug your issue, please explain: - [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md). - [x] I've specified the Conan version, operating system version and any tool that can be relevant. - [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion. Hello. Looking at the structure of the conanfile.py, I have always found it odd that the options are specified as a dictionary, but the default_options as a list of tuples. This is not an issue for packages that have a small number of options, but it gets ugly for projects with a lot of options. I always end up doing the following: ```python _default_options = { "shared": True, "tools": True, ... } default_options = [(k, str(v)) for k, v in _default_options.items()] ``` In my opinion, it would be a great enhancement if default_options allowed specifying values as a dictionary; it would also make the style of the conanfile.py more uniform. Software information: I'm currently using ```Conan version 1.2.0``` on ``` ProductName: Mac OS X ProductVersion: 10.13.3 BuildVersion: 17D102 ``` with ``` Xcode 9.2 Build version 9C40b ``` and ```Python 3.6.4``` installed from brew
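As a hedged sketch (not taken from the issue or the Conan docs), the patch earlier in this record extends `create_options`/`OptionsValues` to accept a `dict`, which enables a recipe roughly like the one below; the package name and option names are placeholders.

```python
from conans import ConanFile


class MyLibConan(ConanFile):
    name = "mylib"      # placeholder package name
    version = "0.1"
    options = {"shared": [True, False], "tools": [True, False]}
    # With dict support, the defaults can mirror the style of `options`
    # instead of being written as a list of tuples or a multiline string.
    default_options = {"shared": True, "tools": True}
```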
conan-io/conan
diff --git a/conans/test/integration/options_test.py b/conans/test/integration/options_test.py index 5ea8211a6..05b59e375 100644 --- a/conans/test/integration/options_test.py +++ b/conans/test/integration/options_test.py @@ -65,6 +65,73 @@ zlib/0.1@lasote/testing conaninfo = load(os.path.join(client.current_folder, CONANINFO)) self.assertNotIn("zlib:shared=True", conaninfo) + def test_default_options(self): + client = TestClient() + conanfile = """ +from conans import ConanFile + +class MyConanFile(ConanFile): + name = "MyConanFile" + version = "1.0" + options = {"config": %s} + default_options = "config%s" + + def configure(self): + if self.options.config: + self.output.info("Boolean evaluation") + if self.options.config is None: + self.output.info("None evaluation") + if self.options.config == "None": + self.output.info("String evaluation") +""" + # Using "ANY" as possible options + client.save({"conanfile.py": conanfile % ("\"ANY\"", "")}) + error = client.run("create . danimtb/testing", ignore_error=True) + self.assertTrue(error) + self.assertIn("Error while initializing options.", client.out) + client.save({"conanfile.py": conanfile % ("\"ANY\"", "=None")}) + client.run("create . danimtb/testing") + self.assertNotIn("Boolean evaluation", client.out) + self.assertNotIn("None evaluation", client.out) + self.assertIn("String evaluation", client.out) + + # Using None as possible options + client.save({"conanfile.py": conanfile % ("[None]", "")}) + error = client.run("create . danimtb/testing", ignore_error=True) + self.assertTrue(error) + self.assertIn("Error while initializing options.", client.out) + client.save({"conanfile.py": conanfile % ("[None]", "=None")}) + client.run("create . danimtb/testing") + self.assertNotIn("Boolean evaluation", client.out) + self.assertNotIn("None evaluation", client.out) + self.assertIn("String evaluation", client.out) + + # Using "None" as possible options + client.save({"conanfile.py": conanfile % ("[\"None\"]", "")}) + error = client.run("create . danimtb/testing", ignore_error=True) + self.assertTrue(error) + self.assertIn("Error while initializing options.", client.out) + client.save({"conanfile.py": conanfile % ("[\"None\"]", "=None")}) + client.run("create . danimtb/testing") + self.assertNotIn("Boolean evaluation", client.out) + self.assertNotIn("None evaluation", client.out) + self.assertIn("String evaluation", client.out) + client.save({"conanfile.py": conanfile % ("[\"None\"]", "=\\\"None\\\"")}) + error = client.run("create . danimtb/testing", ignore_error=True) + self.assertTrue(error) + self.assertIn("'\"None\"' is not a valid 'options.config' value", client.out) + + # Using "ANY" as possible options and "otherstringvalue" as default + client.save({"conanfile.py": conanfile % ("[\"otherstringvalue\"]", "")}) + error = client.run("create . danimtb/testing", ignore_error=True) + self.assertTrue(error) + self.assertIn("Error while initializing options.", client.out) + client.save({"conanfile.py": conanfile % ("\"ANY\"", "=otherstringvalue")}) + client.run("create . 
danimtb/testing") + self.assertIn("Boolean evaluation", client.out) + self.assertNotIn("None evaluation", client.out) + self.assertNotIn("String evaluation", client.out) + def general_scope_options_test(self): # https://github.com/conan-io/conan/issues/2538 client = TestClient() diff --git a/conans/test/model/options_test.py b/conans/test/model/options_test.py index c672bf204..6b4a24e2d 100644 --- a/conans/test/model/options_test.py +++ b/conans/test/model/options_test.py @@ -1,3 +1,4 @@ +import six import unittest from conans.model.options import OptionsValues, PackageOptions, Options, PackageOptionValues,\ option_undefined_msg @@ -239,6 +240,39 @@ class OptionsValuesTest(unittest.TestCase): option_values = OptionsValues(self.sut.as_list()) self.assertEqual(option_values.dumps(), self.sut.dumps()) + def test_from_dict(self): + options_as_dict = dict([item.split('=') for item in self.sut.dumps().splitlines()]) + option_values = OptionsValues(options_as_dict) + self.assertEqual(option_values.dumps(), self.sut.dumps()) + + def test_consistency(self): + def _check_equal(hs1, hs2, hs3, hs4): + opt_values1 = OptionsValues(hs1) + opt_values2 = OptionsValues(hs2) + opt_values3 = OptionsValues(hs3) + opt_values4 = OptionsValues(hs4) + + self.assertEqual(opt_values1.dumps(), opt_values2.dumps()) + self.assertEqual(opt_values1.dumps(), opt_values3.dumps()) + self.assertEqual(opt_values1.dumps(), opt_values4.dumps()) + + # Check that all possible input options give the same result + _check_equal([('opt', 3)], [('opt', '3'), ], ('opt=3', ), {'opt': 3}) + _check_equal([('opt', True)], [('opt', 'True'), ], ('opt=True', ), {'opt': True}) + _check_equal([('opt', False)], [('opt', 'False'), ], ('opt=False', ), {'opt': False}) + _check_equal([('opt', None)], [('opt', 'None'), ], ('opt=None', ), {'opt': None}) + _check_equal([('opt', 0)], [('opt', '0'), ], ('opt=0', ), {'opt': 0}) + _check_equal([('opt', '')], [('opt', ''), ], ('opt=', ), {'opt': ''}) + + # Check for leading and trailing spaces + _check_equal([(' opt ', 3)], [(' opt ', '3'), ], (' opt =3', ), {' opt ': 3}) + _check_equal([('opt', ' value ')], [('opt', ' value '), ], ('opt= value ', ), + {'opt': ' value '}) + + # This is expected behaviour: + self.assertNotEqual(OptionsValues([('opt', ''), ]).dumps(), + OptionsValues(('opt=""', )).dumps()) + def test_dumps(self): self.assertEqual(self.sut.dumps(), "\n".join(["optimized=3", "static=True", @@ -265,3 +299,34 @@ class OptionsValuesTest(unittest.TestCase): "Poco:new_option=0"])) self.assertEqual(self.sut.sha, "2442d43f1d558621069a15ff5968535f818939b5") + + def test_loads_exceptions(self): + emsg = "not enough values to unpack" if six.PY3 else "need more than 1 value to unpack" + with self.assertRaisesRegexp(ValueError, emsg): + OptionsValues.loads("a=2\nconfig\nb=3") + + with self.assertRaisesRegexp(ValueError, emsg): + OptionsValues.loads("config\na=2\ncommit\nb=3") + + def test_exceptions_empty_value(self): + emsg = "not enough values to unpack" if six.PY3 else "need more than 1 value to unpack" + with self.assertRaisesRegexp(ValueError, emsg): + OptionsValues("a=2\nconfig\nb=3") + + with self.assertRaisesRegexp(ValueError, emsg): + OptionsValues(("a=2", "config")) + + with self.assertRaisesRegexp(ValueError, emsg): + OptionsValues([('a', 2), ('config', ), ]) + + def test_exceptions_repeated_value(self): + try: + OptionsValues.loads("a=2\na=12\nb=3").dumps() + OptionsValues(("a=2", "b=23", "a=12")) + OptionsValues([('a', 2), ('b', True), ('a', '12')]) + except Exception as e: + self.fail("Not 
expected exception: {}".format(e)) + + def test_package_with_spaces(self): + self.assertEqual(OptionsValues([('pck2:opt', 50), ]).dumps(), + OptionsValues([('pck2 :opt', 50), ]).dumps()) diff --git a/conans/test/model/transitive_reqs_test.py b/conans/test/model/transitive_reqs_test.py index 5ef9a24ea..fb867ce48 100644 --- a/conans/test/model/transitive_reqs_test.py +++ b/conans/test/model/transitive_reqs_test.py @@ -560,6 +560,17 @@ class HelloConan(ConanFile): """ _assert_conanfile(hello_content_tuple) + hello_content_dict = """ +from conans import ConanFile + +class HelloConan(ConanFile): + name = "Hello" + version = "1.2" + requires = "Say/0.1@user/testing" + default_options = {"Say:myoption" : 234, } # To test dict definition +""" + _assert_conanfile(hello_content_dict) + def test_transitive_two_levels_options(self): say_content = """ from conans import ConanFile
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 2 }
1.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "nose", "nose-cov", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "conans/requirements.txt", "conans/requirements_server.txt", "conans/requirements_dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
astroid==2.11.7 attrs==22.2.0 beautifulsoup4==4.12.3 bottle==0.12.25 certifi==2021.5.30 charset-normalizer==2.0.12 codecov==2.1.13 colorama==0.3.9 -e git+https://github.com/conan-io/conan.git@82631b05304f07dddfbd9f2cb0721e10fcd43d17#egg=conan cov-core==1.15.0 coverage==4.2 deprecation==2.0.7 dill==0.3.4 distro==1.1.0 fasteners==0.19 future==0.16.0 idna==3.10 importlib-metadata==4.8.3 iniconfig==1.1.1 isort==5.10.1 lazy-object-proxy==1.7.1 mccabe==0.7.0 mock==1.3.0 node-semver==0.2.0 nose==1.3.7 nose-cov==1.6 packaging==21.3 parameterized==0.8.1 patch==1.16 pbr==6.1.1 platformdirs==2.4.0 pluggy==1.0.0 pluginbase==0.7 py==1.11.0 Pygments==2.14.0 PyJWT==1.7.1 pylint==2.13.9 pyparsing==3.1.4 pytest==7.0.1 PyYAML==3.13 requests==2.27.1 six==1.17.0 soupsieve==2.3.2.post1 tomli==1.2.3 typed-ast==1.5.5 typing_extensions==4.1.1 urllib3==1.26.20 waitress==2.0.0 WebOb==1.8.9 WebTest==2.0.35 wrapt==1.16.0 zipp==3.6.0
name: conan channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - astroid==2.11.7 - attrs==22.2.0 - beautifulsoup4==4.12.3 - bottle==0.12.25 - charset-normalizer==2.0.12 - codecov==2.1.13 - colorama==0.3.9 - cov-core==1.15.0 - coverage==4.2 - deprecation==2.0.7 - dill==0.3.4 - distro==1.1.0 - fasteners==0.19 - future==0.16.0 - idna==3.10 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - isort==5.10.1 - lazy-object-proxy==1.7.1 - mccabe==0.7.0 - mock==1.3.0 - node-semver==0.2.0 - nose==1.3.7 - nose-cov==1.6 - packaging==21.3 - parameterized==0.8.1 - patch==1.16 - pbr==6.1.1 - platformdirs==2.4.0 - pluggy==1.0.0 - pluginbase==0.7 - py==1.11.0 - pygments==2.14.0 - pyjwt==1.7.1 - pylint==2.13.9 - pyparsing==3.1.4 - pytest==7.0.1 - pyyaml==3.13 - requests==2.27.1 - six==1.17.0 - soupsieve==2.3.2.post1 - tomli==1.2.3 - typed-ast==1.5.5 - typing-extensions==4.1.1 - urllib3==1.26.20 - waitress==2.0.0 - webob==1.8.9 - webtest==2.0.35 - wrapt==1.16.0 - zipp==3.6.0 prefix: /opt/conda/envs/conan
[ "conans/test/model/options_test.py::OptionsValuesTest::test_consistency", "conans/test/model/options_test.py::OptionsValuesTest::test_from_dict", "conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_basic_transitive_option" ]
[ "conans/test/integration/options_test.py::OptionsTest::test_default_options" ]
[ "conans/test/model/options_test.py::OptionsTest::test_in", "conans/test/model/options_test.py::OptionsTest::test_int", "conans/test/model/options_test.py::OptionsValuesTest::test_dumps", "conans/test/model/options_test.py::OptionsValuesTest::test_exceptions_empty_value", "conans/test/model/options_test.py::OptionsValuesTest::test_exceptions_repeated_value", "conans/test/model/options_test.py::OptionsValuesTest::test_from_list", "conans/test/model/options_test.py::OptionsValuesTest::test_loads_exceptions", "conans/test/model/options_test.py::OptionsValuesTest::test_package_with_spaces", "conans/test/model/options_test.py::OptionsValuesTest::test_sha_constant", "conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_basic", "conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_basic_option", "conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_conditional", "conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_conditional_diamond", "conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_dep_requires_clear", "conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_diamond_conflict", "conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_diamond_conflict_error", "conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_diamond_conflict_options", "conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_diamond_conflict_options_solved", "conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_diamond_conflict_solved", "conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_diamond_no_conflict", "conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_diamond_no_conflict_options", "conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_propagate_indirect_options", "conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_remove_build_requires", "conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_remove_two_build_requires", "conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_simple_override", "conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_transitive", "conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_transitive_diamond_private", "conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_transitive_pattern_options", "conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_transitive_private", "conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_transitive_two_levels", "conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_transitive_two_levels_options", "conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_transitive_two_levels_wrong_options", "conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_version_requires2_change", "conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_version_requires_change", "conans/test/model/transitive_reqs_test.py::ConanRequirementsOptimizerTest::test_avoid_duplicate_expansion", "conans/test/model/transitive_reqs_test.py::ConanRequirementsOptimizerTest::test_expand_conflict_options", "conans/test/model/transitive_reqs_test.py::ConanRequirementsOptimizerTest::test_expand_options", "conans/test/model/transitive_reqs_test.py::ConanRequirementsOptimizerTest::test_expand_requirements", 
"conans/test/model/transitive_reqs_test.py::ConanRequirementsOptimizerTest::test_expand_requirements_direct", "conans/test/model/transitive_reqs_test.py::CoreSettingsTest::test_basic", "conans/test/model/transitive_reqs_test.py::CoreSettingsTest::test_config", "conans/test/model/transitive_reqs_test.py::CoreSettingsTest::test_config_remove", "conans/test/model/transitive_reqs_test.py::CoreSettingsTest::test_config_remove2", "conans/test/model/transitive_reqs_test.py::CoreSettingsTest::test_errors", "conans/test/model/transitive_reqs_test.py::CoreSettingsTest::test_new_configure", "conans/test/model/transitive_reqs_test.py::CoreSettingsTest::test_transitive_two_levels_options" ]
[]
MIT License
3,020
[ "conans/model/options.py", "conans/model/conan_file.py" ]
[ "conans/model/options.py", "conans/model/conan_file.py" ]
conan-io__conan-3479
82631b05304f07dddfbd9f2cb0721e10fcd43d17
2018-09-04 17:48:40
b02cce4e78d5982e00b66f80a683465b3c679033
diff --git a/conans/client/cmd/export.py b/conans/client/cmd/export.py index 020980d7c..f242fb0ad 100644 --- a/conans/client/cmd/export.py +++ b/conans/client/cmd/export.py @@ -60,7 +60,7 @@ def cmd_export(conanfile_path, conanfile, reference, keep_source, output, client keep_source) -def _capture_export_scm_data(conanfile, src_path, destination_folder, output, paths, conan_ref): +def _capture_export_scm_data(conanfile, conanfile_dir, destination_folder, output, paths, conan_ref): scm_src_file = paths.scm_folder(conan_ref) if os.path.exists(scm_src_file): @@ -71,7 +71,7 @@ def _capture_export_scm_data(conanfile, src_path, destination_folder, output, pa if not scm_data or not (scm_data.capture_origin or scm_data.capture_revision): return - scm = SCM(scm_data, src_path) + scm = SCM(scm_data, conanfile_dir) if scm_data.url == "auto": origin = scm.get_remote_url() @@ -87,6 +87,7 @@ def _capture_export_scm_data(conanfile, src_path, destination_folder, output, pa output.success("Revision deduced by 'auto': %s" % scm_data.revision) # Generate the scm_folder.txt file pointing to the src_path + src_path = scm.get_repo_root() save(scm_src_file, src_path.replace("\\", "/")) scm_data.replace_in_file(os.path.join(destination_folder, "conanfile.py")) diff --git a/conans/client/tools/scm.py b/conans/client/tools/scm.py index 0c1c84c2d..50b06b46d 100644 --- a/conans/client/tools/scm.py +++ b/conans/client/tools/scm.py @@ -13,8 +13,8 @@ from conans.util.files import decode_text, to_file_bytes class Git(object): - def __init__(self, folder=None, verify_ssl=True, username=None, password=None, force_english=True, - runner=None): + def __init__(self, folder=None, verify_ssl=True, username=None, password=None, + force_english=True, runner=None): self.folder = folder or os.getcwd() if not os.path.exists(self.folder): os.makedirs(self.folder) @@ -33,6 +33,9 @@ class Git(object): else: return self._runner(command) + def get_repo_root(self): + return self.run("rev-parse --show-toplevel") + def get_url_with_credentials(self, url): if not self._username or not self._password: return url diff --git a/conans/model/scm.py b/conans/model/scm.py index e7ff64aca..84210fc25 100644 --- a/conans/model/scm.py +++ b/conans/model/scm.py @@ -75,3 +75,6 @@ class SCM(object): def get_revision(self): return self.repo.get_revision() + + def get_repo_root(self): + return self.repo.get_repo_root()
[SCM] SCM feature + recipe in a repo subfolder leads to "same computer clone" bug. Getting back to our Conan recipe, conan `1.7.1` solves the problem described in #3322, thus (re)allowing a recipe living in a subfolder of the repository to use the `scm` feature. Sadly, another bug, initially described in #3069 (under the name _same computer optimization problem_), is still present: when running `conan create` on the recipe in this situation, the creation fails, complaining there are no files in the `build` folder (and indeed, there are none). It seems the analysis made in #3069 is still relevant: Basically, when calling `conan create`, an optimization described in the [tip box of this documentation section](https://docs.conan.io/en/latest/creating_packages/package_repo.html#capturing-the-remote-and-commit-from-git-scm-experimental) creates an `scm_folder.txt` file in the conan package. When this file is present, conan tries to clone the repo from the path in this file instead of the git URL stored in the exported recipe. Sadly, this file contains the path to the folder containing the recipe, instead of the path to the root of the repository. So, when the recipe is at the root of the repository, the behaviour appears correct. But if the recipe is stored in a subfolder, then nothing is cloned over to the build folder, failing the process.
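The patch in this record adds a `get_repo_root()` helper that asks git for the top of the working tree (`git rev-parse --show-toplevel`), so that `scm_folder.txt` records the repository root rather than the recipe subfolder. Below is a minimal standalone sketch of that lookup, assuming git is on the PATH and the given path sits inside a working tree; it is not Conan's actual implementation, which routes the call through its `Git` runner.

```python
import subprocess


def get_repo_root(path="."):
    """Return the top-level directory of the git working tree containing path."""
    out = subprocess.check_output(
        ["git", "rev-parse", "--show-toplevel"], cwd=path)
    return out.decode().strip()


# Called from a recipe subfolder, this returns the repository root,
# which is the path that should end up in scm_folder.txt.
```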
conan-io/conan
diff --git a/conans/test/functional/scm_test.py b/conans/test/functional/scm_test.py index 24a7ea320..1e19a06f4 100644 --- a/conans/test/functional/scm_test.py +++ b/conans/test/functional/scm_test.py @@ -149,6 +149,21 @@ class ConanLib(ConanFile): self.assertTrue(os.path.exists(os.path.join(folder, "mysub", "myfile.txt"))) self.assertFalse(os.path.exists(os.path.join(folder, "mysub", "conanfile.py"))) + def test_auto_conanfile_no_root(self): + """ + Conanfile is not in the root of the repo: https://github.com/conan-io/conan/issues/3465 + """ + curdir = self.client.current_folder + conanfile = base.format(url="auto", revision="auto") + self.client.save({"conan/conanfile.py": conanfile, "myfile.txt": "content of my file"}) + self._commit_contents() + self.client.runner('git remote add origin https://myrepo.com.git', cwd=curdir) + + # Create the package + self.client.run("create conan/ user/channel") + sources_dir = self.client.client_cache.scm_folder(self.reference) + self.assertEquals(load(sources_dir), curdir.replace('\\', '/')) # Root of git is 'curdir' + def test_deleted_source_folder(self): path, commit = create_local_git_repo({"myfile": "contents"}, branch="my_release") curdir = self.client.current_folder.replace("\\", "/") diff --git a/conans/test/util/tools_test.py b/conans/test/util/tools_test.py index d60c3365c..d3db53ab2 100644 --- a/conans/test/util/tools_test.py +++ b/conans/test/util/tools_test.py @@ -1100,6 +1100,19 @@ ProgramFiles(x86)=C:\Program Files (x86) class GitToolTest(unittest.TestCase): + def test_repo_root(self): + root_path, _ = create_local_git_repo({"myfile": "anything"}) + + # Initialized in the root folder + git = Git(root_path) + self.assertEqual(root_path, git.get_repo_root()) + + # Initialized elsewhere + subfolder = os.path.join(root_path, 'subfolder') + os.makedirs(subfolder) + git = Git(subfolder) + self.assertEqual(root_path, git.get_repo_root()) + def test_clone_git(self): path, _ = create_local_git_repo({"myfile": "contents"}) tmp = temp_folder()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_issue_reference", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 3 }
1.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "nose", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc cmake" ], "python": "3.6", "reqs_path": [ "conans/requirements.txt", "conans/requirements_server.txt", "conans/requirements_dev.txt", "conans/requirements_osx.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
asn1crypto==1.5.1 astroid==2.11.7 attrs==22.2.0 beautifulsoup4==4.12.3 bottle==0.12.25 certifi==2021.5.30 cffi==1.15.1 charset-normalizer==2.0.12 codecov==2.1.13 colorama==0.3.9 -e git+https://github.com/conan-io/conan.git@82631b05304f07dddfbd9f2cb0721e10fcd43d17#egg=conan coverage==4.2 cryptography==2.3.1 deprecation==2.0.7 dill==0.3.4 distro==1.1.0 fasteners==0.19 future==0.16.0 idna==2.6 importlib-metadata==4.8.3 iniconfig==1.1.1 isort==5.10.1 lazy-object-proxy==1.7.1 mccabe==0.7.0 mock==1.3.0 ndg-httpsclient==0.4.4 node-semver==0.2.0 nose==1.3.7 packaging==21.3 parameterized==0.8.1 patch==1.16 pbr==6.1.1 platformdirs==2.4.0 pluggy==1.0.0 pluginbase==0.7 py==1.11.0 pyasn==1.5.0b7 pyasn1==0.5.1 pycparser==2.21 Pygments==2.14.0 PyJWT==1.7.1 pylint==2.13.9 pyOpenSSL==17.5.0 pyparsing==3.1.4 pytest==7.0.1 PyYAML==3.13 requests==2.27.1 six==1.17.0 soupsieve==2.3.2.post1 tomli==1.2.3 typed-ast==1.5.5 typing_extensions==4.1.1 urllib3==1.26.20 waitress==2.0.0 WebOb==1.8.9 WebTest==2.0.35 wrapt==1.16.0 zipp==3.6.0
name: conan channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - asn1crypto==1.5.1 - astroid==2.11.7 - attrs==22.2.0 - beautifulsoup4==4.12.3 - bottle==0.12.25 - cffi==1.15.1 - charset-normalizer==2.0.12 - codecov==2.1.13 - colorama==0.3.9 - coverage==4.2 - cryptography==2.3.1 - deprecation==2.0.7 - dill==0.3.4 - distro==1.1.0 - fasteners==0.19 - future==0.16.0 - idna==2.6 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - isort==5.10.1 - lazy-object-proxy==1.7.1 - mccabe==0.7.0 - mock==1.3.0 - ndg-httpsclient==0.4.4 - node-semver==0.2.0 - nose==1.3.7 - packaging==21.3 - parameterized==0.8.1 - patch==1.16 - pbr==6.1.1 - platformdirs==2.4.0 - pluggy==1.0.0 - pluginbase==0.7 - py==1.11.0 - pyasn==1.5.0b7 - pyasn1==0.5.1 - pycparser==2.21 - pygments==2.14.0 - pyjwt==1.7.1 - pylint==2.13.9 - pyopenssl==17.5.0 - pyparsing==3.1.4 - pytest==7.0.1 - pyyaml==3.13 - requests==2.27.1 - six==1.17.0 - soupsieve==2.3.2.post1 - tomli==1.2.3 - typed-ast==1.5.5 - typing-extensions==4.1.1 - urllib3==1.26.20 - waitress==2.0.0 - webob==1.8.9 - webtest==2.0.35 - wrapt==1.16.0 - zipp==3.6.0 prefix: /opt/conda/envs/conan
[ "conans/test/util/tools_test.py::GitToolTest::test_repo_root" ]
[ "conans/test/functional/scm_test.py::SCMTest::test_auto_conanfile_no_root", "conans/test/functional/scm_test.py::SCMTest::test_auto_filesystem_remote_git", "conans/test/functional/scm_test.py::SCMTest::test_auto_git", "conans/test/functional/scm_test.py::SCMTest::test_auto_subfolder", "conans/test/functional/scm_test.py::SCMTest::test_deleted_source_folder", "conans/test/functional/scm_test.py::SCMTest::test_excluded_repo_files", "conans/test/functional/scm_test.py::SCMTest::test_install_checked_out", "conans/test/functional/scm_test.py::SCMTest::test_local_source", "conans/test/functional/scm_test.py::SCMTest::test_local_source_subfolder", "conans/test/functional/scm_test.py::SCMTest::test_repeat_clone_changing_subfolder", "conans/test/functional/scm_test.py::SCMTest::test_source_method_export_sources_and_scm_mixed", "conans/test/functional/scm_test.py::SCMTest::test_source_removed_in_local_cache", "conans/test/functional/scm_test.py::SCMTest::test_submodule", "conans/test/util/tools_test.py::ToolsTest::test_get_env_in_conanfile", "conans/test/util/tools_test.py::ToolsTest::test_global_tools_overrided", "conans/test/util/tools_test.py::GitToolTest::test_clone_submodule_git" ]
[ "conans/test/functional/scm_test.py::SCMTest::test_scm_other_type_ignored", "conans/test/util/tools_test.py::ReplaceInFileTest::test_replace_in_file", "conans/test/util/tools_test.py::ToolsTest::test_environment_nested", "conans/test/util/tools_test.py::GitToolTest::test_clone_existing_folder_git", "conans/test/util/tools_test.py::GitToolTest::test_clone_existing_folder_without_branch", "conans/test/util/tools_test.py::GitToolTest::test_clone_git", "conans/test/util/tools_test.py::GitToolTest::test_credentials", "conans/test/util/tools_test.py::GitToolTest::test_verify_ssl" ]
[]
MIT License
3,021
[ "conans/client/tools/scm.py", "conans/client/cmd/export.py", "conans/model/scm.py" ]
[ "conans/client/tools/scm.py", "conans/client/cmd/export.py", "conans/model/scm.py" ]
CORE-GATECH-GROUP__serpent-tools-240
03997bdce0a5adb75cf5796278ea61b799f7b6dc
2018-09-04 19:06:39
03997bdce0a5adb75cf5796278ea61b799f7b6dc
diff --git a/docs/changelog.rst b/docs/changelog.rst index 8b52e58..0c9561c 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -1,4 +1,8 @@ .. |homogUniv| replace:: :py:class:`~serpentTools.objects.containers.HomogUniv` +.. |resultReader| replace:: :class:`~serpentTools.parsers.results.ResultsReader` +.. |detector| replace:: :class:`~serpentTools.objects.detectors.Detector` +.. |detectorReader| replace:: :class:`~serpentTools.parsers.detector.DetectorReader` +.. |depletionReader| replace:: :class:`~serpentTools.parsers.depletion.DepletionReader` .. _changelog: @@ -6,9 +10,24 @@ Changelog ========= +.. _v0.6.0: + +0.6.0 +===== + +* :pull:`174` - Added parent object ``BaseObject`` with basic comparison + method from which all objects inherit. Comparison method contains + upper and lower bounds for values w/o uncertainties, :pull:`191` +* :pull:`196` - Add comparison methods for |resultReader| and + |homogUniv| objects +* :pull:`228` - Add comparison methods for |detectorReader| and + |detector| objects +* :pull:`236` - Add comparison methods for |depletionReader| and + :class:`~serpentTools.objects.materials.DepletedMaterial` objects + .. _v0.5.4: -:release-tag:`0.5.3` +:release-tag:`0.5.4` ==================== * :pull:`239` - Update python dependencies to continue use of python 2 @@ -24,7 +43,7 @@ Changelog files with unique random seeds - :mod:`serpentTools.seed` * :pull:`229` - :meth:`serpentTools.parsers.sensitivity.SensitivityReader.plot` now respects the option to not set x nor y labels. -* :pull:`231` - :class:`~serpentTools.parsers.results.ResultsReader` objects +* :pull:`231` - |resultReader| objects can now read files that do not contain group constant data. The setting :ref:`results-expectGcu` should be used to inform the reader that no group constant data is anticipated @@ -41,10 +60,10 @@ Changelog :func:`serpentTools.plot.cartMeshPlot` * :pull:`201` - Support for plotting hexagonal meshes with :meth:`serpentTools.objects.detectors.HexagonalDetector.hexPlot` -* :pull:`204` - Access :class:`serpentTools.objects.detectors.Detector` - objects directly from :class:`serpentTools.parsers.detector.DetectorReader` +* :pull:`204` - Access |detector| + objects directly from |detectorReader| with ``reader[detName]`` -* :pull:`205` - Access materials from :class:`serpentTools.readers.depletion.DepletionReader` +* :pull:`205` - Access materials from |depletionReader| and :class:`serpentTools.samplers.depletion.DepletionSampler` using key-like indexing, e.g. ``reader[matName] == reader.material[matName]`` * :pull:`213` - Better default x-axis labels for simple detector plots diff --git a/docs/develop/comparisons.rst b/docs/develop/comparisons.rst new file mode 100644 index 0000000..8ac3e90 --- /dev/null +++ b/docs/develop/comparisons.rst @@ -0,0 +1,132 @@ +.. |baseObj| replace:: ``BaseObject`` + +.. |error| replace:: :func:`~serpentTools.messages.error` + +.. |warn| replace:: :func:`~serpentTools.messages.warning` + +.. |info| replace:: :func:`~serpentTools.messages.info` + +.. |debug| replace:: :func:`~serpentTools.messages.debug` + +.. _dev-comparisons: + +================== +Comparison Methods +================== + +We are currently developing methods for our readers and containers to be able +for comparing between like objects. This could be used to compare the effect +of changing fuel enrichment on pin powers or criticality, or used to compare +the effect of different ``SERPENT`` settings. 
The ``BaseObject`` that +every object **should** inherit from contains the bulk of the input checking, +so each reader and object needs to implement a private ``_compare`` method with +the following structure:: + + def _compare(self, other, lower, upper, sigma): + return <boolean output of comparison> + +.. note:: + + While these methods will iterate over many quantities, and some quantities + may fail early on in the test, the comparison method should continue + until all quantities have been tested. + +The value ``sigma`` should be used to compare quantities with uncertainties +by constructing intervals bounded by :math:`x\pm S\sigma`, where +``sigma``:math:`=S`. Quantities that do not have overlapping confidence +windows will be considered too different and should result in a ``False`` +value being returned from the method. + +The ``lower`` and ``upper`` arguments should be used to compare values +that do not have uncertainties. Both will be ``float`` values, with +``lower`` less than or equal to ``upper``. This functionality is +implemented with the :func:`serpentTools.utils.directCompare` function, +while the result is reported with :func:`serpentTools.utils.logDirectCompare`. + +.. _dev-comp-message: + +Use of messaging module +======================= + +Below is a non-definitive nor comprehensive list of possible comparison cases +and the corresponding message that should be printed. Using a range of message +types allows the user to be able to easily focus on things that are really bad by +using our :ref:`verbosity` setting. + +* Two objects contain different data sets, e.g. different dictionary values + - |warn| displaying the missing items, and then apply test to items in both objects +* Two items are identically zero, or arrays of zeros - |debug| +* Two items are outside of the ``sigma`` confidence intervals - |error| +* Two items without uncertainties have relative difference + + * less than ``lower`` - |debug| + * greater than or equal to ``upper`` - |error| + * otherwise - |warn| + +* Two items are identical - |debug| +* Two arrays are not of similar size - |error| + + +.. _dev-comp-utils: + +High-level Logging and Comparison Utilities +=========================================== + +The :mod:`~serpentTools.utils` module contains a collection of functions +that can be used to compare quantities and automatically log results. +When possible, these routines should be favored over hand-writing +comparison routines. If the situation calls for custom comparison +functions, utilize or extend logging routines from :ref:`dev-comp-log` +appropriately. + +.. autofunction:: serpentTools.utils.compare.compareDictOfArrays + +.. autofunction:: serpentTools.utils.compare.getCommonKeys + +.. autofunction:: serpentTools.utils.compare.directCompare + +.. autofunction:: serpentTools.utils.compare.logDirectCompare + +.. autofunction:: serpentTools.utils.compare.splitdictByKeys + +.. autofunction:: serpentTools.utils.compare.getKeyMatchingShapes + +.. autofunction:: serpentTools.utils.compare.getOverlaps + +.. autofunction:: serpentTools.utils.compare.getLogOverlaps + +.. autofunction:: serpentTools.utils.docstrings.compareDocDecorator + +.. _dev-comp-log: + +Low-level Logging Utilities +=========================== + +The :mod:`~serpentTools.messages` module contains a collection of functions +that can be used to notify the user about the results of a comparison +routine. + +.. autofunction:: serpentTools.messages.logIdentical + +.. autofunction:: serpentTools.messages.logNotIdentical + +.. 
autofunction:: serpentTools.messages.logAcceptableLow + +.. autofunction:: serpentTools.messages.logAcceptableHigh + +.. autofunction:: serpentTools.messages.logOutsideTols + +.. autofunction:: serpentTools.messages.logIdenticalWithUncs + +.. autofunction:: serpentTools.messages.logInsideConfInt + +.. autofunction:: serpentTools.messages.logOutsideConfInt + +.. autofunction:: serpentTools.messages.logDifferentTypes + +.. autofunction:: serpentTools.messages.logMissingKeys + +.. autofunction:: serpentTools.messages.logBadTypes + +.. autofunction:: serpentTools.messages.logBadShapes + diff --git a/docs/develop/index.rst b/docs/develop/index.rst index 8845e23..e616b27 100644 --- a/docs/develop/index.rst +++ b/docs/develop/index.rst @@ -24,3 +24,4 @@ without any loss of comprehension. checklist.rst git.rst serpentVersions.rst + comparisons.rst diff --git a/docs/develop/utils.rst b/docs/develop/utils.rst index 6d251ab..726aa42 100644 --- a/docs/develop/utils.rst +++ b/docs/develop/utils.rst @@ -1,4 +1,4 @@ -.. _dev-utils: +.. _api-utils: ========= Utilities @@ -6,7 +6,6 @@ Utilities .. automodule:: serpentTools.utils - :members: convertVariableName, linkToWiki, str2vec, splitValsUnc .. _dev-testUtils: diff --git a/serpentTools/messages.py b/serpentTools/messages.py index 8ef4f26..119c7ce 100644 --- a/serpentTools/messages.py +++ b/serpentTools/messages.py @@ -12,6 +12,9 @@ import warnings import logging from logging import Handler from logging.config import dictConfig +from collections import Callable + +from numpy import ndarray class SerpentToolsException(Exception): @@ -164,6 +167,203 @@ def _updateFilterAlert(msg, category): warnings.warn(msg, category=category, stacklevel=3) warnings.simplefilter('default', category) +# ========================================================= +# Functions for notifying the user about comparison results +# ========================================================= + + +def _prefaceNotice(obj, leader): + msg = '\n\t{} '.format(leader) + ''.join(str(obj).split('\n')) + return msg + + +def _notify(func, quantity, header, obj0, obj1): + msg = header.format(quantity) + msg += _prefaceNotice(obj0, '>') + if obj1 is not None: + msg += _prefaceNotice(obj1, '<') + func(msg) + + +def logIdentical(obj0, obj1, quantity): + """Two objects are identical.""" + _notify(debug, quantity, 'Values for {} are identical', obj0, None) + + +def logNotIdentical(obj0, obj1, quantity): + """Values should be identical but aren't.""" + _notify(error, quantity, "Values for {} are not identical", + obj0, obj1) + + +def logAcceptableLow(obj0, obj1, quantity): + """Two values differ, but inside nominal and acceptable ranges.""" + _notify(info, quantity, "Values for {} are not identical, but close", + obj0, obj1) + + +def logAcceptableHigh(obj0, obj1, quantity): + """Two values differ, enough to merit a warning but not an error.""" + _notify(warning, quantity, + "Values for {} are different, but within tolerances", obj0, obj1) + + +def logOutsideTols(obj0, obj1, quantity): + """Two values differ outside acceptable tolerances.""" + _notify(error, quantity, + "Values for {} are outside acceptable tolerances.", obj0, obj1) + + +def _notifyWithUncs(func, quantity, msg, value0, unc0, value1, unc1): + logMsg = msg.format(quantity) + logMsg += _prefaceNotice(value0, '>V') + logMsg += _prefaceNotice(unc0, '>U') + if value1 is not None: + logMsg += _prefaceNotice(value1, '<V') + logMsg += _prefaceNotice(unc0, '<U') + func(logMsg) + + +def logIdenticalWithUncs(value, unc0, unc1, quantity): + 
"""Notify that two values have identical expected values.""" + _notifyWithUncs(debug, quantity, + 'Expected values for {} are identical', + value, unc0, None, unc1) + + +def logInsideConfInt(value0, unc0, value1, unc1, quantity): + """Two values are within acceptable statistical limits.""" + _notifyWithUncs(debug, quantity, 'Confidence intervals for {} overlap', + value0, unc0, value1, unc1) + + +def logOutsideConfInt(value0, unc0, value1, unc1, quantity): + """Two values are outside acceptable statistical limits.""" + _notifyWithUncs(error, quantity, + "Values for {} are outside acceptable statistical limits", + value0, unc0, value1, unc1) + + +def logDifferentTypes(type0, type1, quantity): + """Two values are of different types.""" + _notify(error, quantity, "Types for {} are different.", + type0, type1) + + +def logBadShapes(obj0, obj1, quantity): + """ + Log an error message that two arrays are of different shapes. + + Parameters + ---------- + obj0: :class:`numpy.ndarray` + obj1: :class:`numpy.ndarray` + Arrays that have been compared and found to have different shapes + quantity: str + Descriptor of the quantity being compared, e.g. what these objects + represent + """ + shapes = [obj.shape if isinstance(obj, ndarray) + else len(obj) for obj in (obj0, obj1)] + _notify(error, quantity, "Shapes for {} are different.", + shapes[0], shapes[1]) + + +MISSING_MSG_HEADER = "{} from {} and {} contain different items" +MISSING_MSG_SUBJ = "\n\tItems present in {} but not in {}:\n\t\t{}" + + +def _checkHerald(herald): + if not isinstance(herald, Callable): + critical("Heralding object {} is not callable. Falling back to error." + .format(herald)) + return error + return herald + + +def logMissingKeys(quantity, desc0, desc1, in0, in1, herald=error): + """ + Log a warning message that two objects contain different items + + Parameters + ---------- + quantity: str + Indicator as to what is being compared, e.g. ``'metadata'`` + desc0: str + desc1: str + Descriptions of the two originators + in0: set or iterable + in1: set or iterable + Items that are unique to originators ``0`` and ``1``, respectively + herald: callable + Callable function that accepts a single string. This will be called + with the error message. If not given, defaults to :func:`error` + """ + if not any(in0) and not any(in1): + return + herald = _checkHerald(herald) + msg = MISSING_MSG_HEADER.format(quantity, desc0, desc1) + if any(in0): + msg += MISSING_MSG_SUBJ.format(desc0, desc1, + ', '.join([str(xx) for xx in in0])) + if any(in1): + msg += MISSING_MSG_SUBJ.format(desc1, desc0, + ', '.join([str(xx) for xx in in1])) + herald(msg) + + +BAD_TYPES_HEADER = "Items from {d0} and {d1} {q} have different types" +BAD_SHAPES_HEADER = "Items from {d0} and {d1} {q} have different shapes" +BAD_OBJ_SUBJ = "\n\t{key}: {t0} - {t1}" + + +def logBadTypes(quantity, desc0, desc1, types): + """ + Log an error message for containers with mismatched types + + Parameters + ---------- + quantity: str + Indicator as to what is being compared, e.g. ``'metadata'`` + desc0: str + desc1: str + Descriptions of the two originators + types: dict + Dictionary where the keys represent the locations of + items with mismatched types. 
Corresponding keys should + be a list or tuple of the types for objects from + ``desc0`` and ``desc1`` stored under ``key`` + """ + msg = BAD_TYPES_HEADER.format(q=quantity, d0=desc0, d1=desc1) + for key in sorted(list(types.keys())): + t0, t1 = types[key] + msg += BAD_OBJ_SUBJ.format(key=key, t0=t0, t1=t1) + error(msg) + + +def logMapOfBadShapes(quantity, desc0, desc1, shapes): + """ + Log an error message for containers with mismatched shapes + + Parameters + ---------- + quantity: str + Indicator as to what is being compared, e.g. ``'metadata'`` + desc0: str + desc1: str + Descriptions of the two originators + shapes: dict + Dictionary where the keys represent the locations of + items with mismatched shapes. Corresponding keys should + be a list or tuple of the shapes for objects from + ``desc0`` and ``desc1`` stored under ``key`` + """ + msg = BAD_SHAPES_HEADER.format(q=quantity, d0=desc0, d1=desc1) + for key in sorted(list(shapes.keys())): + t0, t1 = shapes[key] + msg += BAD_OBJ_SUBJ.format(key=key, t0=t0, t1=t1) + error(msg) + class DictHandler(Handler): """ diff --git a/serpentTools/objects/base.py b/serpentTools/objects/base.py index 8c961c4..eadb0fe 100644 --- a/serpentTools/objects/base.py +++ b/serpentTools/objects/base.py @@ -8,14 +8,138 @@ from six import add_metaclass from numpy import arange, hstack, log, divide from matplotlib.pyplot import axes -from serpentTools.messages import debug, warning, SerpentToolsException +from serpentTools.messages import ( + debug, warning, SerpentToolsException, info, + error, + BAD_OBJ_SUBJ, + +) from serpentTools.plot import plot, cartMeshPlot from serpentTools.utils import ( magicPlotDocDecorator, formatPlot, DETECTOR_PLOT_LABELS, + compareDocDecorator, DEF_COMP_LOWER, DEF_COMP_SIGMA, + DEF_COMP_UPPER, compareDictOfArrays, +) +from serpentTools.utils.compare import ( + getLogOverlaps, finalCompareMsg, ) +from serpentTools.settings import rc + + +class BaseObject(object): + """Most basic class shared by all other classes.""" + + @compareDocDecorator + def compare(self, other, lower=DEF_COMP_LOWER, upper=DEF_COMP_UPPER, + sigma=DEF_COMP_SIGMA, verbosity=None): + """ + Compare the results of this reader to another. + + For values without uncertainties, the upper and lower + arguments control what passes and what messages get + raised. If a quantity in ``other`` is less than + ``lower`` percent different that the same quantity + on this object, consider this allowable and make + no messages. + Quantities that are greater than ``upper`` percent + different will have a error messages printed and + the comparison will return ``False``, but continue. + Quantities with difference between these ranges will + have warning messages printed. + Parameters + ---------- + other: + Other reader instance against which to compare. + Must be a similar class as this one. + {compLimits} + {sigma} + verbosity: None or str + If given, update the verbosity just for this comparison. -class NamedObject(object): + Returns + ------- + bool: + ``True`` if the objects are in agreement with + each other according to the parameters specified + + Raises + ------ + {compTypeErr} + ValueError + If upper > lower, + If sigma, lower, or upper are negative + """ + upper = float(upper) + lower = float(lower) + sigma = int(sigma) + if upper < lower: + raise ValueError("Upper limit must be greater than lower. 
" + "{} is not greater than {}" + .format(upper, lower)) + for item, key in zip((upper, lower, sigma), + ('upper', 'lower', 'sigma')): + if item < 0: + raise ValueError("{} must be non-negative, is {}" + .format(key, item)) + + self._checkCompareObj(other) + + previousVerb = None + if verbosity is not None: + previousVerb = rc['verbosity'] + rc['verbosity'] = verbosity + + self._compareLogPreMsg(other, lower, upper, sigma) + + areSimilar = self._compare(other, lower, upper, sigma) + + if areSimilar: + herald = info + else: + herald = warning + herald(finalCompareMsg(self, other, areSimilar)) + if previousVerb is not None: + rc['verbosity'] = previousVerb + + return areSimilar + + def _compare(self, other, lower, upper, sigma): + """Actual comparison method for similar classes.""" + raise NotImplementedError + + def _checkCompareObj(self, other): + """Verify that the two objects are same class or subclasses.""" + if not (isinstance(other, self.__class__) or + issubclass(other.__class__, self.__class__)): + oName = other.__class__.__name__ + name = self.__class__.__name__ + raise TypeError( + "Cannot compare against {} - not instance nor subclass " + "of {}".format(oName, name)) + + def _compareLogPreMsg(self, other, lower=None, upper=None, sigma=None, + quantity=None): + """Log an INFO message about this specific comparison.""" + leader = "Comparing {}> against < with the following tolerances:" + tols = [leader.format((quantity + ' from ') if quantity else ''), ] + for leader, obj in zip(('>', '<'), (self, other)): + tols.append("{} {}".format(leader, obj)) + for title, val in zip(('Lower', 'Upper'), (lower, upper)): + if val is None: + continue + tols.append("{} tolerance: {:5.3F} [%]".format(title, val)) + if sigma is not None: + sigmaStr = ("Confidence interval for statistical values: {:d} " + "sigma or {} %") + sigmaDict = {1: 68, 2: 95} + tols.append( + sigmaStr.format(sigma, sigmaDict.get(sigma, '>= 99.7') + if sigma else 0)) + info('\n\t'.join(tols)) + + +class NamedObject(BaseObject): """Class for named objects like materials and detectors.""" def __init__(self, name): @@ -413,3 +537,34 @@ class DetectorBase(NamedObject): if qty in self.indexes: return self.indexes[qty], xlabel return fallbackX, xlabel + + def _compare(self, other, lower, upper, sigma): + myShape = self.tallies.shape + otherShape = other.tallies.shape + if myShape != otherShape: + error("Detector tallies do not have identical shapes" + + BAD_OBJ_SUBJ.format('tallies', myShape, otherShape)) + return False + similar = compareDictOfArrays(self.grids, other.grids, 'grids', + lower=lower, upper=upper) + + similar &= getLogOverlaps('tallies', self.tallies, other.tallies, + self.errors, other.errors, sigma, + relative=True) + hasScores = [obj.scores is not None for obj in (self, other)] + + similar &= hasScores[0] == hasScores[1] + + if not any(hasScores): + return similar + if all(hasScores): + similar &= getLogOverlaps('scores', self.scores, other.scores, + self.errors, other.errors, sigma, + relative=True) + return similar + firstK, secondK = "first", "second" + if hasScores[1]: + firstK, secondK = secondK, firstK + error("{} detector has scores while {} does not" + .format(firstK.capitalize(), secondK)) + return similar diff --git a/serpentTools/objects/containers.py b/serpentTools/objects/containers.py index 5b57bcd..e1c3fd2 100644 --- a/serpentTools/objects/containers.py +++ b/serpentTools/objects/containers.py @@ -15,10 +15,28 @@ from matplotlib import pyplot from numpy import array, arange, hstack, ndarray, 
zeros_like from serpentTools.settings import rc -from serpentTools.utils.plot import magicPlotDocDecorator, formatPlot -from serpentTools.objects.base import NamedObject -from serpentTools.utils import convertVariableName -from serpentTools.messages import warning, SerpentToolsException, debug, info +from serpentTools.objects.base import NamedObject, BaseObject +from serpentTools.utils import ( + convertVariableName, + getKeyMatchingShapes, + logDirectCompare, + getLogOverlaps, + compareDocReplacer, + compareDocDecorator, + magicPlotDocDecorator, + formatPlot, +) + +from serpentTools.objects.base import (DEF_COMP_LOWER, + DEF_COMP_UPPER, DEF_COMP_SIGMA) +from serpentTools.messages import ( + warning, + SerpentToolsException, + debug, + info, + critical, + error, +) SCATTER_MATS = set() SCATTER_ORDERS = 8 @@ -439,8 +457,132 @@ class HomogUniv(NamedObject): hasData = __bool__ + def _compare(self, other, lower, upper, sigma): + similar = self.compareAttributes(other, lower, upper, sigma) + similar &= self.compareInfData(other, sigma) + similar &= self.compareB1Data(other, sigma) + similar &= self.compareGCData(other, sigma) -class BranchContainer(object): + return similar + + @compareDocDecorator + def compareAttributes(self, other, lower=DEF_COMP_LOWER, + upper=DEF_COMP_UPPER, sigma=DEF_COMP_SIGMA): + """ + Compare attributes like group structure and burnup. Return the result + + Parameters + ---------- + other: :class:`HomogUniv` + Universe against which to compare + {compLimits} + {sigma} + + Returns + ------- + bools: + ``True`` if the attributes agree within specifications + + Raises + ------ + {compTypeErr} + """ + + self._checkCompareObj(other) + + myMeta = {} + otherMeta = {} + + for key in {'bu', 'step', 'groups', 'microGroups', 'reshaped'}: + for meta, obj in zip((myMeta, otherMeta), (self, other)): + try: + meta[key] = getattr(obj, key) + except AttributeError: + meta[key] = None + + matchingKeys = getKeyMatchingShapes(myMeta, otherMeta, 'metadata') + similar = len(matchingKeys) == len(myMeta) + + for key in sorted(matchingKeys): + similar &= logDirectCompare(myMeta[key], otherMeta[key], + lower, upper, key) + + return similar + + __docCompare = compareDocReplacer(""" + Return ``True`` if contents of ``{qty}Exp`` and ``{qty}Unc`` agree + + Parameters + ---------- + other: :class:`HomogUniv` + Object from which to grab group constant dictionaries + {sigma} + + Returns + bool + If the dictionaries contain identical values with uncertainties, + and if those values have overlapping confidence intervals + Raises + ------ + {compTypeErr} + """) + + def _helpCompareGCDict(self, other, attrBase, sigma): + """ + Method that actually compare group constant dictionaries. 
+ + ``attrBase`` is used to find dictionaries by appending + ``'Exp'`` and ``'Unc'`` to ``attrBase`` + """ + self._checkCompareObj(other) + + valName = (attrBase + 'Exp') if attrBase != 'gc' else 'gc' + uncName = attrBase + 'Unc' + try: + myVals = getattr(self, valName) + myUncs = getattr(self, uncName) + otherVals = getattr(other, valName) + otherUncs = getattr(other, uncName) + except Exception as ee: + critical("The following error was raised extracting {} and " + "{} from universes {} and {}:\n\t{}" + .format(valName, uncName, self, other, ee)) + return False + + keys = getKeyMatchingShapes(myVals, otherVals, valName) + similar = len(keys) == len(myVals) == len(otherVals) + + for key in keys: + if key not in myUncs or key not in otherUncs: + loc = self if key in otherUncs else other + error("Uncertainty data for {} missing from {}" + .format(key, loc)) + similar = False + continue + myVal = myVals[key] + myUnc = myUncs[key] + otherVal = otherVals[key] + otherUnc = otherUncs[key] + + similar &= getLogOverlaps(key, myVal, otherVal, myUnc, otherUnc, + sigma, relative=True) + return similar + + def compareInfData(self, other, sigma): + return self._helpCompareGCDict(other, 'inf', sigma) + + def compareB1Data(self, other, sigma): + return self._helpCompareGCDict(other, 'b1', sigma) + + def compareGCData(self, other, sigma): + return self._helpCompareGCDict(other, 'gc', sigma) + + compareInfData.__doc__ = __docCompare.format(qty='inf') + compareB1Data.__doc__ = __docCompare.format(qty='b1') + compareGCData.__doc__ = __docCompare.format(qty='gc') + + +class BranchContainer(BaseObject): """ Class that stores data for a single branch. @@ -524,18 +666,6 @@ class BranchContainer(object): univID: int or str Identifier for this universe burnup: float or int - Value of burnup [MWd/kgU]. A negative value here indicates - the value is really in units of days. 
- burnIndex: int - Point in the depletion schedule - burnDays: int or float - Point in time - - Returns - ------- - serpentTools.objects.containers.HomogUniv - Empty new universe - """ if self.__hasDays is None and burnup: self.__hasDays = burnup < 0 diff --git a/serpentTools/objects/materials.py b/serpentTools/objects/materials.py index 294da7e..07d18a9 100644 --- a/serpentTools/objects/materials.py +++ b/serpentTools/objects/materials.py @@ -8,6 +8,10 @@ from serpentTools.utils import ( magicPlotDocDecorator, formatPlot, DEPLETION_PLOT_LABELS, convertVariableName, ) +from serpentTools.utils.compare import ( + logDirectCompare, + compareDictOfArrays, +) from serpentTools.objects.base import NamedObject @@ -235,6 +239,23 @@ class DepletedMaterialBase(NamedObject): return labels + def _compare(self, other, lower, upper, sigma): + # look for identical isotope names and + similar = logDirectCompare(self.names, other.names, 0, 0, + 'isotope names') + similar &= logDirectCompare(self.zai, other.zai, 0, 0, 'isotope ZAI') + + # test data dictionary + # if uncertianties exist, use those + myUncs = self.uncertainties if hasattr(self, 'uncertainties') else {} + otherUncs = (other.uncertainties if hasattr(other, 'uncertainties') + else {}) + similar &= compareDictOfArrays( + self.data, other.data, 'data', lower=lower, upper=upper, + sigma=sigma, u0=myUncs, u1=otherUncs, relative=False) + + return similar + class DepletedMaterial(DepletedMaterialBase): __doc__ = DepletedMaterialBase.__doc__ diff --git a/serpentTools/parsers/_collections.py b/serpentTools/parsers/_collections.py new file mode 100644 index 0000000..0b93156 --- /dev/null +++ b/serpentTools/parsers/_collections.py @@ -0,0 +1,97 @@ +""" +Collections of objects that are helpful for the parsers +""" + +RES_DATA_NO_UNCS = { + "burnMaterials", + "burnMode", + "burnStep", + "iniBurnFmass", + "totBurnFmass", + "resMemsize", + "totNuclides", + "fissionProductInhTox", + "ingestionToxicity", + "totSfRate", + "electronDecaySource", + "uresDiluCut", + "implNxn", + "neutronErgTol", + "useDbrc", + "actinideActivity", + "actinideInhTox", + "photonDecaySource", + "te132Activity", + "implCapt", + "alphaDecaySource", + "totActivity", + "fissionProductActivity", + "simulationCompleted", + "sourcePopulation", + "useUres", + "lostParticles", + "iniFmass", + "totPhotonNuclides", + "totTransmuRea", + "uresEmax", + "uresUsed", + "i132Activity", + "cpuUsage", + "xsMemsize", + "memsize", + "totDecayNuclides", + "tmsMode", + "actinideIngTox", + "totDosimetryNuclides", + "cs134Activity", + "uresEmin", + "totCells", + "neutronErgNe", + "fissionProductIngTox", + "sampleCapt", + "actinideDecayHeat", + "runningTime", + "uresAvail", + "cycleIdx", + "neutronEmin", + "neutronDecaySource", + "totDecayHeat", + "dopplerPreprocessor", + "matMemsize", + "inhalationToxicity", + "sampleFiss", + "totFmass", + "useDelnu", + "cs137Activity", + "availMem", + "neutronEmax", + "miscMemsize", + "sampleScatt", + "unusedMemsize", + "unionCells", + "sr90Activity", + "totCpuTime", + "implFiss", + "allocMemsize", + "unknownMemsize", + "ompParallelFrac", + "fissionProductDecayHeat", + "totReaChannels", + "totTransportNuclides", + "ifcMemsize", + "i131Activity", + "balaSrcNeutronTot", + "balaSrcNeutronFiss", + "balaSrcNeutronNxn", + "balaLossNeutronFiss", + "balaLossNeutronTot", + "balaLossNeutronCapt", + "balaLossNeutronLeak", + "transportCycleTime", + "processTime", + "initTime", +} +""" +Set containing keys for objects stored in :attr:`ResultsReader.resdata` +that do not contain 
uncertainties. +""" diff --git a/serpentTools/parsers/base.py b/serpentTools/parsers/base.py index 17519b9..457d706 100644 --- a/serpentTools/parsers/base.py +++ b/serpentTools/parsers/base.py @@ -11,10 +11,11 @@ from six import add_metaclass from serpentTools.messages import info from serpentTools.settings import rc +from serpentTools.objects.base import BaseObject @add_metaclass(ABCMeta) -class BaseReader(object): +class BaseReader(BaseObject): """Parent class from which all parsers will inherit. Parameters diff --git a/serpentTools/parsers/depletion.py b/serpentTools/parsers/depletion.py index 76e94e0..528929d 100644 --- a/serpentTools/parsers/depletion.py +++ b/serpentTools/parsers/depletion.py @@ -13,9 +13,17 @@ from serpentTools.engines import KeywordParser from serpentTools.parsers.base import MaterialReader from serpentTools.objects.materials import DepletedMaterial -from serpentTools.messages import (warning, debug, error, - SerpentToolsException) - +from serpentTools.messages import ( + warning, debug, error, SerpentToolsException, +) +from serpentTools.utils import ( + getKeyMatchingShapes, + logDirectCompare, + compareDocDecorator, + DEF_COMP_LOWER, + DEF_COMP_UPPER, + DEF_COMP_SIGMA, +) METADATA_KEYS = {'ZAI', 'NAMES', 'BU', 'DAYS'} @@ -263,3 +271,109 @@ class DepletionReader(DepPlotMixin, MaterialReader): if 'bu' in self.metadata: self.metadata['burnup'] = self.metadata.pop('bu') + + def _compare(self, other, lower, upper, _sigma): + + similar = self._compareMetadata(other, lower, upper, _sigma) + if not self._comparePrecheckMetadata(other): + return False + similar &= self._compareMaterials(other, lower, upper, _sigma) + return similar + + def _comparePrecheckMetadata(self, other): + for key, myVec in iteritems(self.metadata): + otherVec = other.metadata[key] + if len(myVec) != len(otherVec): + error("Stopping comparison early due to mismatched {} vectors" + "\n\t>{}\n\t<{}".format(key, myVec, otherVec)) + return False + return True + + @compareDocDecorator + def compareMaterials(self, other, lower=DEF_COMP_LOWER, + upper=DEF_COMP_UPPER, sigma=DEF_COMP_SIGMA): + """ + Return the result of comparing all materials on two readers + + Parameters + ---------- + other: :class:`DepletionReader` + Reader to compare against + {compLimits} + {sigma} + + Returns + ------- + bool: + ``True`` if all materials agree to the given tolerances + + Raises + ------ + {compTypeErr} + """ + self._checkCompareObj(other) + self._compareLogPreMsg(other, lower, upper, quantity='materials') + + if not self._comparePrecheckMetadata(other): + return False + + return self._compareMaterials(other, lower, upper, sigma) + + def _compareMaterials(self, other, lower, upper, sigma): + """Private method for going directly into the comparison.""" + commonMats = getKeyMatchingShapes( + self.materials, other.materials, 'materials') + similar = ( + len(self.materials) == len(other.materials) == len(commonMats)) + + for matName in sorted(commonMats): + myMat = self[matName] + otherMat = other[matName] + similar &= myMat.compare(otherMat, lower, upper, sigma) + return similar + + @compareDocDecorator + def compareMetadata(self, other, lower=DEF_COMP_LOWER, + upper=DEF_COMP_UPPER, sigma=DEF_COMP_SIGMA): + """ + Return the result of comparing metadata on two readers + + Parameters + ---------- + other: :class:`DepletionReader` + Object to compare against + {compLimits} + {header} + + Returns + ------- + bool + True if the metadata agree within the given tolerances + + Raises + ------ + {compTypeErr} + """ + + 
self._checkCompareObj(other) + + self._compareLogPreMsg(other, lower, upper, quantity='metadata') + + return self._compareMetadata(other, lower, upper, sigma) + + def _compareMetadata(self, other, lower, upper, _sigma): + """Private method for comparing metadata""" + + similar = logDirectCompare( + self.metadata['names'], other.metadata['names'], + 0, 0, 'names') + similar &= logDirectCompare( + self.metadata['zai'], other.metadata['zai'], + 0, 0, 'zai') + similar &= logDirectCompare( + self.metadata['days'], other.metadata['days'], + lower, upper, 'days') + similar &= logDirectCompare( + self.metadata['burnup'], other.metadata['burnup'], + lower, upper, 'burnup') + return similar diff --git a/serpentTools/parsers/detector.py b/serpentTools/parsers/detector.py index 9053962..e63bfaa 100644 --- a/serpentTools/parsers/detector.py +++ b/serpentTools/parsers/detector.py @@ -4,6 +4,7 @@ from six import iteritems from numpy import empty from serpentTools.utils import str2vec +from serpentTools.utils.compare import getKeyMatchingShapes from serpentTools.engines import KeywordParser from serpentTools.objects.detectors import detectorFactory from serpentTools.parsers.base import BaseReader @@ -95,6 +96,20 @@ class DetectorReader(BaseReader): if not self.detectors: warning("No detectors stored from file {}".format(self.filePath)) + def _compare(self, other, lower, upper, sigma): + """Compare two detector readers.""" + similar = len(self.detectors) == len(other.detectors) + + commonKeys = getKeyMatchingShapes(self.detectors, other.detectors, + 'detectors') + similar &= len(commonKeys) == len(self.detectors) + + for detName in sorted(commonKeys): + myDetector = self[detName] + otherDetector = other[detName] + similar &= myDetector.compare(otherDetector, lower, upper, sigma) + return similar + def cleanDetChunk(chunk): """ diff --git a/serpentTools/parsers/results.py b/serpentTools/parsers/results.py index ccf30cf..d46a7fd 100644 --- a/serpentTools/parsers/results.py +++ b/serpentTools/parsers/results.py @@ -7,11 +7,26 @@ from serpentTools.settings import rc from serpentTools.utils import convertVariableName from serpentTools.objects.containers import HomogUniv from serpentTools.parsers.base import XSReader +from serpentTools.parsers._collections import RES_DATA_NO_UNCS +from serpentTools.objects.base import (DEF_COMP_LOWER, + DEF_COMP_SIGMA, DEF_COMP_UPPER) from serpentTools.utils import ( - str2vec, splitValsUncs, - STR_REGEX, VEC_REGEX, SCALAR_REGEX, FIRST_WORD_REGEX, + str2vec, + splitValsUncs, + getCommonKeys, + logDirectCompare, + compareDocDecorator, + getKeyMatchingShapes, + getLogOverlaps, + STR_REGEX, + VEC_REGEX, + SCALAR_REGEX, + FIRST_WORD_REGEX, +) +from serpentTools.messages import ( + warning, debug, SerpentToolsException, + info, ) -from serpentTools.messages import (warning, debug, SerpentToolsException) MapStrVersions = { @@ -65,6 +80,9 @@ Convert items in metadata dictionary from arrays to these data types """ +__all__ = ['ResultsReader', ] + + class ResultsReader(XSReader): """ Parser responsible for reading and working with result files. 
@@ -102,6 +120,16 @@ class ResultsReader(XSReader): IOError: file is unexpectedly closes while reading """ + __METADATA_COMP_SKIPS = { + 'title', + 'inputFileName', + 'workingDirectory', + 'startDate', + 'completeDate', + 'seed', + } + """Metadata keys that will not be compared.""" + def __init__(self, filePath): XSReader.__init__(self, filePath, 'results') self.__serpentVersion = rc['serpentVersion'] @@ -345,6 +373,135 @@ class ResultsReader(XSReader): self._inspectData() self._cleanMetadata() + def _compare(self, other, lower, upper, sigma): + similar = self.compareMetadata(other) + similar &= self.compareResults(other, lower, upper, sigma) + similar &= self.compareUniverses(other, lower, upper, sigma) + return similar + + @compareDocDecorator + def compareMetadata(self, other, header=False): + """ + Return True if the metadata (settings) are identical. + + Parameters + ---------- + other: :class:`ResultsReader` + Class against which to compare + {header} + + Returns + ------- + bool: + If the metadata are identical + + Raises + ------ + {compTypeErr} + """ + + self._checkCompareObj(other) + if header: + self._compareLogPreMsg(other, quantity='metadata') + myKeys = set(self.metadata.keys()) + otherKeys = set(other.metadata.keys()) + similar = not any(myKeys.symmetric_difference(otherKeys)) + commonKeys = getCommonKeys(myKeys, otherKeys, 'metadata') + skips = commonKeys.intersection(self.__METADATA_COMP_SKIPS) + if any(skips): + info("The following items will be skipped in the comparison\n\t{}" + .format(', '.join(sorted(skips)))) + for key in sorted(commonKeys): + if key in self.__METADATA_COMP_SKIPS: + continue + selfV = self.metadata[key] + otherV = other.metadata[key] + similar &= logDirectCompare(selfV, otherV, 0., 0., key) + + return similar + + @compareDocDecorator + def compareResults(self, other, lower=DEF_COMP_LOWER, + upper=DEF_COMP_UPPER, sigma=DEF_COMP_SIGMA, + header=False): + """ + Compare the contents of the results dictionary + + Parameters + ---------- + other: :class:`ResultsReader` + Class against which to compare + {compLimits} + {sigma} + {header} + + Returns + ------- + bool: + If the results data agree to given tolerances + + Raises + ------ + {compTypeErr} + """ + self._checkCompareObj(other) + if header: + self._compareLogPreMsg(other, lower, upper, sigma, 'results') + myRes = self.resdata + otherR = other.resdata + + commonTypeKeys = getKeyMatchingShapes(myRes, otherR, 'results') + + similar = len(commonTypeKeys) == len(myRes) == len(otherR) + + for key in sorted(commonTypeKeys): + mine = myRes[key] + theirs = otherR[key] + if key in RES_DATA_NO_UNCS: + similar &= logDirectCompare(mine, theirs, lower, upper, key) + continue + myVals, myUncs = splitValsUncs(mine) + theirVals, theirUncs = splitValsUncs(theirs) + similar &= getLogOverlaps(key, myVals, theirVals, myUncs, + theirUncs, sigma, relative=True) + return similar + + @compareDocDecorator + def compareUniverses(self, other, lower=DEF_COMP_LOWER, + upper=DEF_COMP_UPPER, sigma=DEF_COMP_SIGMA): + """ + Compare the contents of the ``universes`` dictionary + + Parameters + ---------- + other: :class:`ResultsReader` + Reader by which to compare + {compLimits} + {sigma} + + Returns + ------- + bool: + If the contents of the universes agree to given tolerances + + Raises + ------ + {compTypeErr} + """ + self._checkCompareObj(other) + myUniverses = self.universes + otherUniverses = other.universes + keyGoodTypes = getKeyMatchingShapes(myUniverses, otherUniverses, + 'universes') + + similar = len(keyGoodTypes) == 
len(myUniverses) == len(otherUniverses) + + for univKey in keyGoodTypes: + myUniv = myUniverses[univKey] + otherUniv = otherUniverses[univKey] + similar &= myUniv.compare(otherUniv, lower, upper, sigma) + return similar + def _cleanMetadata(self): """Replace some items in metadata dictionary with easier data types.""" mdata = self.metadata diff --git a/serpentTools/samplers/depletion.py b/serpentTools/samplers/depletion.py index 96d7207..de226c3 100644 --- a/serpentTools/samplers/depletion.py +++ b/serpentTools/samplers/depletion.py @@ -163,7 +163,7 @@ class SampledDepletedMaterial(SampledContainer, DepletedMaterialBase): ---------- {depAttrs:s} uncertainties: dict - Uncertainties for all variables stored in ``data`` + Absolute uncertainties for all variables stored in ``data`` allData: dict Dictionary where key, value pairs correspond to names of variables stored on this object and arrays of data from all files. diff --git a/serpentTools/utils/__init__.py b/serpentTools/utils/__init__.py index e17bbd1..b9dec17 100644 --- a/serpentTools/utils/__init__.py +++ b/serpentTools/utils/__init__.py @@ -1,190 +1,7 @@ """ Commonly used functions and utilities """ -from re import compile - -from numpy import array, ndarray - +from serpentTools.utils.core import * # noqa from serpentTools.utils.docstrings import * # noqa +from serpentTools.utils.compare import * # noqa from serpentTools.utils.plot import * # noqa - -# Regular expressions - -STR_REGEX = compile(r'\'.+\'') # string -VEC_REGEX = compile(r'(?<==.)\[.+?\]') # vector -SCALAR_REGEX = compile(r'=.+;') # scalar -FIRST_WORD_REGEX = compile(r'^\w+') # first word in the line - - -def str2vec(iterable, of=float, out=array): - """ - Convert a string or other iterable to vector. - - Parameters - ---------- - iterable: str or iterable - If string, will be split with ``split(splitAt)`` - to create a list. Every item in this list, or original - iterable, will be iterated over and converted accoring - to the other arguments. - of: type - Convert each value in ``iterable`` to this data type. - out: type - Return data type. Will be passed the iterable of - converted items of data dtype ``of``. - - Returns - ------- - vector - Iterable of all values of ``iterable``, or split variant, - converted to type ``of``. - - Examples - -------- - :: - - >>> v = "1 2 3 4" - >>> str2vec(v) - array([1., 2., 3., 4.,]) - - >>> str2vec(v, int, list) - [1, 2, 3, 4] - - >>> x = [1, 2, 3, 4] - >>> str2vec(x) - array([1., 2., 3., 4.,]) - - """ - vec = (iterable.split() if isinstance(iterable, str) - else iterable) - return out([of(xx) for xx in vec]) - - -def splitValsUncs(iterable, copy=False): - """ - Return even and odd indexed values from iterable - - Designed to extract expected values and uncertainties from - SERPENT vectors/matrices of the form - ``[x1, u1, x2, u2, ...]`` - - Slices along the last axis present on ``iterable``, e.g. - columns in 2D matrix. - - Parameters - ---------- - iterable: :class:`numpy.ndarray`or iterable - Initial arguments to be processed. If not - :class:`numpy.ndarray`, then strings will be converted - by calling :func:`str2vec`. Lists and tuples - will be sent directly to arrays with - :func:`numpy.array`. - copy: bool - If true, return a unique instance of the values - and uncertainties. 
Otherwise, returns a view - per numpy slicing methods - - Returns - ------- - :class:`numpy.ndarray` - Even indexed values from ``iterable`` - :class:`numpy.ndarray` - Odd indexed values from ``iterable`` - - Examples - -------- - :: - - >>> v = [1, 2, 3, 4] - >>> splitValsUncs(v) - array([1, 3]), array([2, 4]) - - >>> line = "1 2 3 4" - >>> splitValsUnc(line) - array([1, 3]), array([2, 4]) - - >>> v = [[1, 2], [3, 4]] - >>> splitValsUncs(v) - array([[1], [3]]), array([[2], [4]]) - - """ - - if not isinstance(iterable, ndarray): - iterable = (str2vec(iterable) if isinstance(iterable, str) - else array(iterable)) - vals = iterable[..., 0::2] - uncs = iterable[..., 1::2] - if copy: - return vals.copy(), uncs.copy() - return vals, uncs - - -def convertVariableName(variable): - """ - Return the mixedCase version of a SERPENT variable. - - Parameters - ---------- - variable: str - ``SERPENT_STYLE`` variable name to be converted - - Returns - ------- - str: - Variable name that has been split at underscores and - converted to ``mixedCase`` - - Examples - -------- - :: - - >>> v = "INF_KINF" - >>> convertVariableName(v) - infKinf - - >>> v = "VERSION" - >>> convertVariableName(v) - version - - """ - lowerSplits = [item.lower() for item in variable.split('_')] - if len(lowerSplits) == 1: - return lowerSplits[0] - return lowerSplits[0] + ''.join([item.capitalize() - for item in lowerSplits[1:]]) - - -LEADER_TO_WIKI = "http://serpent.vtt.fi/mediawiki/index.php/" - - -def linkToWiki(subLink, text=None): - """ - Return a string that will render as a hyperlink to the SERPENT wiki. - - Parameters - ---------- - subLink: str - Desired path inside the SERPENT wiki - following the - ``index.php`` - text: None or str - If given, use this as the shown text for the full link. - - Returns - ------- - str: - String that can be used as an rst hyperlink to the - SERPENT wiki - - Examples - -------- - >>> linkToWiki('Input_syntax_manual') - http://serpent.vtt.fi/mediawiki/index.php/Input_syntax_manual - >>> linkToWiki('Description_of_output_files#Burnup_calculation_output', - ... 
"Depletion Output") - `Depletion Output <http://serpent.vtt.fi/mediawiki/index.php/ - Description_of_output_files#Burnup_calculation_output>`_ - """ - fullLink = LEADER_TO_WIKI + subLink - if not text: - return fullLink - return "`{} <{}>`_".format(text, fullLink) diff --git a/serpentTools/utils/compare.py b/serpentTools/utils/compare.py new file mode 100644 index 0000000..fee2a3a --- /dev/null +++ b/serpentTools/utils/compare.py @@ -0,0 +1,634 @@ +""" +Comparison utilities +""" + +from collections import Iterable + +from numpy.core.defchararray import equal as charEqual +from numpy import ( + fabs, zeros_like, ndarray, array, greater, multiply, subtract, + equal, +) + +from serpentTools.messages import ( + error, + logIdentical, + logNotIdentical, + logAcceptableLow, + logAcceptableHigh, + logOutsideTols, + logDifferentTypes, + logMissingKeys, + logBadTypes, + logBadShapes, + logMapOfBadShapes, + logIdenticalWithUncs, + logInsideConfInt, + logOutsideConfInt, +) + +from serpentTools.utils.docstrings import compareDocDecorator + +LOWER_LIM_DIVISION = 1E-8 +"""Lower limit for denominator for division""" + +# +# Defaults for comparison +# +DEF_COMP_LOWER = 0 +DEF_COMP_UPPER = 10 +DEF_COMP_SIGMA = 2 + + +@compareDocDecorator +def getCommonKeys(d0, d1, quantity, desc0='first', desc1='second', + herald=error): + """ + Return a set of common keys from two dictionaries + + Also supports printing warning messages for keys not + found on one collection. + + If ``d0`` and ``d1`` are :class:`dict`, then the + keys will be obtained with ``d1.keys()``. Otherwise, + assume we have an iterable of keys and convert to + :class:`set`. + + Parameters + ---------- + d0: dict or iterable + d1: dict or iterable + Dictionary of keys or iterable of keys to be compared + quantity: str + Indicator as to what is being compared, e.g. ``'metadata'`` + {desc} + {herald} + Returns + ------- + set: + Keys found in both ``d{{0, 1}}`` + """ + k0 = d0.keys() if isinstance(d0, dict) else d0 + k1 = d1.keys() if isinstance(d1, dict) else d1 + s0 = set(k0) + s1 = set(k1) + + common = s0.intersection(s1) + missing = s0.symmetric_difference(s1) + if missing: + in0 = s0.difference(s1) + in1 = s1.difference(s0) + logMissingKeys(quantity, desc0, desc1, in0, in1, herald) + return common + + +TPL_FLOAT_INT = float, int + +# Error codes for direct compare +DC_STAT_GOOD = 0 +"""Values are identical to FP precision, or by ``==`` operator.""" +DC_STAT_LE_LOWER = 1 +"""Values are not identical, but max diff <= lower tolerance.""" +DC_STAT_MID = 10 +"""Values differ with max difference between lower and upper tolerance.""" +DC_STAT_GE_UPPER = 100 +"""Values differ with max difference greater than or equal to upper tolerance""" # noqa +DC_STAT_NOT_IDENTICAL = 200 +"""Values should be identical but are not, e.g. 
strings or bools.""" +DC_STAT_DIFF_TYPES = 255 +"""Values are of different types""" +DC_STAT_NOT_IMPLEMENTED = -1 +"""Direct compare is not implemented for these types""" +DC_STAT_DIFF_SHAPES = 250 +"""Values are of different shapes.""" + +COMPARE_STATUS_CODES = { + DC_STAT_GOOD: (logIdentical, True), + DC_STAT_LE_LOWER: (logAcceptableLow, True), + DC_STAT_MID: (logAcceptableHigh, True), + DC_STAT_NOT_IDENTICAL: (logNotIdentical, False), + DC_STAT_GE_UPPER: (logOutsideTols, False), + DC_STAT_DIFF_TYPES: (logDifferentTypes, False), + DC_STAT_DIFF_SHAPES: (logBadShapes, False), +} +"""Keys of status codes with ``(caller, return)`` values.""" + + +@compareDocDecorator +def directCompare(obj0, obj1, lower, upper): + """ + Return True if values are close enough to each other. + + Wrapper around various comparision tests for strings, numeric, and + arrays. + + Parameters + ---------- + obj0: str or float or int or :class:`numpy.ndarray` + obj1: str or float or int or :class:`numpy.ndarray` + Objects to compare + {compLimits} + quantity: str + Description of the value being compared. Will be + used to notify the user about any differences + + Returns + ------- + int: + Status code of the comparison. + + * {good} - Values are identical to floating point precision or, + for strings/booleans, are identical with the ``==`` operator + * {leLower} - Values are not identical, but the max difference + is less than ``lower``. + * {mid} - Values differ, with the max difference greater + than ``lower`` but less than ``upper`` + * {geUpper} - Values differ by greater than or equal to ``upper`` + * {notIdentical} - Values should be identical (strings, booleans), + but are not + * {diffShapes} - Numeric data has different shapes + * {diffTypes} - Values are of different types + * {notImplemented} - Type comparison is not supported. 
This means that + developers should either implement a test for this + data type, or use a different function + + See Also + -------- + * :func:`logDirectCompare` - Function that utilizes this and logs + the results using the :mod:`serpentTools.messages` module + """ + type0 = type(obj0) + type1 = type(obj1) + + if type0 != type1: + # can still compare floats and ints easily + if type0 not in TPL_FLOAT_INT or type1 not in TPL_FLOAT_INT: + return DC_STAT_DIFF_TYPES + if type0 in (str, bool): + if obj0 != obj1: + return DC_STAT_NOT_IDENTICAL + return DC_STAT_GOOD + + # Convert all to numpy arrays + if not isinstance(obj0, Iterable): + obj0 = array([obj0]) + obj1 = array([obj1]) + else: + # convert to array, but return if data-type is object + # need some indexable structure so dicts and sets won't work + obj0 = array(obj0) + obj1 = array(obj1) + if obj0.dtype.name == 'object': + return DC_STAT_NOT_IMPLEMENTED + if obj0.shape != obj1.shape: + return DC_STAT_DIFF_SHAPES + + if not upper: + return _directCompareIdentical(obj0, obj1) + return _directCompareWithTols(obj0, obj1, lower, upper) + + +def _directCompareIdentical(obj0, obj1): + """Compare arrays that should be identical""" + # special case for strings + if obj0.dtype.name[:3] == 'str': + compArray = charEqual(obj0, obj1) + else: + compArray = equal(obj0, obj1) + if compArray.all(): + return DC_STAT_GOOD + return DC_STAT_NOT_IDENTICAL + + +def _directCompareWithTols(obj0, obj1, lower, upper): + """Compare arrays that have some allowable tolerances""" + diff = multiply( + fabs(subtract(obj0, obj1)), 100 + ) + nonZI = greater(fabs(obj0), LOWER_LIM_DIVISION) + diff[nonZI] /= obj0[nonZI] + maxDiff = diff.max() + if maxDiff < LOWER_LIM_DIVISION: + return DC_STAT_GOOD + if maxDiff <= lower: + return DC_STAT_LE_LOWER + if maxDiff >= upper: + return DC_STAT_GE_UPPER + return DC_STAT_MID + + +directCompare.__doc__ = directCompare.__doc__.format( + good=DC_STAT_GOOD, + leLower=DC_STAT_LE_LOWER, + mid=DC_STAT_MID, + geUpper=DC_STAT_GE_UPPER, + notIdentical=DC_STAT_NOT_IDENTICAL, + diffTypes=DC_STAT_DIFF_TYPES, + notImplemented=DC_STAT_NOT_IMPLEMENTED, + diffShapes=DC_STAT_DIFF_SHAPES, +) + + +@compareDocDecorator +def logDirectCompare(obj0, obj1, lower, upper, quantity): + """ + Compare objects using :func:`directCompare` and log the result + + Parameters + ---------- + obj0: str or float or int or :class:`numpy.ndarray` + obj1: str or float or int or :class:`numpy.ndarray` + Objects to compare + {compLimits} + quantity: str + Description of the value being compared. Will be + used to notify the user about any differences + + Returns + ------- + bool: + ``True`` if the objects agree according to tolerances, or numerics + differ less than ``upper``. ``False`` otherwise + + Raises + ------ + TypeError: + If the objects being compared are not supported by + :func:`directCompare`. 
Developers should either extend the + function or utilize a different comparison function + + See Also + -------- + * :func:`directCompare` - function that does the comparison + * :func:`getOverlaps` - function for evaluating values with uncertainties + * :func:`getLogOverlaps` - function that logs the result of stastistical + comparisions + """ + result = directCompare(obj0, obj1, lower, upper) + if result < 0: # failures + if result == DC_STAT_NOT_IMPLEMENTED: + raise TypeError( + "directCompare is not configured to make tests on objects " + "of type {tp}\n\tQuantity: {k}\n\tUsers: Create a issue on " + "GitHub to alert developers.\n\tDevelopers: Update this " + "function or create a compare function " + "for {tp} objects.".format(k=quantity, tp=type(obj0))) + noticeTuple = [obj0, obj1, quantity] + if result in COMPARE_STATUS_CODES: + func, returnV = COMPARE_STATUS_CODES[result] + func(*noticeTuple) + return returnV + raise ValueError("Received value of {} from directCompare. Not sure " + "what this means.") + + +def splitDictByKeys(map0, map1, keySet=None): + """ + Return various sub-sets and dictionaries from two maps. + + Used to test the internal workings on :func:`getKeyMatchingShapes` + + Parameters + ---------- + map0: dict + map1: dict + Dictionaries to compare + keySet: set or None + Iterable collection of keys found in ``map0`` and ``map1``. + Missing keys will be returned from this function under + the ``missing0`` and ``missing1`` sets. If ``None``, take + to be the set of keys that exist in both maps + + Returns + ------- + missing0: set + Keys that exist in ``keySet`` but not in ``map0`` + missing1: set + Keys that exist in ``keySet`` but not in ``map1`` + differentTypes: dict + Dictionary with tuples ``{key: (t0, t1)}`` indicating the values + ``map0[key]`` and ``map1[key]`` are of different types + badShapes: dict + Dictionary with tuples ``{key: (t0, t1)}`` indicating the values + ``map0[key]`` and ``map1[key]`` are arrays of different shapes + goodKeys: set + Keys found in both ``map0`` and ``map1`` that are of the same type + or point to arrays of the same shape + """ + if keySet is None: + keySet = set(map1.keys()) + keySet.update(set(map0.keys())) + missing = {0: set(), 1: set()} + differentTypes = {} + badShapes = {} + goodKeys = set() + for key in keySet: + if key not in map0 or key not in map1: + for mapD, misK in zip((map0, map1), (0, 1)): + if key not in mapD: + missing[misK].add(key) + continue + v0 = map0[key] + v1 = map1[key] + t0 = type(v0) + t1 = type(v1) + if t0 != t1: + differentTypes[key] = (t0, t1) + continue + if t0 is ndarray: + if v0.shape != v1.shape: + badShapes[key] = (v0.shape, v1.shape) + continue + goodKeys.add(key) + + return missing[0], missing[1], differentTypes, badShapes, goodKeys + + +def getKeyMatchingShapes(map0, map1, quantity, keySet=None, desc0='first', + desc1='second'): + """ + Return a set of keys in map0/1 that point to arrays with identical shapes. + + Parameters + ---------- + keySet: set or list or tuple or iterable or None + Iterable container with keys that exist in map0 and map1. The contents + of ``map0/1`` under these keys will be compared. If ``None``, + will be determined by :func:`splitDictByKeys` + map0: dict + map1: dict + Two dictionaries containing at least all the keys in ``keySet``. + Objects under keys in ``keySet`` will have their sizes compared if + they are :class:`numpy.ndarray`. Non-arrays will be included only + if their types are identical + quantity: str + Indicator as to what is being compared, e.g. 
``'metadata'`` + desc0: str + decs1: str + Descriptions of the two dictionaries being compared. Used to alert the + user to the shortcomings of the two dictionaries + + Returns + ------- + set: + Set of all keys that exist in both dictionaries and are either + identical types, or are arrays of identical shapes + + See Also + -------- + * :func:`splitDictByKeys` + """ + missing0, missing1, differentTypes, badShapes, goodKeys = ( + splitDictByKeys(map0, map1, keySet)) + + # raise some messages + if any(missing0) or any(missing1): + logMissingKeys(quantity, desc0, desc1, missing0, missing1) + if differentTypes: + logBadTypes(quantity, desc0, desc1, differentTypes) + if badShapes: + logMapOfBadShapes(quantity, desc0, desc1, badShapes) + return goodKeys + + +@compareDocDecorator +def getOverlaps(arr0, arr1, unc0, unc1, sigma, relative=True): + r""" + Return the indicies of overlapping confidence intervals + + Parameters + ---------- + arr0: :class:`numpy.ndarray` + arr1: :class:`numpy.ndarray` + Arrays containing the expected values to be compared + unc0: :class:`numpy.ndarray` + unc1: :class:`numpy.ndarray` + Associated absolute uncertainties, :math:`1\sigma`, + corresponding to the values in ``arr0`` and ``arr1`` + {sigma} + relative: bool + True if uncertainties are relative and should be multiplied + by their respective values. Otherwise, assume values are + absolute + + Returns + ------- + :class:`numpy.ndarray` + Boolean array of equal shape to incoming arrays. + Every index with ``True`` as the value indicates that + the confidence intervals for the arrays overlap + at those indices. + + Examples + -------- + Using absolute uncertainties:: + + >>> from numpy import ones, zeros, array + >>> a0 = ones(4) + >>> a1 = ones(4) * 0.5 + >>> u0 = array([0, 0.2, 0.1, 0.2]) + >>> u1 = array([1, 0.55, 0.25, 0.4]) + + Here, the first point in the confidence interval for + ``a0`` is completely contained within that of ``a1``. + The upper limit of ``a1[1]`` is contained within the confidence + interval for ``a0``. + The confidence intervals for the third point do not overlap, + while the lower bound of ``a0[3]`` is within the confidence interval + of ``a1[3]``. + :: + + >>> getOverlaps(a0, a1, u0, u1, 1, relative=False) + array([True, True, False, True]) + + This function also works for multi-dimensional arrays as well. 
+ :: + + >>> a2 = a0.reshape(2, 2) + >>> a3 = a1.reshape(2, 2) + >>> u2 = u0.reshape(2, 2) + >>> u3 = u1.reshape(2, 2) + >>> getOverlaps(a2, a3, u2, u3 1, relative=False) + array([[ True, True], + [False, False]) + + Raises + ------ + IndexError + If the shapes of incoming arrays do not agree + + See Also + -------- + * :func:`getLogOverlaps` - High-level function that + uses this to report if two values have overlapping + confidence intervals + """ + shapes = {arg.shape for arg in (arr0, arr1, unc1, unc0)} + if len(shapes) != 1: + shapes = [str(a.shape) for a in [arr0, arr1, unc1, unc0]] + raise IndexError("Array shapes do not agree:\n{}" + .format(', '.join(shapes))) + err0 = fabs(unc0 * sigma) + err1 = fabs(unc1 * sigma) + + if relative: + err0 *= arr0 + err1 *= arr1 + + min0 = arr0 - err0 + max0 = arr0 + err0 + min1 = arr1 - err1 + max1 = arr1 + err1 + + overlap = zeros_like(arr0, dtype=bool) + + # Where values are identical to numerical precision + overlap[arr0 == arr1] = True + + min0le1 = min0 <= min1 + max0ge1 = max0 >= max1 + min1le0 = min1 <= min0 + max1ge0 = max1 >= max0 + + # locations where condidence intervals are completely contained + # in the other set + cont0In1 = min0le1 * (min0le1 == max0ge1) + overlap[cont0In1] = True + cont1In0 = min1le0 * (min1le0 == max1ge0) + overlap[cont1In0] = True + + # locations where min of 0 is less than 1, but max 0 > min 1 + # and the opposite + overlap[min0le1 * (max0 >= min1)] = True + overlap[min1le0 * (max1 >= min0)] = True + + # locations where max 0 > max 1, but min 0 < max 1 + # and the opposite + overlap[max0ge1 * (min0 <= max1)] = True + overlap[max1ge0 * (min1 <= max0)] = True + + return overlap + + +@compareDocDecorator +def getLogOverlaps(quantity, arr0, arr1, unc0, unc1, sigma, relative=True): + """ + Wrapper around :func:`getOverlaps` that logs the result + + Parameters + ---------- + quantity: str + Name of the value being compared + arr0: :class:`numpy.ndarray` + arr1: :class:`numpy.ndarray` + unc0: :class:`numpy.ndarray` + unc1: :class:`numpy.ndarray` + Arrays and their uncertainties to evaluate + {sigma} + relative: bool + If uncertainties are relative. Otherwise, assume absolute + uncertainties. + + Returns + ------- + bool: + ``True`` if all locations ``arr0`` and ``arr1`` are either + identical or within allowable statistical variations. + + See Also + -------- + * :func:`getOverlaps` - This function performs all the comparisons + while this function simply reports the output using + :mod:`serpentTools.messages` + """ + + if (arr0 == arr1).all(): + logIdenticalWithUncs(arr0, unc0, unc1, quantity) + return True + overlaps = getOverlaps(arr0, arr1, unc0, unc1, sigma, relative) + if overlaps.all(): + logInsideConfInt(arr0, unc0, arr1, unc1, quantity) + return True + logOutsideConfInt(arr0, unc0, arr1, unc1, quantity) + return False + + +@compareDocDecorator +def compareDictOfArrays(d0, d1, desc, lower=DEF_COMP_LOWER, + upper=DEF_COMP_UPPER, sigma=DEF_COMP_SIGMA, + u0={}, u1={}, relative=True): + """ + High-level routine for evaluating the similarities of two dictionaries + + The following tests are performed + + 1. Find a set of keys that both exist in ``d0`` and ``d1`` + and point to arrays with identical shapes using + :meth:`getKeyMatchingShapes` + 2. For each key in this common set, compare the values + with :meth:`logDirectCompare` or :meth:`getLogOverlaps`. + The latter is used if the key exists in ``u0`` and + ``u1``, provided uncertainty arrays are of identical shapes. 
+ + Parameters + ---------- + d0: dict + d1: dict + Dictionaries to be compared + desc: str + Descption of the two dictionaries. What data do they represent? + {compLimits} + {sigma} + u0: dict + u1: dict + If uncKeys is not ``None``, then find the uncertainties for data in + ``d0`` and ``d1`` under the same keys. + relative: bool + If this evaluates to ``true``, then uncertainties in ``u0`` and ``u1`` + are relative. + + Returns + ------- + bool + ``True`` If all comparisons pass + """ + similar = len(d0) == len(d1) + keysMatchingTypes = getKeyMatchingShapes(d0, d1, desc) + similar &= len(d0) == len(keysMatchingTypes) + + for key in sorted(keysMatchingTypes): + val0 = d0[key] + val1 = d1[key] + if key in u0 and key in u1: + unc0 = u0[key] + unc1 = u1[key] + similar &= getLogOverlaps(key, val0, val1, unc0, unc1, + sigma, relative) + continue + similar &= logDirectCompare(val0, val1, lower, upper, key) + return similar + + +FINAL_COMPARE_MSG = "Objects {} and {}{} agree within given tolerances" + + +def finalCompareMsg(obj0, obj1, similar): + """ + Return the string used to signify the conclusion of a comparison + + Mainly exposed to developers for testing purposes. + + Parameters + ---------- + obj0: + obj1: Subclass of :class:`~serpentTools.objects.base.BaseObject` + Objects that have been compared + similar: bool + Result of comparison + + Returns + ------- + str: + Concluding remark about the comparison. + """ + return FINAL_COMPARE_MSG.format(obj0, obj1, "" if similar else "do not") diff --git a/serpentTools/utils/core.py b/serpentTools/utils/core.py new file mode 100644 index 0000000..3a19ea7 --- /dev/null +++ b/serpentTools/utils/core.py @@ -0,0 +1,189 @@ +""" +Core utilities +""" + +from re import compile + +from numpy import array, ndarray + + +# Regular expressions + +STR_REGEX = compile(r'\'.+\'') # string +VEC_REGEX = compile(r'(?<==.)\[.+?\]') # vector +SCALAR_REGEX = compile(r'=.+;') # scalar +FIRST_WORD_REGEX = compile(r'^\w+') # first word in the line + + +def str2vec(iterable, of=float, out=array): + """ + Convert a string or other iterable to vector. + + Parameters + ---------- + iterable: str or iterable + If string, will be split with ``split(splitAt)`` + to create a list. Every item in this list, or original + iterable, will be iterated over and converted accoring + to the other arguments. + of: type + Convert each value in ``iterable`` to this data type. + out: type + Return data type. Will be passed the iterable of + converted items of data dtype ``of``. + + Returns + ------- + vector + Iterable of all values of ``iterable``, or split variant, + converted to type ``of``. + + Examples + -------- + :: + + >>> v = "1 2 3 4" + >>> str2vec(v) + array([1., 2., 3., 4.,]) + + >>> str2vec(v, int, list) + [1, 2, 3, 4] + + >>> x = [1, 2, 3, 4] + >>> str2vec(x) + array([1., 2., 3., 4.,]) + + """ + vec = (iterable.split() if isinstance(iterable, str) + else iterable) + return out([of(xx) for xx in vec]) + + +def splitValsUncs(iterable, copy=False): + """ + Return even and odd indexed values from iterable + + Designed to extract expected values and uncertainties from + SERPENT vectors/matrices of the form + ``[x1, u1, x2, u2, ...]`` + + Slices along the last axis present on ``iterable``, e.g. + columns in 2D matrix. + + Parameters + ---------- + iterable: :class:`numpy.ndarray`or iterable + Initial arguments to be processed. If not + :class:`numpy.ndarray`, then strings will be converted + by calling :func:`str2vec`. 
Lists and tuples + will be sent directly to arrays with + :func:`numpy.array`. + copy: bool + If true, return a unique instance of the values + and uncertainties. Otherwise, returns a view + per numpy slicing methods + + Returns + ------- + :class:`numpy.ndarray` + Even indexed values from ``iterable`` + :class:`numpy.ndarray` + Odd indexed values from ``iterable`` + + Examples + -------- + :: + + >>> v = [1, 2, 3, 4] + >>> splitValsUncs(v) + array([1, 3]), array([2, 4]) + + >>> line = "1 2 3 4" + >>> splitValsUnc(line) + array([1, 3]), array([2, 4]) + + >>> v = [[1, 2], [3, 4]] + >>> splitValsUncs(v) + array([[1], [3]]), array([[2], [4]]) + + """ + + if not isinstance(iterable, ndarray): + iterable = (str2vec(iterable) if isinstance(iterable, str) + else array(iterable)) + vals = iterable[..., 0::2] + uncs = iterable[..., 1::2] + if copy: + return vals.copy(), uncs.copy() + return vals, uncs + + +def convertVariableName(variable): + """ + Return the mixedCase version of a SERPENT variable. + + Parameters + ---------- + variable: str + ``SERPENT_STYLE`` variable name to be converted + + Returns + ------- + str: + Variable name that has been split at underscores and + converted to ``mixedCase`` + + Examples + -------- + :: + + >>> v = "INF_KINF" + >>> convertVariableName(v) + infKinf + + >>> v = "VERSION" + >>> convertVariableName(v) + version + + """ + lowerSplits = [item.lower() for item in variable.split('_')] + if len(lowerSplits) == 1: + return lowerSplits[0] + return lowerSplits[0] + ''.join([item.capitalize() + for item in lowerSplits[1:]]) + + +LEADER_TO_WIKI = "http://serpent.vtt.fi/mediawiki/index.php/" + + +def linkToWiki(subLink, text=None): + """ + Return a string that will render as a hyperlink to the SERPENT wiki. + + Parameters + ---------- + subLink: str + Desired path inside the SERPENT wiki - following the + ``index.php`` + text: None or str + If given, use this as the shown text for the full link. + + Returns + ------- + str: + String that can be used as an rst hyperlink to the + SERPENT wiki + + Examples + -------- + >>> linkToWiki('Input_syntax_manual') + http://serpent.vtt.fi/mediawiki/index.php/Input_syntax_manual + >>> linkToWiki('Description_of_output_files#Burnup_calculation_output', + ... "Depletion Output") + `Depletion Output <http://serpent.vtt.fi/mediawiki/index.php/ + Description_of_output_files#Burnup_calculation_output>`_ + """ + fullLink = LEADER_TO_WIKI + subLink + if not text: + return fullLink + return "`{} <{}>`_".format(text, fullLink) diff --git a/serpentTools/utils/docstrings.py b/serpentTools/utils/docstrings.py index cb32b74..499eb16 100644 --- a/serpentTools/utils/docstrings.py +++ b/serpentTools/utils/docstrings.py @@ -4,8 +4,12 @@ Utilities for modifying docstrings from functools import wraps from textwrap import dedent +from six import iteritems + __all__ = [ 'magicPlotDocDecorator', + 'compareDocDecorator', + 'compareDocReplacer', ] _MPL_AX = ':py:class:`matplotlib.axes.Axes`' @@ -102,3 +106,73 @@ def magicPlotDocDecorator(f): doc = doc.replace(lookF, replace) decorated.__doc__ = doc return decorated + + +COMPARE_DOC_DESC = """ + desc0: dict or None + desc1: dict or None + Description of the origin of each value set. Only needed + if ``quiet`` evalues to ``True``.""" +COMPARE_DOC_HERALD = """herald: callable + Function that accepts a single string argument used to + notify that differences were found. 
If + the function is not a callable object, a + :func:`serpentTools.messages.critical` message + will be printed and :func:`serpentTools.messages.error` + will be used.""" +COMPARE_DOC_LIMITS = """ + lower: float or int + Lower limit for relative tolerances in percent + Differences below this will be considered allowable + upper: float or int + Upper limit for relative tolerances in percent. Differences + above this will be considered failure and errors + messages will be raised""" +COMPARE_DOC_SIGMA = """sigma: int + Size of confidence interval to apply to + quantities with uncertainties. Quantities that do not + have overlapping confidence intervals will fail""" +COMPARE_DOC_TYPE_ERR = """TypeError + If ``other`` is not of the same class as this class + nor a subclass of this class""" +COMPARE_DOC_HEADER = """header: bool + Print/log an ``info`` message about this comparison.""" +COMPARE_DOC_MAPPING = { + 'herald': COMPARE_DOC_HERALD, + 'desc': COMPARE_DOC_DESC, + 'compLimits': COMPARE_DOC_LIMITS, + 'sigma': COMPARE_DOC_SIGMA, + 'compTypeErr': COMPARE_DOC_TYPE_ERR, + 'header': COMPARE_DOC_HEADER, +} + +COMPARE_FAIL_MSG = "Values {desc0} and {desc1} are not identical:\n\t" +COMPARE_WARN_MSG = ("Values {desc0} and {desc1} are not identical, but within " + "tolerances:\n\t") +COMPARE_PASS_MSG = "Values {desc0} and {desc0} are identical:\n\t" + + +def compareDocReplacer(doc): + """Make replacements for comparison docstrings.""" + if not doc: + return "" + doc = dedent(doc) + for magic, replace in iteritems(COMPARE_DOC_MAPPING): + lookF = '{' + magic + '}' + if lookF in doc: + doc = doc.replace(lookF, dedent(replace)) + return doc + + +def compareDocDecorator(f): + """Decorator that updates doc strings for comparison methods. + + Similar to :func:`serpentTools.plot.magicPlotDocDecorator` + but for comparison functions + """ + + @wraps(f) + def decorated(*args, **kwargs): + return f(*args, **kwargs) + decorated.__doc__ = compareDocReplacer(f.__doc__) + return decorated
[ENH] Ability to compare readers and objects

Implement a simple public method for readers and containers, `.compare(other)`, that returns `True` or `False` depending on the degree of difference between the two objects. This would be beneficial for comparing outputs across SERPENT versions or for sensitivity studies.

## Additional options

1. Allow tolerances (relative and/or absolute) to control the level of allowable difference
1. Take potential random behavior into account with an option to control the confidence interval
1. Utilize the [`messages`](https://github.com/CORE-GATECH-GROUP/serpent-tools/blob/develop/serpentTools/messages.py) module for displaying more information on what passes/fails comparisons

## Tasks

This issue can be closed when the following comparisons are implemented:

- [x] ResultsReader
- [x] HomogUniv
- [ ] BranchingReader
- [ ] DepletionReader
- [ ] DepletedMaterial
- [x] DetectorReader
- [x] Detector

Open to including other readers and containers on this list, but these are the *big* ones in my opinion.

## Path forward

1. Start a new feature branch for these comparisons off of develop - [`enh-compare`](https://github.com/CORE-GATECH-GROUP/serpent-tools/tree/enh-compare)
1. Treat this branch as a develop-style branch, with review and CI-protected pull requests
1. Add compare methods for readers and containers with branches off of this sub-branch to keep the process clean and segmented
1. Once a sufficient number of comparisons have been added to this main feature branch, merge, **not squash merge**, into develop
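For context, here is a short sketch of how the proposed API is exercised by the tests accompanying this change. `ref_dep.m` is one of the packaged example files those tests read; comparing a file against itself is only meant to demonstrate the call pattern, and the tolerance values are illustrative.

```python
import serpentTools

# Read two depletion outputs; the same packaged example file is read
# twice here purely to demonstrate the call pattern
ref = serpentTools.readDataFile('ref_dep.m')
other = serpentTools.readDataFile('ref_dep.m')

# Reader-level comparison: lower and upper are relative tolerances in percent,
# sigma is the confidence interval applied to quantities with uncertainties
similar = ref.compare(other, 1, 10, 2, verbosity='info')
print(similar)  # True when everything agrees within the given tolerances

# The same method is available on stored objects, e.g. a single material
fuel0 = ref['fuel']
fuel1 = other['fuel']
print(fuel0.compare(fuel1, 1, 10, 2))
```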
CORE-GATECH-GROUP/serpent-tools
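The detector and results tests below perturb stored values and check whether the reported confidence intervals still overlap. As a reference point, here is a self-contained sketch of the interval-overlap test that `getOverlaps` performs, reduced to the standard largest-lower-bound versus smallest-upper-bound check. It reproduces the example from that function's docstring but is not the library's exact implementation.

```python
import numpy as np


def intervalsOverlap(arr0, arr1, unc0, unc1, sigma, relative=True):
    """Simplified check for overlapping confidence intervals."""
    err0 = np.fabs(unc0) * sigma
    err1 = np.fabs(unc1) * sigma
    if relative:
        # Treat uncertainties as relative and scale by the expected values
        err0 = err0 * arr0
        err1 = err1 * arr1
    lo0, hi0 = arr0 - err0, arr0 + err0
    lo1, hi1 = arr1 - err1, arr1 + err1
    # Two intervals overlap when the larger lower bound does not
    # exceed the smaller upper bound
    return np.maximum(lo0, lo1) <= np.minimum(hi0, hi1)


a0 = np.ones(4)
a1 = np.ones(4) * 0.5
u0 = np.array([0, 0.2, 0.1, 0.2])
u1 = np.array([1, 0.55, 0.25, 0.4])
print(intervalsOverlap(a0, a1, u0, u1, sigma=1, relative=False))
# [ True  True False  True]
```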
diff --git a/serpentTools/tests/compare/__init__.py b/serpentTools/tests/compare/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/serpentTools/tests/compare/test_depletion.py b/serpentTools/tests/compare/test_depletion.py new file mode 100644 index 0000000..195b9ac --- /dev/null +++ b/serpentTools/tests/compare/test_depletion.py @@ -0,0 +1,110 @@ +""" +Test the comparisons of the depleted materials +""" + +import serpentTools +from serpentTools.tests.utils import TestCaseWithLogCapture + +DATA_FILE = 'ref_dep.m' +REF_MATERIAL = 'fuel' + + +class DepletionCompareHelper(TestCaseWithLogCapture): + """Read the reference files for creating the readers""" + + @classmethod + def setUpClass(cls): + cls.refReader = serpentTools.readDataFile(DATA_FILE) + cls.refMaterial = cls.refReader[REF_MATERIAL] + + def setUp(self): + self.otherReader = serpentTools.readDataFile(DATA_FILE) + self.otherMaterial = self.otherReader[REF_MATERIAL] + TestCaseWithLogCapture.setUp(self) + + def compare(self, lower=0, upper=0, sigma=0, verbosity='info'): + raise NotImplementedError + + def test_compareIdentical(self): + """Compare the objects against themselves for passage""" + self.assertTrue(self.compare(0, 0, 0)) + + def test_mishapenMetadata(self): + """Verify that changes in the metadata shape fail the comparison""" + numNames = len(self.refMaterial.names) + self.otherMaterial.names = self.refMaterial.names[:numNames - 1] + self.assertFalse(self.compare(0, 0, 0)) + + def test_missingData(self): + """Verify that the test fails if one object is missing data.""" + keys = list(self.refMaterial.data.keys()) + for key in keys: + data = self.otherMaterial.data.pop(key) + self.assertFalse(self.compare(0, 0, 0)) + self.assertMsgInLogs('ERROR', key, partial=True) + + # put things back in place + self.handler.logMessages = {} + self.otherMaterial.data[key] = data + + def test_minorTweakData(self): + """Verify that the test passes after minor tweaks to data""" + key = 'adens' + diffInPercent = 1. + self.otherMaterial.data[key] *= (1 + diffInPercent / 100) + # Test by setting the lower tolerance to barely above perturbation + self.assertTrue(self.compare(diffInPercent + 1E-6, diffInPercent * 2, + verbosity='info')) + self.assertMsgInLogs("INFO", key, partial=True) + for level in ["WARNING", "ERROR", "CRITICAL"]: + self.assertTrue(level not in self.handler.logMessages) + # Test again, with tighter lower tolerance and look for warning + self.assertTrue(self.compare(diffInPercent, diffInPercent * 2, + verbosity='info')) + self.assertMsgInLogs("WARNING", key, partial=True) + + +class DepletedMaterialComparisonTester(DepletionCompareHelper): + """TestCase that only compares depleted materials""" + + def compare(self, lower=0, upper=0, sigma=0, verbosity='info'): + return self.refMaterial.compare(self.otherMaterial, lower, upper, + sigma, verbosity=verbosity) + + +class DepletionReaderComparisonTester(DepletionCompareHelper): + """Class for comparing the reader compare method""" + + def compare(self, lower=0, upper=0, sigma=0, verbosity='info'): + return self.refReader.compare(self.otherReader, lower, upper, sigma, + verbosity=verbosity) + + def test_badMetadataShapes(self): + """ + Verify the comparison fails early for dissimilar metadata shapes. 
+ """ + newVecKey = 'badMetadataKey' + self.refReader.metadata[newVecKey] = [0, 1, 2, 3] + self.otherReader.metadata[newVecKey] = [0, 1, 2] + self.assertFalse(self.compare(100, 100, 100)) + self.assertMsgInLogs('ERROR', newVecKey, partial=True) + self.refReader.metadata.pop(newVecKey) + + def test_diffMaterialDicts(self): + """Verify the test fails if different materials are stored.""" + for key in self.otherReader.materials: + self.otherReader.materials.pop(key) + break + newMaterialKey = "newMateria" + self.otherReader.materials[newMaterialKey] = None + self.assertFalse(self.compare()) + self.assertMsgInLogs("ERROR", key, partial=True) + self.assertMsgInLogs("ERROR", newMaterialKey, partial=True) + + +del DepletionCompareHelper + + +if __name__ == '__main__': + from unittest import main + main() diff --git a/serpentTools/tests/compare/test_detector.py b/serpentTools/tests/compare/test_detector.py new file mode 100644 index 0000000..61dd824 --- /dev/null +++ b/serpentTools/tests/compare/test_detector.py @@ -0,0 +1,202 @@ +""" +Test the comparison utilities for detectors +""" + +from serpentTools.tests.utils import TestCaseWithLogCapture +from serpentTools.utils.compare import finalCompareMsg +from serpentTools import readDataFile + +TEST_FILE = 'bwr_det0.m' +DET_TO_MODIFY = 'xymesh' +UNMODIFIED_DETS = [ + 'spectrum', +] +# All other detectors that shall be unmodified + + +def compareObjs(ref, other): + """Fixture to call identical comparisons across tests.""" + return ref.compare(other, verbosity='debug') + + +class DetCompareHelper(TestCaseWithLogCapture): + """ + Helper case for testing the detector compare methods + + setUpClass: + *. Read the ``bwr_det0.m`` file + *. Store this reader on the class + + setUp: + *. Read the same file, but store this reader as the + one to mess up. + *. Call setUp from TestCaseWithLogCapture to capture + log messages + """ + + @classmethod + def setUpClass(cls): + cls.refReader = readDataFile(TEST_FILE) + cls.refDetector = cls.refReader.detectors[DET_TO_MODIFY] + + def setUp(self): + TestCaseWithLogCapture.setUp(self) + self.otherReader = readDataFile(TEST_FILE) + self.otherDetector = self.otherReader.detectors[DET_TO_MODIFY] + + +class DetGridsCompareTester(DetCompareHelper): + """ + Tweak the grid structure on a detector and ensure that logs + capture this. 
+ """ + + GRID_FMT = "Values for {} are {}" + + def setUp(self): + DetCompareHelper.setUp(self) + self.gridKeys = sorted(list(self.refDetector.grids.keys())) + + def test_identicalGrids(self): + """Verify that the inner grid compare works.""" + similar = compareObjs(self.refDetector, self.otherDetector) + self.assertTrue(similar, msg='output from comparison') + for gridKey in self.gridKeys: + self.assertMsgInLogs( + "DEBUG", self.GRID_FMT.format(gridKey, 'identical'), + partial=True) + + def test_modifiedGrids(self): + """Pick up on some errors due to modified grids.""" + # Changes + # X grid unchanged + # E grid slightly modified, but inside tolerances + # Y grid modified to be outside tolerances + # Z grid set to be of different shape + # Added a bad grid that should not exist in the reference + grids = self.otherDetector.grids + missingKey = 'NOT PRESENT' + grids['E'] *= 1.05 + grids['Z'] = grids['X'] + grids['Y'] *= 2 + grids[missingKey] = list(range(5)) + similar = compareObjs(self.refDetector, self.otherDetector) + self.assertFalse(similar, + msg="Mismatched grids did not induce failure.") + self.assertMsgInLogs( + "WARNING", + self.GRID_FMT.format('E', 'different, but within tolerances'), + partial=True) + self.assertMsgInLogs( + "ERROR", + self.GRID_FMT.format('Y', 'outside acceptable tolerances'), + partial=True) + self.assertMsgInLogs( + "ERROR", + "Z: {} - {}".format( + self.refDetector.grids['Z'].shape, + grids['Z'].shape), + partial=True) + self.assertMsgInLogs( + 'DEBUG', + self.GRID_FMT.format('X', 'identical'), + partial=True) + + +class TallyModifier(DetCompareHelper): + """Base class that modifies detectors and checks the comparisons.""" + + IDENTICAL_TALLY_MSG = "Expected values for tallies are identical" + INSIDE_CONF_MSG = "Confidence intervals for tallies overlap" + OUTISDE_CONF_MSG = ("Values for tallies are outside acceptable " + "statistical limits") + + def compare(self): + """Compare the two test objects and return the result.""" + return compareObjs(self.refObj, self.otherObj) + + @property + def refObj(self): + raise AttributeError + + @property + def otherObj(self): + raise AttributeError + + def checkUnmodifiedDetectors(self): + """Check all other detectors in the comparison.""" + pass + + def checkFinalStatus(self, obj0, obj1, status): + """Assert that the correct final status is logged.""" + expected = finalCompareMsg(obj0, obj1, status) + level = "INFO" if status else "WARNING" + self.assertMsgInLogs(level, expected) + + def test_unmodifiedCompare(self): + """Verify that w/o modifications the test passes""" + similar = self.compare() + self.assertTrue(similar) + self.assertMsgInLogs( + "DEBUG", self.IDENTICAL_TALLY_MSG, + partial=True) + self.checkFinalStatus(self.refObj, self.otherObj, True) + self.checkUnmodifiedDetectors() + + def test_withinConfIntervals(self): + """Verify that slight differences in tallies are logged.""" + self.otherDetector.tallies *= 1.01 + similar = self.compare() + self.assertTrue(similar) + self.assertMsgInLogs( + "DEBUG", self.INSIDE_CONF_MSG, partial=True) + self.checkFinalStatus(self.refObj, self.otherObj, True) + self.checkUnmodifiedDetectors() + + def test_outsideConfIntervals(self): + """Verify that large differences in tallies are logged.""" + self.otherDetector.tallies *= 2.0 + similar = self.compare() + self.assertFalse(similar) + self.assertMsgInLogs( + "ERROR", self.OUTISDE_CONF_MSG, partial=True) + self.checkFinalStatus(self.refObj, self.otherObj, False) + self.checkUnmodifiedDetectors() + + +class 
DetectorCompareTester(TallyModifier): + """Class that tests a compare across detectors""" + + @property + def refObj(self): + return self.refDetector + + @property + def otherObj(self): + return self.otherDetector + + +class DetectorReaderCompareTester(TallyModifier): + """Class that also tests the reader-level compare for completeness.""" + + @property + def refObj(self): + return self.refReader + + @property + def otherObj(self): + return self.otherReader + + def checkUnmodifiedDetectors(self): + for detName in UNMODIFIED_DETS: + myDet = self.refReader.detectors[detName] + otherDet = self.otherReader.detectors[detName] + finalMsg = finalCompareMsg(myDet, otherDet, True) + self.assertMsgInLogs("INFO", finalMsg) + + +del TallyModifier + +if __name__ == '__main__': + from unittest import main + main() diff --git a/serpentTools/tests/compare/test_results.py b/serpentTools/tests/compare/test_results.py new file mode 100644 index 0000000..32a1b6f --- /dev/null +++ b/serpentTools/tests/compare/test_results.py @@ -0,0 +1,80 @@ +""" +Test object-level compare methods +""" + +from serpentTools.tests.utils import TestCaseWithLogCapture +from serpentTools.data import readDataFile + + +RES_DATA_FILE = 'pwr_res.m' +# strings that should appear in specific logging messages after formatted +# with a single value key +IDENTICAL_KEY_FMT = "for {} are identical" +OVERLAPPING_KEY_FMT = "{} overlap" +WITHIN_TOLS_KEY_FMT = "{} are different, but within tolerance" +OUTSIDE_TOLS_KEY_FMT = "{} are outside acceptable tolerances" + + +class ResultsCompareTester(TestCaseWithLogCapture): + """ + Test the ResultsReader comparison methods + """ + + @classmethod + def setUpClass(cls): + cls.r0 = readDataFile(RES_DATA_FILE) + cls.resultsKeys = set(cls.r0.resdata.keys()) + cls.univKeys = set() + for univ in cls.r0.universes.values(): + cls.univKeys.update(set(univ.infExp.keys())) + cls.univKeys.update(set(univ.b1Exp.keys())) + cls.univKeys.update(set(univ.gc.keys())) + break + + def setUp(self): + self.r1 = readDataFile(RES_DATA_FILE) + TestCaseWithLogCapture.setUp(self) + + def _runCompare(self, verbosity): + return self.r0.compare(self.r1, verbosity=verbosity) + + def test_fullCompare(self): + """Test the primary comparison method with no modifications.""" + out = self._runCompare('debug') + self.assertTrue(out, msg="Result from comparison") + self.assertMsgInLogs('DEBUG', 'Updated setting verbosity to debug') + for resKey in self.resultsKeys: + self.assertMsgInLogs('DEBUG', IDENTICAL_KEY_FMT.format(resKey), + partial=True) + + def test_moddedResults(self): + """ + Verify that the results comparison logs errors in modified data. 
+ """ + resd = self.r1.resdata + # drastically increase one value with uncertainties + # to ensure disagreement + resd['anaKeff'][:, ::2] *= 2 + # slightly modify one value with uncertainties to force overlapping + # confidence intervals, but not identical quantities + resd['colKeff'][:, ::2] *= 1.01 + resd['colKeff'][:, 1::2] *= 1.05 + # modify a value w/o uncertainties slightly + resd['allocMemsize'] *= 1.01 + # drastically modify a value w/o uncertainties + resd['uresAvail'] *= -2 + out = self._runCompare('debug') + self.assertFalse(out) + self.assertMsgInLogs('ERROR', 'anaKeff', partial=True) + self.assertMsgInLogs( + 'DEBUG', OVERLAPPING_KEY_FMT.format('colKeff'), partial=True) + self.assertMsgInLogs( + 'WARNING', WITHIN_TOLS_KEY_FMT.format('allocMemsize'), + partial=True) + self.assertMsgInLogs( + 'ERROR', OUTSIDE_TOLS_KEY_FMT.format('uresAvail'), partial=True) + + +if __name__ == '__main__': + from unittest import main + main() diff --git a/serpentTools/tests/test_base_compare.py b/serpentTools/tests/test_base_compare.py new file mode 100644 index 0000000..82c5c19 --- /dev/null +++ b/serpentTools/tests/test_base_compare.py @@ -0,0 +1,56 @@ +"""Test the basic aspect of the comparison system.""" + +from unittest import TestCase + +from serpentTools.objects.base import BaseObject +from serpentTools.parsers.results import ResultsReader + + +class SafeCompare(BaseObject): + """Object that can use the comparison method.""" + + def _compare(self, *args, **kwargs): + return True + + +class SafeSubclass(SafeCompare): + pass + + +class BaseCompareTester(TestCase): + """Class responsible for testing the basic compare system.""" + + def setUp(self): + self.obj = SafeCompare() + + def test_badCompType(self): + """Verify that comparisons against bad-types raise errors.""" + with self.assertRaises(TypeError): + self.obj.compare(ResultsReader(None)) + with self.assertRaises(TypeError): + self.obj.compare(list()) + + def test_safeCompare(self): + """ + Verify that the compare method doesn't fail when + comparing against similar objects or subclasses + """ + self.obj.compare(SafeCompare()) + self.obj.compare(SafeSubclass()) + + def test_badArgs(self): + """Verify that the compare method fails for bad arguments.""" + other = SafeCompare() + with self.assertRaises(ValueError): + self.obj.compare(other, lower=-1) + with self.assertRaises(ValueError): + self.obj.compare(other, upper=-1) + with self.assertRaises(ValueError): + self.obj.compare(other, sigma=-1) + with self.assertRaises(ValueError): + self.obj.compare(other, lower=100, upper=1) + + +if __name__ == '__main__': + from unittest import main + main() diff --git a/serpentTools/tests/test_utils.py b/serpentTools/tests/test_utils.py index 89301b2..ce80756 100644 --- a/serpentTools/tests/test_utils.py +++ b/serpentTools/tests/test_utils.py @@ -2,16 +2,32 @@ Test the various utilities in serpentTools/utils.py """ -import unittest +from unittest import TestCase -from numpy import arange, ndarray, array +from numpy import arange, ndarray, array, ones, ones_like, zeros_like from numpy.testing import assert_array_equal from six import iteritems -from serpentTools.utils import convertVariableName, splitValsUncs, str2vec +from serpentTools.utils import ( + convertVariableName, + splitValsUncs, + str2vec, + getCommonKeys, + directCompare, + getOverlaps, + splitDictByKeys, + DC_STAT_GOOD, + DC_STAT_LE_LOWER, + DC_STAT_MID, + DC_STAT_GE_UPPER, + DC_STAT_NOT_IDENTICAL, + DC_STAT_NOT_IMPLEMENTED, + DC_STAT_DIFF_TYPES, + DC_STAT_DIFF_SHAPES, +) -class 
VariableConverterTester(unittest.TestCase): +class VariableConverterTester(TestCase): """Class for testing our variable name conversion function.""" def test_variableConversion(self): @@ -26,7 +42,7 @@ class VariableConverterTester(unittest.TestCase): self.assertEqual(expected, actual, msg=serpentStyle) -class VectorConverterTester(unittest.TestCase): +class VectorConverterTester(TestCase): """Class for testing the str2vec function""" def setUp(self): @@ -60,7 +76,7 @@ class VectorConverterTester(unittest.TestCase): self.assertEqual(expected, actual, msg=case) -class SplitValsTester(unittest.TestCase): +class SplitValsTester(TestCase): """Class that tests splitValsUncs.""" def setUp(self): @@ -93,5 +109,274 @@ class SplitValsTester(unittest.TestCase): assert_array_equal(expectedU, actualU, err_msg="Uncertainties") +class CommonKeysTester(TestCase): + """Class that tests getCommonKeys""" + + def test_goodKeys_dict(self): + """Verify a complete set of keys is returned from getCommonKeys""" + d0 = {1: None, '2': None, (1, 2, 3): "tuple"} + expected = set(d0.keys()) + actual = getCommonKeys(d0, d0, 'identical dictionary') + self.assertSetEqual(expected, actual, + msg="Passed two dictionaries") + actual = getCommonKeys(d0, expected, 'dictionary and set') + self.assertSetEqual(expected, actual, + msg="Passed dictionary and set") + + def test_getKeys_missing(self): + """Verify that missing keys are properly notified.""" + log = [] + d0 = {1, 2, 3} + emptyS = set() + desc0 = "xObj0x" + desc1 = "xObj1x" + common = getCommonKeys(d0, emptyS, 'missing keys', desc0, desc1, + herald=log.append) + self.assertSetEqual(emptyS, common) + self.assertEqual(len(log), 1, msg="Failed to append warning message") + warnMsg = log[0] + self.assertIsInstance(warnMsg, str, msg="Log messages is not string") + for desc in (desc0, desc1): + errMsg = "Description {} missing from warning message\n{}" + self.assertIn(desc, warnMsg, msg=errMsg.format(desc, warnMsg)) + + +class DirectCompareTester(TestCase): + """Class for testing utils.directCompare.""" + + NUMERIC_ITEMS = ( + [0, 0.001], + [1E-8, 0], + [array([1, 1, ]), array([1, 1.0001])], + ) + + def checkStatus(self, expected, obj0, obj1, lower, upper, msg=None): + """Wrapper around directCompare with ``args``. 
Pass ``kwargs`` to + assertEqual.""" + actual = directCompare(obj0, obj1, lower, upper) + self.assertEqual(expected, actual, msg=msg) + + def test_badTypes(self): + """Verify that two objects of different types return -1.""" + status = DC_STAT_DIFF_TYPES + value = 1 + for otherType in (bool, str): + self.checkStatus(status, value, otherType(value), 0, 1, + msg=str(otherType)) + asIterable = [value, ] + for otherType in (list, set, tuple, array): + self.checkStatus(status, value, otherType(asIterable), 0, 1, + msg=str(otherType)) + + def test_identicalString(self): + """Verify that identical strings return 0.""" + msg = obj = 'identicalStrings' + status = DC_STAT_GOOD + self.checkStatus(status, obj, obj, 0, 1, msg=msg) + + def test_dissimilarString(self): + """Verify returns the proper code for dissimilar strings.""" + msg = "dissimilarStrings" + status = DC_STAT_NOT_IDENTICAL + self.checkStatus(status, 'item0', 'item1', 0, 1, msg=msg) + + def _testNumericsForItems(self, status, lower, upper): + msg = "lower: {lower}, upper: {upper}\n{}\n{}" + for (obj0, obj1) in self.NUMERIC_ITEMS: + self.checkStatus(status, obj0, obj1, lower, upper, + msg=msg.format(obj0, obj1, lower=lower, + upper=upper)) + + def test_acceptableLow(self): + """Verify returns the proper code for close numerics.""" + lower = 5 + upper = 1E4 + self._testNumericsForItems(DC_STAT_LE_LOWER, lower, upper) + + def test_acceptableHigh(self): + """Verify returns the proper code for close but not quite values.""" + lower = 0 + upper = 1E4 + self._testNumericsForItems(DC_STAT_MID, lower, upper) + + def test_outsideTols(self): + """Verify returns the proper code for values outside tolerances.""" + lower = 1E-8 + upper = 1E-8 + self._testNumericsForItems(DC_STAT_GE_UPPER, lower, upper) + + def test_notImplemented(self): + """Check the not-implemented cases.""" + self.checkStatus(DC_STAT_NOT_IMPLEMENTED, {'hello': 'world'}, + {'hello': 'world'}, 0, 0, + msg="testing on dictionaries") + + def test_stringArrays(self): + """Verify the function handles string arrays properly.""" + stringList = ['hello', 'world', '1', '-1'] + vec0 = array(stringList) + self.checkStatus(DC_STAT_GOOD, vec0, vec0, 0, 0, + msg='Identical string arrays') + vec1 = array(stringList) + vec1[-1] = 'foobar' + self.checkStatus(DC_STAT_NOT_IDENTICAL, vec0, vec1, 0, 0, + msg='Dissimilar string arrays') + + def test_diffShapes(self): + """ + Verify that that directCompare fails for arrays of different shape. 
+ """ + vec0 = [0, 1, 2, 3, 4] + vec1 = [0, 1] + mat0 = [[0, 1], [1, 2]] + mat1 = [[0, 1, 2], [3, 4, 5], [6, 7, 8]] + self.checkStatus(DC_STAT_DIFF_SHAPES, vec0, vec1, 0, 0, + msg="Compare two vectors.") + self.checkStatus(DC_STAT_DIFF_SHAPES, vec0, mat0, 0, 0, + msg="Compare vector and array.") + self.checkStatus(DC_STAT_DIFF_SHAPES, mat0, mat1, 0, 0, + msg="Compare vector and array.") + + +class OverlapTester(TestCase): + """Class for testing the Overlapping uncertainties function.""" + + a0 = ones(4) + a1 = ones(4) * 0.5 + u0 = array([0, 0.2, 0.1, 0.2]) + u1 = array([1, 0.55, 0.25, 0.4]) + expected = array([True, True, False, True]) + sigma = 1 + relative = False + + _errMsg = "Sigma:{}\na0:\n{}\nu0:\n{}\na1:\n{}\nu1:\n{}" + + def _test(self, expected, a0, a1, u0, u1, sigma, relative): + """Symmetric test on the data by switching the order of arguments.""" + assert_array_equal(expected, getOverlaps(a0, a1, u0, u1, sigma, + relative), + err_msg=self._errMsg.format(a0, u0, a1, u1, sigma)) + assert_array_equal(expected, getOverlaps(a1, a0, u1, u0, sigma, + relative), + err_msg=self._errMsg.format(sigma, a1, u1, a0, u0)) + + def _testWithReshapes(self, expected, a0, a1, u0, u1, sigma, shape, + relative): + """Call symmetric test twice, using reshaped arrays the second time.""" + self._test(expected, a0, a1, u0, u1, sigma, relative) + ra0, ra1, ru0, ru1 = [arg.reshape(*shape) for arg in [a0, a1, u0, u1]] + self._test(expected.reshape(*shape), ra0, ra1, ru0, ru1, sigma, + relative) + + def test_overlap_absolute(self): + """Verify the getOverlaps works using absolute uncertainties.""" + self._testWithReshapes(self.expected, self.a0, self.a1, self.u0, + self.u1, self.sigma, (2, 2), False) + + def test_overlap_relative(self): + """Verify the getOverlaps works using relative uncertainties.""" + u0 = self.u0 / self.a0 + u1 = self.u1 / self.a1 + self._testWithReshapes(self.expected, self.a0, self.a1, u0, + u1, self.sigma, (2, 2), True) + + @staticmethod + def _setupIdentical(nItems, shape=None): + arr = arange(nItems) + if shape is not None: + arr = arr.reshape(*shape) + unc = zeros_like(arr) + expected = ones_like(arr, dtype=bool) + return arr, unc, expected + + def test_overlap_identical_1D(self): + """ + Verify that all positions are found to overlap for identical arrays. + """ + vec, unc, expected = self._setupIdentical(8) + self._test(expected, vec, vec, unc, unc, 1, False) + + def test_overlap_identical_2D(self): + """ + Verify that all positions are found to overlap for identical arrays. + """ + vec, unc, expected = self._setupIdentical(8, (2, 4)) + self._test(expected, vec, vec, unc, unc, 1, False) + + def test_overlap_identical_3D(self): + """ + Verify that all positions are found to overlap for identical arrays. 
+ """ + vec, unc, expected = self._setupIdentical(8, (2, 2, 2)) + self._test(expected, vec, vec, unc, unc, 1, False) + + def test_overlap_badshapes(self): + """Verify IndexError is raised for bad shapes.""" + vec = arange(4) + mat = vec.reshape(2, 2) + with self.assertRaises(IndexError): + getOverlaps(vec, mat, vec, mat, 1) + + +class SplitDictionaryTester(TestCase): + """Class for testing utils.splitDictByKeys.""" + + def setUp(self): + self.map0 = { + 'hello': 'world', + 'missingFrom1': True, + 'infKeff': arange(2), + 'float': 0.24, + 'absKeff': arange(2), + 'anaKeff': arange(6), + 'notBool': 1, + } + self.map1 = { + 'hello': 'world', + 'infKeff': arange(2), + 'float': 0.24, + 'missingFrom0': True, + 'notBool': False, + 'anaKeff': arange(2), + 'absKeff': arange(2), + } + self.badTypes = {'notBool': (int, bool)} + self.badShapes = {'anaKeff': ((6, ), (2, )), } + self.goodKeys = {'hello', 'absKeff', 'float', 'infKeff', } + + def callTest(self, keySet=None): + return splitDictByKeys(self.map0, self.map1, keySet) + + def test_noKeys(self): + """Verify that splitDictByKeys works when keySet is None.""" + m0, m1, badTypes, badShapes, goodKeys = self.callTest(None) + self.assertSetEqual({'missingFrom0', }, m0) + self.assertSetEqual({'missingFrom1', }, m1) + self.assertDictEqual(self.badTypes, badTypes) + self.assertDictEqual(self.badShapes, badShapes) + self.assertSetEqual(self.goodKeys, goodKeys) + + def test_keySet_all(self): + """Verify that splitDictByKeys works when keySet is all keys.""" + keys = set(self.map0.keys()) + keys.update(set(self.map1.keys())) + keys.add('NOT IN ANY MAP') + missing0 = set() + missing1 = set() + for key in keys: + if key not in self.map0: + missing0.add(key) + if key not in self.map1: + missing1.add(key) + + m0, m1, badTypes, badShapes, goodKeys = self.callTest(keys) + self.assertSetEqual(missing0, m0) + self.assertSetEqual(missing1, m1) + self.assertDictEqual(self.badTypes, badTypes) + self.assertDictEqual(self.badShapes, badShapes) + self.assertSetEqual(self.goodKeys, goodKeys) + + if __name__ == '__main__': - unittest.main() + from unittest import main + main()
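The `OverlapTester` fixture above pins down what `getOverlaps` is expected to report: positions where the sigma-scaled uncertainty bands of two arrays intersect. A rough sketch consistent with that fixture follows; it is inferred from the test data only, `overlap_sketch` is a hypothetical name, and the actual serpentTools implementation may differ:

```python
import numpy as np

def overlap_sketch(a0, a1, u0, u1, sigma, relative=False):
    """Boolean mask of positions whose sigma-scaled uncertainty bands overlap."""
    a0, a1, u0, u1 = map(np.asarray, (a0, a1, u0, u1))
    if a0.shape != a1.shape:
        raise IndexError("Shapes differ: {} vs {}".format(a0.shape, a1.shape))
    if relative:  # convert relative uncertainties to absolute
        u0 = u0 * np.fabs(a0)
        u1 = u1 * np.fabs(a1)
    return np.fabs(a0 - a1) <= sigma * (u0 + u1)

# Reproduces the expected mask from the fixture: [True, True, False, True]
a0, a1 = np.ones(4), np.ones(4) * 0.5
u0 = np.array([0.0, 0.2, 0.1, 0.2])
u1 = np.array([1.0, 0.55, 0.25, 0.4])
print(overlap_sketch(a0, a1, u0, u1, sigma=1))
```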
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 3, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 14 }
0.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio" ], "pre_install": null, "python": "3.6", "reqs_path": [ "requirements-test.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
anyio==3.6.2 argon2-cffi==21.3.0 argon2-cffi-bindings==21.2.0 async-generator==1.10 attrs==22.2.0 Babel==2.11.0 backcall==0.2.0 bleach==4.1.0 certifi==2021.5.30 cffi==1.15.1 charset-normalizer==2.0.12 comm==0.1.4 contextvars==2.4 coverage==6.2 cycler==0.11.0 dataclasses==0.8 decorator==5.1.1 defusedxml==0.7.1 entrypoints==0.4 execnet==1.9.0 flake8==5.0.4 idna==3.10 immutables==0.19 importlib-metadata==4.2.0 iniconfig==1.1.1 ipykernel==5.5.6 ipython==7.16.3 ipython-genutils==0.2.0 ipywidgets==7.8.5 jedi==0.17.2 Jinja2==3.0.3 json5==0.9.16 jsonschema==3.2.0 jupyter==1.1.1 jupyter-client==7.1.2 jupyter-console==6.4.3 jupyter-core==4.9.2 jupyter-server==1.13.1 jupyterlab==3.2.9 jupyterlab-pygments==0.1.2 jupyterlab-server==2.10.3 jupyterlab_widgets==1.1.11 kiwisolver==1.3.1 MarkupSafe==2.0.1 matplotlib==2.2.3 mccabe==0.7.0 mistune==0.8.4 nbclassic==0.3.5 nbclient==0.5.9 nbconvert==6.0.7 nbformat==5.1.3 nest-asyncio==1.6.0 notebook==6.4.10 numpy==1.19.5 packaging==21.3 pandas==1.1.5 pandocfilters==1.5.1 parso==0.7.1 pexpect==4.9.0 pickleshare==0.7.5 pluggy==1.0.0 prometheus-client==0.17.1 prompt-toolkit==3.0.36 ptyprocess==0.7.0 py==1.11.0 pycodestyle==2.9.1 pycparser==2.21 pyflakes==2.5.0 Pygments==2.14.0 pyparsing==3.1.4 pyrsistent==0.18.0 pytest==7.0.1 pytest-asyncio==0.16.0 pytest-cov==4.0.0 pytest-mock==3.6.1 pytest-xdist==3.0.2 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==6.0.1 pyzmq==25.1.2 requests==2.27.1 scipy==1.5.4 Send2Trash==1.8.3 -e git+https://github.com/CORE-GATECH-GROUP/serpent-tools.git@03997bdce0a5adb75cf5796278ea61b799f7b6dc#egg=serpentTools six==1.17.0 sniffio==1.2.0 terminado==0.12.1 testpath==0.6.0 tomli==1.2.3 tornado==6.1 traitlets==4.3.3 typing_extensions==4.1.1 urllib3==1.26.20 wcwidth==0.2.13 webencodings==0.5.1 websocket-client==1.3.1 widgetsnbextension==3.6.10 zipp==3.6.0
name: serpent-tools channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - anyio==3.6.2 - argon2-cffi==21.3.0 - argon2-cffi-bindings==21.2.0 - async-generator==1.10 - attrs==22.2.0 - babel==2.11.0 - backcall==0.2.0 - bleach==4.1.0 - cffi==1.15.1 - charset-normalizer==2.0.12 - comm==0.1.4 - contextvars==2.4 - coverage==6.2 - cycler==0.11.0 - dataclasses==0.8 - decorator==5.1.1 - defusedxml==0.7.1 - entrypoints==0.4 - execnet==1.9.0 - flake8==5.0.4 - idna==3.10 - immutables==0.19 - importlib-metadata==4.2.0 - iniconfig==1.1.1 - ipykernel==5.5.6 - ipython==7.16.3 - ipython-genutils==0.2.0 - ipywidgets==7.8.5 - jedi==0.17.2 - jinja2==3.0.3 - json5==0.9.16 - jsonschema==3.2.0 - jupyter==1.1.1 - jupyter-client==7.1.2 - jupyter-console==6.4.3 - jupyter-core==4.9.2 - jupyter-server==1.13.1 - jupyterlab==3.2.9 - jupyterlab-pygments==0.1.2 - jupyterlab-server==2.10.3 - jupyterlab-widgets==1.1.11 - kiwisolver==1.3.1 - markupsafe==2.0.1 - matplotlib==2.2.3 - mccabe==0.7.0 - mistune==0.8.4 - nbclassic==0.3.5 - nbclient==0.5.9 - nbconvert==6.0.7 - nbformat==5.1.3 - nest-asyncio==1.6.0 - notebook==6.4.10 - numpy==1.19.5 - packaging==21.3 - pandas==1.1.5 - pandocfilters==1.5.1 - parso==0.7.1 - pexpect==4.9.0 - pickleshare==0.7.5 - pluggy==1.0.0 - prometheus-client==0.17.1 - prompt-toolkit==3.0.36 - ptyprocess==0.7.0 - py==1.11.0 - pycodestyle==2.9.1 - pycparser==2.21 - pyflakes==2.5.0 - pygments==2.14.0 - pyparsing==3.1.4 - pyrsistent==0.18.0 - pytest==7.0.1 - pytest-asyncio==0.16.0 - pytest-cov==4.0.0 - pytest-mock==3.6.1 - pytest-xdist==3.0.2 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==6.0.1 - pyzmq==25.1.2 - requests==2.27.1 - scipy==1.5.4 - send2trash==1.8.3 - six==1.17.0 - sniffio==1.2.0 - terminado==0.12.1 - testpath==0.6.0 - tomli==1.2.3 - tornado==6.1 - traitlets==4.3.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - wcwidth==0.2.13 - webencodings==0.5.1 - websocket-client==1.3.1 - widgetsnbextension==3.6.10 - zipp==3.6.0 prefix: /opt/conda/envs/serpent-tools
[ "serpentTools/tests/compare/test_depletion.py::DepletedMaterialComparisonTester::test_compareIdentical", "serpentTools/tests/compare/test_depletion.py::DepletedMaterialComparisonTester::test_minorTweakData", "serpentTools/tests/compare/test_depletion.py::DepletedMaterialComparisonTester::test_mishapenMetadata", "serpentTools/tests/compare/test_depletion.py::DepletedMaterialComparisonTester::test_missingData", "serpentTools/tests/compare/test_depletion.py::DepletionReaderComparisonTester::test_badMetadataShapes", "serpentTools/tests/compare/test_depletion.py::DepletionReaderComparisonTester::test_compareIdentical", "serpentTools/tests/compare/test_depletion.py::DepletionReaderComparisonTester::test_diffMaterialDicts", "serpentTools/tests/compare/test_depletion.py::DepletionReaderComparisonTester::test_minorTweakData", "serpentTools/tests/compare/test_depletion.py::DepletionReaderComparisonTester::test_mishapenMetadata", "serpentTools/tests/compare/test_depletion.py::DepletionReaderComparisonTester::test_missingData", "serpentTools/tests/compare/test_detector.py::DetGridsCompareTester::test_identicalGrids", "serpentTools/tests/compare/test_detector.py::DetGridsCompareTester::test_modifiedGrids", "serpentTools/tests/compare/test_detector.py::DetectorCompareTester::test_outsideConfIntervals", "serpentTools/tests/compare/test_detector.py::DetectorCompareTester::test_unmodifiedCompare", "serpentTools/tests/compare/test_detector.py::DetectorCompareTester::test_withinConfIntervals", "serpentTools/tests/compare/test_detector.py::DetectorReaderCompareTester::test_outsideConfIntervals", "serpentTools/tests/compare/test_detector.py::DetectorReaderCompareTester::test_unmodifiedCompare", "serpentTools/tests/compare/test_detector.py::DetectorReaderCompareTester::test_withinConfIntervals", "serpentTools/tests/compare/test_results.py::ResultsCompareTester::test_fullCompare", "serpentTools/tests/compare/test_results.py::ResultsCompareTester::test_moddedResults", "serpentTools/tests/test_base_compare.py::BaseCompareTester::test_badArgs", "serpentTools/tests/test_base_compare.py::BaseCompareTester::test_badCompType", "serpentTools/tests/test_base_compare.py::BaseCompareTester::test_safeCompare", "serpentTools/tests/test_utils.py::VariableConverterTester::test_variableConversion", "serpentTools/tests/test_utils.py::VectorConverterTester::test_listOfInts", "serpentTools/tests/test_utils.py::VectorConverterTester::test_str2Arrays", "serpentTools/tests/test_utils.py::SplitValsTester::test_splitAtCols", "serpentTools/tests/test_utils.py::SplitValsTester::test_splitCopy", "serpentTools/tests/test_utils.py::SplitValsTester::test_splitVals", "serpentTools/tests/test_utils.py::CommonKeysTester::test_getKeys_missing", "serpentTools/tests/test_utils.py::CommonKeysTester::test_goodKeys_dict", "serpentTools/tests/test_utils.py::DirectCompareTester::test_acceptableHigh", "serpentTools/tests/test_utils.py::DirectCompareTester::test_acceptableLow", "serpentTools/tests/test_utils.py::DirectCompareTester::test_badTypes", "serpentTools/tests/test_utils.py::DirectCompareTester::test_diffShapes", "serpentTools/tests/test_utils.py::DirectCompareTester::test_dissimilarString", "serpentTools/tests/test_utils.py::DirectCompareTester::test_identicalString", "serpentTools/tests/test_utils.py::DirectCompareTester::test_notImplemented", "serpentTools/tests/test_utils.py::DirectCompareTester::test_outsideTols", "serpentTools/tests/test_utils.py::DirectCompareTester::test_stringArrays", 
"serpentTools/tests/test_utils.py::OverlapTester::test_overlap_absolute", "serpentTools/tests/test_utils.py::OverlapTester::test_overlap_badshapes", "serpentTools/tests/test_utils.py::OverlapTester::test_overlap_identical_1D", "serpentTools/tests/test_utils.py::OverlapTester::test_overlap_identical_2D", "serpentTools/tests/test_utils.py::OverlapTester::test_overlap_identical_3D", "serpentTools/tests/test_utils.py::OverlapTester::test_overlap_relative", "serpentTools/tests/test_utils.py::SplitDictionaryTester::test_keySet_all", "serpentTools/tests/test_utils.py::SplitDictionaryTester::test_noKeys" ]
[]
[]
[]
MIT License
3,022
[ "serpentTools/utils/docstrings.py", "docs/develop/utils.rst", "serpentTools/utils/__init__.py", "serpentTools/parsers/detector.py", "docs/changelog.rst", "serpentTools/utils/core.py", "serpentTools/messages.py", "serpentTools/objects/materials.py", "docs/develop/index.rst", "serpentTools/utils/compare.py", "serpentTools/objects/base.py", "serpentTools/parsers/results.py", "serpentTools/parsers/_collections.py", "serpentTools/samplers/depletion.py", "serpentTools/objects/containers.py", "serpentTools/parsers/depletion.py", "serpentTools/parsers/base.py", "docs/develop/comparisons.rst" ]
[ "serpentTools/utils/docstrings.py", "docs/develop/utils.rst", "serpentTools/utils/__init__.py", "serpentTools/parsers/detector.py", "docs/changelog.rst", "serpentTools/utils/core.py", "serpentTools/messages.py", "serpentTools/objects/materials.py", "docs/develop/index.rst", "serpentTools/utils/compare.py", "serpentTools/objects/base.py", "serpentTools/parsers/results.py", "serpentTools/parsers/_collections.py", "serpentTools/samplers/depletion.py", "serpentTools/objects/containers.py", "serpentTools/parsers/depletion.py", "serpentTools/parsers/base.py", "docs/develop/comparisons.rst" ]
oasis-open__cti-taxii-client-47
9c8130d5bcf793fde749a3b46bcb822168bcec30
2018-09-04 20:55:37
9c8130d5bcf793fde749a3b46bcb822168bcec30
diff --git a/taxii2client/__init__.py b/taxii2client/__init__.py index 36e5929..5536a20 100644 --- a/taxii2client/__init__.py +++ b/taxii2client/__init__.py @@ -498,7 +498,7 @@ class Collection(_TAXIIEndpoint): expires, or the operation completes. Args: - bundle (str): A STIX bundle with the objects to add. + bundle: A STIX bundle with the objects to add (string, dict, binary) wait_for_completion (bool): Whether to wait for the add operation to complete before returning poll_interval (int): If waiting for completion, how often to poll @@ -528,13 +528,21 @@ class Collection(_TAXIIEndpoint): } if isinstance(bundle, dict): - if six.PY2: - bundle = json.dumps(bundle, encoding="utf-8") - else: - bundle = json.dumps(bundle) + json_text = json.dumps(bundle, ensure_ascii=False) + data = json_text.encode("utf-8") + + elif isinstance(bundle, six.text_type): + data = bundle.encode("utf-8") + + elif isinstance(bundle, six.binary_type): + data = bundle + + else: + raise TypeError("Don't know how to handle type '{}'".format( + type(bundle).__name__)) status_json = self._conn.post(self.objects_url, headers=headers, - data=bundle) + data=data) status_url = urlparse.urljoin( self.url, @@ -875,25 +883,34 @@ class _HTTPConnection(object): return _to_json(resp) - def post(self, url, headers=None, params=None, data=None): + def post(self, url, headers=None, params=None, **kwargs): """Send a JSON POST request with the given request headers, additional URL query parameters, and the given JSON in the request body. The extra query parameters are merged with any which already exist in the - URL. + URL. The 'json' and 'data' parameters may not both be given. Args: url (str): URL to retrieve headers (dict): Any other headers to be added to the request. params: dictionary or bytes to be sent in the query string for the request. (optional) - data: data to post as dictionary, list of tuples, bytes, or - file-like object + json: json to send in the body of the Request. This must be a + JSON-serializable object. (optional) + data: raw request body data. May be a dictionary, list of tuples, + bytes, or file-like object to send in the body of the Request. + (optional) """ - merged_headers = self._merge_headers(headers) + if len(kwargs) > 1: + raise InvalidArgumentsError("Too many extra args ({} > 1)".format( + len(kwargs))) + + if kwargs: + kwarg = next(iter(kwargs)) + if kwarg not in ("json", "data"): + raise InvalidArgumentsError("Invalid kwarg: " + kwarg) - resp = self.session.post(url, headers=merged_headers, params=params, - data=data) + resp = self.session.post(url, headers=headers, params=params, **kwargs) resp.raise_for_status() return _to_json(resp)
Funny JSON handling I first started noticing this while working on the user-agent changes (#45), but this isn't directly related to those changes, so I am making a separate issue for this. One of the things I added (and forgot to mention in the commit message, oops) was to add parameter documentation to _HTTPConnection.post(), which for some reason had been missing. For the "data" parameter, I just copied text from the [Requests documentation](http://docs.python-requests.org/en/master/api/#requests.Session.post), since it's passed directly into Session.post(). Notice that JSON content is not listed as a legal value. That was odd, because aren't the TAXII services all JSON-based? Note also that there is a "json" parameter you can use specifically for JSON. Wouldn't it have made more sense to use that? This got me looking at the JSON handling. The usage of the "data" parameter within taxii2-client is via the Collection.add_objects() method; the actual value is obtained [here](https://github.com/oasis-open/cti-taxii-client/blob/d4fb580379b7ac0ea079cf8becd2348f4e878c9e/taxii2client/__init__.py?#L526-L530): ```python if isinstance(bundle, dict): if six.PY2: bundle = json.dumps(bundle, encoding="utf-8") else: bundle = json.dumps(bundle) ``` (I just picked master branch head commit as of this writing, to refer to; the aforementioned PR hasn't changed this part.) This code snip creates JSON which is subsequently passed via "data" parameter, instead of using the "json" parameter and letting Requests take care of the conversion. There may be some reinvention of the wheel here. What of that "encoding" parameter though? Why is it hard-coded, and why make encoding decisions in the Collection endpoint class anyway? To start with, here's what I think the encoding kwarg actually does: in case there are any binary values in the passed dict, they must be decoded to strings before being encoded as JSON (JSON is a textual format, after all). The encoding argument describes how to do that. I.e. the *en*coding argument is actually about *de*coding. What may have happened is a misunderstanding: that a decision needed to be made here about what to do with high-value codepoints, and the decision was to encode as utf-8. Lending credence to this idea is that the TAXII 2.0 spec actually defines the string type as `The string data type represents a finite-length string of valid characters from the Unicode coded character set [ISO10646] that are encoded in UTF-8.` So the spec actually mentions utf-8. Perhaps the idea was that a json.dumps() call could do double-duty, as a JSON-encode operation followed by a string encode operation. On the other hand, this code is written to behave differently depending on python version, and there is no usage of encoding on python3. If encoding needed to happen for python2, why not python3? On python3 the API changed: there actually is no "encoding" parameter in json.dumps(). Under this misunderstanding, that would mean that the result must remain text, and text is passed to _HTTPConnection.post(), then to Session.post(), which is invalid according to the documentation. So even if the misunderstanding were true, the code is incorrect. Also, the "ensure_ascii" param was left at default (True), which means there can be no high-value codepoints in the resulting JSON anyway. They will all be escaped using the format "\u1234". JSON specs I looked at do seem to recognize this as an escape syntax, but do we need to do that? Why not let them pass? 
Here's how I think it should work:

- If we actually expect people to pass text-as-binary data in their JSON-serializable objects, we should also allow people to tell us how that text was encoded, not hard-code an assumption.
- The above bullet is probably not worth worrying about; I can't see why anyone would want to do that. So I don't see any need to specify an encoding there, and have python-version-dependent code.
- Do we really intend for people to be able to pass anything they want to TAXII server endpoints? If we want to restrict this to JSON, maybe we should be using the "json" kwarg to Session.post(), and mirror that in _HTTPConnection.post(). If we want to allow anything, maybe we should mirror Session.post() and allow either "data=" or "json=" as kwargs. We would only need "json=" in our endpoint classes. (Btw, when using "json=", Requests automatically encodes the result as utf-8, so it would be compliant.)
- Don't use funny escape syntax unless you really have to. No point in layering additional escaping which serves no purpose.
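For reference, here is a minimal sketch of the two ways a JSON body can reach `requests.Session.post()` that the issue weighs, roughly matching what the patch above settles on; the URL, headers, and bundle contents are placeholders, and only the standard `json` and `requests` APIs are assumed:

```python
import json
import requests

session = requests.Session()
url = "https://example.com/api1/collections/123/objects/"  # placeholder
headers = {"Content-Type": "application/vnd.oasis.stix+json; version=2.0"}  # placeholder
bundle = {"type": "bundle", "id": "bundle--1234", "objects": []}  # placeholder

# Approach taken by the patch's add_objects(): serialize explicitly, keep
# non-ASCII characters unescaped, and hand UTF-8 bytes to the data= kwarg.
body = json.dumps(bundle, ensure_ascii=False).encode("utf-8")
session.post(url, headers=headers, data=body)

# Alternative discussed in the issue: pass the object via json= and let
# Requests serialize it and encode the result as UTF-8 on its own.
session.post(url, headers=headers, json=bundle)
```

The patch keeps the explicit `data=` path in `add_objects` but also lets `_HTTPConnection.post` accept either `data=` or `json=`, rejecting any other extra keyword argument.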
oasis-open/cti-taxii-client
diff --git a/taxii2client/test/test_client.py b/taxii2client/test/test_client.py index d80b2a3..0bae132 100644 --- a/taxii2client/test/test_client.py +++ b/taxii2client/test/test_client.py @@ -533,6 +533,34 @@ def test_add_object_to_collection_dict(writable_collection): assert status.pending_count == 0 [email protected] +def test_add_object_to_collection_bin(writable_collection): + responses.add(responses.POST, ADD_WRITABLE_OBJECTS_URL, + ADD_OBJECTS_RESPONSE, status=202, + content_type=MEDIA_TYPE_TAXII_V20) + + bin_bundle = STIX_BUNDLE.encode("utf-8") + + status = writable_collection.add_objects(bin_bundle) + + assert status.status == "complete" + assert status.total_count == 1 + assert status.success_count == 1 + assert len(status.successes) == 1 + assert status.failure_count == 0 + assert status.pending_count == 0 + + [email protected] +def test_add_object_to_collection_badtype(writable_collection): + responses.add(responses.POST, ADD_WRITABLE_OBJECTS_URL, + ADD_OBJECTS_RESPONSE, status=202, + content_type=MEDIA_TYPE_TAXII_V20) + + with pytest.raises(TypeError): + writable_collection.add_objects([1, 2, 3]) + + @responses.activate def test_add_object_rases_error_when_collection_id_does_not_match_url( bad_writable_collection): @@ -817,6 +845,19 @@ def test_collection_missing_can_write_property(collection_dict): assert "No 'can_write' in Collection for request 'https://example.com/api1/collections/91a7b528-80eb-42ed-a74d-c6fbd5a26116/'" == str(excinfo.value) +def test_conn_post_kwarg_errors(): + conn = _HTTPConnection() + + with pytest.raises(InvalidArgumentsError): + conn.post(DISCOVERY_URL, data=1, json=2) + + with pytest.raises(InvalidArgumentsError): + conn.post(DISCOVERY_URL, data=1, foo=2) + + with pytest.raises(InvalidArgumentsError): + conn.post(DISCOVERY_URL, foo=1) + + def test_user_agent_defaulting(): conn = _HTTPConnection(user_agent="foo/1.0") headers = conn._merge_headers({})
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 1 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest_v2", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "responses" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": null, "test_cmd": "pytest -xvs taxii2client/test/" }
attrs==22.2.0 certifi==2021.5.30 charset-normalizer==2.0.12 idna==3.10 importlib-metadata==4.8.3 iniconfig==1.1.1 packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 pytz==2025.2 requests==2.27.1 responses==0.17.0 six==1.17.0 -e git+https://github.com/oasis-open/cti-taxii-client.git@9c8130d5bcf793fde749a3b46bcb822168bcec30#egg=taxii2_client tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 zipp==3.6.0
name: cti-taxii-client channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - charset-normalizer==2.0.12 - idna==3.10 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytz==2025.2 - requests==2.27.1 - responses==0.17.0 - six==1.17.0 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - zipp==3.6.0 prefix: /opt/conda/envs/cti-taxii-client
[ "taxii2client/test/test_client.py::test_conn_post_kwarg_errors", "taxii2client/test/test_client.py::test_user_agent_defaulting", "taxii2client/test/test_client.py::test_user_agent_overriding", "taxii2client/test/test_client.py::test_user_agent_enforcing1", "taxii2client/test/test_client.py::test_user_agent_enforcing2", "taxii2client/test/test_client.py::test_user_agent_enforcing3", "taxii2client/test/test_client.py::test_header_merging", "taxii2client/test/test_client.py::test_header_merge_none" ]
[]
[ "taxii2client/test/test_client.py::test_server_discovery", "taxii2client/test/test_client.py::test_bad_json_response", "taxii2client/test/test_client.py::test_minimal_discovery_response", "taxii2client/test/test_client.py::test_discovery_with_no_default", "taxii2client/test/test_client.py::test_discovery_with_no_title", "taxii2client/test/test_client.py::test_api_root_no_title", "taxii2client/test/test_client.py::test_api_root_no_versions", "taxii2client/test/test_client.py::test_api_root_no_max_content_length", "taxii2client/test/test_client.py::test_api_root", "taxii2client/test/test_client.py::test_api_root_collections", "taxii2client/test/test_client.py::test_collection", "taxii2client/test/test_client.py::test_collection_unexpected_kwarg", "taxii2client/test/test_client.py::test_get_collection_objects", "taxii2client/test/test_client.py::test_get_object", "taxii2client/test/test_client.py::test_cannot_write_to_readonly_collection", "taxii2client/test/test_client.py::test_add_object_to_collection", "taxii2client/test/test_client.py::test_add_object_to_collection_dict", "taxii2client/test/test_client.py::test_add_object_to_collection_bin", "taxii2client/test/test_client.py::test_add_object_to_collection_badtype", "taxii2client/test/test_client.py::test_add_object_rases_error_when_collection_id_does_not_match_url", "taxii2client/test/test_client.py::test_cannot_read_from_writeonly_collection", "taxii2client/test/test_client.py::test_get_manifest", "taxii2client/test/test_client.py::test_get_status", "taxii2client/test/test_client.py::test_status_raw", "taxii2client/test/test_client.py::test_content_type_valid", "taxii2client/test/test_client.py::test_content_type_invalid", "taxii2client/test/test_client.py::test_url_filter_type", "taxii2client/test/test_client.py::test_filter_id", "taxii2client/test/test_client.py::test_filter_version", "taxii2client/test/test_client.py::test_filter_added_after", "taxii2client/test/test_client.py::test_filter_combo", "taxii2client/test/test_client.py::test_params_filter_unknown", "taxii2client/test/test_client.py::test_taxii_endpoint_raises_exception", "taxii2client/test/test_client.py::test_valid_content_type_for_connection", "taxii2client/test/test_client.py::test_invalid_content_type_for_connection", "taxii2client/test/test_client.py::test_status_missing_id_property", "taxii2client/test/test_client.py::test_status_missing_status_property", "taxii2client/test/test_client.py::test_status_missing_total_count_property", "taxii2client/test/test_client.py::test_status_missing_success_count_property", "taxii2client/test/test_client.py::test_status_missing_failure_count_property", "taxii2client/test/test_client.py::test_status_missing_pending_count_property", "taxii2client/test/test_client.py::test_collection_missing_id_property", "taxii2client/test/test_client.py::test_collection_missing_title_property", "taxii2client/test/test_client.py::test_collection_missing_can_read_property", "taxii2client/test/test_client.py::test_collection_missing_can_write_property" ]
[]
BSD 3-Clause "New" or "Revised" License
3,023
[ "taxii2client/__init__.py" ]
[ "taxii2client/__init__.py" ]
dask__dask-3944
ab1e21ca589d9ccf0b5f4a5a83161f0e175ef58b
2018-09-04 22:01:50
df1cee3b55706443303b85563e7c01e26611603d
rainwoodman: Sure. I am thinking of only doing the first fix (int cast) in this PR, and then use another PR to change all instance type checks to `Integral` from 'int'. mrocklin: > Sure. I am thinking of only doing the first fix (int cast) in this PR, and then use another PR to change all instance type checks to Integral from 'int'. No objection from me. I would like to include this PR in a bugfix release that goes out on Friday if possible. My guess is that we'll get things in easily by that time though.
diff --git a/dask/array/slicing.py b/dask/array/slicing.py index 964878798..4e170316f 100644 --- a/dask/array/slicing.py +++ b/dask/array/slicing.py @@ -391,7 +391,7 @@ def _slice_1d(dim_shape, lengths, index): ind = index - chunk_boundaries[i - 1] else: ind = index - return {i: ind} + return {int(i): int(ind)} assert isinstance(index, slice)
IndexError: tuple index out of range in optimization.py Hi, I apologize but unfortunately I can't reproduce this issue without copy and pasting a lot of code. I was hoping that perhaps this would look familiar to somebody here and they might be able to point me toward a workaround. If it looks like it's a more serious issue, I can try to isolate code to come up with a self-contained example. ``` 62 63 print("old_data.shape", old_data.shape, values_dset.shape) ---> 64 da.store(old_data, values_dset) 65 # f_new['values_' + str(z)][:] = old_data 66 ~/miniconda3/envs/cenv3/lib/python3.6/site-packages/dask/array/core.py in store(sources, targets, lock, regions, compute, return_stored, **kwargs) 959 960 if compute: --> 961 result.compute(**kwargs) 962 return None 963 else: ~/miniconda3/envs/cenv3/lib/python3.6/site-packages/dask/base.py in compute(self, **kwargs) 154 dask.base.compute 155 """ --> 156 (result,) = compute(self, traverse=False, **kwargs) 157 return result 158 ~/miniconda3/envs/cenv3/lib/python3.6/site-packages/dask/base.py in compute(*args, **kwargs) 400 keys = [x.__dask_keys__() for x in collections] 401 postcomputes = [x.__dask_postcompute__() for x in collections] --> 402 results = schedule(dsk, keys, **kwargs) 403 return repack([f(r, *a) for r, (f, a) in zip(results, postcomputes)]) 404 ~/miniconda3/envs/cenv3/lib/python3.6/site-packages/dask/threaded.py in get(dsk, result, cache, num_workers, **kwargs) 73 results = get_async(pool.apply_async, len(pool._pool), dsk, result, 74 cache=cache, get_id=_thread_get_id, ---> 75 pack_exception=pack_exception, **kwargs) 76 77 # Cleanup pools associated to dead threads ~/miniconda3/envs/cenv3/lib/python3.6/site-packages/dask/local.py in get_async(apply_async, num_workers, dsk, result, cache, get_id, rerun_exceptions_locally, pack_exception, raise_exception, callbacks, dumps, loads, **kwargs) 519 _execute_task(task, data) # Re-execute locally 520 else: --> 521 raise_exception(exc, tb) 522 res, worker_id = loads(res_info) 523 state['cache'][key] = res ~/miniconda3/envs/cenv3/lib/python3.6/site-packages/dask/compatibility.py in reraise(exc, tb) 67 if exc.__traceback__ is not tb: 68 raise exc.with_traceback(tb) ---> 69 raise exc 70 71 else: ~/miniconda3/envs/cenv3/lib/python3.6/site-packages/dask/local.py in execute_task(key, task_info, dumps, loads, get_id, pack_exception) 288 try: 289 task, data = loads(task_info) --> 290 result = _execute_task(task, data) 291 id = get_id() 292 result = dumps((result, id)) ~/miniconda3/envs/cenv3/lib/python3.6/site-packages/dask/local.py in _execute_task(arg, cache, dsk) 269 func, args = arg[0], arg[1:] 270 args2 = [_execute_task(a, cache) for a in args] --> 271 return func(*args2) 272 elif not ishashable(arg): 273 return arg ~/miniconda3/envs/cenv3/lib/python3.6/site-packages/dask/array/core.py in getter_inline(a, b, asarray, lock) 118 This inlining can be relevant to operations when running off of disk. 
119 """ --> 120 return getter(a, b, asarray=asarray, lock=lock) 121 122 ~/miniconda3/envs/cenv3/lib/python3.6/site-packages/dask/array/core.py in getter(a, b, asarray, lock) 83 c = a[b] 84 if asarray: ---> 85 c = np.asarray(c) 86 finally: 87 if lock: ~/miniconda3/envs/cenv3/lib/python3.6/site-packages/numpy/core/numeric.py in asarray(a, dtype, order) 499 500 """ --> 501 return array(a, dtype, copy=False, order=order) 502 503 ~/miniconda3/envs/cenv3/lib/python3.6/site-packages/dask/array/core.py in __array__(self, dtype, **kwargs) 1207 1208 def __array__(self, dtype=None, **kwargs): -> 1209 x = self.compute() 1210 if dtype and x.dtype != dtype: 1211 x = x.astype(dtype) ~/miniconda3/envs/cenv3/lib/python3.6/site-packages/dask/base.py in compute(self, **kwargs) 154 dask.base.compute 155 """ --> 156 (result,) = compute(self, traverse=False, **kwargs) 157 return result 158 ~/miniconda3/envs/cenv3/lib/python3.6/site-packages/dask/base.py in compute(*args, **kwargs) 397 collections=collections) 398 --> 399 dsk = collections_to_dsk(collections, optimize_graph, **kwargs) 400 keys = [x.__dask_keys__() for x in collections] 401 postcomputes = [x.__dask_postcompute__() for x in collections] ~/miniconda3/envs/cenv3/lib/python3.6/site-packages/dask/base.py in collections_to_dsk(collections, optimize_graph, **kwargs) 192 193 dsk = merge(*(opt(dsk, keys, **kwargs) --> 194 for opt, (dsk, keys) in groups.items())) 195 else: 196 dsk, _ = _extract_graph_and_keys(collections) ~/miniconda3/envs/cenv3/lib/python3.6/site-packages/dask/base.py in <genexpr>(.0) 192 193 dsk = merge(*(opt(dsk, keys, **kwargs) --> 194 for opt, (dsk, keys) in groups.items())) 195 else: 196 dsk, _ = _extract_graph_and_keys(collections) ~/miniconda3/envs/cenv3/lib/python3.6/site-packages/dask/array/optimization.py in optimize(dsk, keys, fuse_keys, fast_functions, inline_functions_fast_functions, rename_fused_keys, **kwargs) 44 else: 45 dsk4 = dsk3 ---> 46 dsk5 = optimize_slices(dsk4) 47 48 return dsk5 ~/miniconda3/envs/cenv3/lib/python3.6/site-packages/dask/array/optimization.py in optimize_slices(dsk) 132 break 133 try: --> 134 c_index = fuse_slice(b_index, a_index) 135 # rely on fact that nested gets never decrease in 136 # strictness e.g. `(getter_nofancy, (getter, ...))` never ~/miniconda3/envs/cenv3/lib/python3.6/site-packages/dask/array/optimization.py in fuse_slice(a, b) 280 result.append(a[i]) 281 continue --> 282 while b[j] is None: # insert any Nones on the rhs 283 result.append(None) 284 j += 1 IndexError: tuple index out of range ``` Python: 3.6.2 Dask: 0.18.1
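The patch above casts the chunk index and offset to builtin `int` in `_slice_1d`, and the maintainer hint discusses later switching `isinstance` checks from `int` to `numbers.Integral`. The distinction that matters is sketched below (a minimal illustration assuming only NumPy and the standard library; it is not dask code):

```python
import numbers
import numpy as np

i = np.int64(3)

print(isinstance(i, int))               # False on Python 3: NumPy scalars are not builtin ints
print(isinstance(i, numbers.Integral))  # True: NumPy registers its integer scalars with the ABC

# Normalizing at the source, as the patch does with {int(i): int(ind)},
# guarantees that later isinstance(..., int) checks see plain Python ints.
index, offset = int(np.int64(2)), int(np.int64(5))
print(type(index) is int and type(offset) is int)  # True
```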
dask/dask
diff --git a/dask/array/tests/test_optimization.py b/dask/array/tests/test_optimization.py index 5ba01d564..63bcb2686 100644 --- a/dask/array/tests/test_optimization.py +++ b/dask/array/tests/test_optimization.py @@ -273,3 +273,15 @@ def test_turn_off_fusion(): assert dask.get(a, y.__dask_keys__()) == dask.get(b, y.__dask_keys__()) assert len(a) < len(b) + + +def test_gh3937(): + # test for github issue #3937 + x = da.from_array([1, 2, 3.], (2,)) + x = da.concatenate((x, [x[-1]])) + y = x.rechunk((2,)) + # This will produce Integral type indices that are not ints (np.int64), failing + # the optimizer + y = da.coarsen(np.sum, y, {0: 2}) + # How to trigger the optimizer explicitly? + y.compute()
{ "commit_name": "merge_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 1 }
1.23
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[complete]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 click==8.0.4 cloudpickle==2.2.1 -e git+https://github.com/dask/dask.git@ab1e21ca589d9ccf0b5f4a5a83161f0e175ef58b#egg=dask distributed==1.28.1 HeapDict==1.0.1 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work locket==1.0.0 more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work msgpack==1.0.5 numpy==1.19.5 packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pandas==1.1.5 partd==1.2.0 pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work psutil==7.0.0 py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==6.0.1 six==1.17.0 sortedcontainers==2.4.0 tblib==1.7.0 toml @ file:///tmp/build/80754af9/toml_1616166611790/work toolz==0.12.0 tornado==6.1 typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zict==2.1.0 zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: dask channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - click==8.0.4 - cloudpickle==2.2.1 - distributed==1.28.1 - heapdict==1.0.1 - locket==1.0.0 - msgpack==1.0.5 - numpy==1.19.5 - pandas==1.1.5 - partd==1.2.0 - psutil==7.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==6.0.1 - six==1.17.0 - sortedcontainers==2.4.0 - tblib==1.7.0 - toolz==0.12.0 - tornado==6.1 - zict==2.1.0 prefix: /opt/conda/envs/dask
[ "dask/array/tests/test_optimization.py::test_gh3937" ]
[]
[ "dask/array/tests/test_optimization.py::test_fuse_getitem", "dask/array/tests/test_optimization.py::test_fuse_getitem_lock", "dask/array/tests/test_optimization.py::test_optimize_with_getitem_fusion", "dask/array/tests/test_optimization.py::test_optimize_slicing", "dask/array/tests/test_optimization.py::test_fuse_slice", "dask/array/tests/test_optimization.py::test_fuse_slice_with_lists", "dask/array/tests/test_optimization.py::test_nonfusible_fancy_indexing", "dask/array/tests/test_optimization.py::test_hard_fuse_slice_cases", "dask/array/tests/test_optimization.py::test_dont_fuse_numpy_arrays", "dask/array/tests/test_optimization.py::test_minimize_data_transfer", "dask/array/tests/test_optimization.py::test_fuse_slices_with_alias", "dask/array/tests/test_optimization.py::test_dont_fuse_fancy_indexing_in_getter_nofancy", "dask/array/tests/test_optimization.py::test_fuse_getter_with_asarray[10]", "dask/array/tests/test_optimization.py::test_fuse_getter_with_asarray[5]", "dask/array/tests/test_optimization.py::test_fuse_getter_with_asarray[3]", "dask/array/tests/test_optimization.py::test_remove_no_op_slices_if_get_is_not_getter_or_getter_nofancy[getter-False]", "dask/array/tests/test_optimization.py::test_remove_no_op_slices_if_get_is_not_getter_or_getter_nofancy[getter_nofancy-False]", "dask/array/tests/test_optimization.py::test_remove_no_op_slices_if_get_is_not_getter_or_getter_nofancy[getitem-True]", "dask/array/tests/test_optimization.py::test_turn_off_fusion" ]
[]
BSD 3-Clause "New" or "Revised" License
3,024
[ "dask/array/slicing.py" ]
[ "dask/array/slicing.py" ]
zopefoundation__zope.schema-59
a409cbf2f88a19f9d4fc1bb0b8510634044bcc8c
2018-09-05 12:04:05
0a719f2ded189630a0a77e9292a66a3662c6512c
diff --git a/CHANGES.rst b/CHANGES.rst index 99b9c7c..9d29d73 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -115,6 +115,17 @@ - Make ``SimpleVocabulary`` and ``SimpleTerm`` have value-based equality and hashing methods. +- All fields of the schema of an ``Object`` field are bound to the + top-level value being validated before attempting validation of + their particular attribute. Previously only ``IChoice`` fields were + bound. See `issue 17 + <https://github.com/zopefoundation/zope.schema/issues/17>`_. + +- Share the internal logic of ``Object`` field validation and + ``zope.schema.getValidationErrors``. See `issue 57 + <https://github.com/zopefoundation/zope.schema/issues/57>`_. + + 4.5.0 (2017-07-10) ================== diff --git a/docs/api.rst b/docs/api.rst index 7314f20..5a1b9b3 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -9,97 +9,97 @@ better guide to the intended usage. Interfaces ========== -.. autoclass:: zope.schema.interfaces.IField -.. autoclass:: zope.schema.interfaces.IFromUnicode -.. autoclass:: zope.schema.interfaces.IChoice -.. autoclass:: zope.schema.interfaces.IContextAwareDefaultFactory -.. autoclass:: zope.schema.interfaces.IOrderable -.. autoclass:: zope.schema.interfaces.ILen -.. autoclass:: zope.schema.interfaces.IMinMax -.. autoclass:: zope.schema.interfaces.IMinMaxLen -.. autoclass:: zope.schema.interfaces.IInterfaceField -.. autoclass:: zope.schema.interfaces.IBool -.. autoclass:: zope.schema.interfaces.IObject -.. autoclass:: zope.schema.interfaces.IDict +.. autointerface:: zope.schema.interfaces.IField +.. autointerface:: zope.schema.interfaces.IFromUnicode +.. autointerface:: zope.schema.interfaces.IChoice +.. autointerface:: zope.schema.interfaces.IContextAwareDefaultFactory +.. autointerface:: zope.schema.interfaces.IOrderable +.. autointerface:: zope.schema.interfaces.ILen +.. autointerface:: zope.schema.interfaces.IMinMax +.. autointerface:: zope.schema.interfaces.IMinMaxLen +.. autointerface:: zope.schema.interfaces.IInterfaceField +.. autointerface:: zope.schema.interfaces.IBool +.. autointerface:: zope.schema.interfaces.IObject +.. autointerface:: zope.schema.interfaces.IDict Strings ------- -.. autoclass:: zope.schema.interfaces.IBytes -.. autoclass:: zope.schema.interfaces.IBytesLine -.. autoclass:: zope.schema.interfaces.IText -.. autoclass:: zope.schema.interfaces.ITextLine -.. autoclass:: zope.schema.interfaces.IASCII -.. autoclass:: zope.schema.interfaces.IASCIILine +.. autointerface:: zope.schema.interfaces.IBytes +.. autointerface:: zope.schema.interfaces.IBytesLine +.. autointerface:: zope.schema.interfaces.IText +.. autointerface:: zope.schema.interfaces.ITextLine +.. autointerface:: zope.schema.interfaces.IASCII +.. autointerface:: zope.schema.interfaces.IASCIILine -.. autoclass:: zope.schema.interfaces.IPassword -.. autoclass:: zope.schema.interfaces.IURI -.. autoclass:: zope.schema.interfaces.IId -.. autoclass:: zope.schema.interfaces.IDottedName +.. autointerface:: zope.schema.interfaces.IPassword +.. autointerface:: zope.schema.interfaces.IURI +.. autointerface:: zope.schema.interfaces.IId +.. autointerface:: zope.schema.interfaces.IDottedName Numbers ------- -.. autoclass:: zope.schema.interfaces.INumber -.. autoclass:: zope.schema.interfaces.IComplex -.. autoclass:: zope.schema.interfaces.IReal -.. autoclass:: zope.schema.interfaces.IRational -.. autoclass:: zope.schema.interfaces.IIntegral +.. autointerface:: zope.schema.interfaces.INumber +.. autointerface:: zope.schema.interfaces.IComplex +.. 
autointerface:: zope.schema.interfaces.IReal +.. autointerface:: zope.schema.interfaces.IRational +.. autointerface:: zope.schema.interfaces.IIntegral -.. autoclass:: zope.schema.interfaces.IInt -.. autoclass:: zope.schema.interfaces.IFloat -.. autoclass:: zope.schema.interfaces.IDecimal +.. autointerface:: zope.schema.interfaces.IInt +.. autointerface:: zope.schema.interfaces.IFloat +.. autointerface:: zope.schema.interfaces.IDecimal Date/Time --------- -.. autoclass:: zope.schema.interfaces.IDatetime -.. autoclass:: zope.schema.interfaces.IDate -.. autoclass:: zope.schema.interfaces.ITimedelta -.. autoclass:: zope.schema.interfaces.ITime +.. autointerface:: zope.schema.interfaces.IDatetime +.. autointerface:: zope.schema.interfaces.IDate +.. autointerface:: zope.schema.interfaces.ITimedelta +.. autointerface:: zope.schema.interfaces.ITime Collections ----------- -.. autoclass:: zope.schema.interfaces.IIterable -.. autoclass:: zope.schema.interfaces.IContainer -.. autoclass:: zope.schema.interfaces.ICollection -.. autoclass:: zope.schema.interfaces.ISequence -.. autoclass:: zope.schema.interfaces.IMutableSequence -.. autoclass:: zope.schema.interfaces.IUnorderedCollection -.. autoclass:: zope.schema.interfaces.IAbstractSet -.. autoclass:: zope.schema.interfaces.IAbstractBag - -.. autoclass:: zope.schema.interfaces.ITuple -.. autoclass:: zope.schema.interfaces.IList -.. autoclass:: zope.schema.interfaces.ISet -.. autoclass:: zope.schema.interfaces.IFrozenSet +.. autointerface:: zope.schema.interfaces.IIterable +.. autointerface:: zope.schema.interfaces.IContainer +.. autointerface:: zope.schema.interfaces.ICollection +.. autointerface:: zope.schema.interfaces.ISequence +.. autointerface:: zope.schema.interfaces.IMutableSequence +.. autointerface:: zope.schema.interfaces.IUnorderedCollection +.. autointerface:: zope.schema.interfaces.IAbstractSet +.. autointerface:: zope.schema.interfaces.IAbstractBag + +.. autointerface:: zope.schema.interfaces.ITuple +.. autointerface:: zope.schema.interfaces.IList +.. autointerface:: zope.schema.interfaces.ISet +.. autointerface:: zope.schema.interfaces.IFrozenSet Events ------ -.. autoclass:: zope.schema.interfaces.IBeforeObjectAssignedEvent -.. autoclass:: zope.schema.interfaces.IFieldEvent -.. autoclass:: zope.schema.interfaces.IFieldUpdatedEvent +.. autointerface:: zope.schema.interfaces.IBeforeObjectAssignedEvent +.. autointerface:: zope.schema.interfaces.IFieldEvent +.. autointerface:: zope.schema.interfaces.IFieldUpdatedEvent Vocabularies ------------ -.. autoclass:: zope.schema.interfaces.ITerm -.. autoclass:: zope.schema.interfaces.ITokenizedTerm -.. autoclass:: zope.schema.interfaces.ITitledTokenizedTerm -.. autoclass:: zope.schema.interfaces.ISource -.. autoclass:: zope.schema.interfaces.ISourceQueriables -.. autoclass:: zope.schema.interfaces.IContextSourceBinder -.. autoclass:: zope.schema.interfaces.IBaseVocabulary -.. autoclass:: zope.schema.interfaces.IIterableVocabulary -.. autoclass:: zope.schema.interfaces.IIterableSource -.. autoclass:: zope.schema.interfaces.IVocabulary -.. autoclass:: zope.schema.interfaces.IVocabularyTokenized -.. autoclass:: zope.schema.interfaces.ITreeVocabulary -.. autoclass:: zope.schema.interfaces.IVocabularyRegistry -.. autoclass:: zope.schema.interfaces.IVocabularyFactory +.. autointerface:: zope.schema.interfaces.ITerm +.. autointerface:: zope.schema.interfaces.ITokenizedTerm +.. autointerface:: zope.schema.interfaces.ITitledTokenizedTerm +.. autointerface:: zope.schema.interfaces.ISource +.. 
autointerface:: zope.schema.interfaces.ISourceQueriables +.. autointerface:: zope.schema.interfaces.IContextSourceBinder +.. autointerface:: zope.schema.interfaces.IBaseVocabulary +.. autointerface:: zope.schema.interfaces.IIterableVocabulary +.. autointerface:: zope.schema.interfaces.IIterableSource +.. autointerface:: zope.schema.interfaces.IVocabulary +.. autointerface:: zope.schema.interfaces.IVocabularyTokenized +.. autointerface:: zope.schema.interfaces.ITreeVocabulary +.. autointerface:: zope.schema.interfaces.IVocabularyRegistry +.. autointerface:: zope.schema.interfaces.IVocabularyFactory Exceptions ---------- diff --git a/docs/validation.rst b/docs/validation.rst index 01e4e68..0771b07 100644 --- a/docs/validation.rst +++ b/docs/validation.rst @@ -4,13 +4,11 @@ Schema Validation There are two helper methods to verify schemas and interfaces: -:func:`~.getValidationErrors` - first validates via the zope.schema field validators. If that succeeds the - invariants are checked. -:func:`~.getSchemaValidationErrors` - *only* validates via the zope.schema field validators. The invariants are - *not* checked. +.. autofunction:: zope.schema.getValidationErrors +.. autofunction:: zope.schema.getSchemaValidationErrors +Invariants are `documented by zope.interface +<https://zopeinterface.readthedocs.io/en/latest/README.html#invariants>`_. Create an interface to validate against: diff --git a/src/zope/schema/_bootstrapfields.py b/src/zope/schema/_bootstrapfields.py index 5904068..cde9df0 100644 --- a/src/zope/schema/_bootstrapfields.py +++ b/src/zope/schema/_bootstrapfields.py @@ -18,31 +18,41 @@ __docformat__ = 'restructuredtext' import decimal import fractions import numbers +import threading from math import isinf from zope.interface import Attribute +from zope.interface import Invalid +from zope.interface import Interface from zope.interface import providedBy from zope.interface import implementer +from zope.interface.interfaces import IInterface +from zope.interface.interfaces import IMethod + +from zope.event import notify -from zope.schema._bootstrapinterfaces import ValidationError from zope.schema._bootstrapinterfaces import ConstraintNotSatisfied +from zope.schema._bootstrapinterfaces import IBeforeObjectAssignedEvent from zope.schema._bootstrapinterfaces import IContextAwareDefaultFactory from zope.schema._bootstrapinterfaces import IFromUnicode +from zope.schema._bootstrapinterfaces import IValidatable from zope.schema._bootstrapinterfaces import NotAContainer from zope.schema._bootstrapinterfaces import NotAnIterator from zope.schema._bootstrapinterfaces import RequiredMissing +from zope.schema._bootstrapinterfaces import SchemaNotCorrectlyImplemented +from zope.schema._bootstrapinterfaces import SchemaNotFullyImplemented +from zope.schema._bootstrapinterfaces import SchemaNotProvided from zope.schema._bootstrapinterfaces import StopValidation from zope.schema._bootstrapinterfaces import TooBig from zope.schema._bootstrapinterfaces import TooLong from zope.schema._bootstrapinterfaces import TooShort from zope.schema._bootstrapinterfaces import TooSmall +from zope.schema._bootstrapinterfaces import ValidationError from zope.schema._bootstrapinterfaces import WrongType from zope.schema._compat import text_type from zope.schema._compat import integer_types -from zope.schema._schema import getFields - class _NotGiven(object): @@ -98,6 +108,17 @@ class DefaultProperty(ValidatedProperty): return value +def getFields(schema): + """Return a dictionary containing all the Fields in a schema. 
+ """ + fields = {} + for name in schema: + attr = schema[name] + if IValidatable.providedBy(attr): + fields[name] = attr + return fields + + class Field(Attribute): # Type restrictions, if any @@ -187,10 +208,10 @@ class Field(Attribute): def constraint(self, value): return True - def bind(self, object): + def bind(self, context): clone = self.__class__.__new__(self.__class__) clone.__dict__.update(self.__dict__) - clone.context = object + clone.context = context return clone def validate(self, value): @@ -655,3 +676,184 @@ class Int(Integral): """ _type = integer_types _unicode_converters = (int,) + + +class _ObjectsBeingValidated(threading.local): + + def __init__(self): + super(_ObjectsBeingValidated, self).__init__() + self.ids_being_validated = set() + + +def get_schema_validation_errors(schema, value, + _validating_objects=_ObjectsBeingValidated()): + """ + Validate that *value* conforms to the schema interface *schema*. + + All :class:`zope.schema.interfaces.IField` members of the *schema* + are validated after being bound to *value*. (Note that we do not check for + arbitrary :class:`zope.interface.Attribute` members being present.) + + :return: A `dict` mapping field names to `ValidationError` subclasses. + A non-empty return value means that validation failed. + """ + errors = {} + # Interface can be used as schema property for Object fields that plan to + # hold values of any type. + # Because Interface does not include any Attribute, it is obviously not + # worth looping on its methods and filter them all out. + if schema is Interface: + return errors + # if `value` is part of a cyclic graph, we need to break the cycle to avoid + # infinite recursion. Collect validated objects in a thread local dict by + # it's python represenation. A previous version was setting a volatile + # attribute which didn't work with security proxy + id_value = id(value) + ids_being_validated = _validating_objects.ids_being_validated + if id_value in ids_being_validated: + return errors + ids_being_validated.add(id_value) + # (If we have gotten here, we know that `value` provides an interface + # other than zope.interface.Interface; + # iow, we can rely on the fact that it is an instance + # that supports attribute assignment.) + + try: + for name in schema.names(all=True): + attribute = schema[name] + if IMethod.providedBy(attribute): + continue # pragma: no cover + + try: + if IValidatable.providedBy(attribute): + # validate attributes that are fields + field_value = getattr(value, name) + attribute = attribute.bind(value) + attribute.validate(field_value) + except ValidationError as error: + errors[name] = error + except AttributeError as error: + # property for the given name is not implemented + errors[name] = SchemaNotFullyImplemented(error).with_field_and_value(attribute, None) + finally: + ids_being_validated.remove(id_value) + return errors + + +def get_validation_errors(schema, value, validate_invariants=True): + """ + Validate that *value* conforms to the schema interface *schema*. + + This includes checking for any schema validation errors (using + `get_schema_validation_errors`). If that succeeds, and + *validate_invariants* is true, then we proceed to check for any + declared invariants. + + Note that this does not include a check to see if the *value* + actually provides the given *schema*. + + :return: If there were any validation errors, either schema or + invariant, return a two tuple (schema_error_dict, + invariant_error_list). 
If there were no errors, returns a + two-tuple where both members are empty. + """ + schema_error_dict = get_schema_validation_errors(schema, value) + invariant_errors = [] + # Only validate invariants if there were no previous errors. Previous + # errors could be missing attributes which would most likely make an + # invariant raise an AttributeError. + + if validate_invariants and not schema_error_dict: + try: + schema.validateInvariants(value, invariant_errors) + except Invalid: + # validateInvariants raises a wrapper error around + # all the errors it got if it got errors, in addition + # to appending them to the errors list. We don't want + # that, we raise our own error. + pass + + return (schema_error_dict, invariant_errors) + + +class Object(Field): + """ + Implementation of :class:`zope.schema.interfaces.IObject`. + """ + schema = None + + def __init__(self, schema=_NotGiven, **kw): + """ + Object(schema=<Not Given>, *, validate_invariants=True, **kwargs) + + Create an `~.IObject` field. The keyword arguments are as for `~.Field`. + + .. versionchanged:: 4.6.0 + Add the keyword argument *validate_invariants*. When true (the default), + the schema's ``validateInvariants`` method will be invoked to check + the ``@invariant`` properties of the schema. + .. versionchanged:: 4.6.0 + The *schema* argument can be ommitted in a subclass + that specifies a ``schema`` attribute. + """ + if schema is _NotGiven: + schema = self.schema + + if not IInterface.providedBy(schema): + raise WrongType + + self.schema = schema + self.validate_invariants = kw.pop('validate_invariants', True) + super(Object, self).__init__(**kw) + + def _validate(self, value): + super(Object, self)._validate(value) + + # schema has to be provided by value + if not self.schema.providedBy(value): + raise SchemaNotProvided(self.schema, value).with_field_and_value(self, value) + + # check the value against schema + schema_error_dict, invariant_errors = get_validation_errors( + self.schema, + value, + self.validate_invariants + ) + + if schema_error_dict or invariant_errors: + errors = list(schema_error_dict.values()) + invariant_errors + exception = SchemaNotCorrectlyImplemented( + errors, + self.__name__ + ).with_field_and_value(self, value) + exception.schema_errors = schema_error_dict + exception.invariant_errors = invariant_errors + try: + raise exception + finally: + # Break cycles + del exception + del invariant_errors + del schema_error_dict + del errors + + def set(self, object, value): + # Announce that we're going to assign the value to the object. + # Motivation: Widgets typically like to take care of policy-specific + # actions, like establishing location. + event = BeforeObjectAssignedEvent(value, self.__name__, object) + notify(event) + # The event subscribers are allowed to replace the object, thus we need + # to replace our previous value. 
+ value = event.object + super(Object, self).set(object, value) + + +@implementer(IBeforeObjectAssignedEvent) +class BeforeObjectAssignedEvent(object): + """An object is going to be assigned to an attribute on another object.""" + + def __init__(self, object, name, context): + self.object = object + self.name = name + self.context = context diff --git a/src/zope/schema/_bootstrapinterfaces.py b/src/zope/schema/_bootstrapinterfaces.py index 40d85e2..eeb4f0e 100644 --- a/src/zope/schema/_bootstrapinterfaces.py +++ b/src/zope/schema/_bootstrapinterfaces.py @@ -16,6 +16,7 @@ from functools import total_ordering import zope.interface +from zope.interface import Attribute from zope.schema._messageid import _ @@ -108,6 +109,31 @@ class NotAnIterator(ValidationError): __doc__ = _("""Not an iterator""") + +class WrongContainedType(ValidationError): + __doc__ = _("""Wrong contained type""") + + +class SchemaNotCorrectlyImplemented(WrongContainedType): + __doc__ = _("""An object failed schema or invariant validation.""") + + #: A dictionary mapping failed attribute names of the + #: *value* to the underlying exception + schema_errors = None + + #: A list of exceptions from validating the invariants + #: of the schema. + invariant_errors = () + + +class SchemaNotFullyImplemented(ValidationError): + __doc__ = _("""Schema not fully implemented""") + + +class SchemaNotProvided(ValidationError): + __doc__ = _("""Schema not provided""") + + class IFromUnicode(zope.interface.Interface): """Parse a unicode string to a value @@ -132,6 +158,36 @@ class IContextAwareDefaultFactory(zope.interface.Interface): """Returns a default value for the field.""" +class IBeforeObjectAssignedEvent(zope.interface.Interface): + """An object is going to be assigned to an attribute on another object. + + Subscribers to this event can change the object on this event to change + what object is going to be assigned. This is useful, e.g. for wrapping + or replacing objects before they get assigned to conform to application + policy. + """ + + object = Attribute("The object that is going to be assigned.") + + name = Attribute("The name of the attribute under which the object " + "will be assigned.") + + context = Attribute("The context object where the object will be " + "assigned to.") + + +class IValidatable(zope.interface.Interface): + # Internal interface, the base for IField, but used to prevent + # import recursion. This should *not* be implemented by anything + # other than IField. + def validate(value): + """Validate that the given value is a valid field value. + + Returns nothing but raises an error if the value is invalid. + It checks everything specific to a Field and also checks + with the additional constraint. 
+ """ + class NO_VALUE(object): def __repr__(self): # pragma: no cover return '<NO_VALUE>' diff --git a/src/zope/schema/_field.py b/src/zope/schema/_field.py index c9dcd4b..f659877 100644 --- a/src/zope/schema/_field.py +++ b/src/zope/schema/_field.py @@ -27,20 +27,16 @@ from datetime import timedelta from datetime import time import decimal import re -import threading -from zope.event import notify + from zope.interface import classImplements from zope.interface import implementer -from zope.interface import Interface -from zope.interface import Invalid from zope.interface.interfaces import IInterface -from zope.interface.interfaces import IMethod + from zope.schema.interfaces import IASCII from zope.schema.interfaces import IASCIILine from zope.schema.interfaces import IBaseVocabulary -from zope.schema.interfaces import IBeforeObjectAssignedEvent from zope.schema.interfaces import IBool from zope.schema.interfaces import IBytes from zope.schema.interfaces import IBytesLine @@ -89,9 +85,6 @@ from zope.schema.interfaces import InvalidValue from zope.schema.interfaces import WrongType from zope.schema.interfaces import WrongContainedType from zope.schema.interfaces import NotUnique -from zope.schema.interfaces import SchemaNotProvided -from zope.schema.interfaces import SchemaNotCorrectlyImplemented -from zope.schema.interfaces import SchemaNotFullyImplemented from zope.schema.interfaces import InvalidURI from zope.schema.interfaces import InvalidId from zope.schema.interfaces import InvalidDottedName @@ -113,6 +106,7 @@ from zope.schema._bootstrapfields import Rational from zope.schema._bootstrapfields import Real from zope.schema._bootstrapfields import MinMaxLen from zope.schema._bootstrapfields import _NotGiven +from zope.schema._bootstrapfields import Object from zope.schema.fieldproperty import FieldProperty from zope.schema.vocabulary import getVocabularyRegistry from zope.schema.vocabulary import SimpleVocabulary @@ -150,6 +144,8 @@ classImplements(Rational, IRational) classImplements(Integral, IIntegral) classImplements(Int, IInt) +classImplements(Object, IObject) + @implementer(ISourceText) @@ -646,13 +642,13 @@ class Collection(MinMaxLen, Iterable): if unique is not _NotGiven: self.unique = unique - def bind(self, object): + def bind(self, context): """See zope.schema._bootstrapinterfaces.IField.""" - clone = super(Collection, self).bind(object) + clone = super(Collection, self).bind(context) # binding value_type is necessary for choices with named vocabularies, # and possibly also for other fields. if clone.value_type is not None: - clone.value_type = clone.value_type.bind(object) + clone.value_type = clone.value_type.bind(context) return clone def _validate(self, value): @@ -728,141 +724,6 @@ class FrozenSet(_AbstractSet): _type = frozenset -VALIDATED_VALUES = threading.local() - - -def _validate_fields(schema, value): - errors = {} - # Interface can be used as schema property for Object fields that plan to - # hold values of any type. - # Because Interface does not include any Attribute, it is obviously not - # worth looping on its methods and filter them all out. - if schema is Interface: - return errors - # if `value` is part of a cyclic graph, we need to break the cycle to avoid - # infinite recursion. Collect validated objects in a thread local dict by - # it's python represenation. 
A previous version was setting a volatile - # attribute which didn't work with security proxy - if id(value) in VALIDATED_VALUES.__dict__: - return errors - VALIDATED_VALUES.__dict__[id(value)] = True - # (If we have gotten here, we know that `value` provides an interface - # other than zope.interface.Interface; - # iow, we can rely on the fact that it is an instance - # that supports attribute assignment.) - try: - for name in schema.names(all=True): - attribute = schema[name] - if IMethod.providedBy(attribute): - continue # pragma: no cover - - try: - if IChoice.providedBy(attribute): - # Choice must be bound before validation otherwise - # IContextSourceBinder is not iterable in validation - bound = attribute.bind(value) - bound.validate(getattr(value, name)) - elif IField.providedBy(attribute): - # validate attributes that are fields - attribute.validate(getattr(value, name)) - except ValidationError as error: - errors[name] = error - except AttributeError as error: - # property for the given name is not implemented - errors[name] = SchemaNotFullyImplemented(error).with_field_and_value(attribute, None) - finally: - del VALIDATED_VALUES.__dict__[id(value)] - return errors - - -@implementer(IObject) -class Object(Field): - __doc__ = IObject.__doc__ - schema = None - - def __init__(self, schema=_NotGiven, **kw): - """ - Object(schema=<Not Given>, *, validate_invariants=True, **kwargs) - - Create an `~.IObject` field. The keyword arguments are as for `~.Field`. - - .. versionchanged:: 4.6.0 - Add the keyword argument *validate_invariants*. When true (the default), - the schema's ``validateInvariants`` method will be invoked to check - the ``@invariant`` properties of the schema. - .. versionchanged:: 4.6.0 - The *schema* argument can be ommitted in a subclass - that specifies a ``schema`` attribute. - """ - if schema is _NotGiven: - schema = self.schema - - if not IInterface.providedBy(schema): - raise WrongType - - self.schema = schema - self.validate_invariants = kw.pop('validate_invariants', True) - super(Object, self).__init__(**kw) - - def _validate(self, value): - super(Object, self)._validate(value) - - # schema has to be provided by value - if not self.schema.providedBy(value): - raise SchemaNotProvided(self.schema, value).with_field_and_value(self, value) - - # check the value against schema - schema_error_dict = _validate_fields(self.schema, value) - invariant_errors = [] - if self.validate_invariants: - try: - self.schema.validateInvariants(value, invariant_errors) - except Invalid: - # validateInvariants raises a wrapper error around - # all the errors it got if it got errors, in addition - # to appending them to the errors list. We don't want - # that, we raise our own error. - pass - - if schema_error_dict or invariant_errors: - errors = list(schema_error_dict.values()) + invariant_errors - exception = SchemaNotCorrectlyImplemented( - errors, - self.__name__ - ).with_field_and_value(self, value) - exception.schema_errors = schema_error_dict - exception.invariant_errors = invariant_errors - try: - raise exception - finally: - # Break cycles - del exception - del invariant_errors - del schema_error_dict - del errors - - def set(self, object, value): - # Announce that we're going to assign the value to the object. - # Motivation: Widgets typically like to take care of policy-specific - # actions, like establishing location. 
- event = BeforeObjectAssignedEvent(value, self.__name__, object) - notify(event) - # The event subscribers are allowed to replace the object, thus we need - # to replace our previous value. - value = event.object - super(Object, self).set(object, value) - - -@implementer(IBeforeObjectAssignedEvent) -class BeforeObjectAssignedEvent(object): - """An object is going to be assigned to an attribute on another object.""" - - def __init__(self, object, name, context): - self.object = object - self.name = name - self.context = context - - @implementer(IMapping) class Mapping(MinMaxLen, Iterable): """ diff --git a/src/zope/schema/_schema.py b/src/zope/schema/_schema.py index 50b3a49..b64b7b6 100644 --- a/src/zope/schema/_schema.py +++ b/src/zope/schema/_schema.py @@ -14,26 +14,24 @@ """Schema convenience functions """ -import zope.interface.verify +from zope.schema._bootstrapfields import get_validation_errors +from zope.schema._bootstrapfields import get_schema_validation_errors +from zope.schema._bootstrapfields import getFields + +__all__ = [ + 'getFieldNames', + 'getFields', + 'getFieldsInOrder', + 'getFieldNamesInOrder', + 'getValidationErrors', + 'getSchemaValidationErrors', +] def getFieldNames(schema): """Return a list of all the Field names in a schema. """ - from zope.schema.interfaces import IField - return [name for name in schema if IField.providedBy(schema[name])] - - -def getFields(schema): - """Return a dictionary containing all the Fields in a schema. - """ - from zope.schema.interfaces import IField - fields = {} - for name in schema: - attr = schema[name] - if IField.providedBy(attr): - fields[name] = attr - return fields + return list(getFields(schema).keys()) def getFieldsInOrder(schema, _field_key=lambda x: x[1].order): @@ -48,44 +46,43 @@ def getFieldNamesInOrder(schema): return [name for name, field in getFieldsInOrder(schema)] -def getValidationErrors(schema, object): - """Return a list of all validation errors. +def getValidationErrors(schema, value): + """ + Validate that *value* conforms to the schema interface *schema*. + + This includes checking for any schema validation errors (using + `getSchemaValidationErrors`). If that succeeds, then we proceed to + check for any declared invariants. + + Note that this does not include a check to see if the *value* + actually provides the given *schema*. + + :return: A sequence of (name, `zope.interface.Invalid`) tuples, + where *name* is None if the error was from an invariant. + If the sequence is empty, there were no errors. + """ + schema_error_dict, invariant_errors = get_validation_errors( + schema, + value, + ) + + if not schema_error_dict and not invariant_errors: + # Valid! Yay! + return [] + + return list(schema_error_dict.items()) + [(None, e) for e in invariant_errors] + + +def getSchemaValidationErrors(schema, value): + """ + Validate that *value* conforms to the schema interface *schema*. + + All :class:`zope.schema.interfaces.IField` members of the *schema* + are validated after being bound to *value*. (Note that we do not check for + arbitrary :class:`zope.interface.Attribute` members being present.) + + :return: A sequence of (name, `ValidationError`) tuples. A non-empty + sequence indicates validation failed. """ - errors = getSchemaValidationErrors(schema, object) - if errors: - return errors - - # Only validate invariants if there were no previous errors. Previous - # errors could be missing attributes which would most likely make an - # invariant raise an AttributeError. 
- invariant_errors = [] - try: - schema.validateInvariants(object, invariant_errors) - except zope.interface.exceptions.Invalid: - # Just collect errors - pass - errors = [(None, e) for e in invariant_errors] - return errors - - -def getSchemaValidationErrors(schema, object): - errors = [] - for name in schema.names(all=True): - if zope.interface.interfaces.IMethod.providedBy(schema[name]): - continue - attribute = schema[name] - if not zope.schema.interfaces.IField.providedBy(attribute): - continue - try: - value = getattr(object, name) - except AttributeError as error: - # property for the given name is not implemented - error = zope.schema.interfaces.SchemaNotFullyImplemented(error) - error = error.with_field_and_value(attribute, None) - errors.append((name, error)) - else: - try: - attribute.bind(object).validate(value) - except zope.schema.ValidationError as e: - errors.append((name, e)) - return errors + items = get_schema_validation_errors(schema, value).items() + return items if isinstance(items, list) else list(items) diff --git a/src/zope/schema/interfaces.py b/src/zope/schema/interfaces.py index 88988c4..26ddf5a 100644 --- a/src/zope/schema/interfaces.py +++ b/src/zope/schema/interfaces.py @@ -15,76 +15,151 @@ """ __docformat__ = "reStructuredText" -from zope.interface import Interface, Attribute +from zope.interface import Attribute +from zope.interface import Interface from zope.interface.common.mapping import IEnumerableMapping +from zope.interface.interfaces import IInterface -# Import from _bootstrapinterfaces only because other packages will expect -# to find these interfaces here. -from zope.schema._bootstrapfields import Field -from zope.schema._bootstrapfields import Text -from zope.schema._bootstrapfields import TextLine from zope.schema._bootstrapfields import Bool -from zope.schema._bootstrapfields import Number from zope.schema._bootstrapfields import Complex +from zope.schema._bootstrapfields import Field +from zope.schema._bootstrapfields import Int +from zope.schema._bootstrapfields import Integral +from zope.schema._bootstrapfields import Number +from zope.schema._bootstrapfields import Object from zope.schema._bootstrapfields import Rational from zope.schema._bootstrapfields import Real -from zope.schema._bootstrapfields import Integral -from zope.schema._bootstrapfields import Int -from zope.schema._bootstrapinterfaces import StopValidation -from zope.schema._bootstrapinterfaces import ValidationError -from zope.schema._bootstrapinterfaces import IFromUnicode -from zope.schema._bootstrapinterfaces import RequiredMissing -from zope.schema._bootstrapinterfaces import WrongType +from zope.schema._bootstrapfields import Text +from zope.schema._bootstrapfields import TextLine + +# Import from _bootstrapinterfaces only because other packages will expect +# to find these interfaces here. 
from zope.schema._bootstrapinterfaces import ConstraintNotSatisfied +from zope.schema._bootstrapinterfaces import IBeforeObjectAssignedEvent +from zope.schema._bootstrapinterfaces import IContextAwareDefaultFactory +from zope.schema._bootstrapinterfaces import IFromUnicode +from zope.schema._bootstrapinterfaces import IValidatable +from zope.schema._bootstrapinterfaces import InvalidValue from zope.schema._bootstrapinterfaces import NotAContainer from zope.schema._bootstrapinterfaces import NotAnIterator -from zope.schema._bootstrapinterfaces import TooSmall +from zope.schema._bootstrapinterfaces import RequiredMissing +from zope.schema._bootstrapinterfaces import SchemaNotCorrectlyImplemented +from zope.schema._bootstrapinterfaces import SchemaNotFullyImplemented +from zope.schema._bootstrapinterfaces import SchemaNotProvided +from zope.schema._bootstrapinterfaces import StopValidation from zope.schema._bootstrapinterfaces import TooBig from zope.schema._bootstrapinterfaces import TooLong from zope.schema._bootstrapinterfaces import TooShort -from zope.schema._bootstrapinterfaces import InvalidValue -from zope.schema._bootstrapinterfaces import IContextAwareDefaultFactory +from zope.schema._bootstrapinterfaces import TooSmall +from zope.schema._bootstrapinterfaces import ValidationError +from zope.schema._bootstrapinterfaces import WrongContainedType +from zope.schema._bootstrapinterfaces import WrongType from zope.schema._compat import PY3 from zope.schema._messageid import _ - -# pep 8 friendlyness -StopValidation, ValidationError, IFromUnicode, RequiredMissing, WrongType -ConstraintNotSatisfied, NotAContainer, NotAnIterator -TooSmall, TooBig, TooLong, TooShort, InvalidValue, IContextAwareDefaultFactory - - -class WrongContainedType(ValidationError): - __doc__ = _("""Wrong contained type""") - - -class SchemaNotCorrectlyImplemented(WrongContainedType): - __doc__ = _("""An object failed schema or invariant validation.""") - - #: A dictionary mapping failed attribute names of the - #: *value* to the underlying exception - schema_errors = None - - #: A list of exceptions from validating the invariants - #: of the schema. 
- invariant_errors = () +__all__ = [ + # Exceptions + 'ConstraintNotSatisfied', + 'InvalidDottedName', + 'InvalidId', + 'InvalidURI', + 'InvalidValue', + 'NotAContainer', + 'NotAnIterator', + 'NotUnique', + 'RequiredMissing', + 'SchemaNotCorrectlyImplemented', + 'SchemaNotFullyImplemented', + 'SchemaNotProvided', + 'StopValidation', + 'TooBig', + 'TooLong', + 'TooShort', + 'TooSmall', + 'Unbound', + 'ValidationError', + 'WrongContainedType', + 'WrongType', + + # Interfaces + 'IASCII', + 'IASCIILine', + 'IAbstractBag', + 'IAbstractSet', + 'IBaseVocabulary', + 'IBeforeObjectAssignedEvent', + 'IBool', + 'IBytes', + 'IBytesLine', + 'IChoice', + 'ICollection', + 'IComplex', + 'IContainer', + 'IContextAwareDefaultFactory', + 'IContextSourceBinder', + 'IDate', + 'IDatetime', + 'IDecimal', + 'IDict', + 'IDottedName', + 'IField', + 'IFieldEvent', + 'IFieldUpdatedEvent', + 'IFloat', + 'IFromUnicode', + 'IFrozenSet', + 'IId', + 'IInt', + 'IIntegral', + 'IInterfaceField', + 'IIterable', + 'IIterableSource', + 'IIterableVocabulary', + 'ILen', + 'IList', + 'IMapping', + 'IMinMax', + 'IMinMaxLen', + 'IMutableMapping', + 'IMutableSequence', + 'INativeString', + 'INativeStringLine', + 'INumber', + 'IObject', + 'IOrderable', + 'IPassword', + 'IRational', + 'IReal', + 'ISequence', + 'ISet', + 'ISource', + 'ISourceQueriables', + 'ISourceText', + 'ITerm', + 'IText', + 'ITextLine', + 'ITime', + 'ITimedelta', + 'ITitledTokenizedTerm', + 'ITokenizedTerm', + 'ITreeVocabulary', + 'ITuple', + 'IURI', + 'IUnorderedCollection', + 'IVocabulary', + 'IVocabularyFactory', + 'IVocabularyRegistry', + 'IVocabularyTokenized', +] class NotUnique(ValidationError): __doc__ = _("""One or more entries of sequence are not unique.""") -class SchemaNotFullyImplemented(ValidationError): - __doc__ = _("""Schema not fully implemented""") - - -class SchemaNotProvided(ValidationError): - __doc__ = _("""Schema not provided""") - - class InvalidURI(ValidationError): __doc__ = _("""The specified URI is not valid.""") @@ -101,7 +176,7 @@ class Unbound(Exception): __doc__ = _("""The field is not bound.""") -class IField(Interface): +class IField(IValidatable): """Basic Schema Field Interface. Fields are used for Interface specifications. They at least provide @@ -639,14 +714,14 @@ class IChoice(IField): # Abstract - class ICollection(IMinMaxLen, IIterable, IContainer): """Abstract interface containing a collection value. The Value must be iterable and may have a min_length/max_length. """ - value_type = Field( + value_type = Object( + IField, title=_("Value Type"), description=_("Field value items must conform to the given type, " "expressed via a Field.")) @@ -677,14 +752,14 @@ class IUnorderedCollection(ICollection): class IAbstractSet(IUnorderedCollection): """An unordered collection of unique values.""" - unique = Attribute("This ICollection interface attribute must be True") + unique = Bool(description="This ICollection interface attribute must be True") class IAbstractBag(IUnorderedCollection): """An unordered collection of values, with no limitations on whether members are unique""" - unique = Attribute("This ICollection interface attribute must be False") + unique = Bool(description="This ICollection interface attribute must be False") # Concrete @@ -720,34 +795,19 @@ class IObject(IField): Add the *validate_invariants* attribute. 
""" - schema = Attribute( - "schema", - _("The Interface that defines the Fields comprising the Object.") + schema = Object( + IInterface, + description=_("The Interface that defines the Fields comprising the Object.") ) - validate_invariants = Attribute( - "validate_invariants", - _("A boolean that says whether ``schema.validateInvariants`` " - "is called from ``self.validate()``. The default is true.") + validate_invariants = Bool( + title="validate_invariants", + description=_("A boolean that says whether ``schema.validateInvariants`` " + "is called from ``self.validate()``. The default is true."), + default=True, ) -class IBeforeObjectAssignedEvent(Interface): - """An object is going to be assigned to an attribute on another object. - - Subscribers to this event can change the object on this event to change - what object is going to be assigned. This is useful, e.g. for wrapping - or replacing objects before they get assigned to conform to application - policy. - """ - - object = Attribute("The object that is going to be assigned.") - - name = Attribute("The name of the attribute under which the object " - "will be assigned.") - - context = Attribute("The context object where the object will be " - "assigned to.") class IMapping(IMinMaxLen, IIterable, IContainer): """ @@ -757,15 +817,15 @@ class IMapping(IMinMaxLen, IIterable, IContainer): of restrictions for keys and values contained in the dict. """ - key_type = Attribute( - "key_type", - _("Field keys must conform to the given type, expressed via a Field.") + key_type = Object( + IField, + description=_("Field keys must conform to the given type, expressed via a Field.") ) - value_type = Attribute( - "value_type", - _("Field values must conform to the given type, expressed " - "via a Field.") + value_type = Object( + IField, + description=_("Field values must conform to the given type, expressed " + "via a Field.") ) @@ -973,7 +1033,9 @@ class IVocabularyFactory(Interface): class IFieldEvent(Interface): - field = Attribute("The field that has been changed") + field = Object( + IField, + description="The field that has been changed") object = Attribute("The object containing the field")
`getSchemaValidationErrors` is strikingly similar to `_validate_fields` used by `Object`. There are two differences:

- `_validate_fields` correctly handles cycles; `getSchemaValidationErrors` does not.
- `_validate_fields` returns a dictionary (now); `getSchemaValidationErrors` returns a list of tuples.

At first glance, it appears that `getSchemaValidationErrors` binds each field to the top-level object being validated, while `_validate_fields` does not. That was true prior to #53; with that PR, both bind fields to the same object. It seems like we could and should share this code.

In addition, `getValidationErrors` is very similar to the internals of `Object._validate`. There may be some code sharing we can do there as well.
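The patch above addresses this by consolidating the duplicated logic into shared helpers (`get_schema_validation_errors` / `get_validation_errors` in `zope.schema._bootstrapfields`) that both `Object._validate` and the top-level `getValidationErrors` / `getSchemaValidationErrors` now call. Below is a minimal usage sketch of the public API after that change; the `IPerson` / `Person` names are invented for illustration, and the printed values are indicative rather than exact output.

```python
from zope.interface import Interface, implementer
from zope.schema import Text, Int, Object, getValidationErrors
from zope.schema.interfaces import SchemaNotCorrectlyImplemented


class IPerson(Interface):
    """Illustrative schema (not part of zope.schema itself)."""
    name = Text(title=u"Name")
    age = Int(title=u"Age", min=0)


@implementer(IPerson)
class Person(object):
    def __init__(self, name, age):
        self.name = name
        self.age = age


bad = Person(name=u"Alice", age=-1)

# The functional API returns (field name, error) pairs for failing fields.
print(getValidationErrors(IPerson, bad))   # e.g. [('age', TooSmall(...))]

# The Object field reports the same per-field errors, exposed on the
# SchemaNotCorrectlyImplemented exception's schema_errors mapping.
field = Object(IPerson)
try:
    field.validate(bad)
except SchemaNotCorrectlyImplemented as exc:
    print(sorted(exc.schema_errors))       # ['age']
```

Because both entry points now run through the same helper, the per-field errors reported by `getValidationErrors` and by `Object.validate` stay consistent, including cycle handling and the binding of each field to the top-level value.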
zopefoundation/zope.schema
diff --git a/src/zope/schema/tests/test__bootstrapfields.py b/src/zope/schema/tests/test__bootstrapfields.py index 419378a..733d5c9 100644 --- a/src/zope/schema/tests/test__bootstrapfields.py +++ b/src/zope/schema/tests/test__bootstrapfields.py @@ -14,7 +14,7 @@ import doctest import unittest -# pylint:disable=protected-access +# pylint:disable=protected-access,inherit-non-class,blacklisted-name class EqualityTestsMixin(object): @@ -991,6 +991,496 @@ class IntTests(IntegralTests): self.assertEqual(txt._type, integer_types) +class ObjectTests(EqualityTestsMixin, + unittest.TestCase): + + def setUp(self): + from zope.event import subscribers + self._before = subscribers[:] + + def tearDown(self): + from zope.event import subscribers + subscribers[:] = self._before + + def _getTargetClass(self): + from zope.schema._field import Object + return Object + + def _getTargetInterface(self): + from zope.schema.interfaces import IObject + return IObject + + def _makeOneFromClass(self, cls, schema=None, *args, **kw): + if schema is None: + schema = self._makeSchema() + return super(ObjectTests, self)._makeOneFromClass(cls, schema, *args, **kw) + + def _makeSchema(self, **kw): + from zope.interface import Interface + from zope.interface.interface import InterfaceClass + return InterfaceClass('ISchema', (Interface,), kw) + + def _getErrors(self, f, *args, **kw): + from zope.schema.interfaces import WrongContainedType + with self.assertRaises(WrongContainedType) as e: + f(*args, **kw) + return e.exception.args[0] + + def _makeCycles(self): + from zope.interface import Interface + from zope.interface import implementer + from zope.schema import Object + from zope.schema import List + from zope.schema._messageid import _ + + class IUnit(Interface): + """A schema that participate to a cycle""" + boss = Object( + schema=Interface, + title=_("Boss"), + description=_("Boss description"), + required=False, + ) + members = List( + value_type=Object(schema=Interface), + title=_("Member List"), + description=_("Member list description"), + required=False, + ) + + class IPerson(Interface): + """A schema that participate to a cycle""" + unit = Object( + schema=IUnit, + title=_("Unit"), + description=_("Unit description"), + required=False, + ) + + IUnit['boss'].schema = IPerson + IUnit['members'].value_type.schema = IPerson + + @implementer(IUnit) + class Unit(object): + def __init__(self, person, person_list): + self.boss = person + self.members = person_list + + @implementer(IPerson) + class Person(object): + def __init__(self, unit): + self.unit = unit + + return IUnit, Person, Unit + + def test_class_conforms_to_IObject(self): + from zope.interface.verify import verifyClass + from zope.schema.interfaces import IObject + verifyClass(IObject, self._getTargetClass()) + + def test_instance_conforms_to_IObject(self): + from zope.interface.verify import verifyObject + from zope.schema.interfaces import IObject + verifyObject(IObject, self._makeOne()) + + def test_ctor_w_bad_schema(self): + from zope.schema.interfaces import WrongType + self.assertRaises(WrongType, self._makeOne, object()) + + def test_validate_not_required(self): + schema = self._makeSchema() + objf = self._makeOne(schema, required=False) + objf.validate(None) # doesn't raise + + def test_validate_required(self): + from zope.schema.interfaces import RequiredMissing + field = self._makeOne(required=True) + self.assertRaises(RequiredMissing, field.validate, None) + + def test__validate_w_empty_schema(self): + from zope.interface import Interface + objf 
= self._makeOne(Interface) + objf.validate(object()) # doesn't raise + + def test__validate_w_value_not_providing_schema(self): + from zope.schema.interfaces import SchemaNotProvided + from zope.schema._bootstrapfields import Text + schema = self._makeSchema(foo=Text(), bar=Text()) + objf = self._makeOne(schema) + bad_value = object() + with self.assertRaises(SchemaNotProvided) as exc: + objf.validate(bad_value) + + not_provided = exc.exception + self.assertIs(not_provided.field, objf) + self.assertIs(not_provided.value, bad_value) + self.assertEqual(not_provided.args, (schema, bad_value), ) + + def test__validate_w_value_providing_schema_but_missing_fields(self): + from zope.interface import implementer + from zope.schema.interfaces import SchemaNotFullyImplemented + from zope.schema.interfaces import SchemaNotCorrectlyImplemented + from zope.schema._bootstrapfields import Text + schema = self._makeSchema(foo=Text(), bar=Text()) + + @implementer(schema) + class Broken(object): + pass + + objf = self._makeOne(schema) + broken = Broken() + with self.assertRaises(SchemaNotCorrectlyImplemented) as exc: + objf.validate(broken) + + wct = exc.exception + self.assertIs(wct.field, objf) + self.assertIs(wct.value, broken) + self.assertEqual(wct.invariant_errors, []) + self.assertEqual( + sorted(wct.schema_errors), + ['bar', 'foo'] + ) + for name in ('foo', 'bar'): + error = wct.schema_errors[name] + self.assertIsInstance(error, + SchemaNotFullyImplemented) + self.assertEqual(schema[name], error.field) + self.assertIsNone(error.value) + + # The legacy arg[0] errors list + errors = self._getErrors(objf.validate, Broken()) + self.assertEqual(len(errors), 2) + errors = sorted(errors, + key=lambda x: (type(x).__name__, str(x.args[0]))) + err = errors[0] + self.assertIsInstance(err, SchemaNotFullyImplemented) + nested = err.args[0] + self.assertIsInstance(nested, AttributeError) + self.assertIn("'bar'", str(nested)) + err = errors[1] + self.assertIsInstance(err, SchemaNotFullyImplemented) + nested = err.args[0] + self.assertIsInstance(nested, AttributeError) + self.assertIn("'foo'", str(nested)) + + def test__validate_w_value_providing_schema_but_invalid_fields(self): + from zope.interface import implementer + from zope.schema.interfaces import SchemaNotCorrectlyImplemented + from zope.schema.interfaces import RequiredMissing + from zope.schema.interfaces import WrongType + from zope.schema._bootstrapfields import Text + from zope.schema._compat import text_type + schema = self._makeSchema(foo=Text(), bar=Text()) + + @implementer(schema) + class Broken(object): + foo = None + bar = 1 + + objf = self._makeOne(schema) + broken = Broken() + with self.assertRaises(SchemaNotCorrectlyImplemented) as exc: + objf.validate(broken) + + wct = exc.exception + self.assertIs(wct.field, objf) + self.assertIs(wct.value, broken) + self.assertEqual(wct.invariant_errors, []) + self.assertEqual( + sorted(wct.schema_errors), + ['bar', 'foo'] + ) + self.assertIsInstance(wct.schema_errors['foo'], RequiredMissing) + self.assertIsInstance(wct.schema_errors['bar'], WrongType) + + # The legacy arg[0] errors list + errors = self._getErrors(objf.validate, Broken()) + self.assertEqual(len(errors), 2) + errors = sorted(errors, key=lambda x: type(x).__name__) + err = errors[0] + self.assertIsInstance(err, RequiredMissing) + self.assertEqual(err.args, ('foo',)) + err = errors[1] + self.assertIsInstance(err, WrongType) + self.assertEqual(err.args, (1, text_type, 'bar')) + + def test__validate_w_value_providing_schema(self): + from 
zope.interface import implementer + from zope.schema._bootstrapfields import Text + from zope.schema._field import Choice + + schema = self._makeSchema( + foo=Text(), + bar=Text(), + baz=Choice(values=[1, 2, 3]), + ) + + @implementer(schema) + class OK(object): + foo = u'Foo' + bar = u'Bar' + baz = 2 + objf = self._makeOne(schema) + objf.validate(OK()) # doesn't raise + + def test_validate_w_cycles(self): + IUnit, Person, Unit = self._makeCycles() + field = self._makeOne(schema=IUnit) + person1 = Person(None) + person2 = Person(None) + unit = Unit(person1, [person1, person2]) + person1.unit = unit + person2.unit = unit + field.validate(unit) # doesn't raise + + def test_validate_w_cycles_object_not_valid(self): + from zope.schema.interfaces import WrongContainedType + IUnit, Person, Unit = self._makeCycles() + field = self._makeOne(schema=IUnit) + person1 = Person(None) + person2 = Person(None) + person3 = Person(object()) + unit = Unit(person3, [person1, person2]) + person1.unit = unit + person2.unit = unit + self.assertRaises(WrongContainedType, field.validate, unit) + + def test_validate_w_cycles_collection_not_valid(self): + from zope.schema.interfaces import WrongContainedType + IUnit, Person, Unit = self._makeCycles() + field = self._makeOne(schema=IUnit) + person1 = Person(None) + person2 = Person(None) + person3 = Person(object()) + unit = Unit(person1, [person2, person3]) + person1.unit = unit + person2.unit = unit + self.assertRaises(WrongContainedType, field.validate, unit) + + def test_set_emits_IBOAE(self): + from zope.event import subscribers + from zope.interface import implementer + from zope.schema.interfaces import IBeforeObjectAssignedEvent + from zope.schema._bootstrapfields import Text + from zope.schema._field import Choice + + schema = self._makeSchema( + foo=Text(), + bar=Text(), + baz=Choice(values=[1, 2, 3]), + ) + + @implementer(schema) + class OK(object): + foo = u'Foo' + bar = u'Bar' + baz = 2 + log = [] + subscribers.append(log.append) + objf = self._makeOne(schema, __name__='field') + inst = DummyInst() + value = OK() + objf.set(inst, value) + self.assertIs(inst.field, value) + self.assertEqual(len(log), 5) + self.assertEqual(IBeforeObjectAssignedEvent.providedBy(log[-1]), True) + self.assertEqual(log[-1].object, value) + self.assertEqual(log[-1].name, 'field') + self.assertEqual(log[-1].context, inst) + + def test_set_allows_IBOAE_subscr_to_replace_value(self): + from zope.event import subscribers + from zope.interface import implementer + from zope.schema._bootstrapfields import Text + from zope.schema._field import Choice + + schema = self._makeSchema( + foo=Text(), + bar=Text(), + baz=Choice(values=[1, 2, 3]), + ) + + @implementer(schema) + class OK(object): + def __init__(self, foo=u'Foo', bar=u'Bar', baz=2): + self.foo = foo + self.bar = bar + self.baz = baz + ok1 = OK() + ok2 = OK(u'Foo2', u'Bar2', 3) + log = [] + subscribers.append(log.append) + + def _replace(event): + event.object = ok2 + subscribers.append(_replace) + objf = self._makeOne(schema, __name__='field') + inst = DummyInst() + self.assertEqual(len(log), 4) + objf.set(inst, ok1) + self.assertIs(inst.field, ok2) + self.assertEqual(len(log), 5) + self.assertEqual(log[-1].object, ok2) + self.assertEqual(log[-1].name, 'field') + self.assertEqual(log[-1].context, inst) + + def test_validates_invariants_by_default(self): + from zope.interface import invariant + from zope.interface import Interface + from zope.interface import implementer + from zope.interface import Invalid + from zope.schema 
import Text + from zope.schema import Bytes + + class ISchema(Interface): + + foo = Text() + bar = Bytes() + + @invariant + def check_foo(self): + if self.foo == u'bar': + raise Invalid("Foo is not valid") + + @invariant + def check_bar(self): + if self.bar == b'foo': + raise Invalid("Bar is not valid") + + @implementer(ISchema) + class O(object): + foo = u'' + bar = b'' + + + field = self._makeOne(ISchema) + inst = O() + + # Fine at first + field.validate(inst) + + inst.foo = u'bar' + errors = self._getErrors(field.validate, inst) + self.assertEqual(len(errors), 1) + self.assertEqual(errors[0].args[0], "Foo is not valid") + + del inst.foo + inst.bar = b'foo' + errors = self._getErrors(field.validate, inst) + self.assertEqual(len(errors), 1) + self.assertEqual(errors[0].args[0], "Bar is not valid") + + # Both invalid + inst.foo = u'bar' + errors = self._getErrors(field.validate, inst) + self.assertEqual(len(errors), 2) + errors.sort(key=lambda i: i.args) + self.assertEqual(errors[0].args[0], "Bar is not valid") + self.assertEqual(errors[1].args[0], "Foo is not valid") + + # We can specifically ask for invariants to be turned off. + field = self._makeOne(ISchema, validate_invariants=False) + field.validate(inst) + + def test_schema_defined_by_subclass(self): + from zope import interface + from zope.schema.interfaces import SchemaNotProvided + + class IValueType(interface.Interface): + "The value type schema" + + class Field(self._getTargetClass()): + schema = IValueType + + field = Field() + self.assertIs(field.schema, IValueType) + + # Non implementation is bad + self.assertRaises(SchemaNotProvided, field.validate, object()) + + # Actual implementation works + @interface.implementer(IValueType) + class ValueType(object): + "The value type" + + + field.validate(ValueType()) + + def test_bound_field_of_collection_with_choice(self): + # https://github.com/zopefoundation/zope.schema/issues/17 + from zope.interface import Interface, implementer + from zope.interface import Attribute + + from zope.schema import Choice, Object, Set + from zope.schema.fieldproperty import FieldProperty + from zope.schema.interfaces import IContextSourceBinder + from zope.schema.interfaces import WrongContainedType + from zope.schema.interfaces import SchemaNotCorrectlyImplemented + from zope.schema.vocabulary import SimpleVocabulary + + + @implementer(IContextSourceBinder) + class EnumContext(object): + def __call__(self, context): + return SimpleVocabulary.fromValues(list(context)) + + class IMultipleChoice(Interface): + choices = Set(value_type=Choice(source=EnumContext())) + # Provide a regular attribute to prove that binding doesn't + # choke. NOTE: We don't actually verify the existence of this attribute. + non_field = Attribute("An attribute") + + @implementer(IMultipleChoice) + class Choices(object): + + def __init__(self, choices): + self.choices = choices + + def __iter__(self): + # EnumContext calls this to make the vocabulary. + # Fields of the schema of the IObject are bound to the value being + # validated. 
+ return iter(range(5)) + + class IFavorites(Interface): + fav = Object(title=u"Favorites number", schema=IMultipleChoice) + + + @implementer(IFavorites) + class Favorites(object): + fav = FieldProperty(IFavorites['fav']) + + # must not raise + good_choices = Choices({1, 3}) + IFavorites['fav'].validate(good_choices) + + # Ranges outside the context fail + bad_choices = Choices({1, 8}) + with self.assertRaises(WrongContainedType) as exc: + IFavorites['fav'].validate(bad_choices) + + e = exc.exception + self.assertEqual(IFavorites['fav'], e.field) + self.assertEqual(bad_choices, e.value) + + # Validation through field property + favorites = Favorites() + favorites.fav = good_choices + + # And validation through a field that wants IFavorites + favorites_field = Object(IFavorites) + favorites_field.validate(favorites) + + # Check the field property error + with self.assertRaises(SchemaNotCorrectlyImplemented) as exc: + favorites.fav = bad_choices + + e = exc.exception + self.assertEqual(IFavorites['fav'], e.field) + self.assertEqual(bad_choices, e.value) + self.assertEqual(['choices'], list(e.schema_errors)) + + class DummyInst(object): missing_value = object() diff --git a/src/zope/schema/tests/test__field.py b/src/zope/schema/tests/test__field.py index e959688..c176326 100644 --- a/src/zope/schema/tests/test__field.py +++ b/src/zope/schema/tests/test__field.py @@ -796,9 +796,9 @@ class ChoiceTests(EqualityTestsMixin, pass source = self._makeOne(vocabulary=Vocab()) - instance = DummyInstance() + instance = object() target = source.bind(instance) - self.assertTrue(target.vocabulary is source.vocabulary) + self.assertIs(target.vocabulary, source.vocabulary) def test_bind_w_voc_is_ICSB(self): from zope.interface import implementer @@ -818,9 +818,9 @@ class ChoiceTests(EqualityTestsMixin, source = self._makeOne(vocabulary='temp') source.vocabulary = Vocab(source) source.vocabularyName = None - instance = DummyInstance() + instance = object() target = source.bind(instance) - self.assertEqual(target.vocabulary.context, instance) + self.assertIs(target.vocabulary.context, instance) def test_bind_w_voc_is_ICSB_but_not_ISource(self): from zope.interface import implementer @@ -838,7 +838,7 @@ class ChoiceTests(EqualityTestsMixin, source = self._makeOne(vocabulary='temp') source.vocabulary = Vocab(source) source.vocabularyName = None - instance = DummyInstance() + instance = object() self.assertRaises(ValueError, source.bind, instance) def test_fromUnicode_miss(self): @@ -1630,422 +1630,6 @@ class FrozenSetTests(SetTests): return IFrozenSet -class ObjectTests(EqualityTestsMixin, - unittest.TestCase): - - def setUp(self): - from zope.event import subscribers - self._before = subscribers[:] - - def tearDown(self): - from zope.event import subscribers - subscribers[:] = self._before - - def _getTargetClass(self): - from zope.schema._field import Object - return Object - - def _getTargetInterface(self): - from zope.schema.interfaces import IObject - return IObject - - def _makeOneFromClass(self, cls, schema=None, *args, **kw): - if schema is None: - schema = self._makeSchema() - return super(ObjectTests, self)._makeOneFromClass(cls, schema, *args, **kw) - - def _makeSchema(self, **kw): - from zope.interface import Interface - from zope.interface.interface import InterfaceClass - return InterfaceClass('ISchema', (Interface,), kw) - - def _getErrors(self, f, *args, **kw): - from zope.schema.interfaces import WrongContainedType - with self.assertRaises(WrongContainedType) as e: - f(*args, **kw) - return 
e.exception.args[0]
-
-    def _makeCycles(self):
-        from zope.interface import Interface
-        from zope.interface import implementer
-        from zope.schema import Object
-        from zope.schema import List
-        from zope.schema._messageid import _
-
-        class IUnit(Interface):
-            """A schema that participate to a cycle"""
-            boss = Object(
-                schema=Interface,
-                title=_("Boss"),
-                description=_("Boss description"),
-                required=False,
-            )
-            members = List(
-                value_type=Object(schema=Interface),
-                title=_("Member List"),
-                description=_("Member list description"),
-                required=False,
-            )
-
-        class IPerson(Interface):
-            """A schema that participate to a cycle"""
-            unit = Object(
-                schema=IUnit,
-                title=_("Unit"),
-                description=_("Unit description"),
-                required=False,
-            )
-
-        IUnit['boss'].schema = IPerson
-        IUnit['members'].value_type.schema = IPerson
-
-        @implementer(IUnit)
-        class Unit(object):
-            def __init__(self, person, person_list):
-                self.boss = person
-                self.members = person_list
-
-        @implementer(IPerson)
-        class Person(object):
-            def __init__(self, unit):
-                self.unit = unit
-
-        return IUnit, Person, Unit
-
-    def test_class_conforms_to_IObject(self):
-        from zope.interface.verify import verifyClass
-        from zope.schema.interfaces import IObject
-        verifyClass(IObject, self._getTargetClass())
-
-    def test_instance_conforms_to_IObject(self):
-        from zope.interface.verify import verifyObject
-        from zope.schema.interfaces import IObject
-        verifyObject(IObject, self._makeOne())
-
-    def test_ctor_w_bad_schema(self):
-        from zope.schema.interfaces import WrongType
-        self.assertRaises(WrongType, self._makeOne, object())
-
-    def test_validate_not_required(self):
-        schema = self._makeSchema()
-        objf = self._makeOne(schema, required=False)
-        objf.validate(None) # doesn't raise
-
-    def test_validate_required(self):
-        from zope.schema.interfaces import RequiredMissing
-        field = self._makeOne(required=True)
-        self.assertRaises(RequiredMissing, field.validate, None)
-
-    def test__validate_w_empty_schema(self):
-        from zope.interface import Interface
-        objf = self._makeOne(Interface)
-        objf.validate(object()) # doesn't raise
-
-    def test__validate_w_value_not_providing_schema(self):
-        from zope.schema.interfaces import SchemaNotProvided
-        from zope.schema._bootstrapfields import Text
-        schema = self._makeSchema(foo=Text(), bar=Text())
-        objf = self._makeOne(schema)
-        bad_value = object()
-        with self.assertRaises(SchemaNotProvided) as exc:
-            objf.validate(bad_value)
-
-        not_provided = exc.exception
-        self.assertIs(not_provided.field, objf)
-        self.assertIs(not_provided.value, bad_value)
-        self.assertEqual(not_provided.args, (schema, bad_value), )
-
-    def test__validate_w_value_providing_schema_but_missing_fields(self):
-        from zope.interface import implementer
-        from zope.schema.interfaces import SchemaNotFullyImplemented
-        from zope.schema.interfaces import SchemaNotCorrectlyImplemented
-        from zope.schema._bootstrapfields import Text
-        schema = self._makeSchema(foo=Text(), bar=Text())
-
-        @implementer(schema)
-        class Broken(object):
-            pass
-
-        objf = self._makeOne(schema)
-        broken = Broken()
-        with self.assertRaises(SchemaNotCorrectlyImplemented) as exc:
-            objf.validate(broken)
-
-        wct = exc.exception
-        self.assertIs(wct.field, objf)
-        self.assertIs(wct.value, broken)
-        self.assertEqual(wct.invariant_errors, [])
-        self.assertEqual(
-            sorted(wct.schema_errors),
-            ['bar', 'foo']
-        )
-        for name in ('foo', 'bar'):
-            error = wct.schema_errors[name]
-            self.assertIsInstance(error,
-                                  SchemaNotFullyImplemented)
-            self.assertEqual(schema[name],
-                             error.field)
-            self.assertIsNone(error.value)
-
-        # The legacy arg[0] errors list
-        errors = self._getErrors(objf.validate, Broken())
-        self.assertEqual(len(errors), 2)
-        errors = sorted(errors,
-                        key=lambda x: (type(x).__name__, str(x.args[0])))
-        err = errors[0]
-        self.assertIsInstance(err, SchemaNotFullyImplemented)
-        nested = err.args[0]
-        self.assertIsInstance(nested, AttributeError)
-        self.assertIn("'bar'", str(nested))
-        err = errors[1]
-        self.assertIsInstance(err, SchemaNotFullyImplemented)
-        nested = err.args[0]
-        self.assertIsInstance(nested, AttributeError)
-        self.assertIn("'foo'", str(nested))
-
-    def test__validate_w_value_providing_schema_but_invalid_fields(self):
-        from zope.interface import implementer
-        from zope.schema.interfaces import SchemaNotCorrectlyImplemented
-        from zope.schema.interfaces import RequiredMissing
-        from zope.schema.interfaces import WrongType
-        from zope.schema._bootstrapfields import Text
-        from zope.schema._compat import text_type
-        schema = self._makeSchema(foo=Text(), bar=Text())
-
-        @implementer(schema)
-        class Broken(object):
-            foo = None
-            bar = 1
-
-        objf = self._makeOne(schema)
-        broken = Broken()
-        with self.assertRaises(SchemaNotCorrectlyImplemented) as exc:
-            objf.validate(broken)
-
-        wct = exc.exception
-        self.assertIs(wct.field, objf)
-        self.assertIs(wct.value, broken)
-        self.assertEqual(wct.invariant_errors, [])
-        self.assertEqual(
-            sorted(wct.schema_errors),
-            ['bar', 'foo']
-        )
-        self.assertIsInstance(wct.schema_errors['foo'], RequiredMissing)
-        self.assertIsInstance(wct.schema_errors['bar'], WrongType)
-
-        # The legacy arg[0] errors list
-        errors = self._getErrors(objf.validate, Broken())
-        self.assertEqual(len(errors), 2)
-        errors = sorted(errors, key=lambda x: type(x).__name__)
-        err = errors[0]
-        self.assertIsInstance(err, RequiredMissing)
-        self.assertEqual(err.args, ('foo',))
-        err = errors[1]
-        self.assertIsInstance(err, WrongType)
-        self.assertEqual(err.args, (1, text_type, 'bar'))
-
-    def test__validate_w_value_providing_schema(self):
-        from zope.interface import implementer
-        from zope.schema._bootstrapfields import Text
-        from zope.schema._field import Choice
-
-        schema = self._makeSchema(
-            foo=Text(),
-            bar=Text(),
-            baz=Choice(values=[1, 2, 3]),
-        )
-
-        @implementer(schema)
-        class OK(object):
-            foo = u'Foo'
-            bar = u'Bar'
-            baz = 2
-        objf = self._makeOne(schema)
-        objf.validate(OK()) # doesn't raise
-
-    def test_validate_w_cycles(self):
-        IUnit, Person, Unit = self._makeCycles()
-        field = self._makeOne(schema=IUnit)
-        person1 = Person(None)
-        person2 = Person(None)
-        unit = Unit(person1, [person1, person2])
-        person1.unit = unit
-        person2.unit = unit
-        field.validate(unit) # doesn't raise
-
-    def test_validate_w_cycles_object_not_valid(self):
-        from zope.schema.interfaces import WrongContainedType
-        IUnit, Person, Unit = self._makeCycles()
-        field = self._makeOne(schema=IUnit)
-        person1 = Person(None)
-        person2 = Person(None)
-        person3 = Person(DummyInstance())
-        unit = Unit(person3, [person1, person2])
-        person1.unit = unit
-        person2.unit = unit
-        self.assertRaises(WrongContainedType, field.validate, unit)
-
-    def test_validate_w_cycles_collection_not_valid(self):
-        from zope.schema.interfaces import WrongContainedType
-        IUnit, Person, Unit = self._makeCycles()
-        field = self._makeOne(schema=IUnit)
-        person1 = Person(None)
-        person2 = Person(None)
-        person3 = Person(DummyInstance())
-        unit = Unit(person1, [person2, person3])
-        person1.unit = unit
-        person2.unit = unit
-        self.assertRaises(WrongContainedType,
-                          field.validate, unit)
-
-    def test_set_emits_IBOAE(self):
-        from zope.event import subscribers
-        from zope.interface import implementer
-        from zope.schema.interfaces import IBeforeObjectAssignedEvent
-        from zope.schema._bootstrapfields import Text
-        from zope.schema._field import Choice
-
-        schema = self._makeSchema(
-            foo=Text(),
-            bar=Text(),
-            baz=Choice(values=[1, 2, 3]),
-        )
-
-        @implementer(schema)
-        class OK(object):
-            foo = u'Foo'
-            bar = u'Bar'
-            baz = 2
-        log = []
-        subscribers.append(log.append)
-        objf = self._makeOne(schema, __name__='field')
-        inst = DummyInstance()
-        value = OK()
-        objf.set(inst, value)
-        self.assertEqual(inst.field is value, True)
-        self.assertEqual(len(log), 5)
-        self.assertEqual(IBeforeObjectAssignedEvent.providedBy(log[-1]), True)
-        self.assertEqual(log[-1].object, value)
-        self.assertEqual(log[-1].name, 'field')
-        self.assertEqual(log[-1].context, inst)
-
-    def test_set_allows_IBOAE_subscr_to_replace_value(self):
-        from zope.event import subscribers
-        from zope.interface import implementer
-        from zope.schema._bootstrapfields import Text
-        from zope.schema._field import Choice
-
-        schema = self._makeSchema(
-            foo=Text(),
-            bar=Text(),
-            baz=Choice(values=[1, 2, 3]),
-        )
-
-        @implementer(schema)
-        class OK(object):
-            def __init__(self, foo=u'Foo', bar=u'Bar', baz=2):
-                self.foo = foo
-                self.bar = bar
-                self.baz = baz
-        ok1 = OK()
-        ok2 = OK(u'Foo2', u'Bar2', 3)
-        log = []
-        subscribers.append(log.append)
-
-        def _replace(event):
-            event.object = ok2
-        subscribers.append(_replace)
-        objf = self._makeOne(schema, __name__='field')
-        inst = DummyInstance()
-        self.assertEqual(len(log), 4)
-        objf.set(inst, ok1)
-        self.assertEqual(inst.field is ok2, True)
-        self.assertEqual(len(log), 5)
-        self.assertEqual(log[-1].object, ok2)
-        self.assertEqual(log[-1].name, 'field')
-        self.assertEqual(log[-1].context, inst)
-
-    def test_validates_invariants_by_default(self):
-        from zope.interface import invariant
-        from zope.interface import Interface
-        from zope.interface import implementer
-        from zope.interface import Invalid
-        from zope.schema import Text
-        from zope.schema import Bytes
-
-        class ISchema(Interface):
-
-            foo = Text()
-            bar = Bytes()
-
-            @invariant
-            def check_foo(self):
-                if self.foo == u'bar':
-                    raise Invalid("Foo is not valid")
-
-            @invariant
-            def check_bar(self):
-                if self.bar == b'foo':
-                    raise Invalid("Bar is not valid")
-
-        @implementer(ISchema)
-        class O(object):
-            foo = u''
-            bar = b''
-
-
-        field = self._makeOne(ISchema)
-        inst = O()
-
-        # Fine at first
-        field.validate(inst)
-
-        inst.foo = u'bar'
-        errors = self._getErrors(field.validate, inst)
-        self.assertEqual(len(errors), 1)
-        self.assertEqual(errors[0].args[0], "Foo is not valid")
-
-        del inst.foo
-        inst.bar = b'foo'
-        errors = self._getErrors(field.validate, inst)
-        self.assertEqual(len(errors), 1)
-        self.assertEqual(errors[0].args[0], "Bar is not valid")
-
-        # Both invalid
-        inst.foo = u'bar'
-        errors = self._getErrors(field.validate, inst)
-        self.assertEqual(len(errors), 2)
-        errors.sort(key=lambda i: i.args)
-        self.assertEqual(errors[0].args[0], "Bar is not valid")
-        self.assertEqual(errors[1].args[0], "Foo is not valid")
-
-        # We can specifically ask for invariants to be turned off.
-        field = self._makeOne(ISchema, validate_invariants=False)
-        field.validate(inst)
-
-    def test_schema_defined_by_subclass(self):
-        from zope import interface
-        from zope.schema.interfaces import SchemaNotProvided
-
-        class IValueType(interface.Interface):
-            "The value type schema"
-
-        class Field(self._getTargetClass()):
-            schema = IValueType
-
-        field = Field()
-        self.assertIs(field.schema, IValueType)
-
-        # Non implementation is bad
-        self.assertRaises(SchemaNotProvided, field.validate, object())
-
-        # Actual implementation works
-        @interface.implementer(IValueType)
-        class ValueType(object):
-            "The value type"
-
-
-        field.validate(ValueType())
-
-
-
 class MappingTests(EqualityTestsMixin,
                    unittest.TestCase):
@@ -2147,7 +1731,7 @@ class MappingTests(EqualityTestsMixin,
     def test_bind_binds_key_and_value_types(self):
         from zope.schema import Int
         field = self._makeOne(key_type=Int(), value_type=Int())
-        context = DummyInstance()
+        context = object()
         field2 = field.bind(context)
         self.assertEqual(field2.key_type.context, context)
         self.assertEqual(field2.value_type.context, context)
@@ -2221,10 +1805,6 @@ class DictTests(MutableMappingTests):
         super(DictTests, self).test_mutable_mapping()
 
 
-class DummyInstance(object):
-    pass
-
-
 def _makeSampleVocabulary():
     from zope.interface import implementer
     from zope.schema.interfaces import IVocabulary
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 8 }
4.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[test]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///croot/attrs_1668696182826/work
certifi @ file:///croot/certifi_1671487769961/work/certifi
flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1671697413597/work
pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pytest==7.1.2
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
typing_extensions @ file:///croot/typing_extensions_1669924550328/work
zipp @ file:///croot/zipp_1672387121353/work
zope.event==5.0
zope.exceptions==5.1
zope.interface==6.4.post2
-e git+https://github.com/zopefoundation/zope.schema.git@a409cbf2f88a19f9d4fc1bb0b8510634044bcc8c#egg=zope.schema
zope.testing==5.0.1
zope.testrunner==6.5
name: zope.schema
channels:
  - defaults
  - https://repo.anaconda.com/pkgs/main
  - https://repo.anaconda.com/pkgs/r
  - conda-forge
dependencies:
  - _libgcc_mutex=0.1=main
  - _openmp_mutex=5.1=1_gnu
  - attrs=22.1.0=py37h06a4308_0
  - ca-certificates=2025.2.25=h06a4308_0
  - certifi=2022.12.7=py37h06a4308_0
  - flit-core=3.6.0=pyhd3eb1b0_0
  - importlib-metadata=4.11.3=py37h06a4308_0
  - importlib_metadata=4.11.3=hd3eb1b0_0
  - iniconfig=1.1.1=pyhd3eb1b0_0
  - ld_impl_linux-64=2.40=h12ee557_0
  - libffi=3.4.4=h6a678d5_1
  - libgcc-ng=11.2.0=h1234567_1
  - libgomp=11.2.0=h1234567_1
  - libstdcxx-ng=11.2.0=h1234567_1
  - ncurses=6.4=h6a678d5_0
  - openssl=1.1.1w=h7f8727e_0
  - packaging=22.0=py37h06a4308_0
  - pip=22.3.1=py37h06a4308_0
  - pluggy=1.0.0=py37h06a4308_1
  - py=1.11.0=pyhd3eb1b0_0
  - pytest=7.1.2=py37h06a4308_0
  - python=3.7.16=h7a1cb2a_0
  - readline=8.2=h5eee18b_0
  - setuptools=65.6.3=py37h06a4308_0
  - sqlite=3.45.3=h5eee18b_0
  - tk=8.6.14=h39e8969_0
  - tomli=2.0.1=py37h06a4308_0
  - typing_extensions=4.4.0=py37h06a4308_0
  - wheel=0.38.4=py37h06a4308_0
  - xz=5.6.4=h5eee18b_1
  - zipp=3.11.0=py37h06a4308_0
  - zlib=1.2.13=h5eee18b_1
  - pip:
    - zope-event==5.0
    - zope-exceptions==5.1
    - zope-interface==6.4.post2
    - zope-testing==5.0.1
    - zope-testrunner==6.5
prefix: /opt/conda/envs/zope.schema
[ "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_bound_field_of_collection_with_choice" ]
[ "src/zope/schema/tests/test__bootstrapfields.py::test_suite" ]
[ "src/zope/schema/tests/test__bootstrapfields.py::ValidatedPropertyTests::test___get__", "src/zope/schema/tests/test__bootstrapfields.py::ValidatedPropertyTests::test___set___not_missing_w_check", "src/zope/schema/tests/test__bootstrapfields.py::ValidatedPropertyTests::test___set___not_missing_wo_check", "src/zope/schema/tests/test__bootstrapfields.py::ValidatedPropertyTests::test___set___w_missing_wo_check", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test___get___w_defaultFactory_not_ICAF_no_check", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test___get___w_defaultFactory_w_ICAF_w_check", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test___get___wo_defaultFactory_hit", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test___get___wo_defaultFactory_miss", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test__get___wo_defaultFactory_in_dict", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_bind", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_order_madness", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_w_both_title_and_description", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_w_title_wo_description", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_wo_title_w_description", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_explicit_constraint_default", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_explicit_defaultFactory", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_explicit_defaultFactory_returning_missing_value", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_explicit_required_readonly_missingValue", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_get_hit", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_get_miss", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_query_hit", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_query_miss_no_default", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_query_miss_w_default", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_set_hit", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_set_readonly", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_constraint_fails", 
"src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_constraint_raises_StopValidation", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_missing_and_required", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_missing_not_required", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_wrong_type", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test__validate_collection_but_not_iterable", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test__validate_not_collection_but_iterable", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test__validate_not_collection_not_iterable", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test__validate_w_collections", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test__validate_collection_but_not_iterable", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test__validate_not_collection_but_iterable", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test__validate_not_collection_not_iterable", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test__validate_w_collections", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::OrderableTests::test_ctor_default_too_large", 
"src/zope/schema/tests/test__bootstrapfields.py::OrderableTests::test_ctor_default_too_small", "src/zope/schema/tests/test__bootstrapfields.py::OrderableTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::MinMaxLenTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::MinMaxLenTests::test_validate_too_long", "src/zope/schema/tests/test__bootstrapfields.py::MinMaxLenTests::test_validate_too_short", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_fromUnicode_hit", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_fromUnicode_miss", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_validate_w_invalid_default", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_validate_wrong_types", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_constraint", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_validate_wrong_types", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_class_conforms_to_iface", 
"src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_constraint", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_set_normal", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_set_unchanged", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_validate_unchanged_already_set", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_validate_unchanged_not_already_set", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test__validate_w_int", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_fromUnicode_hit", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_fromUnicode_miss", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_set_w_int", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test___eq___different_type", 
"src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_ctor_real_min_max", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_fromUnicode_hit", 
"src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_fromUnicode_miss", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_validate_max", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_validate_min", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_validate_min_and_max", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_fromUnicode_hit", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_fromUnicode_miss", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_max", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_min", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_min_and_max", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test__validate_w_empty_schema", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test__validate_w_value_not_providing_schema", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test__validate_w_value_providing_schema", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test__validate_w_value_providing_schema_but_invalid_fields", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test__validate_w_value_providing_schema_but_missing_fields", 
"src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_class_conforms_to_IObject", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_ctor_w_bad_schema", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_instance_conforms_to_IObject", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_set_allows_IBOAE_subscr_to_replace_value", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_set_emits_IBOAE", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_validate_w_cycles", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_validate_w_cycles_collection_not_valid", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_validate_w_cycles_object_not_valid", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_validates_invariants_by_default", "src/zope/schema/tests/test__field.py::BytesTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::BytesTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::BytesTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::BytesTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::BytesTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::BytesTests::test_fromUnicode_hit", "src/zope/schema/tests/test__field.py::BytesTests::test_fromUnicode_miss", "src/zope/schema/tests/test__field.py::BytesTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::BytesTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::BytesTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::BytesTests::test_is_hashable", "src/zope/schema/tests/test__field.py::BytesTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::BytesTests::test_validate_required", "src/zope/schema/tests/test__field.py::BytesTests::test_validate_w_invalid_default", "src/zope/schema/tests/test__field.py::BytesTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::ASCIITests::test___eq___different_type", "src/zope/schema/tests/test__field.py::ASCIITests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::ASCIITests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::ASCIITests::test__validate_empty", "src/zope/schema/tests/test__field.py::ASCIITests::test__validate_non_empty_hit", "src/zope/schema/tests/test__field.py::ASCIITests::test__validate_non_empty_miss", "src/zope/schema/tests/test__field.py::ASCIITests::test_class_conforms_to_iface", 
"src/zope/schema/tests/test__field.py::ASCIITests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::ASCIITests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::ASCIITests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::ASCIITests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::ASCIITests::test_is_hashable", "src/zope/schema/tests/test__field.py::ASCIITests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::BytesLineTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::BytesLineTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::BytesLineTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::BytesLineTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::BytesLineTests::test_constraint", "src/zope/schema/tests/test__field.py::BytesLineTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::BytesLineTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::BytesLineTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::BytesLineTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::BytesLineTests::test_is_hashable", "src/zope/schema/tests/test__field.py::BytesLineTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::BytesLineTests::test_validate_required", "src/zope/schema/tests/test__field.py::BytesLineTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::ASCIILineTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::ASCIILineTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::ASCIILineTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_constraint", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_is_hashable", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_validate_required", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::FloatTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::FloatTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::FloatTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::FloatTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::FloatTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::FloatTests::test_fromUnicode_hit", "src/zope/schema/tests/test__field.py::FloatTests::test_fromUnicode_miss", "src/zope/schema/tests/test__field.py::FloatTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::FloatTests::test_instance_conforms_to_iface", 
"src/zope/schema/tests/test__field.py::FloatTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::FloatTests::test_is_hashable", "src/zope/schema/tests/test__field.py::FloatTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::FloatTests::test_validate_max", "src/zope/schema/tests/test__field.py::FloatTests::test_validate_min", "src/zope/schema/tests/test__field.py::FloatTests::test_validate_min_and_max", "src/zope/schema/tests/test__field.py::FloatTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::FloatTests::test_validate_required", "src/zope/schema/tests/test__field.py::DecimalTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::DecimalTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::DecimalTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::DecimalTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::DecimalTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::DecimalTests::test_fromUnicode_hit", "src/zope/schema/tests/test__field.py::DecimalTests::test_fromUnicode_miss", "src/zope/schema/tests/test__field.py::DecimalTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::DecimalTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::DecimalTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::DecimalTests::test_is_hashable", "src/zope/schema/tests/test__field.py::DecimalTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::DecimalTests::test_validate_max", "src/zope/schema/tests/test__field.py::DecimalTests::test_validate_min", "src/zope/schema/tests/test__field.py::DecimalTests::test_validate_min_and_max", "src/zope/schema/tests/test__field.py::DecimalTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::DecimalTests::test_validate_required", "src/zope/schema/tests/test__field.py::DatetimeTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::DatetimeTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::DatetimeTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::DatetimeTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::DatetimeTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::DatetimeTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::DatetimeTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::DatetimeTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::DatetimeTests::test_is_hashable", "src/zope/schema/tests/test__field.py::DatetimeTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_required", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_w_max", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_w_min", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_w_min_and_max", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::DateTests::test___eq___different_type", 
"src/zope/schema/tests/test__field.py::DateTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::DateTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::DateTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::DateTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::DateTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::DateTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::DateTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::DateTests::test_is_hashable", "src/zope/schema/tests/test__field.py::DateTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::DateTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::DateTests::test_validate_required", "src/zope/schema/tests/test__field.py::DateTests::test_validate_w_max", "src/zope/schema/tests/test__field.py::DateTests::test_validate_w_min", "src/zope/schema/tests/test__field.py::DateTests::test_validate_w_min_and_max", "src/zope/schema/tests/test__field.py::DateTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::TimedeltaTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::TimedeltaTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::TimedeltaTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_is_hashable", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_validate_max", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_validate_min", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_validate_min_and_max", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_validate_required", "src/zope/schema/tests/test__field.py::TimeTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::TimeTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::TimeTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::TimeTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::TimeTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::TimeTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::TimeTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::TimeTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::TimeTests::test_is_hashable", "src/zope/schema/tests/test__field.py::TimeTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::TimeTests::test_validate_max", "src/zope/schema/tests/test__field.py::TimeTests::test_validate_min", 
"src/zope/schema/tests/test__field.py::TimeTests::test_validate_min_and_max", "src/zope/schema/tests/test__field.py::TimeTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::TimeTests::test_validate_required", "src/zope/schema/tests/test__field.py::ChoiceTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::ChoiceTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::ChoiceTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_int", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_mixed", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_source_is_ICSB_bound", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_source_is_ICSB_unbound", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_string", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_tuple", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_w_named_vocabulary", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_w_named_vocabulary_invalid", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_w_named_vocabulary_passes_context", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_w_named_vocabulary_raises_LookupError", "src/zope/schema/tests/test__field.py::ChoiceTests::test_bind_w_preconstructed_vocabulary", "src/zope/schema/tests/test__field.py::ChoiceTests::test_bind_w_voc_is_ICSB", "src/zope/schema/tests/test__field.py::ChoiceTests::test_bind_w_voc_is_ICSB_but_not_ISource", "src/zope/schema/tests/test__field.py::ChoiceTests::test_bind_w_voc_not_ICSB", "src/zope/schema/tests/test__field.py::ChoiceTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_both_vocabulary_and_source", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_both_vocabulary_and_values", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_invalid_source", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_invalid_vocabulary", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_w_named_vocabulary", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_w_preconstructed_vocabulary", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_w_unicode_non_ascii_values", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_w_values", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_wo_values_vocabulary_or_source", "src/zope/schema/tests/test__field.py::ChoiceTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::ChoiceTests::test_fromUnicode_hit", "src/zope/schema/tests/test__field.py::ChoiceTests::test_fromUnicode_miss", "src/zope/schema/tests/test__field.py::ChoiceTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::ChoiceTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::ChoiceTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::ChoiceTests::test_is_hashable", "src/zope/schema/tests/test__field.py::URITests::test___eq___different_type", "src/zope/schema/tests/test__field.py::URITests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::URITests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::URITests::test_class_conforms_to_iface", 
"src/zope/schema/tests/test__field.py::URITests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::URITests::test_fromUnicode_invalid", "src/zope/schema/tests/test__field.py::URITests::test_fromUnicode_ok", "src/zope/schema/tests/test__field.py::URITests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::URITests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::URITests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::URITests::test_is_hashable", "src/zope/schema/tests/test__field.py::URITests::test_validate_not_a_uri", "src/zope/schema/tests/test__field.py::URITests::test_validate_not_required", "src/zope/schema/tests/test__field.py::URITests::test_validate_required", "src/zope/schema/tests/test__field.py::URITests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::DottedNameTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::DottedNameTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::DottedNameTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::DottedNameTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::DottedNameTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::DottedNameTests::test_ctor_max_dots_invalid", "src/zope/schema/tests/test__field.py::DottedNameTests::test_ctor_max_dots_valid", "src/zope/schema/tests/test__field.py::DottedNameTests::test_ctor_min_dots_invalid", "src/zope/schema/tests/test__field.py::DottedNameTests::test_ctor_min_dots_valid", "src/zope/schema/tests/test__field.py::DottedNameTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::DottedNameTests::test_fromUnicode_dotted_name_ok", "src/zope/schema/tests/test__field.py::DottedNameTests::test_fromUnicode_invalid", "src/zope/schema/tests/test__field.py::DottedNameTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::DottedNameTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::DottedNameTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::DottedNameTests::test_is_hashable", "src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_not_a_dotted_name", "src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_required", "src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_w_max_dots", "src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_w_min_dots", "src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::IdTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::IdTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::IdTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::IdTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::IdTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::IdTests::test_fromUnicode_dotted_name_ok", "src/zope/schema/tests/test__field.py::IdTests::test_fromUnicode_invalid", "src/zope/schema/tests/test__field.py::IdTests::test_fromUnicode_url_ok", "src/zope/schema/tests/test__field.py::IdTests::test_hash_across_unequal_instances", 
"src/zope/schema/tests/test__field.py::IdTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::IdTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::IdTests::test_is_hashable", "src/zope/schema/tests/test__field.py::IdTests::test_validate_not_a_uri", "src/zope/schema/tests/test__field.py::IdTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::IdTests::test_validate_required", "src/zope/schema/tests/test__field.py::IdTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_is_hashable", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_validate_required", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::CollectionTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::CollectionTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::CollectionTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::CollectionTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::CollectionTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::CollectionTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::CollectionTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::CollectionTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::CollectionTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::CollectionTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::CollectionTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::CollectionTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::CollectionTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::CollectionTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::CollectionTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::CollectionTests::test_is_hashable", "src/zope/schema/tests/test__field.py::CollectionTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::CollectionTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::CollectionTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::CollectionTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::CollectionTests::test_validate_not_required", 
"src/zope/schema/tests/test__field.py::CollectionTests::test_validate_required", "src/zope/schema/tests/test__field.py::SequenceTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::SequenceTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::SequenceTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::SequenceTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::SequenceTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::SequenceTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::SequenceTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::SequenceTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::SequenceTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::SequenceTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::SequenceTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::SequenceTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::SequenceTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::SequenceTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::SequenceTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::SequenceTests::test_is_hashable", "src/zope/schema/tests/test__field.py::SequenceTests::test_mutable_sequence", "src/zope/schema/tests/test__field.py::SequenceTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::SequenceTests::test_sequence", "src/zope/schema/tests/test__field.py::SequenceTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::SequenceTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::SequenceTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::SequenceTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::SequenceTests::test_validate_required", "src/zope/schema/tests/test__field.py::SequenceTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::TupleTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::TupleTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::TupleTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::TupleTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::TupleTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::TupleTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::TupleTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::TupleTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::TupleTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::TupleTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::TupleTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::TupleTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::TupleTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::TupleTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::TupleTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::TupleTests::test_is_hashable", 
"src/zope/schema/tests/test__field.py::TupleTests::test_mutable_sequence", "src/zope/schema/tests/test__field.py::TupleTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::TupleTests::test_sequence", "src/zope/schema/tests/test__field.py::TupleTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::TupleTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::TupleTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::TupleTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::TupleTests::test_validate_required", "src/zope/schema/tests/test__field.py::TupleTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_is_hashable", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_mutable_sequence", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_sequence", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_required", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::ListTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::ListTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::ListTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::ListTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::ListTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::ListTests::test_bind_w_value_Type", 
"src/zope/schema/tests/test__field.py::ListTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::ListTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::ListTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::ListTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::ListTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::ListTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::ListTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::ListTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::ListTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::ListTests::test_is_hashable", "src/zope/schema/tests/test__field.py::ListTests::test_mutable_sequence", "src/zope/schema/tests/test__field.py::ListTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::ListTests::test_sequence", "src/zope/schema/tests/test__field.py::ListTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::ListTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::ListTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::ListTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::ListTests::test_validate_required", "src/zope/schema/tests/test__field.py::ListTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::SetTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::SetTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::SetTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::SetTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::SetTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::SetTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::SetTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::SetTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::SetTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::SetTests::test_ctor_disallows_unique", "src/zope/schema/tests/test__field.py::SetTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::SetTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::SetTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::SetTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::SetTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::SetTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::SetTests::test_is_hashable", "src/zope/schema/tests/test__field.py::SetTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::SetTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::SetTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::SetTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::SetTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::SetTests::test_validate_required", "src/zope/schema/tests/test__field.py::SetTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::FrozenSetTests::test___eq___different_type", 
"src/zope/schema/tests/test__field.py::FrozenSetTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::FrozenSetTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::FrozenSetTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::FrozenSetTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_ctor_disallows_unique", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_is_hashable", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_required", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::MappingTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::MappingTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::MappingTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::MappingTests::test_bind_binds_key_and_value_types", "src/zope/schema/tests/test__field.py::MappingTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::MappingTests::test_ctor_key_type_not_IField", "src/zope/schema/tests/test__field.py::MappingTests::test_ctor_value_type_not_IField", "src/zope/schema/tests/test__field.py::MappingTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::MappingTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::MappingTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::MappingTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::MappingTests::test_is_hashable", "src/zope/schema/tests/test__field.py::MappingTests::test_mapping", "src/zope/schema/tests/test__field.py::MappingTests::test_mutable_mapping", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_invalid_key_type", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_invalid_value_type", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_min_length", 
"src/zope/schema/tests/test__field.py::MappingTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_required", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::MutableMappingTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::MutableMappingTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::MutableMappingTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_bind_binds_key_and_value_types", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_ctor_key_type_not_IField", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_ctor_value_type_not_IField", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_is_hashable", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_mapping", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_mutable_mapping", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_invalid_key_type", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_invalid_value_type", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_required", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::DictTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::DictTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::DictTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::DictTests::test_bind_binds_key_and_value_types", "src/zope/schema/tests/test__field.py::DictTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::DictTests::test_ctor_key_type_not_IField", "src/zope/schema/tests/test__field.py::DictTests::test_ctor_value_type_not_IField", "src/zope/schema/tests/test__field.py::DictTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::DictTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::DictTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::DictTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::DictTests::test_is_hashable", "src/zope/schema/tests/test__field.py::DictTests::test_mapping", "src/zope/schema/tests/test__field.py::DictTests::test_mutable_mapping", 
"src/zope/schema/tests/test__field.py::DictTests::test_validate_invalid_key_type", "src/zope/schema/tests/test__field.py::DictTests::test_validate_invalid_value_type", "src/zope/schema/tests/test__field.py::DictTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::DictTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::DictTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::DictTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::DictTests::test_validate_required", "src/zope/schema/tests/test__field.py::DictTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::test_suite" ]
[]
Zope Public License 2.1
3,026
[ "src/zope/schema/_field.py", "src/zope/schema/_bootstrapfields.py", "src/zope/schema/_bootstrapinterfaces.py", "docs/validation.rst", "CHANGES.rst", "docs/api.rst", "src/zope/schema/interfaces.py", "src/zope/schema/_schema.py" ]
[ "src/zope/schema/_field.py", "src/zope/schema/_bootstrapfields.py", "src/zope/schema/_bootstrapinterfaces.py", "docs/validation.rst", "CHANGES.rst", "docs/api.rst", "src/zope/schema/interfaces.py", "src/zope/schema/_schema.py" ]
bbc__nmos-common-55
b35788d29bdcfb4b3a9cfbb3f34360641b3547b2
2018-09-05 15:36:53
b35788d29bdcfb4b3a9cfbb3f34360641b3547b2
diff --git a/CHANGELOG.md b/CHANGELOG.md index a7aaaaf..e7a86bc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ # NMOS Common Library Changelog +## 0.6.8 +- Resolve issue where interactions of aggregator.py with Registration API + failed to set Content-Type + ## 0.6.7 - Updated stdeb.cfg to include dependencies on mediajson and mediatimestamp diff --git a/nmoscommon/aggregator.py b/nmoscommon/aggregator.py index 249666e..359e24d 100644 --- a/nmoscommon/aggregator.py +++ b/nmoscommon/aggregator.py @@ -290,8 +290,10 @@ class Aggregator(object): if self.aggregator == "": self.aggregator = self.mdnsbridge.getHref(REGISTRATION_MDNSTYPE) + headers = None if data is not None: data = json.dumps(data) + headers = {"Content-Type": "application/json"} url = AGGREGATOR_APINAMESPACE + "/" + AGGREGATOR_APINAME + "/" + AGGREGATOR_APIVERSION + url for i in range(0, 3): @@ -308,9 +310,9 @@ class Aggregator(object): # majority of the time... try: if nmoscommonconfig.config.get('prefer_ipv6',False) == False: - R = requests.request(method, urljoin(self.aggregator, url), data=data, timeout=1.0) + R = requests.request(method, urljoin(self.aggregator, url), data=data, timeout=1.0, headers=headers) else: - R = requests.request(method, urljoin(self.aggregator, url), data=data, timeout=1.0, proxies={'http':''}) + R = requests.request(method, urljoin(self.aggregator, url), data=data, timeout=1.0, headers=headers, proxies={'http':''}) if R is None: # Try another aggregator self.logger.writeWarning("No response from aggregator {}".format(self.aggregator)) diff --git a/setup.py b/setup.py index da2426d..58cb34a 100644 --- a/setup.py +++ b/setup.py @@ -146,7 +146,7 @@ deps_required = [ setup(name="nmoscommon", - version="0.6.7", + version="0.6.8", description="Common components for the BBC's NMOS implementations", url='https://github.com/bbc/nmos-common', author='Peter Brightwell',
Aggregator.py fails to set Content-Type header in interactions with Registration API

Noted by Tektronix. This is a breach of the spec, but our registry isn't strictly checking this as it doesn't expect to receive anything other than JSON.
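A minimal sketch (not taken from the repository) of the behaviour the patch above introduces: only attach a JSON `Content-Type` header when a request body is actually sent. The URL and payload here are placeholders for illustration only.

```python
# Hypothetical illustration of the fix; URL and payload are made up.
import json
import requests

def send(method, url, data=None):
    headers = None
    if data is not None:
        data = json.dumps(data)
        headers = {"Content-Type": "application/json"}  # required by the spec for JSON bodies
    return requests.request(method, url, data=data, headers=headers, timeout=1.0)

# Example (placeholder endpoint, not executed here):
# send("POST", "http://registry.example/x-nmos/registration/v1.2/resource", data={"type": "node"})
```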
bbc/nmos-common
diff --git a/tests/test_aggregator.py b/tests/test_aggregator.py index c0e4188..035b138 100644 --- a/tests/test_aggregator.py +++ b/tests/test_aggregator.py @@ -159,7 +159,7 @@ class TestMDNSUpdater(unittest.TestCase): UUT.mdns.update.assert_not_called() def test_update_mdns(self): - """A call to MDNSUpdater.update_mdns when P2P is enabled ought to call mdns.update to increment version numbers for devices. Device + """A call to MDNSUpdater.update_mdns when P2P is enabled ought to call mdns.update to increment version numbers for devices. Device version numbers should be 8-bit integers which roll over to 0 when incremented beyond the limits of 1 byte.""" mappings = {"device": "ver_dvc", "flow": "ver_flw", "source": "ver_src", "sender":"ver_snd", "receiver":"ver_rcv", "self":"ver_slf"} mdnstype = "_nmos-node._tcp" @@ -211,7 +211,7 @@ class TestAggregator(unittest.TestCase): # self.mocks['nmoscommon.aggregator.Logger'].return_value.writeDebug.side_effect = printmsg("DEBUG") # self.mocks['nmoscommon.aggregator.Logger'].return_value.writeError.side_effect = printmsg("ERROR") # self.mocks['nmoscommon.aggregator.Logger'].return_value.writeFatal.side_effect = printmsg("FATAL") - + def test_init(self): """Test a call to Aggregator()""" self.mocks['gevent.spawn'].side_effect = lambda f : mock.MagicMock(thread_function=f) @@ -299,7 +299,7 @@ class TestAggregator(unittest.TestCase): def test_heartbeat_registers(self): """The heartbeat thread should trigger a registration of the node if the node is not yet registered when it is run.""" a = Aggregator(mdns_updater=mock.MagicMock()) - a._registered["registered"] = False + a._registered["registered"] = False def killloop(*args, **kwargs): a._running = False @@ -879,7 +879,7 @@ class TestAggregator(unittest.TestCase): SEND_ITERATION_2 = 6 SEND_TOO_MANY_RETRIES = 7 - def assert_send_runs_correctly(self, method, url, data=None, to_point=SEND_ITERATION_0, initial_aggregator="", aggregator_urls=["http://example0.com/aggregator/", "http://example1.com/aggregator/", "http://example2.com/aggregator/"], request=None, expected_return=None, expected_exception=None, prefer_ipv6=False): + def assert_send_runs_correctly(self, method, url, data=None, headers=None, to_point=SEND_ITERATION_0, initial_aggregator="", aggregator_urls=["http://example0.com/aggregator/", "http://example1.com/aggregator/", "http://example2.com/aggregator/"], request=None, expected_return=None, expected_exception=None, prefer_ipv6=False): """This method checks that the SEND routine runs through its state machine as expected: The states are: @@ -921,23 +921,23 @@ class TestAggregator(unittest.TestCase): expected_request_calls = [] if to_point >= self.SEND_ITERATION_0: if not prefer_ipv6: - expected_request_calls.append(mock.call(method, urljoin(aggregator_urls[0], AGGREGATOR_APINAMESPACE + "/" + AGGREGATOR_APINAME + "/" + AGGREGATOR_APIVERSION + url), data=expected_data, timeout=1.0)) + expected_request_calls.append(mock.call(method, urljoin(aggregator_urls[0], AGGREGATOR_APINAMESPACE + "/" + AGGREGATOR_APINAME + "/" + AGGREGATOR_APIVERSION + url), data=expected_data, headers=headers, timeout=1.0)) else: - expected_request_calls.append(mock.call(method, urljoin(aggregator_urls[0], AGGREGATOR_APINAMESPACE + "/" + AGGREGATOR_APINAME + "/" + AGGREGATOR_APIVERSION + url), data=expected_data, timeout=1.0, proxies={'http':''})) + expected_request_calls.append(mock.call(method, urljoin(aggregator_urls[0], AGGREGATOR_APINAMESPACE + "/" + AGGREGATOR_APINAME + "/" + AGGREGATOR_APIVERSION + url), 
data=expected_data, headers=headers, timeout=1.0, proxies={'http':''})) if to_point > self.SEND_ITERATION_0: expected_gethref_calls.append(mock.call(REGISTRATION_MDNSTYPE)) if to_point >= self.SEND_ITERATION_1: if not prefer_ipv6: - expected_request_calls.append(mock.call(method, urljoin(aggregator_urls[1], AGGREGATOR_APINAMESPACE + "/" + AGGREGATOR_APINAME + "/" + AGGREGATOR_APIVERSION + url), data=expected_data, timeout=1.0)) + expected_request_calls.append(mock.call(method, urljoin(aggregator_urls[1], AGGREGATOR_APINAMESPACE + "/" + AGGREGATOR_APINAME + "/" + AGGREGATOR_APIVERSION + url), data=expected_data, headers=headers, timeout=1.0)) else: - expected_request_calls.append(mock.call(method, urljoin(aggregator_urls[1], AGGREGATOR_APINAMESPACE + "/" + AGGREGATOR_APINAME + "/" + AGGREGATOR_APIVERSION + url), data=expected_data, timeout=1.0, proxies={'http':''})) + expected_request_calls.append(mock.call(method, urljoin(aggregator_urls[1], AGGREGATOR_APINAMESPACE + "/" + AGGREGATOR_APINAME + "/" + AGGREGATOR_APIVERSION + url), data=expected_data, headers=headers, timeout=1.0, proxies={'http':''})) if to_point > self.SEND_ITERATION_1: expected_gethref_calls.append(mock.call(REGISTRATION_MDNSTYPE)) if to_point >= self.SEND_ITERATION_2: if not prefer_ipv6: - expected_request_calls.append(mock.call(method, urljoin(aggregator_urls[2], AGGREGATOR_APINAMESPACE + "/" + AGGREGATOR_APINAME + "/" + AGGREGATOR_APIVERSION + url), data=expected_data, timeout=1.0)) + expected_request_calls.append(mock.call(method, urljoin(aggregator_urls[2], AGGREGATOR_APINAMESPACE + "/" + AGGREGATOR_APINAME + "/" + AGGREGATOR_APIVERSION + url), data=expected_data, headers=headers, timeout=1.0)) else: - expected_request_calls.append(mock.call(method, urljoin(aggregator_urls[2], AGGREGATOR_APINAMESPACE + "/" + AGGREGATOR_APINAME + "/" + AGGREGATOR_APIVERSION + url), data=expected_data, timeout=1.0, proxies={'http':''})) + expected_request_calls.append(mock.call(method, urljoin(aggregator_urls[2], AGGREGATOR_APINAMESPACE + "/" + AGGREGATOR_APINAME + "/" + AGGREGATOR_APIVERSION + url), data=expected_data, headers=headers, timeout=1.0, proxies={'http':''})) if to_point > self.SEND_ITERATION_2: expected_gethref_calls.append(mock.call(REGISTRATION_MDNSTYPE)) @@ -984,7 +984,7 @@ class TestAggregator(unittest.TestCase): "dummy2" : [ "dummy3", "dummy4" ] } def request(*args, **kwargs): return mock.MagicMock(status_code = 204) - self.assert_send_runs_correctly("PUT", "/dummy/url", data=data, to_point=self.SEND_ITERATION_0, request=request, expected_return=None) + self.assert_send_runs_correctly("PUT", "/dummy/url", data=data, headers={"Content-Type": "application/json"}, to_point=self.SEND_ITERATION_0, request=request, expected_return=None) def test_send_get_which_returns_200_returns_content(self): """If the first attempt at sending gives a 200 success then the SEND method will return normally with a body."""
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 3 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "coverage", "mock", "nose2", "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==25.3.0 blinker==1.9.0 cachelib==0.13.0 certifi==2025.1.31 charset-normalizer==3.4.1 click==8.1.8 coverage==7.8.0 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work Flask==3.1.0 Flask-OAuthlib==0.9.6 Flask-Sockets==0.2.1 gevent==24.11.1 gevent-websocket==0.10.1 greenlet==3.1.1 idna==3.10 importlib_metadata==8.6.1 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work itsdangerous==2.2.0 Jinja2==3.1.6 jsonschema==4.23.0 jsonschema-specifications==2024.10.1 MarkupSafe==3.0.2 mediajson==2.0.3 mediatimestamp==2.5.0 mock==5.2.0 netifaces==0.11.0 -e git+https://github.com/bbc/nmos-common.git@b35788d29bdcfb4b3a9cfbb3f34360641b3547b2#egg=nmoscommon nose2==0.15.1 oauthlib==2.1.0 packaging @ file:///croot/packaging_1734472117206/work pluggy @ file:///croot/pluggy_1733169602837/work Pygments==2.19.1 pytest @ file:///croot/pytest_1738938843180/work python-dateutil==2.9.0.post0 pyzmq==26.3.0 referencing==0.36.2 requests==2.32.3 requests-oauthlib==1.1.0 rpds-py==0.24.0 six==1.17.0 socketIO-client==0.7.2 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work typing_extensions==4.13.0 ujson==5.10.0 urllib3==2.3.0 websocket-client==1.8.0 Werkzeug==3.1.3 ws4py==0.6.0 wsaccel==0.6.7 zipp==3.21.0 zope.event==5.0 zope.interface==7.2
name: nmos-common channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==25.3.0 - blinker==1.9.0 - cachelib==0.13.0 - certifi==2025.1.31 - charset-normalizer==3.4.1 - click==8.1.8 - coverage==7.8.0 - flask==3.1.0 - flask-oauthlib==0.9.6 - flask-sockets==0.2.1 - gevent==24.11.1 - gevent-websocket==0.10.1 - greenlet==3.1.1 - idna==3.10 - importlib-metadata==8.6.1 - itsdangerous==2.2.0 - jinja2==3.1.6 - jsonschema==4.23.0 - jsonschema-specifications==2024.10.1 - markupsafe==3.0.2 - mediajson==2.0.3 - mediatimestamp==2.5.0 - mock==5.2.0 - netifaces==0.11.0 - nose2==0.15.1 - oauthlib==2.1.0 - pygments==2.19.1 - python-dateutil==2.9.0.post0 - pyzmq==26.3.0 - referencing==0.36.2 - requests==2.32.3 - requests-oauthlib==1.1.0 - rpds-py==0.24.0 - six==1.17.0 - socketio-client==0.7.2 - typing-extensions==4.13.0 - ujson==5.10.0 - urllib3==2.3.0 - websocket-client==1.8.0 - werkzeug==3.1.3 - ws4py==0.6.0 - wsaccel==0.6.7 - zipp==3.21.0 - zope-event==5.0 - zope-interface==7.2 prefix: /opt/conda/envs/nmos-common
[ "tests/test_aggregator.py::TestAggregator::test_send_get_which_fails_on_three_aggregators_raises", "tests/test_aggregator.py::TestAggregator::test_send_get_which_fails_then_returns_200_and_json_returns_content", "tests/test_aggregator.py::TestAggregator::test_send_get_which_fails_then_returns_200_returns_content", "tests/test_aggregator.py::TestAggregator::test_send_get_which_fails_then_returns_201_returns_content", "tests/test_aggregator.py::TestAggregator::test_send_get_which_fails_then_returns_204_returns_nothing", "tests/test_aggregator.py::TestAggregator::test_send_get_which_fails_then_returns_400_raises_exception", "tests/test_aggregator.py::TestAggregator::test_send_get_which_fails_twice_then_returns_200_and_json_returns_content", "tests/test_aggregator.py::TestAggregator::test_send_get_which_fails_twice_then_returns_200_returns_content", "tests/test_aggregator.py::TestAggregator::test_send_get_which_fails_twice_then_returns_201_returns_content", "tests/test_aggregator.py::TestAggregator::test_send_get_which_fails_twice_then_returns_204_returns_nothing", "tests/test_aggregator.py::TestAggregator::test_send_get_which_fails_twice_then_returns_400_raises_exception", "tests/test_aggregator.py::TestAggregator::test_send_get_which_fails_with_only_one_aggregator_fails_at_second_checkpoint", "tests/test_aggregator.py::TestAggregator::test_send_get_which_fails_with_only_two_aggregators_fails_at_third_checkpoint", "tests/test_aggregator.py::TestAggregator::test_send_get_which_raises_with_only_one_aggregator_fails_at_second_checkpoint", "tests/test_aggregator.py::TestAggregator::test_send_get_which_returns_200_and_json_returns_json", "tests/test_aggregator.py::TestAggregator::test_send_get_which_returns_200_returns_content", "tests/test_aggregator.py::TestAggregator::test_send_get_which_returns_201_returns_content", "tests/test_aggregator.py::TestAggregator::test_send_get_which_returns_204_returns_nothing", "tests/test_aggregator.py::TestAggregator::test_send_get_which_returns_400_raises_exception", "tests/test_aggregator.py::TestAggregator::test_send_get_which_returns_500_with_only_one_aggregator_fails_at_second_checkpoint", "tests/test_aggregator.py::TestAggregator::test_send_over_ipv6_get_which_returns_200_returns_content", "tests/test_aggregator.py::TestAggregator::test_send_put_which_returns_204_returns_nothing" ]
[]
[ "tests/test_aggregator.py::TestMDNSUpdater::test_P2P_disable_resets_enable_count", "tests/test_aggregator.py::TestMDNSUpdater::test_P2P_disable_when_enabled", "tests/test_aggregator.py::TestMDNSUpdater::test_inc_P2P_enable_count", "tests/test_aggregator.py::TestMDNSUpdater::test_init", "tests/test_aggregator.py::TestMDNSUpdater::test_p2p_enable", "tests/test_aggregator.py::TestMDNSUpdater::test_update_mdns", "tests/test_aggregator.py::TestMDNSUpdater::test_update_mdns_does_nothing_when_not_enabled", "tests/test_aggregator.py::TestAggregator::test_heartbeat_correctly", "tests/test_aggregator.py::TestAggregator::test_heartbeat_registers", "tests/test_aggregator.py::TestAggregator::test_heartbeat_unregisters_when_no_node", "tests/test_aggregator.py::TestAggregator::test_heartbeat_with_404_exception", "tests/test_aggregator.py::TestAggregator::test_heartbeat_with_500_exception", "tests/test_aggregator.py::TestAggregator::test_heartbeat_with_other_exception", "tests/test_aggregator.py::TestAggregator::test_init", "tests/test_aggregator.py::TestAggregator::test_process_queue_does_nothing_when_not_registered", "tests/test_aggregator.py::TestAggregator::test_process_queue_does_nothing_when_queue_empty", "tests/test_aggregator.py::TestAggregator::test_process_queue_handles_exception_in_unqueueing", "tests/test_aggregator.py::TestAggregator::test_process_queue_processes_queue_when_not_running", "tests/test_aggregator.py::TestAggregator::test_process_queue_processes_queue_when_running", "tests/test_aggregator.py::TestAggregator::test_process_queue_processes_queue_when_running_and_aborts_on_exception_in_general_register", "tests/test_aggregator.py::TestAggregator::test_process_queue_processes_queue_when_running_and_aborts_on_exception_in_general_unregister", "tests/test_aggregator.py::TestAggregator::test_process_queue_processes_queue_when_running_and_aborts_on_exception_in_node_register", "tests/test_aggregator.py::TestAggregator::test_process_queue_processes_queue_when_running_and_ignores_unknown_methods", "tests/test_aggregator.py::TestAggregator::test_process_reregister", "tests/test_aggregator.py::TestAggregator::test_process_reregister_bails_if_delete_throws_unknown_exception", "tests/test_aggregator.py::TestAggregator::test_process_reregister_bails_if_first_post_throws_unknown_exception", "tests/test_aggregator.py::TestAggregator::test_process_reregister_bails_if_node_not_registered", "tests/test_aggregator.py::TestAggregator::test_process_reregister_continues_when_delete_fails", "tests/test_aggregator.py::TestAggregator::test_process_reregister_handles_queue_exception", "tests/test_aggregator.py::TestAggregator::test_register", "tests/test_aggregator.py::TestAggregator::test_register_into", "tests/test_aggregator.py::TestAggregator::test_send_get_with_no_aggregators_fails_at_first_checkpoint", "tests/test_aggregator.py::TestAggregator::test_stop", "tests/test_aggregator.py::TestAggregator::test_unregister" ]
[]
Apache License 2.0
3,028
[ "setup.py", "nmoscommon/aggregator.py", "CHANGELOG.md" ]
[ "setup.py", "nmoscommon/aggregator.py", "CHANGELOG.md" ]
dask__dask-3955
92c8cba82e5e427f9acbdadcc6973a531466ae41
2018-09-05 17:06:45
df1cee3b55706443303b85563e7c01e26611603d
mrocklin: This looks good to me. One final thing we might want to do is add a regression test. We might verify that the `.name` attribute of different calls differs. Perhaps something like the following:

```python
assert da.argmin(x).name != da.argmin(x + 1).name
```

jakirkham: LGTM. Thanks @TAdeJong 😄
diff --git a/dask/array/creation.py b/dask/array/creation.py index e953c5055..1ae0e518d 100644 --- a/dask/array/creation.py +++ b/dask/array/creation.py @@ -943,7 +943,7 @@ def pad_udf(array, pad_width, mode, **kwargs): result = result.map_blocks( wrapped_pad_func, - token="pad", + name="pad", dtype=result.dtype, pad_func=mode, iaxis_pad_width=pad_width[d], diff --git a/dask/array/reductions.py b/dask/array/reductions.py index f9b004664..b4020821e 100644 --- a/dask/array/reductions.py +++ b/dask/array/reductions.py @@ -614,7 +614,8 @@ def arg_reduction(x, chunk, combine, agg, axis=None, split_every=None, out=None) "got '{0}'".format(axis)) # Map chunk across all blocks - name = 'arg-reduce-chunk-{0}'.format(tokenize(chunk, axis)) + name = 'arg-reduce-{0}'.format(tokenize(axis, x, chunk, + combine, split_every)) old = x.name keys = list(product(*map(range, x.numblocks))) offsets = list(product(*(accumulate(operator.add, bd[:-1], 0) @@ -714,7 +715,8 @@ def cumreduction(func, binop, ident, x, axis=None, dtype=None, out=None): m = x.map_blocks(func, axis=axis, dtype=dtype) - name = '%s-axis=%d-%s' % (func.__name__, axis, tokenize(x, dtype)) + name = '{0}-{1}'.format(func.__name__, tokenize(func, axis, binop, + ident, x, dtype)) n = x.numblocks[axis] full = slice(None, None, None) slc = (full,) * axis + (slice(-1, None),) + (full,) * (x.ndim - axis - 1)
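For context, `dask.base.tokenize` (used in the patch above to build reduction names) deterministically hashes its arguments, so folding the input collection into the token is what makes distinct inputs produce distinct graph names. A small illustrative check, not part of the patch:

```python
from dask.base import tokenize

# Same arguments give the same token; different arguments give different tokens.
assert tokenize(0, "argmin") == tokenize(0, "argmin")
assert tokenize(0, "argmin") != tokenize(1, "argmin")
print(tokenize(0, "argmin"))
```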
Stacking list of arg reductions produces duplicates of last list element

This is weird. As I often do, I create a list of _n_ dask.arrays in a for-loop for a number of cases, and stack these together into one big dask array. However, if I stack together `da.argmin` operations, the array contains _n_ copies of the last argmin in the list. This does not happen for regular `da.min` or `da.sum`, but it does happen for `da.argmax`, so I suspect an issue in the general arg_reduction. A reproduction example:

```python
import dask.array as da
import numpy as np
import matplotlib.pyplot as plt

Data = da.random.random((3,3,20,3), chunks=(3,1,-1,1))
minima = []
argminima = []
for index in range(Data.shape[-1]):
    minima.append(da.min(Data[...,index], axis=2))
    argminima.append(da.argmin(Data[...,index], axis=2))

fig, ax = plt.subplots(Data.shape[-1], 4, figsize=[9,6])
for index in range(Data.shape[-1]):
    ax[index,0].imshow(minima[index].compute())
    ax[index,1].imshow(argminima[index].compute())

minimb = da.stack(minima)
argminimb = da.stack(argminima)
for index in range(Data.shape[-1]):
    ax[index,2].imshow(minimb[index,:,:].compute())
    ax[index,3].imshow(argminimb[index,:,:].compute())

for i, text in enumerate(["min_before", "argmin_before", "min_after", "argmin_after"]):
    ax[0,i].set_title(text)
plt.show()
```

![image](https://user-images.githubusercontent.com/6816964/45033289-5c796080-b054-11e8-8362-53b6fa583cda.png)

Note how for `da.min` the data before and after stacking is the same, but different for each iteration, while for `da.argmin` after stacking all slices are the same.
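A much smaller way to observe the same problem, following the regression check suggested in the review comments above (a sketch only; the array shape mirrors the test patch and is otherwise arbitrary):

```python
import dask.array as da

x = da.ones((5, 2), chunks=(2, 2))
# Before the fix the argmin graph names ignored the input array, so
# stacked argmins all resolved to the same keys; after the fix the
# names differ and this assertion holds.
print(da.argmin(x).name)
print(da.argmin(x + 1).name)
assert da.argmin(x).name != da.argmin(x + 1).name
```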
dask/dask
diff --git a/dask/array/tests/test_reductions.py b/dask/array/tests/test_reductions.py index 42afbbd4d..bfe54bf5f 100644 --- a/dask/array/tests/test_reductions.py +++ b/dask/array/tests/test_reductions.py @@ -6,7 +6,7 @@ np = pytest.importorskip('numpy') import dask.array as da from dask.array.utils import assert_eq as _assert_eq, same_keys from dask.core import get_deps -from dask.context import set_options +import dask.config as config def assert_eq(a, b): @@ -139,7 +139,7 @@ def test_arg_reductions(dfunc, func): assert_eq(dfunc(a, 0), func(x, 0)) assert_eq(dfunc(a, 1), func(x, 1)) assert_eq(dfunc(a, 2), func(x, 2)) - with set_options(split_every=2): + with config.set(split_every=2): assert_eq(dfunc(a), func(x)) assert_eq(dfunc(a, 0), func(x, 0)) assert_eq(dfunc(a, 1), func(x, 1)) @@ -368,7 +368,7 @@ def test_tree_reduce_depth(): def test_tree_reduce_set_options(): x = da.from_array(np.arange(242).reshape((11, 22)), chunks=(3, 4)) - with set_options(split_every={0: 2, 1: 3}): + with config.set(split_every={0: 2, 1: 3}): assert_max_deps(x.sum(), 2 * 3) assert_max_deps(x.sum(axis=0), 2) @@ -487,3 +487,14 @@ def test_topk_argtopk3(): da.topk(a, 5, axis=1, split_every=2)) assert_eq(a.argtopk(5, axis=1, split_every=2), da.argtopk(a, 5, axis=1, split_every=2)) + + [email protected]('func', [da.cumsum, da.cumprod, + da.argmin, da.argmax, + da.min, da.max, + da.nansum, da.nanmax]) +def test_regres_3940(func): + a = da.ones((5,2), chunks=(2,2)) + assert func(a).name != func(a + 1).name + assert func(a, axis=0).name != func(a).name + assert func(a, axis=0).name != func(a, axis=1).name
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_media", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 1 }, "num_modified_files": 2 }
1.23
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[complete]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///croot/attrs_1668696182826/work certifi @ file:///croot/certifi_1671487769961/work/certifi click==8.1.8 cloudpickle==2.2.1 -e git+https://github.com/dask/dask.git@92c8cba82e5e427f9acbdadcc6973a531466ae41#egg=dask distributed==1.28.1 flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core HeapDict==1.0.1 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work locket==1.0.0 msgpack==1.0.5 numpy==1.21.6 packaging @ file:///croot/packaging_1671697413597/work pandas==1.3.5 partd==1.4.1 pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work psutil==7.0.0 py @ file:///opt/conda/conda-bld/py_1644396412707/work pytest==7.1.2 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==6.0.1 six==1.17.0 sortedcontainers==2.4.0 tblib==2.0.0 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work toolz==0.12.1 tornado==6.2 typing_extensions @ file:///croot/typing_extensions_1669924550328/work zict==2.2.0 zipp @ file:///croot/zipp_1672387121353/work
name: dask channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=22.1.0=py37h06a4308_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - flit-core=3.6.0=pyhd3eb1b0_0 - importlib-metadata=4.11.3=py37h06a4308_0 - importlib_metadata=4.11.3=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=22.0=py37h06a4308_0 - pip=22.3.1=py37h06a4308_0 - pluggy=1.0.0=py37h06a4308_1 - py=1.11.0=pyhd3eb1b0_0 - pytest=7.1.2=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py37h06a4308_0 - typing_extensions=4.4.0=py37h06a4308_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zipp=3.11.0=py37h06a4308_0 - zlib=1.2.13=h5eee18b_1 - pip: - click==8.1.8 - cloudpickle==2.2.1 - distributed==1.28.1 - heapdict==1.0.1 - locket==1.0.0 - msgpack==1.0.5 - numpy==1.21.6 - pandas==1.3.5 - partd==1.4.1 - psutil==7.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==6.0.1 - six==1.17.0 - sortedcontainers==2.4.0 - tblib==2.0.0 - toolz==0.12.1 - tornado==6.2 - zict==2.2.0 prefix: /opt/conda/envs/dask
[ "dask/array/tests/test_reductions.py::test_regres_3940[argmin]", "dask/array/tests/test_reductions.py::test_regres_3940[argmax]" ]
[ "dask/array/tests/test_reductions.py::test_nan_object[nansum]", "dask/array/tests/test_reductions.py::test_nan_object[sum]", "dask/array/tests/test_reductions.py::test_nan_object[nanmin]", "dask/array/tests/test_reductions.py::test_nan_object[min]", "dask/array/tests/test_reductions.py::test_nan_object[nanmax]", "dask/array/tests/test_reductions.py::test_nan_object[max]" ]
[ "dask/array/tests/test_reductions.py::test_reductions_1D[f4]", "dask/array/tests/test_reductions.py::test_reductions_1D[i4]", "dask/array/tests/test_reductions.py::test_reduction_errors", "dask/array/tests/test_reductions.py::test_arg_reductions[argmin-argmin]", "dask/array/tests/test_reductions.py::test_arg_reductions[argmax-argmax]", "dask/array/tests/test_reductions.py::test_arg_reductions[_nanargmin-nanargmin]", "dask/array/tests/test_reductions.py::test_arg_reductions[_nanargmax-nanargmax]", "dask/array/tests/test_reductions.py::test_nanarg_reductions[_nanargmin-nanargmin]", "dask/array/tests/test_reductions.py::test_nanarg_reductions[_nanargmax-nanargmax]", "dask/array/tests/test_reductions.py::test_reductions_2D_nans", "dask/array/tests/test_reductions.py::test_moment", "dask/array/tests/test_reductions.py::test_reductions_with_negative_axes", "dask/array/tests/test_reductions.py::test_nan", "dask/array/tests/test_reductions.py::test_0d_array", "dask/array/tests/test_reductions.py::test_reduction_on_scalar", "dask/array/tests/test_reductions.py::test_reductions_with_empty_array", "dask/array/tests/test_reductions.py::test_tree_reduce_depth", "dask/array/tests/test_reductions.py::test_tree_reduce_set_options", "dask/array/tests/test_reductions.py::test_reduction_names", "dask/array/tests/test_reductions.py::test_array_reduction_out[sum]", "dask/array/tests/test_reductions.py::test_array_reduction_out[argmax]", "dask/array/tests/test_reductions.py::test_array_cumreduction_axis[None-cumsum]", "dask/array/tests/test_reductions.py::test_array_cumreduction_axis[None-cumprod]", "dask/array/tests/test_reductions.py::test_array_cumreduction_axis[0-cumsum]", "dask/array/tests/test_reductions.py::test_array_cumreduction_axis[0-cumprod]", "dask/array/tests/test_reductions.py::test_array_cumreduction_axis[1-cumsum]", "dask/array/tests/test_reductions.py::test_array_cumreduction_axis[1-cumprod]", "dask/array/tests/test_reductions.py::test_array_cumreduction_axis[-1-cumsum]", "dask/array/tests/test_reductions.py::test_array_cumreduction_axis[-1-cumprod]", "dask/array/tests/test_reductions.py::test_array_cumreduction_out[cumsum]", "dask/array/tests/test_reductions.py::test_array_cumreduction_out[cumprod]", "dask/array/tests/test_reductions.py::test_topk_argtopk1[None-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk1[None-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk1[2-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk1[2-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk1[4-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk1[4-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk1[8-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk1[8-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[1-None-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[1-None-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[1-2-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[1-2-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[1-3-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[1-3-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[1-4-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[1-4-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[2-None-sort-topk]", 
"dask/array/tests/test_reductions.py::test_topk_argtopk2[2-None-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[2-2-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[2-2-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[2-3-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[2-3-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[2-4-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[2-4-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[3-None-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[3-None-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[3-2-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[3-2-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[3-3-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[3-3-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[3-4-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[3-4-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[4-None-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[4-None-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[4-2-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[4-2-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[4-3-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[4-3-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[4-4-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[4-4-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[5-None-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[5-None-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[5-2-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[5-2-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[5-3-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[5-3-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[5-4-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[5-4-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[10-None-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[10-None-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[10-2-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[10-2-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[10-3-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[10-3-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[10-4-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[10-4-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk3", "dask/array/tests/test_reductions.py::test_regres_3940[cumsum]", "dask/array/tests/test_reductions.py::test_regres_3940[cumprod]", "dask/array/tests/test_reductions.py::test_regres_3940[amin]", "dask/array/tests/test_reductions.py::test_regres_3940[amax]", "dask/array/tests/test_reductions.py::test_regres_3940[nansum]", "dask/array/tests/test_reductions.py::test_regres_3940[nanmax]" ]
[]
BSD 3-Clause "New" or "Revised" License
3,029
[ "dask/array/creation.py", "dask/array/reductions.py" ]
[ "dask/array/creation.py", "dask/array/reductions.py" ]
brian-rose__climlab-77
4c81da428b91881e394a67b9c3412881c37ed8fd
2018-09-05 19:07:14
9b4ab078a9ae22ba0b97e35408a8d7d4314ba8f8
diff --git a/climlab/__init__.py b/climlab/__init__.py index ab3f7fe..e531af9 100644 --- a/climlab/__init__.py +++ b/climlab/__init__.py @@ -7,7 +7,7 @@ Nevertheless also the underlying code of the ``climlab`` architecture has been documented for a comprehensive understanding and traceability. ''' -__version__ = '0.7.1.dev0' +__version__ = '0.7.1.dev1' # this should ensure that we can still import constants.py as climlab.constants from .utils import constants diff --git a/climlab/radiation/radiation.py b/climlab/radiation/radiation.py index 7b6dd33..f9a7134 100644 --- a/climlab/radiation/radiation.py +++ b/climlab/radiation/radiation.py @@ -16,13 +16,17 @@ State: Input arguments (both LW and SW): - ``specific_humidity`` (kg/kg) - - ``absorber_vmr`` (dict of volumetric mixing ratios) + - ``absorber_vmr = None`` (dictionary of volumetric mixing ratios. Default values supplied if ``None``) - ``cldfrac`` (layer cloud fraction - ``clwp`` (in-cloud liquid water path (g/m2)) - ``ciwp = 0.``, # in-cloud ice water path (g/m2) - ``r_liq = 0.``, # Cloud water drop effective radius (microns) - ``r_ice = 0.``, # Cloud ice particle effective size (microns) - - ``ozone_file = 'apeozone_cam3_5_54.nc'`` (file with ozone distribution) + - ``ozone_file = 'apeozone_cam3_5_54.nc'`` (file with ozone distribution -- + ignored if ``absorber_vmr`` is given) + +If ``absorber_vmr = None`` then ozone will be interpolated to the model grid +from a climatology file, or set to zero if ``ozone_file = None``. Additional input arguments for SW: @@ -128,34 +132,38 @@ def default_absorbers(Tatm, absorber_vmr['CFC22'] = 0. absorber_vmr['CCL4'] = 0. - datadir = os.path.join(os.path.dirname(__file__), 'data', 'ozone') - ozonefilepath = os.path.join(datadir, ozone_file) - # Open the ozone data file - if verbose: - print('Getting ozone data from', ozonefilepath) - ozonedata = nc.Dataset(ozonefilepath) - ozone_lev = ozonedata.variables['lev'][:] - ozone_lat = ozonedata.variables['lat'][:] - # zonal and time average - ozone_zon = np.mean(ozonedata.variables['OZONE'], axis=(0,3)) - ozone_global = np.average(ozone_zon, weights=np.cos(np.deg2rad(ozone_lat)), axis=1) - lev = Tatm.domain.axes['lev'].points - if Tatm.shape == lev.shape: - # 1D interpolation on pressure levels using global average data - f = interp1d(ozone_lev, ozone_global) - # interpolate data to model levels - absorber_vmr['O3'] = f(lev) - else: - # Attempt 2D interpolation in pressure and latitude - f2d = interp2d(ozone_lat, ozone_lev, ozone_zon) - try: - lat = Tatm.domain.axes['lat'].points + # Ozone: start with all zeros, interpolate to data if we can + absorber_vmr['O3'] = np.zeros_like(Tatm) + if ozone_file is not None: + datadir = os.path.join(os.path.dirname(__file__), 'data', 'ozone') + ozonefilepath = os.path.join(datadir, ozone_file) + # Open the ozone data file + if verbose: + print('Getting ozone data from', ozonefilepath) + ozonedata = nc.Dataset(ozonefilepath) + ozone_lev = ozonedata.variables['lev'][:] + ozone_lat = ozonedata.variables['lat'][:] + # zonal and time average + ozone_zon = np.mean(ozonedata.variables['OZONE'], axis=(0,3)) + ozone_global = np.average(ozone_zon, + weights=np.cos(np.deg2rad(ozone_lat)), axis=1) + lev = Tatm.domain.axes['lev'].points + if Tatm.shape == lev.shape: + # 1D interpolation on pressure levels using global average data + f = interp1d(ozone_lev, ozone_global) + # interpolate data to model levels + absorber_vmr['O3'] = f(lev) + else: + # Attempt 2D interpolation in pressure and latitude f2d = interp2d(ozone_lat, 
ozone_lev, ozone_zon) - absorber_vmr['O3'] = f2d(lat, lev).transpose() - except: - print('Interpolation of ozone data failed.') - print('Reverting to default O3.') - absorber_vmr['O3'] = np.zeros_like(Tatm) + try: + lat = Tatm.domain.axes['lat'].points + f2d = interp2d(ozone_lat, ozone_lev, ozone_zon) + absorber_vmr['O3'] = f2d(lat, lev).transpose() + except: + print('Interpolation of ozone data failed.') + print('Reverting to default O3.') + absorber_vmr['O3'] = np.zeros_like(Tatm) return absorber_vmr def init_interface(field): diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index 3302166..71f308f 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -1,5 +1,5 @@ {% set name = "climlab" %} -{% set version = "0.7.1.dev0" %} +{% set version = "0.7.1.dev1" %} package: name: {{ name|lower }} diff --git a/setup.py b/setup.py index 03f544d..a632935 100644 --- a/setup.py +++ b/setup.py @@ -1,7 +1,7 @@ import os, sys import textwrap -VERSION = '0.7.1.dev0' +VERSION = '0.7.1.dev1' # BEFORE importing setuptools, remove MANIFEST. Otherwise it may not be # properly updated when the contents of directories change (true for distutils,
Interpolation of ozone fails with surface pressures higher than 1056 hPa

Trying to create a radiation model for a massive atmosphere, following the method laid out in #39, creates a new error at model initialization time. First, here is the hack to create a domain with an altered surface pressure:

```
import numpy as np
import climlab

def make_column(num_lev=20, ps=const.ps, water_depth=5.):
    state = climlab.column_state(num_lev=num_lev, num_lat=1, water_depth=water_depth)
    lev = state.Tatm.domain.lev
    lev.bounds = np.linspace(0., ps, num_lev+1)
    lev.points = lev.bounds[:-1] + np.diff(lev.bounds)/2.
    lev.delta = np.abs(np.diff(lev.bounds))
    return state
```

Now I try to use this to create a radiation model with `climlab.radiation.RRTMG`:

```
state = make_column(num_lev=10, ps=1056.)
r = climlab.radiation.RRTMG(state=state)
```

This works fine. But any larger surface pressure throws an error:

```
state = make_column(num_lev=10, ps=1057.)
r = climlab.radiation.RRTMG(state=state)
```

produces

```
ValueError: A value in x_new is above the interpolation range.
```

This occurs because of code in `climlab.radiation.radiation.py` that attempts to initialize the ozone distribution by interpolation from a climatology file with a maximum surface pressure of 1056 hPa. It's possible to sidestep this problem by passing the `absorber_vmr` dictionary as input to `climlab.radiation.RRTMG`. But we need a more graceful handling of this case. Perhaps just assuming that ozone is zero at higher pressures, and alerting the user about this.
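The sidestep mentioned above can be sketched as follows, assuming the `ozone_file=None` option added by the patch (alternatively, an explicit `absorber_vmr` dictionary can be passed). This mirrors the regression test rather than being a definitive recipe:

```python
# Sketch of the workaround: skip the ozone climatology entirely so no
# interpolation against the 1056 hPa data file is attempted.
import numpy as np
import climlab

state = climlab.column_state(num_lev=10, num_lat=1, water_depth=5.)
# ... stretch state.Tatm.domain.lev to the desired surface pressure as above ...
rad = climlab.radiation.RRTMG(state=state, ozone_file=None)
assert np.all(rad.absorber_vmr['O3'] == 0.)  # ozone defaults to zero everywhere
```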
brian-rose/climlab
diff --git a/climlab/tests/test_rrtm.py b/climlab/tests/test_rrtm.py index 9613b19..e3737c9 100644 --- a/climlab/tests/test_rrtm.py +++ b/climlab/tests/test_rrtm.py @@ -150,3 +150,19 @@ def test_latitude(): grad = np.diff(model.Ts, axis=0) assert np.all(grad[0:(int(num_lat/2)-1)] > 0.) assert np.all(grad[int(num_lat/2):] < 0.) + [email protected] [email protected] +def test_no_ozone(): + '''When user gives None as the ozone_file, the model is initialized + with zero ozone. This should work on arbitrary grids.''' + ps = 1060. + num_lev=4000 + state = climlab.column_state(num_lev=num_lev, num_lat=1, water_depth=5.) + lev = state.Tatm.domain.lev + lev.bounds = np.linspace(0., ps, num_lev+1) + lev.points = lev.bounds[:-1] + np.diff(lev.bounds)/2. + lev.delta = np.abs(np.diff(lev.bounds)) + # Create a RRTM radiation model + rad = climlab.radiation.RRTMG(state=state, ozone_file=None) + assert np.all(rad.absorber_vmr['O3']==0.)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_issue_reference", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 4 }
0.7
{ "env_vars": null, "env_yml_path": [ "docs/environment.yml" ], "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": true, "packages": "environment.yml", "pip_packages": [ "pytest", "codecov" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster @ file:///home/conda/feedstock_root/build_artifacts/alabaster_1673645646525/work argon2-cffi @ file:///home/conda/feedstock_root/build_artifacts/argon2-cffi_1633990451307/work async_generator @ file:///home/conda/feedstock_root/build_artifacts/async_generator_1722652753231/work attrs @ file:///home/conda/feedstock_root/build_artifacts/attrs_1671632566681/work Babel @ file:///home/conda/feedstock_root/build_artifacts/babel_1667688356751/work backcall @ file:///home/conda/feedstock_root/build_artifacts/backcall_1592338393461/work backports.functools-lru-cache @ file:///home/conda/feedstock_root/build_artifacts/backports.functools_lru_cache_1702571698061/work bleach @ file:///home/conda/feedstock_root/build_artifacts/bleach_1696630167146/work brotlipy==0.7.0 certifi==2021.5.30 cffi @ file:///home/conda/feedstock_root/build_artifacts/cffi_1625835287197/work cftime @ file:///home/conda/feedstock_root/build_artifacts/cftime_1632539733990/work charset-normalizer @ file:///home/conda/feedstock_root/build_artifacts/charset-normalizer_1661170624537/work -e git+https://github.com/brian-rose/climlab.git@4c81da428b91881e394a67b9c3412881c37ed8fd#egg=climlab codecov==2.1.13 colorama @ file:///home/conda/feedstock_root/build_artifacts/colorama_1655412516417/work coverage==6.2 cryptography @ file:///home/conda/feedstock_root/build_artifacts/cryptography_1634230300355/work cycler @ file:///home/conda/feedstock_root/build_artifacts/cycler_1635519461629/work dataclasses @ file:///home/conda/feedstock_root/build_artifacts/dataclasses_1628958435052/work decorator @ file:///home/conda/feedstock_root/build_artifacts/decorator_1641555617451/work defusedxml @ file:///home/conda/feedstock_root/build_artifacts/defusedxml_1615232257335/work docutils @ file:///home/conda/feedstock_root/build_artifacts/docutils_1618676244774/work entrypoints @ file:///home/conda/feedstock_root/build_artifacts/entrypoints_1643888246732/work future @ file:///home/conda/feedstock_root/build_artifacts/future_1610147328086/work idna @ file:///home/conda/feedstock_root/build_artifacts/idna_1726459485162/work imagesize @ file:///home/conda/feedstock_root/build_artifacts/imagesize_1656939531508/work importlib-metadata @ file:///home/conda/feedstock_root/build_artifacts/importlib-metadata_1630267465156/work iniconfig==1.1.1 ipykernel @ file:///home/conda/feedstock_root/build_artifacts/ipykernel_1620912934572/work/dist/ipykernel-5.5.5-py3-none-any.whl ipython @ file:///home/conda/feedstock_root/build_artifacts/ipython_1609697613279/work ipython_genutils @ file:///home/conda/feedstock_root/build_artifacts/ipython_genutils_1716278396992/work ipywidgets @ file:///home/conda/feedstock_root/build_artifacts/ipywidgets_1679421482533/work jedi @ file:///home/conda/feedstock_root/build_artifacts/jedi_1605054537831/work Jinja2 @ file:///home/conda/feedstock_root/build_artifacts/jinja2_1636510082894/work jsonschema @ file:///home/conda/feedstock_root/build_artifacts/jsonschema_1634752161479/work jupyter @ file:///home/conda/feedstock_root/build_artifacts/jupyter_1696255489086/work jupyter-client @ file:///home/conda/feedstock_root/build_artifacts/jupyter_client_1642858610849/work jupyter-console @ file:///home/conda/feedstock_root/build_artifacts/jupyter_console_1676328545892/work jupyter-core @ file:///home/conda/feedstock_root/build_artifacts/jupyter_core_1631852698933/work jupyterlab-pygments @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_pygments_1601375948261/work jupyterlab-widgets @ 
file:///home/conda/feedstock_root/build_artifacts/jupyterlab_widgets_1655961217661/work kiwisolver @ file:///home/conda/feedstock_root/build_artifacts/kiwisolver_1610099771815/work latexcodec @ file:///home/conda/feedstock_root/build_artifacts/latexcodec_1592937263153/work MarkupSafe @ file:///home/conda/feedstock_root/build_artifacts/markupsafe_1621455668064/work matplotlib @ file:///home/conda/feedstock_root/build_artifacts/matplotlib-suite_1611858699142/work mistune @ file:///home/conda/feedstock_root/build_artifacts/mistune_1673904152039/work nbclient @ file:///home/conda/feedstock_root/build_artifacts/nbclient_1637327213451/work nbconvert @ file:///home/conda/feedstock_root/build_artifacts/nbconvert_1605401832871/work nbformat @ file:///home/conda/feedstock_root/build_artifacts/nbformat_1617383142101/work nest_asyncio @ file:///home/conda/feedstock_root/build_artifacts/nest-asyncio_1705850609492/work netCDF4 @ file:///home/conda/feedstock_root/build_artifacts/netcdf4_1633096406418/work notebook @ file:///home/conda/feedstock_root/build_artifacts/notebook_1616419146127/work numpy @ file:///home/conda/feedstock_root/build_artifacts/numpy_1626681920064/work numpydoc @ file:///home/conda/feedstock_root/build_artifacts/numpydoc_1648619272706/work olefile @ file:///home/conda/feedstock_root/build_artifacts/olefile_1602866521163/work packaging @ file:///home/conda/feedstock_root/build_artifacts/packaging_1637239678211/work pandas==1.1.5 pandocfilters @ file:///home/conda/feedstock_root/build_artifacts/pandocfilters_1631603243851/work parso @ file:///home/conda/feedstock_root/build_artifacts/parso_1595548966091/work pexpect @ file:///home/conda/feedstock_root/build_artifacts/pexpect_1667297516076/work pickleshare @ file:///home/conda/feedstock_root/build_artifacts/pickleshare_1602536217715/work Pillow @ file:///home/conda/feedstock_root/build_artifacts/pillow_1630696616009/work pluggy==1.0.0 prometheus-client @ file:///home/conda/feedstock_root/build_artifacts/prometheus_client_1689032443210/work prompt-toolkit @ file:///home/conda/feedstock_root/build_artifacts/prompt-toolkit_1670414775770/work ptyprocess @ file:///home/conda/feedstock_root/build_artifacts/ptyprocess_1609419310487/work/dist/ptyprocess-0.7.0-py2.py3-none-any.whl py==1.11.0 pybtex @ file:///home/conda/feedstock_root/build_artifacts/pybtex_1638467081712/work pybtex-docutils @ file:///home/conda/feedstock_root/build_artifacts/pybtex-docutils_1627685481852/work pycparser @ file:///home/conda/feedstock_root/build_artifacts/pycparser_1636257122734/work Pygments @ file:///home/conda/feedstock_root/build_artifacts/pygments_1672682006896/work pyOpenSSL @ file:///home/conda/feedstock_root/build_artifacts/pyopenssl_1663846997386/work pyparsing @ file:///home/conda/feedstock_root/build_artifacts/pyparsing_1724616129934/work PyQt5==5.12.3 PyQt5_sip==4.19.18 PyQtChart==5.12 PyQtWebEngine==5.12.1 pyrsistent @ file:///home/conda/feedstock_root/build_artifacts/pyrsistent_1610146795286/work PySocks @ file:///home/conda/feedstock_root/build_artifacts/pysocks_1610291458349/work pytest==7.0.1 python-dateutil @ file:///home/conda/feedstock_root/build_artifacts/python-dateutil_1626286286081/work pytz @ file:///home/conda/feedstock_root/build_artifacts/pytz_1693930252784/work PyYAML==5.4.1 pyzmq @ file:///home/conda/feedstock_root/build_artifacts/pyzmq_1631793305981/work qtconsole @ file:///home/conda/feedstock_root/build_artifacts/qtconsole-base_1640876679830/work QtPy @ file:///home/conda/feedstock_root/build_artifacts/qtpy_1643828301492/work 
requests @ file:///home/conda/feedstock_root/build_artifacts/requests_1656534056640/work scipy @ file:///home/conda/feedstock_root/build_artifacts/scipy_1629411471490/work Send2Trash @ file:///home/conda/feedstock_root/build_artifacts/send2trash_1682601222253/work six @ file:///home/conda/feedstock_root/build_artifacts/six_1620240208055/work snowballstemmer @ file:///home/conda/feedstock_root/build_artifacts/snowballstemmer_1637143057757/work Sphinx @ file:///home/conda/feedstock_root/build_artifacts/sphinx_1658872348413/work sphinx-rtd-theme @ file:///home/conda/feedstock_root/build_artifacts/sphinx_rtd_theme_1701183475238/work sphinxcontrib-applehelp @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-applehelp_1674487779667/work sphinxcontrib-bibtex @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-bibtex_1661629118308/work sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-htmlhelp_1675256494457/work sphinxcontrib-jquery @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-jquery_1678808969227/work sphinxcontrib-jsmath @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-jsmath_1691604704163/work sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-serializinghtml_1649380998999/work terminado @ file:///home/conda/feedstock_root/build_artifacts/terminado_1631128154882/work testpath @ file:///home/conda/feedstock_root/build_artifacts/testpath_1645693042223/work tomli==1.2.3 tornado @ file:///home/conda/feedstock_root/build_artifacts/tornado_1610094701020/work traitlets @ file:///home/conda/feedstock_root/build_artifacts/traitlets_1631041982274/work typing_extensions @ file:///home/conda/feedstock_root/build_artifacts/typing_extensions_1644850595256/work urllib3 @ file:///home/conda/feedstock_root/build_artifacts/urllib3_1678635778344/work wcwidth @ file:///home/conda/feedstock_root/build_artifacts/wcwidth_1699959196938/work webencodings @ file:///home/conda/feedstock_root/build_artifacts/webencodings_1694681268211/work widgetsnbextension @ file:///home/conda/feedstock_root/build_artifacts/widgetsnbextension_1655939017940/work xarray @ file:///home/conda/feedstock_root/build_artifacts/xarray_1621474818012/work zipp @ file:///home/conda/feedstock_root/build_artifacts/zipp_1633302054558/work
name: climlab channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - alabaster=0.7.13=pyhd8ed1ab_0 - alsa-lib=1.2.3.2=h166bdaf_0 - argon2-cffi=21.1.0=py36h8f6f2f9_0 - async_generator=1.10=pyhd8ed1ab_1 - attrs=22.2.0=pyh71513ae_0 - babel=2.11.0=pyhd8ed1ab_0 - backcall=0.2.0=pyh9f0ad1d_0 - backports=1.0=pyhd8ed1ab_4 - backports.functools_lru_cache=2.0.0=pyhd8ed1ab_0 - bleach=6.1.0=pyhd8ed1ab_0 - brotlipy=0.7.0=py36h8f6f2f9_1001 - bzip2=1.0.8=h7f98852_4 - c-ares=1.18.1=h7f98852_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - cffi=1.14.6=py36hc120d54_0 - cftime=1.5.1=py36he33b4a0_0 - charset-normalizer=2.1.1=pyhd8ed1ab_0 - colorama=0.4.5=pyhd8ed1ab_0 - cryptography=35.0.0=py36hb60f036_0 - curl=7.79.1=h2574ce0_1 - cycler=0.11.0=pyhd8ed1ab_0 - dataclasses=0.8=pyh787bdff_2 - dbus=1.13.6=h48d8840_2 - decorator=5.1.1=pyhd8ed1ab_0 - defusedxml=0.7.1=pyhd8ed1ab_0 - docutils=0.17.1=py36h5fab9bb_0 - entrypoints=0.4=pyhd8ed1ab_0 - expat=2.4.8=h27087fc_0 - fontconfig=2.14.0=h8e229c2_0 - freetype=2.10.4=h0708190_1 - future=0.18.2=py36h5fab9bb_3 - gettext=0.19.8.1=h0b5b191_1005 - glib=2.68.4=h9c3ff4c_0 - glib-tools=2.68.4=h9c3ff4c_0 - gst-plugins-base=1.18.5=hf529b03_0 - gstreamer=1.18.5=h76c114f_0 - hdf4=4.2.15=h10796ff_3 - hdf5=1.12.1=nompi_h2750804_100 - icu=68.2=h9c3ff4c_0 - idna=3.10=pyhd8ed1ab_0 - imagesize=1.4.1=pyhd8ed1ab_0 - importlib-metadata=4.8.1=py36h5fab9bb_0 - importlib_metadata=4.8.1=hd8ed1ab_1 - ipykernel=5.5.5=py36hcb3619a_0 - ipython=7.16.1=py36he448a4c_2 - ipython_genutils=0.2.0=pyhd8ed1ab_1 - ipywidgets=7.7.4=pyhd8ed1ab_0 - jbig=2.1=h7f98852_2003 - jedi=0.17.2=py36h5fab9bb_1 - jinja2=3.0.3=pyhd8ed1ab_0 - jpeg=9e=h166bdaf_1 - jsonschema=4.1.2=pyhd8ed1ab_0 - jupyter=1.0.0=pyhd8ed1ab_10 - jupyter_client=7.1.2=pyhd8ed1ab_0 - jupyter_console=6.5.1=pyhd8ed1ab_0 - jupyter_core=4.8.1=py36h5fab9bb_0 - jupyterlab_pygments=0.1.2=pyh9f0ad1d_0 - jupyterlab_widgets=1.1.1=pyhd8ed1ab_0 - keyutils=1.6.1=h166bdaf_0 - kiwisolver=1.3.1=py36h605e78d_1 - krb5=1.19.3=h3790be6_0 - latexcodec=2.0.1=pyh9f0ad1d_0 - lcms2=2.12=hddcbb42_0 - ld_impl_linux-64=2.40=h12ee557_0 - lerc=2.2.1=h9c3ff4c_0 - libblas=3.9.0=16_linux64_openblas - libcblas=3.9.0=16_linux64_openblas - libclang=11.1.0=default_ha53f305_1 - libcurl=7.79.1=h2574ce0_1 - libdeflate=1.7=h7f98852_5 - libedit=3.1.20191231=he28a2e2_2 - libev=4.33=h516909a_1 - libevent=2.1.10=h9b69904_4 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgfortran-ng=13.2.0=h69a702a_0 - libgfortran5=13.2.0=ha4646dd_0 - libglib=2.68.4=h3e27bee_0 - libgomp=11.2.0=h1234567_1 - libiconv=1.17=h166bdaf_0 - liblapack=3.9.0=16_linux64_openblas - libllvm11=11.1.0=hf817b99_2 - libnetcdf=4.8.1=nompi_hb3fd0d9_101 - libnghttp2=1.43.0=h812cca2_1 - libogg=1.3.4=h7f98852_1 - libopenblas=0.3.21=h043d6bf_0 - libopus=1.3.1=h7f98852_1 - libpng=1.6.37=h21135ba_2 - libpq=13.3=hd57d9b9_0 - libsodium=1.0.18=h36c2ea0_1 - libssh2=1.10.0=ha56f1ee_2 - libstdcxx-ng=11.2.0=h1234567_1 - libtiff=4.3.0=hf544144_1 - libuuid=2.32.1=h7f98852_1000 - libvorbis=1.3.7=h9c3ff4c_0 - libwebp-base=1.2.2=h7f98852_1 - libxcb=1.13=h7f98852_1004 - libxkbcommon=1.0.3=he3ba5ed_0 - libxml2=2.9.12=h72842e0_0 - libzip=1.8.0=h4de3113_1 - lz4-c=1.9.3=h9c3ff4c_1 - markupsafe=2.0.1=py36h8f6f2f9_0 - matplotlib=3.3.4=py36h5fab9bb_0 - matplotlib-base=3.3.4=py36hd391965_0 - mistune=0.8.4=pyh1a96a4e_1006 - mysql-common=8.0.25=ha770c72_2 - mysql-libs=8.0.25=hfa10184_2 - 
nbclient=0.5.9=pyhd8ed1ab_0 - nbconvert=6.0.7=py36h5fab9bb_3 - nbformat=5.1.3=pyhd8ed1ab_0 - ncurses=6.4=h6a678d5_0 - nest-asyncio=1.6.0=pyhd8ed1ab_0 - netcdf4=1.5.7=nompi_py36h775750b_103 - notebook=6.3.0=py36h5fab9bb_0 - nspr=4.32=h9c3ff4c_1 - nss=3.69=hb5efdd6_1 - numpy=1.19.5=py36hfc0c790_2 - numpydoc=1.2.1=pyhd8ed1ab_0 - olefile=0.46=pyh9f0ad1d_1 - openjpeg=2.4.0=hb52868f_1 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd8ed1ab_0 - pandas=1.1.5=py36h284efc9_0 - pandoc=2.19.2=ha770c72_0 - pandocfilters=1.5.0=pyhd8ed1ab_0 - parso=0.7.1=pyh9f0ad1d_0 - pcre=8.45=h9c3ff4c_0 - pexpect=4.8.0=pyh1a96a4e_2 - pickleshare=0.7.5=py_1003 - pillow=8.3.2=py36h676a545_0 - pip=21.2.2=py36h06a4308_0 - prometheus_client=0.17.1=pyhd8ed1ab_0 - prompt-toolkit=3.0.36=pyha770c72_0 - prompt_toolkit=3.0.36=hd8ed1ab_0 - pthread-stubs=0.4=h36c2ea0_1001 - ptyprocess=0.7.0=pyhd3deb0d_0 - pybtex=0.24.0=pyhd8ed1ab_2 - pybtex-docutils=1.0.1=py36h5fab9bb_0 - pycparser=2.21=pyhd8ed1ab_0 - pygments=2.14.0=pyhd8ed1ab_0 - pyopenssl=22.0.0=pyhd8ed1ab_1 - pyparsing=3.1.4=pyhd8ed1ab_0 - pyqt=5.12.3=py36h5fab9bb_7 - pyqt-impl=5.12.3=py36h7ec31b9_7 - pyqt5-sip=4.19.18=py36hc4f0c31_7 - pyqtchart=5.12=py36h7ec31b9_7 - pyqtwebengine=5.12.1=py36h7ec31b9_7 - pyrsistent=0.17.3=py36h8f6f2f9_2 - pysocks=1.7.1=py36h5fab9bb_3 - python=3.6.13=h12debd9_1 - python-dateutil=2.8.2=pyhd8ed1ab_0 - python_abi=3.6=2_cp36m - pytz=2023.3.post1=pyhd8ed1ab_0 - pyyaml=5.4.1=py36h8f6f2f9_1 - pyzmq=22.3.0=py36h7068817_0 - qt=5.12.9=hda022c4_4 - qtconsole-base=5.2.2=pyhd8ed1ab_1 - qtpy=2.0.1=pyhd8ed1ab_0 - readline=8.2=h5eee18b_0 - requests=2.28.1=pyhd8ed1ab_0 - scipy=1.5.3=py36h81d768a_1 - send2trash=1.8.2=pyh41d4057_0 - setuptools=58.0.4=py36h06a4308_0 - six=1.16.0=pyh6c4a22f_0 - snowballstemmer=2.2.0=pyhd8ed1ab_0 - sphinx=5.1.1=pyh6c4a22f_0 - sphinx_rtd_theme=2.0.0=pyha770c72_0 - sphinxcontrib-applehelp=1.0.4=pyhd8ed1ab_0 - sphinxcontrib-bibtex=2.5.0=pyhd8ed1ab_0 - sphinxcontrib-devhelp=1.0.2=py_0 - sphinxcontrib-htmlhelp=2.0.1=pyhd8ed1ab_0 - sphinxcontrib-jquery=4.1=pyhd8ed1ab_0 - sphinxcontrib-jsmath=1.0.1=pyhd8ed1ab_0 - sphinxcontrib-qthelp=1.0.3=py_0 - sphinxcontrib-serializinghtml=1.1.5=pyhd8ed1ab_2 - sqlite=3.45.3=h5eee18b_0 - terminado=0.12.1=py36h5fab9bb_0 - testpath=0.6.0=pyhd8ed1ab_0 - tk=8.6.14=h39e8969_0 - tornado=6.1=py36h8f6f2f9_1 - traitlets=4.3.3=pyhd8ed1ab_2 - typing_extensions=4.1.1=pyha770c72_0 - urllib3=1.26.15=pyhd8ed1ab_0 - wcwidth=0.2.10=pyhd8ed1ab_0 - webencodings=0.5.1=pyhd8ed1ab_2 - wheel=0.37.1=pyhd3eb1b0_0 - widgetsnbextension=3.6.1=pyha770c72_0 - xarray=0.18.2=pyhd8ed1ab_0 - xorg-libxau=1.0.9=h7f98852_0 - xorg-libxdmcp=1.1.3=h7f98852_0 - xz=5.6.4=h5eee18b_1 - yaml=0.2.5=h7f98852_2 - zeromq=4.3.4=h9c3ff4c_1 - zipp=3.6.0=pyhd8ed1ab_0 - zlib=1.2.13=h5eee18b_1 - zstd=1.5.0=ha95c52a_0 - pip: - codecov==2.1.13 - coverage==6.2 - iniconfig==1.1.1 - pluggy==1.0.0 - py==1.11.0 - pytest==7.0.1 - tomli==1.2.3 prefix: /opt/conda/envs/climlab
[ "climlab/tests/test_rrtm.py::test_no_ozone" ]
[ "climlab/tests/test_rrtm.py::test_rrtm_creation", "climlab/tests/test_rrtm.py::test_swap_component", "climlab/tests/test_rrtm.py::test_multidim", "climlab/tests/test_rrtm.py::test_cloud", "climlab/tests/test_rrtm.py::test_radiative_forcing", "climlab/tests/test_rrtm.py::test_latitude" ]
[ "climlab/tests/test_rrtm.py::test_rrtmg_lw_creation" ]
[]
MIT License
3,030
[ "setup.py", "conda-recipe/meta.yaml", "climlab/__init__.py", "climlab/radiation/radiation.py" ]
[ "setup.py", "conda-recipe/meta.yaml", "climlab/__init__.py", "climlab/radiation/radiation.py" ]
spacetx__starfish-523
9b53c8a84d586af1f906c742b52617b036f14221
2018-09-06 14:39:17
656690f8d3f562c6b29fc7bd6cf4fd37563407d5
joshmoore: Workings towards this type of output: ``` test_fuzz.py::test_fuzz_codebook > Fuzzing unknown... A D I S X X X X - codeword: X X X - r: X 0 X X X c: X X 0 X X X v: X X 1 X X X - r: X 0 X X X c: X X 1 X X X v: X X 1 X X X - r: X 1 X X X c: X X 1 X X X v: X X 1 X X X X target: X X SCUBE2 ``` where: * A = add a new value * D = delete this value * I = change value to an int * S = change value to a str joshmoore: Forced pushed now that gh-514 is merged. Suggestions on others tests that would be interesting welcome. ("Replace with object" and "replace with list"?) codecov-io: # [Codecov](https://codecov.io/gh/spacetx/starfish/pull/523?src=pr&el=h1) Report > Merging [#523](https://codecov.io/gh/spacetx/starfish/pull/523?src=pr&el=desc) into [master](https://codecov.io/gh/spacetx/starfish/commit/9b53c8a84d586af1f906c742b52617b036f14221?src=pr&el=desc) will **increase** coverage by `0.82%`. > The diff coverage is `87.5%`. [![Impacted file tree graph](https://codecov.io/gh/spacetx/starfish/pull/523/graphs/tree.svg?width=650&token=kliQHejt6T&height=150&src=pr)](https://codecov.io/gh/spacetx/starfish/pull/523?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #523 +/- ## ========================================== + Coverage 82.67% 83.49% +0.82% ========================================== Files 87 91 +4 Lines 3255 3411 +156 ========================================== + Hits 2691 2848 +157 + Misses 564 563 -1 ``` | [Impacted Files](https://codecov.io/gh/spacetx/starfish/pull/523?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [starfish/test/full\_pipelines/cli/test\_validate.py](https://codecov.io/gh/spacetx/starfish/pull/523/diff?src=pr&el=tree#diff-c3RhcmZpc2gvdGVzdC9mdWxsX3BpcGVsaW5lcy9jbGkvdGVzdF92YWxpZGF0ZS5weQ==) | `100% <100%> (ø)` | | | [starfish/starfish.py](https://codecov.io/gh/spacetx/starfish/pull/523/diff?src=pr&el=tree#diff-c3RhcmZpc2gvc3RhcmZpc2gucHk=) | `83.63% <100%> (+0.94%)` | :arrow_up: | | [validate\_sptx/test\_fuzz.py](https://codecov.io/gh/spacetx/starfish/pull/523/diff?src=pr&el=tree#diff-dmFsaWRhdGVfc3B0eC90ZXN0X2Z1enoucHk=) | `100% <100%> (ø)` | | | [validate\_sptx/validate\_sptx.py](https://codecov.io/gh/spacetx/starfish/pull/523/diff?src=pr&el=tree#diff-dmFsaWRhdGVfc3B0eC92YWxpZGF0ZV9zcHR4LnB5) | `27.77% <23.07%> (+27.77%)` | :arrow_up: | | [starfish/util/exec.py](https://codecov.io/gh/spacetx/starfish/pull/523/diff?src=pr&el=tree#diff-c3RhcmZpc2gvdXRpbC9leGVjLnB5) | `78.37% <78.37%> (ø)` | | | [validate\_sptx/cli.py](https://codecov.io/gh/spacetx/starfish/pull/523/diff?src=pr&el=tree#diff-dmFsaWRhdGVfc3B0eC9jbGkucHk=) | `92.85% <92.85%> (ø)` | | | [validate\_sptx/util.py](https://codecov.io/gh/spacetx/starfish/pull/523/diff?src=pr&el=tree#diff-dmFsaWRhdGVfc3B0eC91dGlsLnB5) | `95.61% <97.22%> (+2.43%)` | :arrow_up: | | ... and [2 more](https://codecov.io/gh/spacetx/starfish/pull/523/diff?src=pr&el=tree-more) | | ------ [Continue to review full report at Codecov](https://codecov.io/gh/spacetx/starfish/pull/523?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/spacetx/starfish/pull/523?src=pr&el=footer). Last update [9b53c8a...16d3591](https://codecov.io/gh/spacetx/starfish/pull/523?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments). joshmoore: Ooh, I'm finally doing something _positive_ for coverage numbers. 
joshmoore: Pushed a sufficient state for review. Example output (from the unit tests): ``` validate_sptx/test_fuzz.py::test_fuzz_experiment... > Fuzzing unknown... A D I S M L If the letter is present, mutation is valid! ----------- -------------------------------------------- . . . . . . version: . . . . . . 0.0.0 . . . . . . primary_images: . . . . . . primary_images.json . D . . M . auxiliary_images: . D . . . . nuclei: . D . . . . nuclei.json . . . . . . codebook: . . . . . . codebook.json . D I S M L extras: A D I S M L is_space_tx_cool: A D I S M L True PASSED ``` joshmoore: > I've added a few suggestions that are mostly stylistic. Wow. Now I know what to be ready for with an @ambrosejcarr review! > Do you think it's worth pointing them at this? Potentially. While setting it up, I did wonder if it would actually be better as a standalone tool that `starfish` consumes. If you'd prefer not to have it in this codebase, I understand and/or it can be migrated later. Changes pushed.
diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..6b8710a7 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,1 @@ +.git diff --git a/.travis.yml b/.travis.yml index f1c6753e..77f8c4e9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -23,5 +23,7 @@ script: .venv/bin/pip install starfish; make PYTHON=.venv/bin/python run_notebooks; fi +- |- + docker run -ti --rm spacetx/starfish build --fov-count 1 --hybridization-dimensions '{"z": 1}' /tmp/ after_success: - bash <(curl -s https://codecov.io/bash) diff --git a/Dockerfile b/Dockerfile index 7c6850ca..0fbc37cc 100644 --- a/Dockerfile +++ b/Dockerfile @@ -36,18 +36,22 @@ ## ## $ docker run -e TEST_ISS_KEEP_DATA=true --entrypoint=pytest spacetx/starfish -vsxk TestWithIssData ## -FROM python:3.6 - -COPY REQUIREMENTS.txt /src/ -COPY REQUIREMENTS-DEV.txt /src/ -COPY REQUIREMENTS-NOTEBOOK.txt /src/ -RUN pip install -r /src/REQUIREMENTS-DEV.txt -r /src/REQUIREMENTS-NOTEBOOK.txt - +FROM continuumio/miniconda3 RUN useradd -m starfish -COPY . /src -RUN chown -R starfish:starfish /src USER starfish + +# Set up the initial conda environment +COPY --chown=starfish:starfish environment.yml /src/environment.yml WORKDIR /src -RUN pip install --user -e . -ENV PATH=${PATH}:/home/starfish/.local/bin +RUN conda env create -f environment.yml + +# Prepare for build +COPY --chown=starfish:starfish . /src +RUN echo "source activate starfish" >> ~/.bashrc +ENV PATH /home/starfish/.conda/envs/starfish/bin:$PATH + +# Build and configure for running +RUN pip install -e . + +env MPLBACKEND Agg ENTRYPOINT ["starfish"] diff --git a/environment.yml b/environment.yml new file mode 100644 index 00000000..450fc052 --- /dev/null +++ b/environment.yml @@ -0,0 +1,30 @@ +name: starfish +channels: +- defaults +- bioconda +- conda-forge +dependencies: +- python>=3.6 +- click +- flake8 +- jsonschema +- matplotlib +- mypy +- numpy!=1.13.0 +- pandas>=0.23.4 +- pytest>=3.6.3 +- regional +- requests +- scikit-image>=0.14.0 +- scikit-learn +- scipy +- seaborn +- semantic_version +- showit>=1.1.4 +- slicedimage==0.0.4 +- tqdm +- trackpy +- validators +- xarray +- pip: + - jsonpath-rw diff --git a/sptx-format/Dockerfile b/sptx-format/Dockerfile deleted file mode 100644 index df84967f..00000000 --- a/sptx-format/Dockerfile +++ /dev/null @@ -1,23 +0,0 @@ -## Dockerfile for sptx -## -## Default entrypoint is the validate-sptx script. - -FROM continuumio/miniconda3 -RUN useradd -m sptx -USER sptx - -# Set up the initial conda environment -COPY --chown=sptx:sptx environment.yml /src/environment.yml -WORKDIR /src -RUN conda env create -f environment.yml - -# Prepare for build -COPY --chown=sptx:sptx . /src -RUN echo "source activate sptx" >> ~/.bashrc -ENV PATH /home/sptx/.conda/envs/sptx/bin:$PATH - -# Build and configure for running -RUN pip install -e . 
- -env MPLBACKEND Agg -ENTRYPOINT ["validate-sptx"] diff --git a/sptx-format/environment.yml b/sptx-format/environment.yml deleted file mode 100644 index c2b420f7..00000000 --- a/sptx-format/environment.yml +++ /dev/null @@ -1,14 +0,0 @@ -# Also update REQUIREMENTS.txt -name: sptx -channels: -- defaults -- bioconda -- conda-forge -dependencies: -- python>=3.6 -- click -- flake8 -- jsonschema -- mypy -- pytest -- slicedimage==0.0.1 diff --git a/starfish/starfish.py b/starfish/starfish.py index 0b764490..02127a7d 100755 --- a/starfish/starfish.py +++ b/starfish/starfish.py @@ -4,6 +4,7 @@ import argparse import cProfile from pstats import Stats +from validate_sptx.cli import Cli as ValidateCli from starfish.experiment.builder.cli import Cli as BuilderCli from starfish.image import ( Filter, @@ -47,6 +48,9 @@ def build_parser(): build_group = subparsers.add_parser("build") BuilderCli.add_to_parser(build_group) + validate_group = subparsers.add_parser("validate") + ValidateCli.add_to_parser(validate_group) + return parser diff --git a/starfish/util/exec.py b/starfish/util/exec.py new file mode 100644 index 00000000..aea84ddd --- /dev/null +++ b/starfish/util/exec.py @@ -0,0 +1,118 @@ +import json +import os +import shutil +import subprocess +import sys +import tempfile +import unittest +from typing import Callable, Sequence, Union + +import jsonpath_rw +import numpy as np +import pandas as pd + +from starfish.intensity_table import IntensityTable +from starfish.types import Features +from starfish.util import clock + + +def stages(commands: Sequence[Sequence[Union[str, Callable]]], + subdirs: Sequence[str]=None, + keep_data:bool=False) -> None: + """ + Execute a list of commands in a temporary directory + cleaning them up unless otherwise requested. + + Parameters + ---------- + commands : Sequence[Sequence[Union[str, Callable]]] + A collection of tuples of commands composed either of + str elements or callable objects which will be invoked + with the keyword argument "tempdir". The resulting list + of strings will be passed to subprocess.check_call. + subdirs : Sequence[str] + A collection of paths which should be created as subdirectories + within the temporary directory used by this invocation. + keep_data : bool + If not true, shutil.rmtree will be called on the temporary + directory used by this invocation. + + Environment variables + --------------------- + STARFISH_COVERAGE: + If set, then command lists will have `coverage run ...` options + prepended before execution. + """ + tempdir = tempfile.mkdtemp() + + def callback(interval): + print(" ".join(stage[:2]), " ==> {} seconds".format(interval)) + + try: + + if subdirs: + for subdir in subdirs: + os.makedirs("{tempdir}".format( + tempdir=os.path.join(tempdir, subdir))) + + for stage in commands: + cmdline = prepare_stage(stage, tempdir) + with clock.timeit(callback): + subprocess.check_call(cmdline) + + finally: + if not keep_data: + shutil.rmtree(tempdir) + + +def prepare_stage(stage: Sequence[Union[str, Callable]], + tempdir: str) -> Sequence[str]: + """ + Loop through elements of stage, building them into a commandline. + If an element is a callable, it will be invoked with the "tempdir" + keyword. + + Parameters + ---------- + stage: Sequence[Union[str, Callable]] + A collection of commands composed either of + str elements or callable objects which will be invoked + with the keyword argument "tempdir". The resulting list + of strings will be passed to subprocess.check_call. 
+ tempdir: str + Temporary directory that will be used by the invoking method. + + Return + ------ + Sequence of strings for passing to subprocess.check_call + + Environment variables + --------------------- + STARFISH_COVERAGE: + If set, then command lists will have `coverage run ...` options + prepended before execution. + """ + coverage_enabled = "STARFISH_COVERAGE" in os.environ + cmdline = [ + element(tempdir=tempdir) if callable(element) else element + for element in stage + ] + if cmdline[0] == "starfish" and coverage_enabled: + coverage_cmdline = [ + "coverage", "run", + "-p", + "--source", "starfish", + "-m", "starfish", + ] + coverage_cmdline.extend(cmdline[1:]) + cmdline = coverage_cmdline + elif cmdline[0] == "validate-sptx" and coverage_enabled: + coverage_cmdline = [ + "coverage", "run", + "-p", + "--source", "validate_sptx", + "-m", "validate_sptx", + ] + coverage_cmdline.extend(cmdline[1:]) + cmdline = coverage_cmdline + return cmdline diff --git a/validate_sptx/cli.py b/validate_sptx/cli.py new file mode 100644 index 00000000..8e8751fb --- /dev/null +++ b/validate_sptx/cli.py @@ -0,0 +1,30 @@ +import argparse +import json +import sys + +from starfish.util.argparse import FsExistsType +from validate_sptx.validate_sptx import validate + + +class Cli: + parser_group = None + + @staticmethod + def add_to_parser(parser): + """adds experiment-json and fuzz arguments to the given parser""" + parser.add_argument( + "--experiment-json", + required=True, + metavar="JSON_FILE_OR_URL") + parser.add_argument( + "--fuzz", action="store_true") + parser.set_defaults(starfish_command=Cli.run) + Cli.parser_group = parser + + @staticmethod + def run(args, print_help=False): + """invokes validate with the parsed commandline arguments""" + try: + validate(args.experiment_json, fuzz=args.fuzz) + except KeyboardInterrupt: + sys.exit(3) diff --git a/validate_sptx/util.py b/validate_sptx/util.py index 127d4760..29f5b252 100644 --- a/validate_sptx/util.py +++ b/validate_sptx/util.py @@ -1,21 +1,28 @@ +import copy import json import os +import sys import warnings + from pkg_resources import resource_filename -from typing import Dict, Iterator +from typing import Any, Dict, IO, Iterator, List, Optional from jsonschema import RefResolver, Draft4Validator, ValidationError class SpaceTxValidator: - def __init__(self, schema: str) -> None: + def __init__(self, schema: str, fuzz: bool=False) -> None: """create a validator for a json-schema compliant spaceTx specification file Parameters ---------- schema : str file path to schema + fuzz : bool + if true, then the json documents which are validated will + be modified piece-wise and a statement printed to standard + out about whether or not they are still valid. 
""" self._schema: Dict = self.load_json(schema) @@ -59,7 +66,7 @@ class SpaceTxValidator: iterator over ValidationErrors that occur during validation level : int current level of recursion - filename : str + filename: str informational string regarding the source file of the given object """ @@ -98,7 +105,7 @@ class SpaceTxValidator: target_object = self.load_json(target_file) return self.validate_object(target_object, target_file) - def validate_object(self, target_object: Dict, target_file: str=None) -> bool: + def validate_object(self, target_object: Dict, target_file: str=None, fuzz: bool=False) -> bool: """validate a loaded json object, returning True if valid, and False otherwise Parameters @@ -107,16 +114,203 @@ class SpaceTxValidator: loaded json object to be validated against the schem passed to this object's constructor target_file : str informational string regarding the source file of the given object + fuzz: bool + whether or not to perform element-by-element fuzzing. + If true, will return true and will *not* use warnings. Returns ------- bool : - True, if object valid, else False + True, if object valid or fuzz=True, else False """ + + if fuzz: + if target_file: + print(f"> Fuzzing {target_file}...") + else: + print("> Fuzzing unknown...") + fuzzer = Fuzzer(self._validator, target_object) + fuzzer.fuzz() + return True + if self._validator.is_valid(target_object): return True else: es: Iterator[ValidationError] = self._validator.iter_errors(target_object) self._recurse_through_errors(es, filename=target_file) return False + + +class Fuzzer(object): + + def __init__(self, validator: Draft4Validator, obj: Any, out: IO=sys.stdout) -> None: + """create a fuzzer which will check different situations against the validator + + Parameters + ---------- + validator : SpaceTxValidator + validator which should match the given object type + obj : Any + JSON-like object which will be checked against the validator + out : IO + if true, then the json documents which are validated will + be modified piece-wise and a statement printed to standard + out about whether or not they are still valid. + + """ + self.validator = validator + self.obj = obj + self.out = out + self.stack: Optional[List[Any]] = None + + def fuzz(self) -> None: + """prints to the out field the state of the object tree after types of fuzzing + + Each line is prefixed by the output of {state()} followed by a YAML-like + representation of the branch of the object tree. + """ + header = f"{self.state()}" + header += "If the letter is present, mutation is valid!" + self.out.write(f"{header}\n") + self.out.write("".join([x in ("\t", "\n") and x or "-" for x in header])) + self.out.write("\n") + self.stack: List[Any] = [] + try: + self.descend(self.obj) + finally: + self.stack = None + + def state(self) -> str: + """primary driver for the checks of individual trees + + Returns + ------- + str : + space-separated representation of the fuzzing conditions. 
+ If a letter is present, then mutation leaves the tree in + a valid state: + + A: inserting a fake key or appending to a list + D: deleting a key or index + I: converting value to an integer + I: converting value to a string + M: converting value to an empty dict + L: converting value to an empty list + + """ + rv = [ + Add().check(self), + Del().check(self), + Change("I", lambda *args: 123456789).check(self), + Change("S", lambda *args: "fake").check(self), + Change("M", lambda *args: dict()).check(self), + Change("L", lambda *args: list()).check(self), + ] + return ' '.join(rv) + "\t" + + def descend(self, obj: Any, depth: int=0, prefix: str="") -> None: + """walk a JSON-like object tree printing the state of the tree + at each level. A YAML representation is used for simplicity. + + Parameters + ---------- + obj : Any + JSON-like object tree + depth : int + depth in the tree that is currently being evaluated + prefix : str + value which should be prepended to printouts at this level + """ + if self.stack is None: + raise Exception("invalid state") + if isinstance(obj, list): + for i, o in enumerate(obj): + depth += 1 + self.stack.append(i) + self.descend(o, depth, prefix="- ") + self.stack.pop() + depth -= 1 + elif isinstance(obj, dict): + for k in obj: + # This is something of a workaround in that we need a special + # case for object keys since no __getitem__ method will suffice. + self.stack.append((k,)) + self.out.write(f"{self.state()}{' ' * depth}{prefix}{k}:\n") + self.stack.pop() + if prefix == "- ": prefix = " " + depth += 1 + self.stack.append(k) + self.descend(obj[k], depth, prefix=" "+prefix) + self.stack.pop() + depth -= 1 + else: + self.out.write(f"{self.state()}{' ' * depth}{prefix}{obj}\n") + +class Checker(object): + + LETTER : str = "?" + + def check(self, fuzz: Fuzzer) -> str: + """create a copy of the current state of the object tree, + mutate it, and run it through is_valid on the validator. + + Parameters + ---------- + fuzz : Fuzzer + the containing instance + + Returns + ------- + str : + A single character string representation of the check + + """ + # Don't mess with the top level + if fuzz.stack is None: return self.LETTER + if not fuzz.stack: return "-" + # Operate on a copy for mutating + dupe = copy.deepcopy(fuzz.obj) + target = dupe + for level in fuzz.stack[0:-1]: + target = target.__getitem__(level) + self.handle(fuzz, target) + valid = fuzz.validator.is_valid(dupe) + return valid and self.LETTER or "." + + def handle(self, fuzz, target): + raise NotImplementedError() + +class Add(Checker): + + LETTER = "A" + + def handle(self, fuzz, target): + if isinstance(target, dict): + target["fake"] = "!" 
+ elif isinstance(target, list): + target.append("!") + else: + raise Exception("unknown") + +class Del(Checker): + + LETTER = "D" + + def handle(self, fuzz, target): + key = fuzz.stack[-1] + if isinstance(key, tuple): + key = key[0] + target.__delitem__(key) + +class Change(Checker): + + def __init__(self, letter, call): + self.LETTER = letter + self.call = call + + def handle(self, fuzz, target): + key = fuzz.stack[-1] + if isinstance(key, tuple): + key = key[0] + target.__setitem__(key, self.call()) diff --git a/validate_sptx/validate_sptx.py b/validate_sptx/validate_sptx.py index db707d30..cafd0b75 100644 --- a/validate_sptx/validate_sptx.py +++ b/validate_sptx/validate_sptx.py @@ -19,9 +19,26 @@ def _get_absolute_schema_path(schema_name: str) -> str: @click.command() @click.option('--experiment-json', help='image metadata file to validate') -def validate_sptx(experiment_json: str) -> None: +def validate_sptx(experiment_json: str, fuzz: bool=False) -> bool: + return validate(experiment_json, fuzz) + +def validate(experiment_json: str, fuzz: bool=False) -> bool: """validate a spaceTx formatted experiment. Accepts local filepaths or files hosted at http links. + + Parameters + ---------- + experiment_json : str + path or URL to a target json object to be validated against the schema passed to this + object's constructor + fuzz : bool + whether or not to perform element-by-element fuzzing. + If true, will return true and will *not* use warnings. + + Returns + ------- + bool : + True, if object valid or fuzz=True, else False """ valid = True @@ -33,27 +50,27 @@ def validate_sptx(experiment_json: str) -> None: # validate experiment.json experiment_validator = SpaceTxValidator(_get_absolute_schema_path('experiment.json')) - valid &= experiment_validator.validate_object(experiment) + valid &= experiment_validator.validate_object(experiment, name, fuzz=fuzz) # validate manifests that it links to. possible_manifests = [] manifest_validator = SpaceTxValidator(_get_absolute_schema_path('fov_manifest.json')) with backend.read_contextmanager(experiment['primary_images']) as fh: - possible_manifests.append(json.load(fh)) + possible_manifests.append((json.load(fh), experiment['primary_images'])) # loop over all the manifests that are stored in auxiliary images. Disallowed names will # have already been excluded by experiment validation. for manifest in experiment['auxiliary_images'].values(): with backend.read_contextmanager(manifest) as fh: - possible_manifests.append(json.load(fh)) + possible_manifests.append((json.load(fh), manifest)) # we allow the objects linked from primary_images and auxiliary images to either be # manifests OR field_of_view files. We distinguish these by checking if they have a `contents` # flag, which indicates it is a manifest. 
fovs = [] - for manifest in possible_manifests: + for manifest, filename in possible_manifests: if 'contents' in manifest: # is a manifest; validate - valid &= manifest_validator.validate_object(manifest) + valid &= manifest_validator.validate_object(manifest, filename, fuzz=fuzz) # contains fields of view for key, fov in manifest['contents'].items(): @@ -67,7 +84,7 @@ def validate_sptx(experiment_json: str) -> None: assert len(fovs) != 0 fov_validator = SpaceTxValidator(_get_absolute_schema_path('field_of_view/field_of_view.json')) for fov, filename in fovs: - valid &= fov_validator.validate_object(fov, filename) + valid &= fov_validator.validate_object(fov, filename, fuzz=fuzz) # validate codebook codebook_validator = SpaceTxValidator(_get_absolute_schema_path('codebook/codebook.json')) @@ -76,13 +93,15 @@ def validate_sptx(experiment_json: str) -> None: if codebook_file is not None: with backend.read_contextmanager(codebook_file) as fh: codebook = json.load(fh) - valid &= codebook_validator.validate_object(codebook, codebook_file) + valid &= codebook_validator.validate_object(codebook, codebook_file, fuzz=fuzz) + + return valid + + +if __name__ == "__main__": + valid = validate_sptx() if valid: sys.exit(0) else: sys.exit(1) - - -if __name__ == "__main__": - validate_sptx()
Add validate-sptx fuzz command

cf. https://github.com/spacetx/sptx-format/pull/21

Approximate output:

```
validate_sptx/test_field_of_view.py::test_fuzz_input[0] 0 corrupt b'{\n'
validate_sptx/test_field_of_view.py::test_fuzz_input[1] 1 invalid b' "version": "0.0.0",\n'
validate_sptx/test_field_of_view.py::test_fuzz_input[2] 2 invalid b' "fov": 0,\n'
validate_sptx/test_field_of_view.py::test_fuzz_input[3] 3 corrupt b' "dimensions": [\n'
validate_sptx/test_field_of_view.py::test_fuzz_input[4] 4 passes b' "x",\n'
validate_sptx/test_field_of_view.py::test_fuzz_input[5] 5 passes b' "y",\n'
validate_sptx/test_field_of_view.py::test_fuzz_input[6] 6 passes b' "z",\n'
validate_sptx/test_field_of_view.py::test_fuzz_input[7] 7 passes b' "r",\n'
validate_sptx/test_field_of_view.py::test_fuzz_input[8] 8 corrupt b' "c"\n'
validate_sptx/test_field_of_view.py::test_fuzz_input[9] 9 corrupt b' ],\n'
...
```
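The fuzzer that ultimately landed (see `validate_sptx/util.py` in the patch above) works element-by-element rather than byte-by-byte: for each node of the JSON tree it deep-copies the document, applies one mutation (add a fake key, delete the node, or replace it with an int, string, dict, or list), and asks the jsonschema validator whether the result is still valid. The sketch below strips that loop down to its core; the schema and document are toy examples, not the spaceTx schemas.

```
import copy

from jsonschema import Draft4Validator

# Toy schema and document standing in for the spaceTx experiment schema.
schema = {
    "type": "object",
    "required": ["version"],
    "properties": {"version": {"type": "string"}},
}
validator = Draft4Validator(schema)
doc = {"version": "0.0.0"}


def mutations(key):
    """One mutation per output letter: Delete, Int, Str, Map, List."""
    yield "D", lambda d: d.pop(key)
    yield "I", lambda d: d.__setitem__(key, 123456789)
    yield "S", lambda d: d.__setitem__(key, "fake")
    yield "M", lambda d: d.__setitem__(key, {})
    yield "L", lambda d: d.__setitem__(key, [])


for key in doc:
    letters = []
    for letter, mutate in mutations(key):
        dupe = copy.deepcopy(doc)  # never mutate the real document
        mutate(dupe)
        # If the letter survives, that mutation still validates.
        letters.append(letter if validator.is_valid(dupe) else ".")
    print(" ".join(letters), key)
```

Run against the toy schema, only the "replace with another string" mutation keeps the document valid, which mirrors the dotted/lettered rows in the real fuzz report.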
spacetx/starfish
diff --git a/starfish/test/full_pipelines/cli/test_validate.py b/starfish/test/full_pipelines/cli/test_validate.py new file mode 100644 index 00000000..16f3dd3e --- /dev/null +++ b/starfish/test/full_pipelines/cli/test_validate.py @@ -0,0 +1,21 @@ +import json +import os +import shutil +import subprocess +import sys +import tempfile +import unittest + +from starfish.util import exec + + +class TestValidateCommand(unittest.TestCase): + + STAGES = ( + [ + "starfish", "validate", "--help" + ], + ) + + def test_run_pipeline(self): + exec.stages(TestValidateCommand.STAGES) diff --git a/validate_sptx/test_fuzz.py b/validate_sptx/test_fuzz.py new file mode 100644 index 00000000..0fb1b00b --- /dev/null +++ b/validate_sptx/test_fuzz.py @@ -0,0 +1,98 @@ +import os +from pkg_resources import resource_filename + +from .util import SpaceTxValidator +from .util import Fuzzer + +codebook_schema_path = resource_filename("validate_sptx", "schema/codebook/codebook.json") +experiment_schema_path = resource_filename("validate_sptx", "schema/experiment.json") +codebook_validator = SpaceTxValidator(codebook_schema_path) +experiment_validator = SpaceTxValidator(experiment_schema_path) + + +def test_fuzz_mock(): + """ + Starting from the simple obj test class, the Fuzzer will + propose all the different mutations contained in the the + values list. The mocked validator will pop them off, in + order, and compare what it is being given. For testing + purposes, it will always return true. + """ + obj = { + "list": [1, 2, 3] + } + values = [ + { "list": [1, 2, 3], "fake": "!" }, + { }, + { "list": 123456789 }, + { "list": "fake" }, + { "list": {} }, + { "list": [] }, + { "list": [1, 2, 3, '!'] }, + { "list": [2, 3] }, + { "list": [123456789, 2, 3] }, + { "list": ["fake", 2, 3] }, + { "list": [{}, 2, 3] }, + { "list": [[], 2, 3] }, + { "list": [1, 2, 3, "!"]}, + { "list": [1, 3] }, + { "list": [1, 123456789, 3] }, + { "list": [1, "fake", 3] }, + { "list": [1, {}, 3] }, + { "list": [1, [], 3] }, + { "list": [1, 2, 3, '!'] }, + { "list": [1, 2] }, + { "list": [1, 2, 123456789] }, + { "list": [1, 2, "fake"] }, + { "list": [1, 2, {}] }, + { "list": [1, 2, []] }, + ] + class Validator(object): + def __init__(self): + self.called = 0 + def is_valid(self, obj): + self.called += 1 + assert obj == values.pop(0), self.called + return True + Fuzzer(Validator(), obj).fuzz() + + +def test_fuzz_codebook(): + """ + simple validation of a hard-coded example codebook. + + The actual values don't matter overly much, but it + provides a good example of the output. + """ + codebook = [ + { + "codeword": [ + {"r": 0, "c": 0, "v": 1}, + {"r": 0, "c": 1, "v": 1}, + {"r": 1, "c": 1, "v": 1} + ], + "target": "SCUBE2" + } + ] + assert codebook_validator.validate_object(codebook, fuzz=True) + + +def test_fuzz_experiment(): + """ + simple validation of a hard-coded example experiment. + + The actual values don't matter overly much, but it + provides a good example of the output. + """ + experiment = { + "version": "0.0.0", + "primary_images": "primary_images.json", + "auxiliary_images": { + "nuclei": "nuclei.json" + }, + "codebook": "codebook.json", + "extras": { + "is_space_tx_cool": True + } + } + assert experiment_validator.validate_object(experiment, fuzz=True)
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_removed_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 3, "issue_text_score": 3, "test_score": 1 }, "num_modified_files": 5 }
0.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist" ], "pre_install": null, "python": "3.6", "reqs_path": [ "REQUIREMENTS.txt", "REQUIREMENTS-DEV.txt", "REQUIREMENTS-NOTEBOOK.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
argon2-cffi==21.3.0 argon2-cffi-bindings==21.2.0 async-generator==1.10 attrs==22.2.0 backcall==0.2.0 bleach==4.1.0 certifi==2021.5.30 cffi==1.15.1 charset-normalizer==2.0.12 click==8.0.4 comm==0.1.4 coverage==6.2 cycler==0.11.0 dataclasses==0.8 decorator==4.4.2 defusedxml==0.7.1 diskcache==5.6.3 entrypoints==0.4 execnet==1.9.0 flake8==5.0.4 flake8-import-order==0.18.2 idna==3.10 imageio==2.15.0 importlib-metadata==4.2.0 importlib-resources==5.4.0 iniconfig==1.1.1 ipykernel==5.5.6 ipython==7.16.3 ipython-genutils==0.2.0 ipywidgets==7.8.5 jedi==0.17.2 Jinja2==3.0.3 joblib==1.1.1 jsonpath-rw==1.4.0 jsonschema==3.2.0 jupyter-client==7.1.2 jupyter-core==4.9.2 jupyterlab-pygments==0.1.2 jupyterlab_widgets==1.1.11 kiwisolver==1.3.1 MarkupSafe==2.0.1 matplotlib==3.3.4 mccabe==0.7.0 mistune==0.8.4 mypy==0.971 mypy-extensions==1.0.0 nbclient==0.5.9 nbconvert==6.0.7 nbencdec==0.0.10 nbformat==5.1.3 nest-asyncio==1.6.0 networkx==2.5.1 notebook==6.4.10 numpy==1.19.5 packaging==21.3 pandas==1.1.5 pandocfilters==1.5.1 parso==0.7.1 pexpect==4.9.0 pickleshare==0.7.5 Pillow==8.4.0 pluggy==1.0.0 ply==3.11 prometheus-client==0.17.1 prompt-toolkit==3.0.36 ptyprocess==0.7.0 py==1.11.0 pycodestyle==2.9.1 pycparser==2.21 pyflakes==2.5.0 Pygments==2.14.0 pyparsing==3.1.4 pyrsistent==0.18.0 pytest==7.0.1 pytest-cov==4.0.0 pytest-xdist==3.0.2 python-dateutil==2.9.0.post0 pytz==2025.2 PyWavelets==1.1.1 PyYAML==6.0.1 pyzmq==25.1.2 regional==1.1.2 requests==2.27.1 scikit-image==0.17.2 scikit-learn==0.24.2 scipy==1.5.4 seaborn==0.11.2 semantic-version==2.10.0 Send2Trash==1.8.3 showit==1.1.4 six==1.17.0 slicedimage==0.0.5 -e git+https://github.com/spacetx/starfish.git@9b53c8a84d586af1f906c742b52617b036f14221#egg=starfish terminado==0.12.1 testpath==0.6.0 threadpoolctl==3.1.0 tifffile==2020.9.3 tomli==1.2.3 tornado==6.1 tqdm==4.64.1 trackpy==0.5.0 traitlets==4.3.3 typed-ast==1.5.5 typing_extensions==4.1.1 urllib3==1.26.20 validators==0.20.0 wcwidth==0.2.13 webencodings==0.5.1 widgetsnbextension==3.6.10 xarray==0.16.2 zipp==3.6.0
name: starfish channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - argon2-cffi==21.3.0 - argon2-cffi-bindings==21.2.0 - async-generator==1.10 - attrs==22.2.0 - backcall==0.2.0 - bleach==4.1.0 - cffi==1.15.1 - charset-normalizer==2.0.12 - click==8.0.4 - comm==0.1.4 - coverage==6.2 - cycler==0.11.0 - dataclasses==0.8 - decorator==4.4.2 - defusedxml==0.7.1 - diskcache==5.6.3 - entrypoints==0.4 - execnet==1.9.0 - flake8==5.0.4 - flake8-import-order==0.18.2 - idna==3.10 - imageio==2.15.0 - importlib-metadata==4.2.0 - importlib-resources==5.4.0 - iniconfig==1.1.1 - ipykernel==5.5.6 - ipython==7.16.3 - ipython-genutils==0.2.0 - ipywidgets==7.8.5 - jedi==0.17.2 - jinja2==3.0.3 - joblib==1.1.1 - jsonpath-rw==1.4.0 - jsonschema==3.2.0 - jupyter-client==7.1.2 - jupyter-core==4.9.2 - jupyterlab-pygments==0.1.2 - jupyterlab-widgets==1.1.11 - kiwisolver==1.3.1 - markupsafe==2.0.1 - matplotlib==3.3.4 - mccabe==0.7.0 - mistune==0.8.4 - mypy==0.971 - mypy-extensions==1.0.0 - nbclient==0.5.9 - nbconvert==6.0.7 - nbencdec==0.0.10 - nbformat==5.1.3 - nest-asyncio==1.6.0 - networkx==2.5.1 - notebook==6.4.10 - numpy==1.19.5 - packaging==21.3 - pandas==1.1.5 - pandocfilters==1.5.1 - parso==0.7.1 - pexpect==4.9.0 - pickleshare==0.7.5 - pillow==8.4.0 - pluggy==1.0.0 - ply==3.11 - prometheus-client==0.17.1 - prompt-toolkit==3.0.36 - ptyprocess==0.7.0 - py==1.11.0 - pycodestyle==2.9.1 - pycparser==2.21 - pyflakes==2.5.0 - pygments==2.14.0 - pyparsing==3.1.4 - pyrsistent==0.18.0 - pytest==7.0.1 - pytest-cov==4.0.0 - pytest-xdist==3.0.2 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pywavelets==1.1.1 - pyyaml==6.0.1 - pyzmq==25.1.2 - regional==1.1.2 - requests==2.27.1 - scikit-image==0.17.2 - scikit-learn==0.24.2 - scipy==1.5.4 - seaborn==0.11.2 - semantic-version==2.10.0 - send2trash==1.8.3 - showit==1.1.4 - six==1.17.0 - slicedimage==0.0.5 - terminado==0.12.1 - testpath==0.6.0 - threadpoolctl==3.1.0 - tifffile==2020.9.3 - tomli==1.2.3 - tornado==6.1 - tqdm==4.64.1 - trackpy==0.5.0 - traitlets==4.3.3 - typed-ast==1.5.5 - typing-extensions==4.1.1 - urllib3==1.26.20 - validators==0.20.0 - wcwidth==0.2.13 - webencodings==0.5.1 - widgetsnbextension==3.6.10 - xarray==0.16.2 - zipp==3.6.0 prefix: /opt/conda/envs/starfish
[ "starfish/test/full_pipelines/cli/test_validate.py::TestValidateCommand::test_run_pipeline", "validate_sptx/test_fuzz.py::test_fuzz_mock", "validate_sptx/test_fuzz.py::test_fuzz_codebook", "validate_sptx/test_fuzz.py::test_fuzz_experiment" ]
[]
[]
[]
MIT License
3,031
[ "starfish/starfish.py", "starfish/util/exec.py", "validate_sptx/validate_sptx.py", "environment.yml", ".travis.yml", "validate_sptx/util.py", "sptx-format/environment.yml", "sptx-format/Dockerfile", "Dockerfile", ".dockerignore", "validate_sptx/cli.py" ]
[ "starfish/starfish.py", "starfish/util/exec.py", "validate_sptx/validate_sptx.py", "environment.yml", ".travis.yml", "validate_sptx/util.py", "sptx-format/environment.yml", "sptx-format/Dockerfile", "Dockerfile", ".dockerignore", "validate_sptx/cli.py" ]
oduwsdl__MementoEmbed-131
dfff20f4d336e76de33851d77c3f4a4af83a5ed2
2018-09-06 20:43:09
dfff20f4d336e76de33851d77c3f4a4af83a5ed2
diff --git a/docs/source/conf.py b/docs/source/conf.py index c7c0614..9892732 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -26,7 +26,7 @@ author = 'Shawn M. Jones' # The short X.Y version version = '' # The full version, including alpha/beta/rc tags -release = '0.2018.09.05.234815' +release = '0.2018.09.06.202242' # -- General configuration --------------------------------------------------- diff --git a/mementoembed/favicon.py b/mementoembed/favicon.py index 60203f0..2d53036 100644 --- a/mementoembed/favicon.py +++ b/mementoembed/favicon.py @@ -52,18 +52,27 @@ def get_favicon_from_html(content): for link in links: - if 'icon' in link['rel']: - favicon_uri = link['href'] - break + try: + if 'icon' in link['rel']: + favicon_uri = link['href'] + break + except KeyError: + module_logger.exception("there was no 'rel' attribute in this link tag: {}".format(link)) + favicon_uri == None # if that fails, try the older, nonstandard relation 'shortcut' if favicon_uri == None: for link in links: - if 'shortcut' in link['rel']: - favicon_uri = link['href'] - break + try: + if 'shortcut' in link['rel']: + favicon_uri = link['href'] + break + except KeyError: + module_logger.exception("there was no 'rel' attribute in this link tag: {}".format(link)) + favicon_uri == None + return favicon_uri diff --git a/mementoembed/version.py b/mementoembed/version.py index 72f9069..d1f031b 100644 --- a/mementoembed/version.py +++ b/mementoembed/version.py @@ -1,3 +1,3 @@ __appname__ = "MementoEmbed" -__appversion__ = '0.2018.09.05.234815' +__appversion__ = '0.2018.09.06.202242' __useragent__ = "{}/{}".format(__appname__, __appversion__)
Exception while searching for favicon

MementoEmbed throws an exception for the following URI-M:

* http://wayback.archive-it.org/4887/20141104211213/http://time.com/3502740/ebola-virus-1976/

Here is the log entry:

```
[2018-09-06 14:07:36,093 -0600 ] - ERROR - [ /services/product/socialcard/http://wayback.archive-it.org/4887/20141104211213/http://time.com/3502740/ebola-virus-1976/ ]: mementoembed.services.errors - An unforeseen error has occurred
Traceback (most recent call last):
  File "/Volumes/nerfherder External/Unsynced-Projects/MementoEmbed/mementoembed/services/errors.py", line 27, in handle_errors
    return function_name(urim, preferences)
  File "/Volumes/nerfherder External/Unsynced-Projects/MementoEmbed/mementoembed/services/product.py", line 77, in generate_socialcard_response
    original_favicon_uri = s.original_favicon
  File "/Volumes/nerfherder External/Unsynced-Projects/MementoEmbed/mementoembed/mementosurrogate.py", line 71, in original_favicon
    return self.originalresource.favicon
  File "/Volumes/nerfherder External/Unsynced-Projects/MementoEmbed/mementoembed/originalresource.py", line 49, in favicon
    candidate_favicon = get_favicon_from_html(self.content)
  File "/Volumes/nerfherder External/Unsynced-Projects/MementoEmbed/mementoembed/favicon.py", line 55, in get_favicon_from_html
    if 'icon' in link['rel']:
  File "/Users/smj/.virtualenvs/MementoEmbed-gkunwTeo/lib/python3.7/site-packages/bs4/element.py", line 1071, in __getitem__
    return self.attrs[key]
KeyError: 'rel'
```

The problem is that the code assumes that all `link` tags in html have a `rel` attribute.
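The patch above fixes this by wrapping each `link['rel']` lookup in try/except KeyError. A related defensive pattern with BeautifulSoup is `Tag.get`, which returns None instead of raising when an attribute is missing; the sketch below is illustrative only and is not the code MementoEmbed shipped (the helper name and the sample HTML are made up).

```
from bs4 import BeautifulSoup

html = """
<html><head>
  <link title="no rel attribute here" href="content/favicon.ico">
  <link rel="icon" href="/static/favicon.ico">
</head></html>
"""


def find_favicon(content):
    soup = BeautifulSoup(content, "html.parser")
    for link in soup.find_all("link"):
        # Tag.get returns None when the attribute is absent, so a <link>
        # without rel= no longer raises KeyError.
        rel = link.get("rel") or []
        if "icon" in rel or "shortcut" in rel:
            return link.get("href")
    return None


print(find_favicon(html))  # /static/favicon.ico
```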
oduwsdl/MementoEmbed
diff --git a/tests/test_archiveresource.py b/tests/test_archiveresource.py index 0be51a5..8c33160 100644 --- a/tests/test_archiveresource.py +++ b/tests/test_archiveresource.py @@ -279,3 +279,52 @@ class TestArchiveResource(unittest.TestCase): x = ArchiveResource(urim, httpcache) self.assertEqual(x.favicon, expected_favicon) + + def test_link_tag_no_rel(self): + + expected_favicon = None + + cachedict = { + "http://myarchive.org": + mock_response( + headers={}, + content="""<html> + <head> + <title>Is this a good title?</title> + <link title="a good title" href="content/favicon.ico"> + </head> + <body>Is this all there is to content?</body> + </html>""", + status=200, + url = "testing-url://notused" + ), + expected_favicon: + mock_response( + headers = { 'content-type': 'image/x-testing'}, + content = "a", + status=200, + url = "testing-url://notused" + ), + "http://myarchive.org/favicon.ico": + mock_response( + headers={}, + content="not found", + status=404, + url="testing-url://notused" + ), + "https://www.google.com/s2/favicons?domain=myarchive.org": + mock_response( + headers={}, + content="not found", + status=404, + url="testing-url://notused" + ) + } + + httpcache = mock_httpcache(cachedict) + + urim = "http://myarchive.org/20160518000858/http://example.com/somecontent" + + x = ArchiveResource(urim, httpcache) + + self.assertEqual(x.favicon, expected_favicon)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 1 }, "num_modified_files": 3 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "numpy>=1.16.0", "pandas>=1.0.0" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
aiu==0.2.4 async-timeout==5.0.1 beautifulsoup4==4.13.3 blinker==1.9.0 bs4==0.0.2 certifi==2025.1.31 chardet==5.2.0 charset-normalizer==3.4.1 click==8.1.8 cssselect==1.3.0 dicttoxml==1.7.16 exceptiongroup==1.2.2 filelock==3.18.0 Flask==3.1.0 html5lib==1.1 htmlmin==0.1.12 idna==3.10 importlib_metadata==8.6.1 iniconfig==2.1.0 itsdangerous==2.2.0 Jinja2==3.1.6 jusText==3.0.2 lxml==5.3.1 lxml_html_clean==0.4.1 MarkupSafe==3.0.2 -e git+https://github.com/oduwsdl/MementoEmbed.git@dfff20f4d336e76de33851d77c3f4a4af83a5ed2#egg=mementoembed numpy==2.0.2 packaging==24.2 pandas==2.2.3 pillow==11.1.0 pluggy==1.5.0 pytest==8.3.5 python-dateutil==2.9.0.post0 pytz==2025.2 readability-lxml==0.8.1 redis==5.2.1 requests==2.32.3 requests-cache==0.5.2 requests-file==2.1.0 requests-futures==1.0.2 six==1.17.0 soupsieve==2.6 tldextract==5.1.3 tomli==2.2.1 typing_extensions==4.13.0 tzdata==2025.2 urllib3==2.3.0 warcio==1.7.5 webencodings==0.5.1 Werkzeug==3.1.3 zipp==3.21.0
name: MementoEmbed channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - aiu==0.2.4 - async-timeout==5.0.1 - beautifulsoup4==4.13.3 - blinker==1.9.0 - bs4==0.0.2 - certifi==2025.1.31 - chardet==5.2.0 - charset-normalizer==3.4.1 - click==8.1.8 - cssselect==1.3.0 - dicttoxml==1.7.16 - exceptiongroup==1.2.2 - filelock==3.18.0 - flask==3.1.0 - html5lib==1.1 - htmlmin==0.1.12 - idna==3.10 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - itsdangerous==2.2.0 - jinja2==3.1.6 - justext==3.0.2 - lxml==5.3.1 - lxml-html-clean==0.4.1 - markupsafe==3.0.2 - numpy==2.0.2 - packaging==24.2 - pandas==2.2.3 - pillow==11.1.0 - pluggy==1.5.0 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - pytz==2025.2 - readability-lxml==0.8.1 - redis==5.2.1 - requests==2.32.3 - requests-cache==0.5.2 - requests-file==2.1.0 - requests-futures==1.0.2 - six==1.17.0 - soupsieve==2.6 - tldextract==5.1.3 - tomli==2.2.1 - typing-extensions==4.13.0 - tzdata==2025.2 - urllib3==2.3.0 - warcio==1.7.5 - webencodings==0.5.1 - werkzeug==3.1.3 - zipp==3.21.0 prefix: /opt/conda/envs/MementoEmbed
[ "tests/test_archiveresource.py::TestArchiveResource::test_link_tag_no_rel" ]
[]
[ "tests/test_archiveresource.py::TestArchiveResource::test_404_favicon_from_constructed_favicon_uri_so_google_service", "tests/test_archiveresource.py::TestArchiveResource::test_404_favicon_from_html_so_constructed_favicon_uri", "tests/test_archiveresource.py::TestArchiveResource::test_collection_data_extraction", "tests/test_archiveresource.py::TestArchiveResource::test_favicon_from_html", "tests/test_archiveresource.py::TestArchiveResource::test_favicon_from_html_relative_uri", "tests/test_archiveresource.py::TestArchiveResource::test_no_good_favicon", "tests/test_archiveresource.py::TestArchiveResource::test_simplestuff" ]
[]
MIT License
3,032
[ "mementoembed/version.py", "mementoembed/favicon.py", "docs/source/conf.py" ]
[ "mementoembed/version.py", "mementoembed/favicon.py", "docs/source/conf.py" ]
asottile__pyupgrade-53
a37342a71a84f3046b90d46b656b4cae16266617
2018-09-06 22:54:00
a37342a71a84f3046b90d46b656b4cae16266617
diff --git a/pyupgrade.py b/pyupgrade.py index 486a440..b9cb902 100644 --- a/pyupgrade.py +++ b/pyupgrade.py @@ -818,10 +818,12 @@ def _to_fstring(src, call): params[kwd.arg] = _unparse(kwd.value) parts = [] - for i, (s, name, spec, conv) in enumerate(parse_format('f' + src)): + i = 0 + for s, name, spec, conv in parse_format('f' + src): if name is not None: k, dot, rest = name.partition('.') name = ''.join((params[k or str(i)], dot, rest)) + i += 1 parts.append((s, name, spec, conv)) return unparse_parsed_string(parts)
KeyError running with --py36-plus on matplotlib's ticker.py

Running pyupgrade 1.5 on matplotlib's ticker.py as of, say, mpl3.0rc2 (https://github.com/matplotlib/matplotlib/blob/v3.0.0rc2/lib/matplotlib/ticker.py) gives me

```
Traceback (most recent call last):
  File "/usr/bin/pyupgrade", line 11, in <module>
    sys.exit(main())
  File "/usr/lib/python3.7/site-packages/pyupgrade.py", line 907, in main
    ret |= fix_file(filename, args)
  File "/usr/lib/python3.7/site-packages/pyupgrade.py", line 884, in fix_file
    contents_text = _fix_fstrings(contents_text)
  File "/usr/lib/python3.7/site-packages/pyupgrade.py", line 858, in _fix_fstrings
    tokens[i] = token._replace(src=_to_fstring(token.src, node))
  File "/usr/lib/python3.7/site-packages/pyupgrade.py", line 824, in _to_fstring
    name = ''.join((params[k or str(i)], dot, rest))
KeyError: '3'
```
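To see why the positional index drifts, here is a small standalone sketch. It uses the stdlib `string.Formatter` as a stand-in, which is an assumption: pyupgrade has its own `parse_format`, but the relevant behaviour is the same in that escaped braces come back as literal-only chunks whose field name is `None`. Counting every chunk with `enumerate()` therefore ends up looking for a positional argument that does not exist, which is the `KeyError` in the traceback.

```python
import string

chunks = list(string.Formatter().parse("{}{{}}{}"))
# -> [('', '', '', None), ('{', None, None, None),
#     ('}', None, None, None), ('', '', '', None)]

params = {"0": "escaped", "1": "y"}

i = 0
for literal, name, spec, conversion in chunks:
    if name is None:
        continue  # literal-only chunk: must not consume a positional slot
    # With enumerate() over all chunks this lookup would reach params["3"].
    print(params[name or str(i)])
    i += 1
```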
asottile/pyupgrade
diff --git a/tests/pyupgrade_test.py b/tests/pyupgrade_test.py index 80d0b3b..24b47f4 100644 --- a/tests/pyupgrade_test.py +++ b/tests/pyupgrade_test.py @@ -721,6 +721,7 @@ def test_fix_fstrings_noop(s): ('"{.x} {.y}".format(a, b)', 'f"{a.x} {b.y}"'), ('"{} {}".format(a.b, c.d)', 'f"{a.b} {c.d}"'), ('"hello {}!".format(name)', 'f"hello {name}!"'), + ('"{}{{}}{}".format(escaped, y)', 'f"{escaped}{{}}{y}"'), # TODO: poor man's f-strings? # '"{foo}".format(**locals())'
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
1.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio", "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements-dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
cfgv==3.4.0 coverage==7.8.0 distlib==0.3.9 exceptiongroup==1.2.2 execnet==2.1.1 filelock==3.18.0 flake8==7.2.0 identify==2.6.9 iniconfig==2.1.0 mccabe==0.7.0 nodeenv==1.9.1 packaging==24.2 platformdirs==4.3.7 pluggy==1.5.0 pre_commit==4.2.0 pycodestyle==2.13.0 pyflakes==3.3.2 pytest==8.3.5 pytest-asyncio==0.26.0 pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-xdist==3.6.1 -e git+https://github.com/asottile/pyupgrade.git@a37342a71a84f3046b90d46b656b4cae16266617#egg=pyupgrade PyYAML==6.0.2 tokenize_rt==6.1.0 tomli==2.2.1 typing_extensions==4.13.0 virtualenv==20.29.3
name: pyupgrade channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - cfgv==3.4.0 - coverage==7.8.0 - distlib==0.3.9 - exceptiongroup==1.2.2 - execnet==2.1.1 - filelock==3.18.0 - flake8==7.2.0 - identify==2.6.9 - iniconfig==2.1.0 - mccabe==0.7.0 - nodeenv==1.9.1 - packaging==24.2 - platformdirs==4.3.7 - pluggy==1.5.0 - pre-commit==4.2.0 - pycodestyle==2.13.0 - pyflakes==3.3.2 - pytest==8.3.5 - pytest-asyncio==0.26.0 - pytest-cov==6.0.0 - pytest-mock==3.14.0 - pytest-xdist==3.6.1 - pyyaml==6.0.2 - tokenize-rt==6.1.0 - tomli==2.2.1 - typing-extensions==4.13.0 - virtualenv==20.29.3 prefix: /opt/conda/envs/pyupgrade
[ "tests/pyupgrade_test.py::test_fix_fstrings[\"{}{{}}{}\".format(escaped," ]
[ "tests/pyupgrade_test.py::test_sets[set(\\n", "tests/pyupgrade_test.py::test_dictcomps[dict(((a,", "tests/pyupgrade_test.py::test_percent_format[\"%s\"", "tests/pyupgrade_test.py::test_sets[set((1,", "tests/pyupgrade_test.py::test_sets[set([x", "tests/pyupgrade_test.py::test_sets[set((x", "tests/pyupgrade_test.py::test_sets[set(((1,", "tests/pyupgrade_test.py::test_sets[set((a,", "tests/pyupgrade_test.py::test_sets[set((((1,", "tests/pyupgrade_test.py::test_sets[set(\\n(1,", "tests/pyupgrade_test.py::test_sets[set((\\n1,\\n2,\\n))\\n-{\\n1,\\n2,\\n}\\n]", "tests/pyupgrade_test.py::test_sets[set((frozenset(set((1,", "tests/pyupgrade_test.py::test_sets[set((1,))-{1}]", "tests/pyupgrade_test.py::test_dictcomps[dict((a,", "tests/pyupgrade_test.py::test_dictcomps[dict([a,", "tests/pyupgrade_test.py::test_dictcomps[dict([(a,", "tests/pyupgrade_test.py::test_dictcomps[dict(((a),", "tests/pyupgrade_test.py::test_dictcomps[dict((k,", "tests/pyupgrade_test.py::test_dictcomps[dict(\\n", "tests/pyupgrade_test.py::test_dictcomps[x(\\n", "tests/pyupgrade_test.py::test_main_changes_a_file", "tests/pyupgrade_test.py::test_main_keeps_line_endings" ]
[ "tests/pyupgrade_test.py::test_roundtrip_text[]", "tests/pyupgrade_test.py::test_roundtrip_text[foo]", "tests/pyupgrade_test.py::test_roundtrip_text[{}]", "tests/pyupgrade_test.py::test_roundtrip_text[{0}]", "tests/pyupgrade_test.py::test_roundtrip_text[{named}]", "tests/pyupgrade_test.py::test_roundtrip_text[{!r}]", "tests/pyupgrade_test.py::test_roundtrip_text[{:>5}]", "tests/pyupgrade_test.py::test_roundtrip_text[{{]", "tests/pyupgrade_test.py::test_roundtrip_text[}}]", "tests/pyupgrade_test.py::test_roundtrip_text[{0!s:15}]", "tests/pyupgrade_test.py::test_intentionally_not_round_trip[{:}-{}]", "tests/pyupgrade_test.py::test_intentionally_not_round_trip[{0:}-{0}]", "tests/pyupgrade_test.py::test_intentionally_not_round_trip[{0!r:}-{0!r}]", "tests/pyupgrade_test.py::test_sets[set()-set()]", "tests/pyupgrade_test.py::test_sets[set((\\n))-set((\\n))]", "tests/pyupgrade_test.py::test_sets[set", "tests/pyupgrade_test.py::test_sets[set(())-set()]", "tests/pyupgrade_test.py::test_sets[set([])-set()]", "tests/pyupgrade_test.py::test_sets[set((", "tests/pyupgrade_test.py::test_sets[set([1,", "tests/pyupgrade_test.py::test_sets[set(x", "tests/pyupgrade_test.py::test_sets[set([(1,", "tests/pyupgrade_test.py::test_sets[set([((1,", "tests/pyupgrade_test.py::test_dictcomps[x", "tests/pyupgrade_test.py::test_dictcomps[dict()-dict()]", "tests/pyupgrade_test.py::test_dictcomps[(-(]", "tests/pyupgrade_test.py::test_dictcomps[dict", "tests/pyupgrade_test.py::test_format_literals['{}'.format(1)-'{}'.format(1)]", "tests/pyupgrade_test.py::test_format_literals['{'.format(1)-'{'.format(1)]", "tests/pyupgrade_test.py::test_format_literals['}'.format(1)-'}'.format(1)]", "tests/pyupgrade_test.py::test_format_literals[x", "tests/pyupgrade_test.py::test_format_literals['{0}'.format(1)-'{}'.format(1)]", "tests/pyupgrade_test.py::test_format_literals['''{0}\\n{1}\\n'''.format(1,", "tests/pyupgrade_test.py::test_format_literals['{0}'", "tests/pyupgrade_test.py::test_format_literals[print(\\n", "tests/pyupgrade_test.py::test_format_literals['{0:<{1}}'.format(1,", "tests/pyupgrade_test.py::test_imports_unicode_literals[import", "tests/pyupgrade_test.py::test_imports_unicode_literals[from", "tests/pyupgrade_test.py::test_imports_unicode_literals[x", "tests/pyupgrade_test.py::test_imports_unicode_literals[\"\"\"docstring\"\"\"\\nfrom", "tests/pyupgrade_test.py::test_unicode_literals[(-False-(]", "tests/pyupgrade_test.py::test_unicode_literals[u''-False-u'']", "tests/pyupgrade_test.py::test_unicode_literals[u''-True-'']", "tests/pyupgrade_test.py::test_unicode_literals[from", "tests/pyupgrade_test.py::test_unicode_literals[\"\"\"with", "tests/pyupgrade_test.py::test_noop_octal_literals[0-0]", "tests/pyupgrade_test.py::test_noop_octal_literals[00-00]", "tests/pyupgrade_test.py::test_noop_octal_literals[1-1]", "tests/pyupgrade_test.py::test_noop_octal_literals[12345-12345]", "tests/pyupgrade_test.py::test_noop_octal_literals[1.2345-1.2345]", "tests/pyupgrade_test.py::test_is_bytestring_true[b'']", "tests/pyupgrade_test.py::test_is_bytestring_true[b\"\"]", "tests/pyupgrade_test.py::test_is_bytestring_true[B\"\"]", "tests/pyupgrade_test.py::test_is_bytestring_true[B'']", "tests/pyupgrade_test.py::test_is_bytestring_true[rb''0]", "tests/pyupgrade_test.py::test_is_bytestring_true[rb''1]", "tests/pyupgrade_test.py::test_is_bytestring_false[]", "tests/pyupgrade_test.py::test_is_bytestring_false[\"\"]", "tests/pyupgrade_test.py::test_is_bytestring_false['']", "tests/pyupgrade_test.py::test_is_bytestring_false[u\"\"]", 
"tests/pyupgrade_test.py::test_is_bytestring_false[\"b\"]", "tests/pyupgrade_test.py::test_parse_percent_format[\"\"-expected0]", "tests/pyupgrade_test.py::test_parse_percent_format[\"%%\"-expected1]", "tests/pyupgrade_test.py::test_parse_percent_format[\"%s\"-expected2]", "tests/pyupgrade_test.py::test_parse_percent_format[\"%s", "tests/pyupgrade_test.py::test_parse_percent_format[\"%(hi)s\"-expected4]", "tests/pyupgrade_test.py::test_parse_percent_format[\"%()s\"-expected5]", "tests/pyupgrade_test.py::test_parse_percent_format[\"%#o\"-expected6]", "tests/pyupgrade_test.py::test_parse_percent_format[\"%", "tests/pyupgrade_test.py::test_parse_percent_format[\"%5d\"-expected8]", "tests/pyupgrade_test.py::test_parse_percent_format[\"%*d\"-expected9]", "tests/pyupgrade_test.py::test_parse_percent_format[\"%.f\"-expected10]", "tests/pyupgrade_test.py::test_parse_percent_format[\"%.5f\"-expected11]", "tests/pyupgrade_test.py::test_parse_percent_format[\"%.*f\"-expected12]", "tests/pyupgrade_test.py::test_parse_percent_format[\"%ld\"-expected13]", "tests/pyupgrade_test.py::test_parse_percent_format[\"%(complete)#4.4f\"-expected14]", "tests/pyupgrade_test.py::test_percent_to_format[%s-{}]", "tests/pyupgrade_test.py::test_percent_to_format[%%%s-%{}]", "tests/pyupgrade_test.py::test_percent_to_format[%(foo)s-{foo}]", "tests/pyupgrade_test.py::test_percent_to_format[%2f-{:2f}]", "tests/pyupgrade_test.py::test_percent_to_format[%r-{!r}]", "tests/pyupgrade_test.py::test_percent_to_format[%a-{!a}]", "tests/pyupgrade_test.py::test_simplify_conversion_flag[-]", "tests/pyupgrade_test.py::test_simplify_conversion_flag[", "tests/pyupgrade_test.py::test_simplify_conversion_flag[#0-", "tests/pyupgrade_test.py::test_simplify_conversion_flag[--<]", "tests/pyupgrade_test.py::test_percent_format_noop[\"%s\"", "tests/pyupgrade_test.py::test_percent_format_noop[b\"%s\"", "tests/pyupgrade_test.py::test_percent_format_noop[\"%*s\"", "tests/pyupgrade_test.py::test_percent_format_noop[\"%.*s\"", "tests/pyupgrade_test.py::test_percent_format_noop[\"%d\"", "tests/pyupgrade_test.py::test_percent_format_noop[\"%i\"", "tests/pyupgrade_test.py::test_percent_format_noop[\"%u\"", "tests/pyupgrade_test.py::test_percent_format_noop[\"%c\"", "tests/pyupgrade_test.py::test_percent_format_noop[\"%#o\"", "tests/pyupgrade_test.py::test_percent_format_noop[\"%()s\"", "tests/pyupgrade_test.py::test_percent_format_noop[\"%4%\"", "tests/pyupgrade_test.py::test_percent_format_noop[\"%.2r\"", "tests/pyupgrade_test.py::test_percent_format_noop[\"%.2a\"", "tests/pyupgrade_test.py::test_percent_format_noop[i", "tests/pyupgrade_test.py::test_percent_format_noop[\"%(1)s\"", "tests/pyupgrade_test.py::test_percent_format_noop[\"%(a)s\"", "tests/pyupgrade_test.py::test_percent_format_noop[\"%(ab)s\"", "tests/pyupgrade_test.py::test_percent_format[\"trivial\"", "tests/pyupgrade_test.py::test_percent_format[\"%s%%", "tests/pyupgrade_test.py::test_percent_format[\"%3f\"", "tests/pyupgrade_test.py::test_percent_format[\"%-5s\"", "tests/pyupgrade_test.py::test_percent_format[\"brace", "tests/pyupgrade_test.py::test_percent_format[\"%(k)s\"", "tests/pyupgrade_test.py::test_fix_super_noop[x(]", "tests/pyupgrade_test.py::test_fix_super_noop[class", "tests/pyupgrade_test.py::test_fix_super_noop[def", "tests/pyupgrade_test.py::test_fix_super[class", "tests/pyupgrade_test.py::test_fix_fstrings_noop[(]", "tests/pyupgrade_test.py::test_fix_fstrings_noop[\"{}\"", "tests/pyupgrade_test.py::test_fix_fstrings_noop[\"{}\".format(\\n", 
"tests/pyupgrade_test.py::test_fix_fstrings_noop[\"{}", "tests/pyupgrade_test.py::test_fix_fstrings_noop[\"{foo}", "tests/pyupgrade_test.py::test_fix_fstrings_noop[\"{0}", "tests/pyupgrade_test.py::test_fix_fstrings_noop[\"{x}", "tests/pyupgrade_test.py::test_fix_fstrings_noop[\"{x.y}", "tests/pyupgrade_test.py::test_fix_fstrings_noop[b\"{}", "tests/pyupgrade_test.py::test_fix_fstrings_noop[\"{:{}}\".format(x,", "tests/pyupgrade_test.py::test_fix_fstrings[\"{}", "tests/pyupgrade_test.py::test_fix_fstrings[\"{1}", "tests/pyupgrade_test.py::test_fix_fstrings[\"{x.y}\".format(x=z)-f\"{z.y}\"]", "tests/pyupgrade_test.py::test_fix_fstrings[\"{.x}", "tests/pyupgrade_test.py::test_fix_fstrings[\"hello", "tests/pyupgrade_test.py::test_main_trivial", "tests/pyupgrade_test.py::test_main_noop", "tests/pyupgrade_test.py::test_main_syntax_error", "tests/pyupgrade_test.py::test_main_non_utf8_bytes", "tests/pyupgrade_test.py::test_py3_plus_argument_unicode_literals", "tests/pyupgrade_test.py::test_py3_plus_super", "tests/pyupgrade_test.py::test_py36_plus_fstrings" ]
[]
MIT License
3,033
[ "pyupgrade.py" ]
[ "pyupgrade.py" ]
google__yapf-615
841ad411adef77a38bf9e98f5ab843d65f3177d7
2018-09-07 10:13:16
841ad411adef77a38bf9e98f5ab843d65f3177d7
diff --git a/CHANGELOG b/CHANGELOG index 83b26c9..698e6ea 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -6,6 +6,10 @@ ### Added - Added `INDENT_BLANK_LINES` knob to select whether the blank lines are empty or indented consistently with the current block. +### Fixed +- Correctly determine if a scope is the last in line. It avoids a wrong + computation of the line end when determining if it must split after the + opening bracket with `DEDENT_CLOSING_BRACKETS` enabled. ## [0.24.0] 2018-09-07 ### Added diff --git a/yapf/yapflib/format_decision_state.py b/yapf/yapflib/format_decision_state.py index cd6a729..40b4571 100644 --- a/yapf/yapflib/format_decision_state.py +++ b/yapf/yapflib/format_decision_state.py @@ -930,6 +930,7 @@ def _IsFunctionDefinition(current): def _IsLastScopeInLine(current): + current = current.matching_bracket while current: current = current.next_token if current and current.OpensScope():
DEDENT_CLOSING_BRACKETS does not seem to work if arguments fit in line I am getting somewhat inconsistent formatting on code like this: ```python def long_function_name(argument1: Tuple[str, int], argument2: List[str]) -> Iterator[str]: pass def ever_longer_function_name(argument1: Tuple[str, int], argument2: List[str]) -> Iterator[str]: pass ``` My `setup.cfg`: ```ini [yapf] based_on_style = pep8 coalesce_brackets = True dedent_closing_brackets = True split_penalty_after_opening_bracket = 0 split_before_first_argument = true ``` The result is this: ```python def long_function_name(argument1: Tuple[str, int], argument2: List[str]) -> Iterator[str]: pass def ever_longer_function_name( argument1: Tuple[str, int], argument2: List[str] ) -> Iterator[str]: pass ``` However, i would prefer the second style for the first example as well. I was expecting the `split_before_first_argument` option to have exactly that effect, but it does not seem to work. With type annotations, the function signatures can get significant length other than their arguments. so this happens quite frequently in my code base. This issue seems to happen if the arguments fit in one line, but the full signature does not. If the arguments are longer than one line, it works fine. I am using version 0.14.0.
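A small reproduction sketch through yapf's public API, mirroring the reporter's configuration. The exact return shape of `FormatCode` varies between releases, so it is handled defensively here; the undefined annotation names do not matter because yapf only parses the source.

```python
from yapf.yapflib.yapf_api import FormatCode

SRC = (
    "def long_function_name(argument1: Tuple[str, int],"
    " argument2: List[str]) -> Iterator[str]:\n"
    "    pass\n"
)

result = FormatCode(
    SRC,
    style_config="{based_on_style: pep8, coalesce_brackets: True,"
    " dedent_closing_brackets: True, split_penalty_after_opening_bracket: 0,"
    " split_before_first_argument: True}",
)
# Older releases return just the string, newer ones a (code, changed) tuple.
print(result[0] if isinstance(result, tuple) else result)
```

With the fix above, both signatures are split after the opening bracket and get a dedented closing bracket, instead of only the longer one.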
google/yapf
diff --git a/yapftests/reformatter_basic_test.py b/yapftests/reformatter_basic_test.py index 57a45d6..3de71a8 100644 --- a/yapftests/reformatter_basic_test.py +++ b/yapftests/reformatter_basic_test.py @@ -2558,6 +2558,37 @@ x = [1, 2, 3, 4, 5, 6, 7,] finally: style.SetGlobalStyle(style.CreateChromiumStyle()) + def testDedentClosingBracketsWithTypeAnnotationExceedingLineLength(self): + try: + style.SetGlobalStyle( + style.CreateStyleFromConfig('{based_on_style: chromium,' + ' dedent_closing_brackets: True}')) + unformatted_code = textwrap.dedent("""\ + def function(first_argument_xxxxxxxxxxxxxxxx=(0,), second_argument=None) -> None: + pass + + + def function(first_argument_xxxxxxxxxxxxxxxxxxxxxxx=(0,), second_argument=None) -> None: + pass + """) + expected_formatted_code = textwrap.dedent("""\ + def function( + first_argument_xxxxxxxxxxxxxxxx=(0,), second_argument=None + ) -> None: + pass + + + def function( + first_argument_xxxxxxxxxxxxxxxxxxxxxxx=(0,), second_argument=None + ) -> None: + pass + """) + uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(uwlines)) + finally: + style.SetGlobalStyle(style.CreateChromiumStyle()) + if __name__ == '__main__': unittest.main()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 2 }
0.24
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work -e git+https://github.com/google/yapf.git@841ad411adef77a38bf9e98f5ab843d65f3177d7#egg=yapf zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: yapf channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 prefix: /opt/conda/envs/yapf
[ "yapftests/reformatter_basic_test.py::BasicReformatterTest::testDedentClosingBracketsWithTypeAnnotationExceedingLineLength" ]
[]
[ "yapftests/reformatter_basic_test.py::BasicReformatterTest::testArgsAndKwargsFormatting", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testBinaryOperators", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testBlankLineBeforeClassDocstring", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testBlankLineBeforeModuleDocstring", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testBlankLinesAtEndOfFile", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testBlankLinesBeforeDecorators", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testBlankLinesBeforeFunctionsNotInColumnZero", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testClosingBracketIndent", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testClosingBracketsInlinedInCall", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testCoalesceBracketsOnDict", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testCommentBeforeFuncDef", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testCommentBetweenDecorators", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testCommentColumnLimitOverflow", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testComments", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testCommentsInDataLiteral", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testCommentsWithContinuationMarkers", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testCommentsWithTrailingSpaces", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testComprehensionForAndIf", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testContiguousList", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testContinuationIndent", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testContinuationMarkerAfterStringWithContinuation", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testContinuationMarkers", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testContinuationSpaceRetention", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testDictSetGenerator", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testDictionaryElementsOnOneLine", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testDictionaryMakerFormatting", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testDictionaryOnOwnLine", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testDictionaryValuesOnOwnLines", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testDisableEndingCommaHeuristic", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testDocstringAndMultilineComment", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testDocstrings", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testDontAddBlankLineAfterMultilineString", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testDontSplitKeywordValueArguments", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testEllipses", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testEmptyContainers", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testEndingComment", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testEndingWhitespaceAfterSimpleStatement", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testExcessCharacters", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testExcessLineCountWithDefaultKeywords", 
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testExpressionPenalties", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testFormattingListComprehensions", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testFunctionCallArguments", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testFunctionCallContinuationLine", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testFunctionCallInDict", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testFunctionCallInNestedDict", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testI18n", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testI18nCommentsInDataLiteral", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testI18nNonFormatting", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testIfConditionalParens", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testIfExpressionWithFunctionCall", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testImportAsList", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testIndentBlankLines", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testLineDepthOfSingleLineStatement", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testLineWrapInForExpression", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testListComprehension", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testListComprehensionPreferNoBreakForTrivialExpression", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testListComprehensionPreferOneLine", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testListComprehensionPreferOneLineOverArithmeticSplit", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testListComprehensionPreferThreeLinesForLineWrap", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testListWithFunctionCalls", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testMatchingParenSplittingMatching", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testMultilineComment", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testMultilineCommentReformatted", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testMultilineDictionaryKeys", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testMultilineDocstringAndMultilineComment", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testMultilineLambdas", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testMultilineShebang", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testMultilineString", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testMultipleContinuationMarkers", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testMultipleUgliness", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testNamedAssignNotAtEndOfLine", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testNestedDictionary", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testNestedListsInDictionary", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testNoBreakOutsideOfBracket", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testNoKeywordArgumentBreakage", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testNoPenaltySplitting", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testNoQueueSeletionInMiddleOfLine", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testNoSpaceBetweenUnaryOpAndOpeningParen", 
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testNoSpacesAroundKeywordDefaultValues", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testNoSpacesBetweenOpeningBracketAndStartingOperator", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testNoSpacesBetweenSubscriptsAndCalls", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testNoSplittingAroundTermOperators", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testNoSplittingBeforeEndingSubscriptBracket", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testNoSplittingOnSingleArgument", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testNoSplittingWhenBinPacking", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testNoSplittingWithinSubscriptList", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testNotInParams", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testNotSplittingAfterSubscript", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testOpeningAndClosingBrackets", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testOverColumnLimit", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testPseudoParens", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testRelativeImportStatements", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testRelaxArraySubscriptAffinity", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testSimpleFunctionsWithTrailingComments", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testSimpleMultilineCode", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testSimpleMultilineWithComments", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testSingleComment", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testSingleLineFunctions", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testSingleLineList", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testSpaceAfterNotOperator", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testSplitAfterComment", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testSplitListWithComment", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testSplitListWithInterspersedComments", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testSplitListWithTerminatingComma", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testSplitStringsIfSurroundedByParens", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testSplittingAllArgs", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testSplittingArgumentsTerminatedByComma", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testSplittingArraysSensibly", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testSplittingBeforeFirstArgumentOnCompoundStatement", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testSplittingBeforeFirstArgumentOnFunctionCall", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testSplittingBeforeFirstArgumentOnFunctionDefinition", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testSplittingBeforeFirstElementListArgument", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testSplittingOneArgumentList", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testStableDictionaryFormatting", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testStableInlinedDictionaryFormatting", 
"yapftests/reformatter_basic_test.py::BasicReformatterTest::testSubscriptExpression", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testTrailerOnSingleLine", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testTrailingCommaAndBracket", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testTupleCohesion", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testTupleCommaBeforeLastParen", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testUnaryNotOperator", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testUnaryOpInDictionaryValue", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testUnbreakableNot", "yapftests/reformatter_basic_test.py::BasicReformatterTest::testUnformattedAfterMultilineString" ]
[]
Apache License 2.0
3,034
[ "yapf/yapflib/format_decision_state.py", "CHANGELOG" ]
[ "yapf/yapflib/format_decision_state.py", "CHANGELOG" ]
python-pillow__Pillow-3338
41954f244705b247667f1ea228e932ca6390bcd6
2018-09-07 10:51:11
78c8b1f341919a4f7e19e29056713d8f738c9c88
diff --git a/src/PIL/TiffImagePlugin.py b/src/PIL/TiffImagePlugin.py index 66b211cbf..c1a785ef3 100644 --- a/src/PIL/TiffImagePlugin.py +++ b/src/PIL/TiffImagePlugin.py @@ -567,6 +567,9 @@ class ImageFileDirectory_v2(MutableMapping): if self.tagtype[tag] == 7 and py3: values = [value.encode("ascii", 'replace') if isinstance( value, str) else value] + elif self.tagtype[tag] == 5: + values = [float(v) if isinstance(v, int) else v + for v in values] values = tuple(info.cvt_enum(value) for value in values)
Save compressed TIFF only accepts float values for dpi

There is a trap in setting the resolution for compressed TIFF images.

```python
# pillow v3.1.1
import PIL.Image

im = PIL.Image.new('CMYK', (100, 100))

dpi = 100.0 # works
dpi = 100 # fails

im.save('test.tif',
        format='TIFF',
        dpi=(dpi, dpi),
        compression='tiff_lzw',
        #compression='tiff_deflate',
        #compression='tiff_adobe_deflate'
        )
```

Integers fail silently resulting in the resolution value missing from the saved file. Floats work as expected.
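The patch above coerces integer values to floats for RATIONAL tags (TIFF field type 5, used for XResolution/YResolution) before they reach the libtiff writer. A simplified standalone sketch of that coercion; the helper name is made up for illustration and is not Pillow API:

```python
RATIONAL = 5  # TIFF field type for unsigned rationals, e.g. XResolution


def coerce_rational_values(values, tag_type):
    # Promote ints to floats so the resolution tag is written instead of
    # being silently dropped when saving with libtiff compression.
    if tag_type == RATIONAL:
        return tuple(float(v) if isinstance(v, int) else v for v in values)
    return tuple(values)


print(coerce_rational_values((100, 100), RATIONAL))  # (100.0, 100.0)
```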
python-pillow/Pillow
diff --git a/Tests/test_file_libtiff.py b/Tests/test_file_libtiff.py index 77caa0b9d..7735b5bea 100644 --- a/Tests/test_file_libtiff.py +++ b/Tests/test_file_libtiff.py @@ -231,6 +231,16 @@ class TestFileLibTiff(LibTiffTestCase): TiffImagePlugin.WRITE_LIBTIFF = False + def test_int_dpi(self): + # issue #1765 + im = hopper('RGB') + out = self.tempfile('temp.tif') + TiffImagePlugin.WRITE_LIBTIFF = True + im.save(out, dpi=(72, 72)) + TiffImagePlugin.WRITE_LIBTIFF = False + reloaded = Image.open(out) + self.assertEqual(reloaded.info['dpi'], (72.0, 72.0)) + def test_g3_compression(self): i = Image.open('Tests/images/hopper_g4_500.tif') out = self.tempfile("temp.tif")
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
5.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": null, "pre_install": [ "apt-get update", "apt-get install -y gcc libjpeg-dev zlib1g-dev libtiff5-dev libfreetype6-dev liblcms2-dev libwebp-dev tcl8.6-dev tk8.6-dev libharfbuzz-dev libfribidi-dev libxcb1-dev" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.16 babel==2.17.0 blessed==1.20.0 build==1.2.2.post1 certifi==2025.1.31 charset-normalizer==3.4.1 check-manifest==0.50 cov-core==1.15.0 coverage==7.8.0 coveralls==4.0.1 docopt==0.6.2 docutils==0.21.2 exceptiongroup==1.2.2 idna==3.10 imagesize==1.4.1 importlib_metadata==8.6.1 iniconfig==2.1.0 jarn.viewdoc==2.7 Jinja2==3.1.6 MarkupSafe==3.0.2 olefile==0.47 packaging==24.2 -e git+https://github.com/python-pillow/Pillow.git@41954f244705b247667f1ea228e932ca6390bcd6#egg=Pillow pluggy==1.5.0 pycodestyle==2.13.0 pyflakes==3.3.1 Pygments==2.19.1 pyproject_hooks==1.2.0 pyroma==4.2 pytest==8.3.5 pytest-cov==6.0.0 pytz==2025.2 requests==2.32.3 six==1.17.0 snowballstemmer==2.2.0 Sphinx==7.4.7 sphinx-rtd-theme==3.0.2 sphinxcontrib-applehelp==2.0.0 sphinxcontrib-devhelp==2.0.0 sphinxcontrib-htmlhelp==2.1.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==2.0.0 sphinxcontrib-serializinghtml==2.0.0 tomli==2.2.1 trove-classifiers==2025.3.19.19 urllib3==2.3.0 wcwidth==0.2.13 zipp==3.21.0
name: Pillow channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.16 - babel==2.17.0 - blessed==1.20.0 - build==1.2.2.post1 - certifi==2025.1.31 - charset-normalizer==3.4.1 - check-manifest==0.50 - cov-core==1.15.0 - coverage==7.8.0 - coveralls==4.0.1 - docopt==0.6.2 - docutils==0.21.2 - exceptiongroup==1.2.2 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - jarn-viewdoc==2.7 - jinja2==3.1.6 - markupsafe==3.0.2 - olefile==0.47 - packaging==24.2 - pluggy==1.5.0 - pycodestyle==2.13.0 - pyflakes==3.3.1 - pygments==2.19.1 - pyproject-hooks==1.2.0 - pyroma==4.2 - pytest==8.3.5 - pytest-cov==6.0.0 - pytz==2025.2 - requests==2.32.3 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==7.4.7 - sphinx-rtd-theme==3.0.2 - sphinxcontrib-applehelp==2.0.0 - sphinxcontrib-devhelp==2.0.0 - sphinxcontrib-htmlhelp==2.1.0 - sphinxcontrib-jquery==4.1 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==2.0.0 - sphinxcontrib-serializinghtml==2.0.0 - tomli==2.2.1 - trove-classifiers==2025.3.19.19 - urllib3==2.3.0 - wcwidth==0.2.13 - zipp==3.21.0 prefix: /opt/conda/envs/Pillow
[ "Tests/test_file_libtiff.py::TestFileLibTiff::test_int_dpi", "Tests/test_file_libtiff.py::TestFileLibTiff::test_little_endian", "Tests/test_file_libtiff.py::TestFileLibTiff::test_lzw", "Tests/test_file_libtiff.py::TestFileLibTiff::test_multipage", "Tests/test_file_libtiff.py::TestFileLibTiff::test_multipage_compression", "Tests/test_file_libtiff.py::TestFileLibTiff::test_multipage_nframes", "Tests/test_file_libtiff.py::TestFileLibTiff::test_page_number_x_0", "Tests/test_file_libtiff.py::TestFileLibTiff::test_read_icc", "Tests/test_file_libtiff.py::TestFileLibTiff::test_sampleformat", "Tests/test_file_libtiff.py::TestFileLibTiff::test_save_bytesio", "Tests/test_file_libtiff.py::TestFileLibTiff::test_save_tiff_with_jpegtables", "Tests/test_file_libtiff.py::TestFileLibTiff::test_write_metadata" ]
[]
[ "Tests/test_file_libtiff.py::TestFileLibTiff::test_12bit_rawmode", "Tests/test_file_libtiff.py::TestFileLibTiff::test_16bit_RGBa_tiff", "Tests/test_file_libtiff.py::TestFileLibTiff::test_4bit", "Tests/test_file_libtiff.py::TestFileLibTiff::test__next", "Tests/test_file_libtiff.py::TestFileLibTiff::test_additional_metadata", "Tests/test_file_libtiff.py::TestFileLibTiff::test_adobe_deflate_tiff", "Tests/test_file_libtiff.py::TestFileLibTiff::test_big_endian", "Tests/test_file_libtiff.py::TestFileLibTiff::test_blur", "Tests/test_file_libtiff.py::TestFileLibTiff::test_cmyk_save", "Tests/test_file_libtiff.py::TestFileLibTiff::test_compressions", "Tests/test_file_libtiff.py::TestFileLibTiff::test_crashing_metadata", "Tests/test_file_libtiff.py::TestFileLibTiff::test_fd_duplication", "Tests/test_file_libtiff.py::TestFileLibTiff::test_fp_leak", "Tests/test_file_libtiff.py::TestFileLibTiff::test_g3_compression", "Tests/test_file_libtiff.py::TestFileLibTiff::test_g4_eq_png", "Tests/test_file_libtiff.py::TestFileLibTiff::test_g4_fillorder_eq_png", "Tests/test_file_libtiff.py::TestFileLibTiff::test_g4_large", "Tests/test_file_libtiff.py::TestFileLibTiff::test_g4_string_info", "Tests/test_file_libtiff.py::TestFileLibTiff::test_g4_tiff", "Tests/test_file_libtiff.py::TestFileLibTiff::test_g4_tiff_bytesio", "Tests/test_file_libtiff.py::TestFileLibTiff::test_g4_tiff_file", "Tests/test_file_libtiff.py::TestFileLibTiff::test_g4_write", "Tests/test_file_libtiff.py::TestFileLibTiff::test_gimp_tiff", "Tests/test_file_libtiff.py::TestFileLibTiff::test_gray_semibyte_per_pixel" ]
[]
MIT-CMU License
3,035
[ "src/PIL/TiffImagePlugin.py" ]
[ "src/PIL/TiffImagePlugin.py" ]
aws__aws-xray-sdk-python-93
303861097a2dc401dfe0c3fdafed184a0aefb2b2
2018-09-07 11:44:11
303861097a2dc401dfe0c3fdafed184a0aefb2b2
Tankanow: Hi @haotianw465, what is the status of this PR? Do you need any action from me?

haotianw465: No. Thank you for your contribution. I'm merging this change and will prepare a release soon.
diff --git a/aws_xray_sdk/ext/resources/aws_para_whitelist.json b/aws_xray_sdk/ext/resources/aws_para_whitelist.json index bc6642a..3a89b2e 100644 --- a/aws_xray_sdk/ext/resources/aws_para_whitelist.json +++ b/aws_xray_sdk/ext/resources/aws_para_whitelist.json @@ -1,5 +1,14 @@ { "services": { + "sns": { + "operations": { + "Publish": { + "request_parameters": [ + "TopicArn" + ] + } + } + }, "dynamodb": { "operations": { "BatchGetItem": {
Add SNS Service "Publish" operation to the aws_para_whitelist

Currently the SNS publish operation shows up with only a minimal set of metadata:

```
"aws": {
    "operation": "Publish",
    "region": "us-east-1",
    "request_id": "a939cee1-7c48-5675-b385-9ae2206dc121"
}
```

This should include at least the known internal AWS resources like `TopicArn` or `TargetArn`, and maybe even the `PhoneNumber`.
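A usage sketch of what the whitelist entry enables. The topic ARN is a placeholder and running this needs real AWS credentials and an existing topic; the patching and recorder calls are the standard `aws_xray_sdk` API, and the subsegment inspection mirrors the test recorded below.

```python
import botocore.session
from aws_xray_sdk.core import patch, xray_recorder

patch(("botocore",))  # instrument botocore so AWS calls create subsegments

xray_recorder.begin_segment("sns-demo")
sns = botocore.session.get_session().create_client("sns", region_name="us-west-2")
sns.publish(
    TopicArn="arn:aws:sns:us-west-2:123456789012:my-topic",  # placeholder ARN
    Message="hello from X-Ray",
)

subsegment = xray_recorder.current_segment().subsegments[0]
print(subsegment.aws.get("topic_arn"))  # present once Publish is whitelisted
xray_recorder.end_segment()
```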
aws/aws-xray-sdk-python
diff --git a/tests/ext/botocore/test_botocore.py b/tests/ext/botocore/test_botocore.py index 8d48785..f006de2 100644 --- a/tests/ext/botocore/test_botocore.py +++ b/tests/ext/botocore/test_botocore.py @@ -150,3 +150,26 @@ def test_pass_through_on_context_missing(): assert result is not None xray_recorder.configure(context_missing='RUNTIME_ERROR') + + +def test_sns_publish_parameters(): + sns = session.create_client('sns', region_name='us-west-2') + response = { + 'ResponseMetadata': { + 'RequestId': REQUEST_ID, + 'HTTPStatusCode': 200, + } + } + + with Stubber(sns) as stubber: + stubber.add_response('publish', response, {'TopicArn': 'myAmazingTopic', 'Message': 'myBodaciousMessage'}) + sns.publish(TopicArn='myAmazingTopic', Message='myBodaciousMessage') + + subsegment = xray_recorder.current_segment().subsegments[0] + assert subsegment.http['response']['status'] == 200 + + aws_meta = subsegment.aws + assert aws_meta['topic_arn'] == 'myAmazingTopic' + assert aws_meta['request_id'] == REQUEST_ID + assert aws_meta['region'] == 'us-west-2' + assert aws_meta['operation'] == 'Publish'
{ "commit_name": "merge_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 1 }
2.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work -e git+https://github.com/aws/aws-xray-sdk-python.git@303861097a2dc401dfe0c3fdafed184a0aefb2b2#egg=aws_xray_sdk botocore==1.26.10 certifi==2021.5.30 charset-normalizer==2.0.12 future==1.0.0 idna==3.10 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work jmespath==0.10.0 jsonpickle==2.2.0 more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 python-dateutil==2.9.0.post0 requests==2.27.1 six==1.17.0 toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work urllib3==1.26.20 wrapt==1.16.0 zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: aws-xray-sdk-python channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - botocore==1.26.10 - charset-normalizer==2.0.12 - future==1.0.0 - idna==3.10 - jmespath==0.10.0 - jsonpickle==2.2.0 - python-dateutil==2.9.0.post0 - requests==2.27.1 - six==1.17.0 - urllib3==1.26.20 - wrapt==1.16.0 prefix: /opt/conda/envs/aws-xray-sdk-python
[ "tests/ext/botocore/test_botocore.py::test_sns_publish_parameters" ]
[]
[ "tests/ext/botocore/test_botocore.py::test_ddb_table_name", "tests/ext/botocore/test_botocore.py::test_s3_bucket_name_capture", "tests/ext/botocore/test_botocore.py::test_list_parameter_counting", "tests/ext/botocore/test_botocore.py::test_map_parameter_grouping", "tests/ext/botocore/test_botocore.py::test_pass_through_on_context_missing" ]
[]
Apache License 2.0
3,036
[ "aws_xray_sdk/ext/resources/aws_para_whitelist.json" ]
[ "aws_xray_sdk/ext/resources/aws_para_whitelist.json" ]
zopefoundation__zope.schema-62
0a719f2ded189630a0a77e9292a66a3662c6512c
2018-09-07 14:57:45
0a719f2ded189630a0a77e9292a66a3662c6512c
diff --git a/CHANGES.rst b/CHANGES.rst index 9d29d73..72e6b4e 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -126,6 +126,16 @@ <https://github.com/zopefoundation/zope.schema/issues/57>`_. +- Make ``Field.getDoc()`` return more information about the properties + of the field, such as its required and readonly status. Subclasses + can add more information using the new method + ``Field.getExtraDocLines()``. This is used to generate Sphinx + documentation when using `repoze.sphinx.autointerface + <https://pypi.org/project/repoze.sphinx.autointerface/>`_. See + `issue 60 + <https://github.com/zopefoundation/zope.schema/issues/60>`_. + + 4.5.0 (2017-07-10) ================== diff --git a/docs/api.rst b/docs/api.rst index 5a1b9b3..7877e76 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -20,7 +20,7 @@ Interfaces .. autointerface:: zope.schema.interfaces.IInterfaceField .. autointerface:: zope.schema.interfaces.IBool .. autointerface:: zope.schema.interfaces.IObject -.. autointerface:: zope.schema.interfaces.IDict + Strings ------- @@ -76,6 +76,13 @@ Collections .. autointerface:: zope.schema.interfaces.ISet .. autointerface:: zope.schema.interfaces.IFrozenSet +Mappings +~~~~~~~~ +.. autointerface:: zope.schema.interfaces.IMapping +.. autointerface:: zope.schema.interfaces.IMutableMapping +.. autointerface:: zope.schema.interfaces.IDict + + Events ------ diff --git a/src/zope/schema/_bootstrapfields.py b/src/zope/schema/_bootstrapfields.py index cde9df0..4534073 100644 --- a/src/zope/schema/_bootstrapfields.py +++ b/src/zope/schema/_bootstrapfields.py @@ -18,6 +18,7 @@ __docformat__ = 'restructuredtext' import decimal import fractions import numbers +import sys import threading from math import isinf @@ -26,6 +27,7 @@ from zope.interface import Invalid from zope.interface import Interface from zope.interface import providedBy from zope.interface import implementer +from zope.interface.interface import InterfaceClass from zope.interface.interfaces import IInterface from zope.interface.interfaces import IMethod @@ -118,6 +120,62 @@ def getFields(schema): fields[name] = attr return fields +class _DocStringHelpers(object): + # Namespace object to hold methods related to ReST formatting + # docstrings + + @staticmethod + def docstring_to_lines(docstring): + # Similar to what sphinx.utils.docstrings.prepare_docstring + # does. Strip leading equal whitespace, accounting for an initial line + # that might not have any. Return a list of lines, with a trailing + # blank line. + lines = docstring.expandtabs().splitlines() + # Find minimum indentation of any non-blank lines after ignored lines. + + margin = sys.maxsize + for line in lines[1:]: + content = len(line.lstrip()) + if content: + indent = len(line) - content + margin = min(margin, indent) + # Remove indentation from first ignored lines. + if len(lines) >= 1: + lines[0] = lines[0].lstrip() + + if margin < sys.maxsize: + for i in range(1, len(lines)): + lines[i] = lines[i][margin:] + # Remove any leading blank lines. + while lines and not lines[0]: + lines.pop(0) + # + lines.append('') + return lines + + @staticmethod + def make_class_directive(kind): + mod = kind.__module__ + if kind.__module__ in ('__builtin__', 'builtins'): + mod = '' + if mod in ('zope.schema._bootstrapfields', 'zope.schema._field'): + mod = 'zope.schema' + mod += '.' 
if mod else '' + return ':class:`%s%s`' % (mod, kind.__name__) + + @classmethod + def make_field(cls, name, value): + return ":%s: %s" % (name, value) + + @classmethod + def make_class_field(cls, name, kind): + if isinstance(kind, (type, InterfaceClass)): + return cls.make_field(name, cls.make_class_directive(kind)) + if isinstance(kind, tuple): + return cls.make_field( + name, + ', '.join([cls.make_class_directive(t) for t in kind])) + class Field(Attribute): @@ -182,6 +240,9 @@ class Field(Attribute): __doc__ = '' if title: if description: + # Fix leading whitespace that occurs when using multi-line + # strings. + description = '\n'.join(_DocStringHelpers.docstring_to_lines(description)[:-1]) __doc__ = "%s\n\n%s" % (title, description) else: __doc__ = title @@ -286,6 +347,53 @@ class Field(Attribute): object.__class__.__name__)) setattr(object, self.__name__, value) + def getExtraDocLines(self): + """ + Return a list of ReST formatted lines that will be added + to the docstring returned by :meth:`getDoc`. + + By default, this will include information about the various + properties of this object, such as required and readonly status, + required type, and so on. + + This implementation uses a field list for this. + + Subclasses may override or extend. + + .. versionadded:: 4.6.0 + """ + + lines = [] + lines.append(_DocStringHelpers.make_class_field('Implementation', type(self))) + lines.append(_DocStringHelpers.make_field("Read Only", self.readonly)) + lines.append(_DocStringHelpers.make_field("Required", self.required)) + if self.defaultFactory: + lines.append(_DocStringHelpers.make_field("Default Factory", repr(self.defaultFactory))) + else: + lines.append(_DocStringHelpers.make_field("Default Value", repr(self.default))) + + if self._type: + lines.append(_DocStringHelpers.make_class_field("Allowed Type", self._type)) + + # key_type and value_type are commonly used, but don't + # have a common superclass to add them, so we do it here. + # Using a rubric produces decent formatting + for name, rubric in (('key_type', 'Key Type'), + ('value_type', 'Value Type')): + field = getattr(self, name, None) + if hasattr(field, 'getDoc'): + lines.append(".. rubric:: " + rubric) + lines.append(field.getDoc()) + + return lines + + def getDoc(self): + doc = super(Field, self).getDoc() + lines = _DocStringHelpers.docstring_to_lines(doc) + lines += self.getExtraDocLines() + lines.append('') + + return '\n'.join(lines) class Container(Field): @@ -806,6 +914,11 @@ class Object(Field): self.validate_invariants = kw.pop('validate_invariants', True) super(Object, self).__init__(**kw) + def getExtraDocLines(self): + lines = super(Object, self).getExtraDocLines() + lines.append(_DocStringHelpers.make_class_field("Must Provide", self.schema)) + return lines + def _validate(self, value): super(Object, self)._validate(value)
Can `Field.getDoc()` include more information?

There is a lot of information that Field has that's not currently printed by, e.g., `repoze.sphinx.autointerface`, such as the type of acceptable values, whether they are required, the default, and so on. For example, here's what `IField` looks like now:

![before](https://user-images.githubusercontent.com/1256082/45103033-939f4e80-b0f4-11e8-83d6-512c4a133f1e.png)

It's not very difficult to add this on to the user-provided title and description by overriding `Field.getDoc()`. Here's one way this could look if this information were added:

![after](https://user-images.githubusercontent.com/1256082/45103070-a6b21e80-b0f4-11e8-82a7-7188b56012dc.png)

We've found this to be useful in our own projects and consequently we define subclasses of `Field` that add this information. I'm wondering if it would be more broadly useful, useful enough to include by default? Are there any programmatic users of `getDoc()` that would be broken by such a change?
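A quick sketch of what the change gives callers; the field arguments here are arbitrary examples. With the patch applied, `getDoc()` appends a ReST field list describing the field's properties, which tools such as `repoze.sphinx.autointerface` then render as in the screenshot above.

```python
from zope.schema import Text

field = Text(
    title=u"A title",
    description=u"A description.",
    required=False,
    readonly=True,
)

print(field.getDoc())
# Expected to include lines such as:
#   :Read Only: True
#   :Required: False
#   :Default Value: None
```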
zopefoundation/zope.schema
diff --git a/src/zope/schema/tests/test__bootstrapfields.py b/src/zope/schema/tests/test__bootstrapfields.py index 733d5c9..55c8537 100644 --- a/src/zope/schema/tests/test__bootstrapfields.py +++ b/src/zope/schema/tests/test__bootstrapfields.py @@ -277,6 +277,64 @@ class FieldTests(EqualityTestsMixin, from zope.schema.interfaces import IField return IField + def test_getDoc(self): + import textwrap + field = self._makeOne(readonly=True, required=False) + doc = field.getDoc() + self.assertIn(':Read Only: True', doc) + self.assertIn(':Required: False', doc) + self.assertIn(":Default Value:", doc) + self.assertNotIn(':Default Factory:', doc) + + field._type = str + doc = field.getDoc() + self.assertIn(':Allowed Type: :class:`str`', doc) + self.assertNotIn(':Default Factory:', doc) + + field.defaultFactory = 'default' + doc = field.getDoc() + self.assertNotIn(":Default Value:", doc) + self.assertIn(':Default Factory:', doc) + + field._type = (str, object) + doc = field.getDoc() + self.assertIn(':Allowed Type: :class:`str`, :class:`object`', doc) + self.assertNotIn('..rubric', doc) + + # value_type and key_type are automatically picked up + field.value_type = self._makeOne() + field.key_type = self._makeOne() + doc = field .getDoc() + self.assertIn('.. rubric:: Key Type', doc) + self.assertIn('.. rubric:: Value Type', doc) + + field = self._makeOne(title=u'A title', description=u"""Multiline description. + + Some lines have leading whitespace. + + It gets stripped. + """) + + doc = field.getDoc() + self.assertEqual( + field.getDoc(), + textwrap.dedent("""\ + A title + + Multiline description. + + Some lines have leading whitespace. + + It gets stripped. + + :Implementation: :class:`zope.schema.Field` + :Read Only: False + :Required: True + :Default Value: None + """) + ) + + def test_ctor_defaults(self): field = self._makeOne() @@ -1480,6 +1538,11 @@ class ObjectTests(EqualityTestsMixin, self.assertEqual(bad_choices, e.value) self.assertEqual(['choices'], list(e.schema_errors)) + def test_getDoc(self): + field = self._makeOne() + doc = field.getDoc() + self.assertIn(":Must Provide: :class:", doc) + class DummyInst(object): missing_value = object()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_media", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 3 }
4.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[test]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///croot/attrs_1668696182826/work certifi @ file:///croot/certifi_1671487769961/work/certifi flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work packaging @ file:///croot/packaging_1671697413597/work pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pytest==7.1.2 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work typing_extensions @ file:///croot/typing_extensions_1669924550328/work zipp @ file:///croot/zipp_1672387121353/work zope.event==5.0 zope.exceptions==5.1 zope.interface==6.4.post2 -e git+https://github.com/zopefoundation/zope.schema.git@0a719f2ded189630a0a77e9292a66a3662c6512c#egg=zope.schema zope.testing==5.0.1 zope.testrunner==6.5
name: zope.schema channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=22.1.0=py37h06a4308_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - flit-core=3.6.0=pyhd3eb1b0_0 - importlib-metadata=4.11.3=py37h06a4308_0 - importlib_metadata=4.11.3=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=22.0=py37h06a4308_0 - pip=22.3.1=py37h06a4308_0 - pluggy=1.0.0=py37h06a4308_1 - py=1.11.0=pyhd3eb1b0_0 - pytest=7.1.2=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py37h06a4308_0 - typing_extensions=4.4.0=py37h06a4308_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zipp=3.11.0=py37h06a4308_0 - zlib=1.2.13=h5eee18b_1 - pip: - zope-event==5.0 - zope-exceptions==5.1 - zope-interface==6.4.post2 - zope-testing==5.0.1 - zope-testrunner==6.5 prefix: /opt/conda/envs/zope.schema
[ "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_getDoc", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_getDoc" ]
[ "src/zope/schema/tests/test__bootstrapfields.py::test_suite" ]
[ "src/zope/schema/tests/test__bootstrapfields.py::ValidatedPropertyTests::test___get__", "src/zope/schema/tests/test__bootstrapfields.py::ValidatedPropertyTests::test___set___not_missing_w_check", "src/zope/schema/tests/test__bootstrapfields.py::ValidatedPropertyTests::test___set___not_missing_wo_check", "src/zope/schema/tests/test__bootstrapfields.py::ValidatedPropertyTests::test___set___w_missing_wo_check", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test___get___w_defaultFactory_not_ICAF_no_check", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test___get___w_defaultFactory_w_ICAF_w_check", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test___get___wo_defaultFactory_hit", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test___get___wo_defaultFactory_miss", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test__get___wo_defaultFactory_in_dict", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_bind", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_order_madness", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_w_both_title_and_description", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_w_title_wo_description", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_wo_title_w_description", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_explicit_constraint_default", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_explicit_defaultFactory", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_explicit_defaultFactory_returning_missing_value", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_explicit_required_readonly_missingValue", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_get_hit", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_get_miss", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_query_hit", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_query_miss_no_default", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_query_miss_w_default", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_set_hit", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_set_readonly", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_constraint_fails", 
"src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_constraint_raises_StopValidation", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_missing_and_required", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_missing_not_required", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_wrong_type", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test__validate_collection_but_not_iterable", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test__validate_not_collection_but_iterable", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test__validate_not_collection_not_iterable", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test__validate_w_collections", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test__validate_collection_but_not_iterable", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test__validate_not_collection_but_iterable", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test__validate_not_collection_not_iterable", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test__validate_w_collections", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::OrderableTests::test_ctor_default_too_large", 
"src/zope/schema/tests/test__bootstrapfields.py::OrderableTests::test_ctor_default_too_small", "src/zope/schema/tests/test__bootstrapfields.py::OrderableTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::MinMaxLenTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::MinMaxLenTests::test_validate_too_long", "src/zope/schema/tests/test__bootstrapfields.py::MinMaxLenTests::test_validate_too_short", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_fromUnicode_hit", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_fromUnicode_miss", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_validate_w_invalid_default", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_validate_wrong_types", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_constraint", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_validate_wrong_types", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_class_conforms_to_iface", 
"src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_constraint", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_set_normal", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_set_unchanged", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_validate_unchanged_already_set", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_validate_unchanged_not_already_set", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test__validate_w_int", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_fromUnicode_hit", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_fromUnicode_miss", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_set_w_int", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test___eq___different_type", 
"src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_ctor_real_min_max", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_fromUnicode_hit", 
"src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_fromUnicode_miss", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_validate_max", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_validate_min", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_validate_min_and_max", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_fromUnicode_hit", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_fromUnicode_miss", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_max", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_min", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_min_and_max", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test__validate_w_empty_schema", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test__validate_w_value_not_providing_schema", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test__validate_w_value_providing_schema", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test__validate_w_value_providing_schema_but_invalid_fields", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test__validate_w_value_providing_schema_but_missing_fields", 
"src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_bound_field_of_collection_with_choice", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_class_conforms_to_IObject", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_ctor_w_bad_schema", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_instance_conforms_to_IObject", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_set_allows_IBOAE_subscr_to_replace_value", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_set_emits_IBOAE", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_validate_w_cycles", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_validate_w_cycles_collection_not_valid", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_validate_w_cycles_object_not_valid", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_validates_invariants_by_default" ]
[]
Zope Public License 2.1
3,037
[ "src/zope/schema/_bootstrapfields.py", "docs/api.rst", "CHANGES.rst" ]
[ "src/zope/schema/_bootstrapfields.py", "docs/api.rst", "CHANGES.rst" ]
vertexproject__synapse-941
6f5fc661a88b8cc3f4befb2c9c7ddcebf0b89ba0
2018-09-07 17:20:02
6f5fc661a88b8cc3f4befb2c9c7ddcebf0b89ba0
diff --git a/synapse/telepath.py b/synapse/telepath.py index 2f0571772..cf5131317 100644 --- a/synapse/telepath.py +++ b/synapse/telepath.py @@ -393,13 +393,43 @@ class Proxy(s_coro.Fini): def alias(name): ''' Resolve a telepath alias via ~/.syn/aliases.yaml + + Args: + name (str): Name of the alias to resolve. + + Notes: + An exact match against the aliases will always be returned first. + If no exact match is found and the name contains a '/' in it, the + value before the slash is looked up and the remainder of the path + is joined to any result. This is done to support dynamic Telepath + share names. + + Returns: + str: The url string, if present in the alias. None will be returned + if there are no matches. ''' path = s_common.getSynPath('aliases.yaml') if not os.path.isfile(path): return None conf = s_common.yamlload(path) - return conf.get(name) + + # Is there an exact match - if so, return it. + url = conf.get(name) + if url: + return url + + # Since telepath supports dynamic shared object access, + # slice a name at the first '/', look up using that value + # and then append the second value to it. + dynname = None + if '/' in name: + name, dynname = name.split('/', 1) + url = conf.get(name) + if url and dynname: + url = '/'.join([url, dynname]) + + return url @s_glob.synchelp async def openurl(url, **opts):
Support telepath aliasing for cryotanks

Let's say I have the following aliases file.

~/.syn/aliases.yaml
```
---
cryo: tcp://user:pass@host:port/cryocell
...
```

The telepath expansion when running `python -m synapse.tools.cryo.cat cryo/tank_name` doesn't work while `python -m synapse.tools.cryo.list cryo` does. Could a change akin to the following be made to support cryo aliases?

```
diff --git a/synapse/telepath.py b/synapse/telepath.py
index 2f057177..590e7322 100644
--- a/synapse/telepath.py
+++ b/synapse/telepath.py
@@ -427,7 +427,12 @@ async def openurl(url, **opts):
     if url.find('://') == -1:
         newurl = alias(url)
         if newurl is None:
-            raise s_exc.BadUrl(f':// not found in [{url}] and no alias found!')
+            # Cryotank
+            cryo_alias, sep, tank = url.rpartition("/")
+            newurl = alias(cryo_alias)
+            if newurl is None:
+                raise s_exc.BadUrl(f':// not found in [{url}] and no alias found!')
+            newurl = sep.join((newurl, tank))
         url = newurl
 
     info = s_urlhelp.chopurl(url)
```
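The resolution logic that was eventually merged (see the patch field above) can be sketched without any Synapse imports: try an exact alias match first, then treat everything after the first '/' as a dynamic share name and re-join it onto the aliased base URL. The `resolve_alias` helper and the in-memory `aliases` dict are stand-ins for `telepath.alias()` and the parsed `~/.syn/aliases.yaml`.

```python
def resolve_alias(name, aliases):
    """Resolve a telepath alias, supporting dynamic shares like 'cryo/tank'."""
    # An exact match always wins, so an alias key may itself contain a '/'.
    url = aliases.get(name)
    if url:
        return url
    # Otherwise slice the name at the first '/', look up the base alias and
    # append the remainder as a dynamic share path.
    if '/' in name:
        base, dyn = name.split('/', 1)
        url = aliases.get(base)
        if url:
            return '/'.join([url, dyn])
    return None


aliases = {'cryo': 'tcp://user:pass@host:port/cryocell'}
print(resolve_alias('cryo/tank_name', aliases))
# -> tcp://user:pass@host:port/cryocell/tank_name
```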
vertexproject/synapse
diff --git a/synapse/tests/test_telepath.py b/synapse/tests/test_telepath.py index 70e2d1d3c..09bcee161 100644 --- a/synapse/tests/test_telepath.py +++ b/synapse/tests/test_telepath.py @@ -6,9 +6,11 @@ logger = logging.getLogger(__name__) import synapse.exc as s_exc import synapse.glob as s_glob +import synapse.common as s_common import synapse.telepath as s_telepath import synapse.lib.share as s_share +import synapse.lib.scope as s_scope import synapse.tests.common as s_test from synapse.tests.utils import SyncToAsyncCMgr @@ -25,6 +27,13 @@ class CustomShare(s_share.Share): def boo(self, x): return x +class Beep: + def __init__(self, path): + self.path = path + + def beep(self): + return f'{self.path}: beep' + class Foo: def __init__(self): @@ -99,6 +108,20 @@ class TeleApi: return CustomShare(self.link, 42) class TeleAware(s_telepath.Aware): + def __init__(self): + s_telepath.Aware.__init__(self) + self.beeps = {} + + def _initBeep(self, path): + beep = self.beeps.get(path) + if beep: + return beep + beep = Beep(path) + self.beeps[path] = beep + return beep + + def onTeleOpen(self, link, path): + return self._initBeep(path[1]) def getTeleApi(self, link, mesg): return TeleApi(self, link) @@ -249,12 +272,16 @@ class TeleTest(s_test.SynTest): with self.getTestDmon() as dmon: dmon.share('woke', item) - proxy = dmon._getTestProxy('woke') - self.eq(10, proxy.getFooBar(20, 10)) + with dmon._getTestProxy('woke') as proxy: + self.eq(10, proxy.getFooBar(20, 10)) + + # check a custom share works + obj = proxy.customshare() + self.eq(999, obj.boo(999)) - # check a custom share works - obj = proxy.customshare() - self.eq(999, obj.boo(999)) + # check that a dynamic share works + with dmon._getTestProxy('woke/up') as proxy: + self.eq('up: beep', proxy.beep()) def test_telepath_auth(self): @@ -282,3 +309,39 @@ class TeleTest(s_test.SynTest): host, port = dmon.listen('tcp://127.0.0.1:0/') self.raises(s_exc.BadMesgVers, s_telepath.openurl, 'tcp://127.0.0.1/', port=port) + + def test_alias(self): + item = TeleAware() + name = 'item' + + with self.getTestDmon() as dmon: + addr = dmon.listen('tcp://127.0.0.1:0') + dmon.share(name, item) + dirn = s_scope.get('dirn') + + url = f'tcp://{addr[0]}:{addr[1]}/{name}' + beepbeep_alias = url + '/beepbeep' + aliases = {name: url, + f'{name}/borp': beepbeep_alias} + + with self.setSynDir(dirn): + fp = s_common.getSynPath('aliases.yaml') + s_common.yamlsave(aliases, fp) + + # None existent aliases return None + self.none(s_telepath.alias('newp')) + self.none(s_telepath.alias('newp/path')) + + # An exact match wins + self.eq(s_telepath.alias(name), url) + self.eq(s_telepath.alias(f'{name}/borp'), beepbeep_alias) + # Dynamic aliases are valid. + self.eq(s_telepath.alias(f'{name}/beepbeep'), beepbeep_alias) + + with s_telepath.openurl(name) as prox: + self.eq(10, prox.getFooBar(20, 10)) + + # Check to see that we can connect to an aliased name + # with a dynamic share attached to it. + with s_telepath.openurl(f'{name}/bar') as prox: + self.eq('bar: beep', prox.beep()) diff --git a/synapse/tests/utils.py b/synapse/tests/utils.py index 5647d715a..f59547bc7 100644 --- a/synapse/tests/utils.py +++ b/synapse/tests/utils.py @@ -798,6 +798,24 @@ class SynTest(unittest.TestCase): self.raises(exc, testfunc) + @contextlib.contextmanager + def setSynDir(self, dirn): + ''' + Sets s_common.syndir to a specific directory and then unsets it afterwards. + + Args: + dirn (str): Directory to set syndir to. + + Notes: + This is to be used as a context manager. 
+ ''' + olddir = s_common.syndir + try: + s_common.syndir = dirn + yield None + finally: + s_common.syndir = olddir + def eq(self, x, y, msg=None): ''' Assert X is equal to Y
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 1 }
0.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "tornado>=3.2.2 cryptography>=1.7.2 pyOpenSSL>=16.2.0 msgpack-python>=0.4.2 xxhash>=1.0.1 lmdb>=0.92", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 cffi @ file:///tmp/build/80754af9/cffi_1625814693874/work cryptography @ file:///tmp/build/80754af9/cryptography_1635366128178/work importlib-metadata==4.8.3 iniconfig==1.1.1 lmdb==0.99 msgpack==0.5.1 packaging==21.3 pluggy==1.0.0 py==1.11.0 pycparser @ file:///tmp/build/80754af9/pycparser_1636541352034/work pyOpenSSL==17.5.0 pyparsing==3.1.4 pytest==7.0.1 PyYAML==3.13 regex==2023.8.8 six==1.17.0 -e git+https://github.com/vertexproject/synapse.git@6f5fc661a88b8cc3f4befb2c9c7ddcebf0b89ba0#egg=synapse tomli==1.2.3 tornado==5.1.1 typing_extensions==4.1.1 xxhash==1.4.4 zipp==3.6.0
name: synapse channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - cffi=1.14.6=py36h400218f_0 - cryptography=35.0.0=py36hd23ed53_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - pycparser=2.21=pyhd3eb1b0_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - lmdb==0.99 - msgpack==0.5.1 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyopenssl==17.5.0 - pyparsing==3.1.4 - pytest==7.0.1 - pyyaml==3.13 - regex==2023.8.8 - six==1.17.0 - tomli==1.2.3 - tornado==5.1.1 - typing-extensions==4.1.1 - xxhash==1.4.4 - zipp==3.6.0 prefix: /opt/conda/envs/synapse
[ "synapse/tests/test_telepath.py::TeleTest::test_alias" ]
[]
[ "synapse/tests/test_telepath.py::TeleTest::test_telepath_async", "synapse/tests/test_telepath.py::TeleTest::test_telepath_auth", "synapse/tests/test_telepath.py::TeleTest::test_telepath_aware", "synapse/tests/test_telepath.py::TeleTest::test_telepath_basics", "synapse/tests/test_telepath.py::TeleTest::test_telepath_blocking", "synapse/tests/test_telepath.py::TeleTest::test_telepath_server_badvers" ]
[]
Apache License 2.0
3,038
[ "synapse/telepath.py" ]
[ "synapse/telepath.py" ]
scrapy__w3lib-112
c1a030582ec30423c40215fcd159bc951c851ed7
2018-09-07 19:48:08
c1a030582ec30423c40215fcd159bc951c851ed7
diff --git a/w3lib/url.py b/w3lib/url.py
index 4be74f7..fc9b343 100644
--- a/w3lib/url.py
+++ b/w3lib/url.py
@@ -182,6 +182,8 @@ def url_query_cleaner(url, parameterlist=(), sep='&', kvsep='=', remove=False, u
     seen = set()
     querylist = []
     for ksv in query.split(sep):
+        if not ksv:
+            continue
         k, _, _ = ksv.partition(kvsep)
         if unique and k in seen:
             continue
url_query_cleaner appends ? to urls without a query string

```python
>>> url_query_cleaner('http://domain.tld/', ['bla'], remove=True)
'http://domain.tld/?'
```

This is the code which does it: `url = '?'.join([base, sep.join(querylist)]) if querylist else base`; the `if querylist` guard doesn't help here because `querylist == ['']` rather than being empty.
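The fix in the patch above simply skips empty `key=value` chunks when splitting the query. A toy re-implementation (not the real `w3lib` code; the name `clean_query` and its keep-only behaviour are assumptions) shows why that avoids the dangling '?':

```python
def clean_query(url, keep=(), sep='&', kvsep='='):
    """Toy url_query_cleaner: keep only the selected query parameters."""
    base, _, query = url.partition('?')
    kept = []
    for chunk in query.split(sep):
        if not chunk:
            # '' comes from a missing query string (or '&&'); skipping it
            # keeps `kept` empty so we never rebuild 'base?' with no params.
            continue
        key, _, _ = chunk.partition(kvsep)
        if key in keep:
            kept.append(chunk)
    return '?'.join([base, sep.join(kept)]) if kept else base


print(clean_query('http://domain.tld/'))                         # http://domain.tld/
print(clean_query('http://domain.tld/?id=2&x=1', keep=('id',)))  # http://domain.tld/?id=2
```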
scrapy/w3lib
diff --git a/tests/test_url.py b/tests/test_url.py index 0df5bfd..9476b30 100644 --- a/tests/test_url.py +++ b/tests/test_url.py @@ -284,6 +284,10 @@ class UrlTests(unittest.TestCase): 'http://example.com/?version=1&pageurl=test&param2=value2') def test_url_query_cleaner(self): + self.assertEqual('product.html', + url_query_cleaner("product.html?")) + self.assertEqual('product.html', + url_query_cleaner("product.html?&")) self.assertEqual('product.html?id=200', url_query_cleaner("product.html?id=200&foo=bar&name=wired", ['id'])) self.assertEqual('product.html?id=200', @@ -308,6 +312,10 @@ class UrlTests(unittest.TestCase): url_query_cleaner("product.html?id=2&foo=bar&name=wired", ['id', 'foo'], remove=True)) self.assertEqual('product.html?foo=bar&name=wired', url_query_cleaner("product.html?id=2&foo=bar&name=wired", ['id', 'footo'], remove=True)) + self.assertEqual('product.html', + url_query_cleaner("product.html", ['id'], remove=True)) + self.assertEqual('product.html', + url_query_cleaner("product.html?&", ['id'], remove=True)) self.assertEqual('product.html?foo=bar', url_query_cleaner("product.html?foo=bar&name=wired", 'foo')) self.assertEqual('product.html?foobar=wired',
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
1.19
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": null, "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
coverage==7.8.0 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work packaging @ file:///croot/packaging_1734472117206/work pluggy @ file:///croot/pluggy_1733169602837/work pytest @ file:///croot/pytest_1738938843180/work pytest-cov==6.0.0 six==1.17.0 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work -e git+https://github.com/scrapy/w3lib.git@c1a030582ec30423c40215fcd159bc951c851ed7#egg=w3lib
name: w3lib channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.8.0 - pytest-cov==6.0.0 - six==1.17.0 prefix: /opt/conda/envs/w3lib
[ "tests/test_url.py::UrlTests::test_url_query_cleaner" ]
[ "tests/test_url.py::UrlTests::test_add_or_replace_parameter" ]
[ "tests/test_url.py::UrlTests::test_any_to_uri", "tests/test_url.py::UrlTests::test_file_uri_to_path", "tests/test_url.py::UrlTests::test_is_url", "tests/test_url.py::UrlTests::test_path_to_file_uri", "tests/test_url.py::UrlTests::test_safe_download_url", "tests/test_url.py::UrlTests::test_safe_url_idna", "tests/test_url.py::UrlTests::test_safe_url_idna_encoding_failure", "tests/test_url.py::UrlTests::test_safe_url_port_number", "tests/test_url.py::UrlTests::test_safe_url_string", "tests/test_url.py::UrlTests::test_safe_url_string_bytes_input", "tests/test_url.py::UrlTests::test_safe_url_string_bytes_input_nonutf8", "tests/test_url.py::UrlTests::test_safe_url_string_misc", "tests/test_url.py::UrlTests::test_safe_url_string_unsafe_chars", "tests/test_url.py::UrlTests::test_safe_url_string_with_query", "tests/test_url.py::UrlTests::test_url_query_cleaner_keep_fragments", "tests/test_url.py::UrlTests::test_url_query_parameter", "tests/test_url.py::UrlTests::test_url_query_parameter_2", "tests/test_url.py::UrlTests::test_urljoin_rfc_deprecated", "tests/test_url.py::CanonicalizeUrlTest::test_append_missing_path", "tests/test_url.py::CanonicalizeUrlTest::test_canonicalize_idns", "tests/test_url.py::CanonicalizeUrlTest::test_canonicalize_parse_url", "tests/test_url.py::CanonicalizeUrlTest::test_canonicalize_url", "tests/test_url.py::CanonicalizeUrlTest::test_canonicalize_url_idempotence", "tests/test_url.py::CanonicalizeUrlTest::test_canonicalize_url_idna_exceptions", "tests/test_url.py::CanonicalizeUrlTest::test_canonicalize_url_unicode_path", "tests/test_url.py::CanonicalizeUrlTest::test_canonicalize_url_unicode_query_string", "tests/test_url.py::CanonicalizeUrlTest::test_canonicalize_url_unicode_query_string_wrong_encoding", "tests/test_url.py::CanonicalizeUrlTest::test_canonicalize_urlparsed", "tests/test_url.py::CanonicalizeUrlTest::test_domains_are_case_insensitive", "tests/test_url.py::CanonicalizeUrlTest::test_dont_convert_safe_characters", "tests/test_url.py::CanonicalizeUrlTest::test_keep_blank_values", "tests/test_url.py::CanonicalizeUrlTest::test_non_ascii_percent_encoding_in_paths", "tests/test_url.py::CanonicalizeUrlTest::test_non_ascii_percent_encoding_in_query_arguments", "tests/test_url.py::CanonicalizeUrlTest::test_normalize_percent_encoding_in_paths", "tests/test_url.py::CanonicalizeUrlTest::test_normalize_percent_encoding_in_query_arguments", "tests/test_url.py::CanonicalizeUrlTest::test_port_number", "tests/test_url.py::CanonicalizeUrlTest::test_quoted_slash_and_question_sign", "tests/test_url.py::CanonicalizeUrlTest::test_remove_fragments", "tests/test_url.py::CanonicalizeUrlTest::test_return_str", "tests/test_url.py::CanonicalizeUrlTest::test_safe_characters_unicode", "tests/test_url.py::CanonicalizeUrlTest::test_sorting", "tests/test_url.py::CanonicalizeUrlTest::test_spaces", "tests/test_url.py::CanonicalizeUrlTest::test_typical_usage", "tests/test_url.py::CanonicalizeUrlTest::test_urls_with_auth_and_ports", "tests/test_url.py::DataURITests::test_base64", "tests/test_url.py::DataURITests::test_base64_spaces", "tests/test_url.py::DataURITests::test_bytes_uri", "tests/test_url.py::DataURITests::test_default_mediatype", "tests/test_url.py::DataURITests::test_default_mediatype_charset", "tests/test_url.py::DataURITests::test_mediatype_parameters", "tests/test_url.py::DataURITests::test_missing_comma", "tests/test_url.py::DataURITests::test_missing_scheme", "tests/test_url.py::DataURITests::test_scheme_case_insensitive", "tests/test_url.py::DataURITests::test_text_charset", 
"tests/test_url.py::DataURITests::test_text_uri", "tests/test_url.py::DataURITests::test_unicode_uri", "tests/test_url.py::DataURITests::test_wrong_base64_param", "tests/test_url.py::DataURITests::test_wrong_scheme" ]
[]
BSD 3-Clause "New" or "Revised" License
3,039
[ "w3lib/url.py" ]
[ "w3lib/url.py" ]
airspeed-velocity__asv-733
d069dc4a375a60402acc0bbe1b7df938768b0c16
2018-09-07 22:41:44
d069dc4a375a60402acc0bbe1b7df938768b0c16
diff --git a/asv/util.py b/asv/util.py
index c7a58e7..42688af 100644
--- a/asv/util.py
+++ b/asv/util.py
@@ -106,6 +106,13 @@ def human_float(value, significant=3, truncate_small=None, significant_zeros=Fal
     """
     if value == 0:
         return "0"
+    elif math.isinf(value) or math.isnan(value):
+        return "{}".format(value)
+    elif value < 0:
+        sign = "-"
+        value = -value
+    else:
+        sign = ""
 
     logv = math.log10(value)
     magnitude = int(math.floor(logv)) + 1
@@ -127,7 +134,7 @@ def human_float(value, significant=3, truncate_small=None, significant_zeros=Fal
     else:
         fmt = "{{0:.{0}f}}".format(num_digits)
 
-    formatted = fmt.format(value)
+    formatted = sign + fmt.format(value)
 
     if not significant_zeros and '.' in formatted and 'e' not in fmt:
         formatted = formatted.rstrip('0')
util.human_float doesn't print +-inf or nan properly

Raises exceptions in `math.log10`:

```
  File "asv/util.py", line 110, in human_float
    logv = math.log10(value)
ValueError: math domain error
```

on +/-inf or nan inputs. This can be triggered in `asv show` and maybe elsewhere.
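The shape of the guard that resolves this mirrors the patch above: deal with non-finite values before taking any logarithm, and factor the sign out so `math.log10` only ever sees positive input. The formatting below is deliberately simplified compared to the real `asv.util.human_float` (no `truncate_small` or `significant_zeros` handling):

```python
import math


def human_float(value, significant=3):
    """Format a float to a number of significant digits (simplified)."""
    if value == 0:
        return '0'
    if math.isinf(value) or math.isnan(value):
        # math.log10 is meaningless (or raises) here, so return early.
        return '{}'.format(value)
    sign = '-' if value < 0 else ''
    value = abs(value)
    magnitude = int(math.floor(math.log10(value))) + 1
    num_digits = max(significant - magnitude, 0)
    return sign + '{0:.{1}f}'.format(value, num_digits)


for x in (float('inf'), -float('inf'), float('nan'), -1.2345, 0.001234):
    print(human_float(x))   # inf, -inf, nan, -1.23, 0.00123
```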
airspeed-velocity/asv
diff --git a/test/test_util.py b/test/test_util.py index 364bcf2..ccd8832 100644 --- a/test/test_util.py +++ b/test/test_util.py @@ -192,6 +192,17 @@ def test_human_float(): ("0", 0.001, 2, 0), ("0", 0.001, 2, 1), ("0.001", 0.001, 2, 2), + + # non-finite + ("inf", float('inf'), 1), + ("-inf", -float('inf'), 1), + ("nan", float('nan'), 1), + + # negative + ("-1", -1.2345, 1), + ("-0.00100", -0.001, 3, None, True), + ("-0", -0.001, 2, 1), + ("-0.001", -0.001, 2, 2), ] for item in items:
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 0, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 1 }
0.31
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": [ "pip_requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 -e git+https://github.com/airspeed-velocity/asv.git@d069dc4a375a60402acc0bbe1b7df938768b0c16#egg=asv attrs==22.2.0 Babel==2.11.0 certifi==2021.5.30 charset-normalizer==2.0.12 docutils==0.18.1 idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 iniconfig==1.1.1 Jinja2==3.0.3 MarkupSafe==2.0.1 packaging==21.3 pluggy==1.0.0 py==1.11.0 Pygments==2.14.0 pyparsing==3.1.4 pytest==7.0.1 pytz==2025.2 requests==2.27.1 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinx-bootstrap-theme==0.8.1 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 zipp==3.6.0
name: asv channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - attrs==22.2.0 - babel==2.11.0 - charset-normalizer==2.0.12 - docutils==0.18.1 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - jinja2==3.0.3 - markupsafe==2.0.1 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pygments==2.14.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytz==2025.2 - requests==2.27.1 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinx-bootstrap-theme==0.8.1 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - zipp==3.6.0 prefix: /opt/conda/envs/asv
[ "test/test_util.py::test_human_float" ]
[]
[ "test/test_util.py::test_parallelfailure", "test/test_util.py::test_write_unicode_to_ascii", "test/test_util.py::test_which_path", "test/test_util.py::test_write_load_json", "test/test_util.py::test_human_time", "test/test_util.py::test_human_file_size", "test/test_util.py::test_is_main_thread", "test/test_util.py::test_json_non_ascii", "test/test_util.py::test_interpolate_command", "test/test_util.py::test_datetime_to_js_timestamp", "test/test_util.py::test_datetime_to_timestamp" ]
[]
BSD 3-Clause "New" or "Revised" License
3,040
[ "asv/util.py" ]
[ "asv/util.py" ]
chaostoolkit-incubator__chaostoolkit-aws-26
4ab02c19fd02f7c4105f4c5da48f2d70319eeb4d
2018-09-08 07:15:02
4ab02c19fd02f7c4105f4c5da48f2d70319eeb4d
diff --git a/CHANGELOG.md b/CHANGELOG.md index 1cf8254..3e93e48 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,15 @@ [Unreleased]: https://github.com/chaostoolkit-incubator/chaostoolkit-aws/compare/0.7.1...HEAD +### Changed + +- make sure instances with no explicit lifecycle can be stopped. They are + assumed to be in the `normal` lifecycle as per the last + [line of the AWS documentation][]. [#25][25] + +[25]: https://github.com/chaostoolkit-incubator/chaostoolkit-aws/issues/25 +[instlifecycledocs]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-purchasing-options.html + ## [0.7.1][] [0.7.1]: https://github.com/chaostoolkit-incubator/chaostoolkit-aws/compare/0.7.0...0.7.1 diff --git a/chaosaws/ec2/actions.py b/chaosaws/ec2/actions.py index 230a1a3..c60a05d 100644 --- a/chaosaws/ec2/actions.py +++ b/chaosaws/ec2/actions.py @@ -46,7 +46,7 @@ def stop_instance(instance_id: str = None, az: str = None, force: bool = False, raise FailedActivity( "No instances in availability zone: {}".format(az)) else: - instance_types = get_instance_type_by_id(instance_id, client) + instance_types = get_instance_type_by_id([instance_id], client) logger.debug( "Picked EC2 instance '{}' from AZ '{}' to be stopped".format( @@ -140,10 +140,17 @@ def get_instance_type_from_response(response: Dict) -> Dict: for reservation in response['Reservations']: for inst in reservation['Instances']: - if inst['InstanceLifecycle'] not in instances_type.keys(): + # when this field is missing, we assume "normal" + # which means On-Demand or Reserved + # this seems what the last line of the docs imply at + # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-purchasing-options.html + lifecycle = inst.get('InstanceLifecycle', 'normal') + + if lifecycle not in instances_type.keys(): # adding empty list (value) for new instance type (key) - instances_type[inst['InstanceLifecycle']] = [] - instances_type[inst['InstanceLifecycle']].append( + instances_type[lifecycle] = [] + + instances_type[lifecycle].append( inst['InstanceId']) return instances_type @@ -158,7 +165,10 @@ def get_spot_request_ids_from_response(response: Dict) -> List[str]: for reservation in response['Reservations']: for inst in reservation['Instances']: - if inst['InstanceLifecycle'] == 'spot': + # when this field is missing, we assume "normal" + # which means On-Demand or Reserved + lifecycle = inst.get('InstanceLifecycle', 'normal') + if lifecycle == 'spot': spot_request_ids.append(inst['SpotInstanceRequestId']) return spot_request_ids
Can't stop instances with no explicit lifecycle

When an instance has no explicit lifecycle, the extension fails to stop it. This is usually the case for On-Demand or Reserved instances.
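A sketch of the behaviour the patch above introduces: a missing `InstanceLifecycle` key in a `describe_instances` response is treated as the `normal` lifecycle (On-Demand/Reserved) instead of blowing up with a `KeyError`. The helper name and the hand-written response dict are illustrative; no real boto3 call is made:

```python
def instances_by_lifecycle(response):
    """Group instance ids from a describe_instances response by lifecycle."""
    grouped = {}
    for reservation in response['Reservations']:
        for inst in reservation['Instances']:
            # On-Demand/Reserved instances carry no InstanceLifecycle key,
            # so fall back to 'normal' rather than assuming it is present.
            lifecycle = inst.get('InstanceLifecycle', 'normal')
            grouped.setdefault(lifecycle, []).append(inst['InstanceId'])
    return grouped


response = {
    'Reservations': [{'Instances': [
        {'InstanceId': 'i-1234567890abcdef0'},  # no lifecycle field: On-Demand
        {'InstanceId': 'i-0fedcba0987654321', 'InstanceLifecycle': 'spot'},
    ]}]
}
print(instances_by_lifecycle(response))
# {'normal': ['i-1234567890abcdef0'], 'spot': ['i-0fedcba0987654321']}
```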
chaostoolkit-incubator/chaostoolkit-aws
diff --git a/tests/ec2/test_ec2_actions.py b/tests/ec2/test_ec2_actions.py index 1611aba..37baa7a 100644 --- a/tests/ec2/test_ec2_actions.py +++ b/tests/ec2/test_ec2_actions.py @@ -180,3 +180,34 @@ def test_stop_instance_by_specific_filters(aws_client): client.describe_instances.assert_called_with(Filters=called_filters) client.stop_instances.assert_called_with( InstanceIds=[inst_1_id], Force=False) + + +@patch('chaosaws.ec2.actions.aws_client', autospec=True) +def test_stop_instance_with_no_lifecycle(aws_client): + client = MagicMock() + aws_client.return_value = client + inst_id = "i-1234567890abcdef0" + client.describe_instances.return_value = { + 'Reservations': + [{'Instances': [{'InstanceId': inst_id}]}] + } + stop_instance(inst_id) + client.stop_instances.assert_called_with( + InstanceIds=[inst_id], Force=False) + + +@patch('chaosaws.ec2.actions.aws_client', autospec=True) +def test_stop_normal_instance(aws_client): + client = MagicMock() + aws_client.return_value = client + inst_id = "i-1234567890abcdef0" + client.describe_instances.return_value = { + 'Reservations': + [{'Instances': [{ + 'InstanceId': inst_id, + 'InstanceLifecycle': 'normal' + }]}] + } + stop_instance(inst_id) + client.stop_instances.assert_called_with( + InstanceIds=[inst_id], Force=False)
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 2 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-sugar" ], "pre_install": null, "python": "3.7", "reqs_path": [ "requirements.txt", "requirements-dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
aws-requests-auth==0.4.3 aws-xray-sdk==0.95 boto==2.49.0 boto3==1.33.13 botocore==1.33.13 certifi @ file:///croot/certifi_1671487769961/work/certifi cffi==1.15.1 -e git+https://github.com/chaostoolkit-incubator/chaostoolkit-aws.git@4ab02c19fd02f7c4105f4c5da48f2d70319eeb4d#egg=chaostoolkit_aws chaostoolkit-lib==1.41.0 charset-normalizer==3.4.1 cookies==2.2.1 coverage==7.2.7 cryptography==44.0.2 docker==6.1.3 ecdsa==0.19.1 exceptiongroup==1.2.2 future==0.18.3 idna==3.10 importlib-metadata==6.7.0 iniconfig==2.0.0 Jinja2==3.1.6 jmespath==1.0.1 jsondiff==1.1.1 jsonpickle==3.4.2 logzero==1.7.0 MarkupSafe==2.1.5 mock==5.2.0 moto==1.3.4 packaging==24.0 pluggy==1.2.0 pyaml==23.5.8 pycodestyle==2.10.0 pycparser==2.21 pycryptodome==3.22.0 pytest==7.4.4 pytest-cov==4.1.0 pytest-sugar==1.0.0 python-dateutil==2.9.0.post0 python-jose==2.0.2 pytz==2025.2 PyYAML==6.0.1 requests==2.31.0 requests-mock==1.12.1 responses==0.23.3 s3transfer==0.8.2 six==1.17.0 termcolor==2.3.0 tomli==2.0.1 types-PyYAML==6.0.12.12 typing_extensions==4.7.1 urllib3==1.26.20 websocket-client==1.6.1 Werkzeug==2.2.3 wrapt==1.16.0 xmltodict==0.14.2 zipp==3.15.0
name: chaostoolkit-aws channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - aws-requests-auth==0.4.3 - aws-xray-sdk==0.95 - boto==2.49.0 - boto3==1.33.13 - botocore==1.33.13 - cffi==1.15.1 - chaostoolkit-lib==1.41.0 - charset-normalizer==3.4.1 - cookies==2.2.1 - coverage==7.2.7 - cryptography==44.0.2 - docker==6.1.3 - ecdsa==0.19.1 - exceptiongroup==1.2.2 - future==0.18.3 - idna==3.10 - importlib-metadata==6.7.0 - iniconfig==2.0.0 - jinja2==3.1.6 - jmespath==1.0.1 - jsondiff==1.1.1 - jsonpickle==3.4.2 - logzero==1.7.0 - markupsafe==2.1.5 - mock==5.2.0 - moto==1.3.4 - packaging==24.0 - pluggy==1.2.0 - pyaml==23.5.8 - pycodestyle==2.10.0 - pycparser==2.21 - pycryptodome==3.22.0 - pytest==7.4.4 - pytest-cov==4.1.0 - pytest-sugar==1.0.0 - python-dateutil==2.9.0.post0 - python-jose==2.0.2 - pytz==2025.2 - pyyaml==6.0.1 - requests==2.31.0 - requests-mock==1.12.1 - responses==0.23.3 - s3transfer==0.8.2 - six==1.17.0 - termcolor==2.3.0 - tomli==2.0.1 - types-pyyaml==6.0.12.12 - typing-extensions==4.7.1 - urllib3==1.26.20 - websocket-client==1.6.1 - werkzeug==2.2.3 - wrapt==1.16.0 - xmltodict==0.14.2 - zipp==3.15.0 prefix: /opt/conda/envs/chaostoolkit-aws
[ "tests/ec2/test_ec2_actions.py::test_stop_instance_with_no_lifecycle" ]
[]
[ "tests/ec2/test_ec2_actions.py::test_stop_instance", "tests/ec2/test_ec2_actions.py::test_stop_spot_instance", "tests/ec2/test_ec2_actions.py::test_stop_instance_can_be_forced", "tests/ec2/test_ec2_actions.py::test_stop_spot_instance_can_be_forced", "tests/ec2/test_ec2_actions.py::test_stop_instances", "tests/ec2/test_ec2_actions.py::test_stop_random_instance_in_az", "tests/ec2/test_ec2_actions.py::test_stop_random_needs_instance_id_or_az", "tests/ec2/test_ec2_actions.py::test_stop_all_instances_in_az", "tests/ec2/test_ec2_actions.py::test_stop_all_instances_needs_instance_id_or_az", "tests/ec2/test_ec2_actions.py::test_stop_all_instances_may_not_have_any_instances", "tests/ec2/test_ec2_actions.py::test_stop_instance_by_specific_filters", "tests/ec2/test_ec2_actions.py::test_stop_normal_instance" ]
[]
Apache License 2.0
3,041
[ "chaosaws/ec2/actions.py", "CHANGELOG.md" ]
[ "chaosaws/ec2/actions.py", "CHANGELOG.md" ]
joke2k__faker-808
fc546bca9b4f411e8565aeb5ddd9ccee8de59494
2018-09-08 12:56:41
d26db45eebb9dcd02eb73099bb98b660f0e03aad
diff --git a/faker/providers/lorem/__init__.py b/faker/providers/lorem/__init__.py index 48276aa5..55d25543 100644 --- a/faker/providers/lorem/__init__.py +++ b/faker/providers/lorem/__init__.py @@ -26,7 +26,7 @@ class Provider(BaseProvider): word_connector = ' ' sentence_punctuation = '.' - def words(self, nb=3, ext_word_list=None): + def words(self, nb=3, ext_word_list=None, unique=False): """ :returns: An array of random words. for example: ['Lorem', 'ipsum', 'dolor'] @@ -34,10 +34,13 @@ class Provider(BaseProvider): :param nb: how many words to return :param ext_word_list: a list of words you would like to have instead of 'Lorem ipsum' + :param unique: If True, the returned word list will contain unique words :rtype: list """ word_list = ext_word_list if ext_word_list else self.word_list + if unique: + return self.random_sample(word_list, length=nb) return self.random_choices(word_list, length=nb) def word(self, ext_word_list=None):
Ensure words() returns a unique list of words without duplicates When calling `words(10)` there is no guarantee that the returned list of elements is unique. While I could do `set(words(10))`, this may only return 9 items when I am in need of 10. If creating a list of words with potential duplicates is a useful feature to some, maybe adding an optional parameter called `unique` to the `words` generator could prove useful.
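As a usage sketch of the change in the patch above: with a Faker build that includes the new `unique` flag, callers can ask `words()` to sample without replacement. Only `Faker`, `words`, `nb` and `unique` come from the source material here; the variable names are illustrative.

```python
from faker import Faker

fake = Faker()

# Default behaviour: words are drawn with replacement, so duplicates
# can appear in the returned list.
maybe_repeated = fake.words(nb=10)

# With the flag added by the patch, the provider samples without
# replacement, so all ten entries are distinct.
distinct = fake.words(nb=10, unique=True)
assert len(set(distinct)) == 10
```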
joke2k/faker
diff --git a/tests/test_factory.py b/tests/test_factory.py index 39ed0084..7b28a767 100644 --- a/tests/test_factory.py +++ b/tests/test_factory.py @@ -330,6 +330,91 @@ class FactoryTestCase(unittest.TestCase): word = fake.word(ext_word_list=my_word_list) self.assertIn(word, my_word_list) + def test_no_words(self): + fake = Faker() + + words = fake.words(0) + self.assertEqual(words, []) + + def test_some_words(self): + fake = Faker() + + num_words = 5 + words = fake.words(num_words) + self.assertTrue(isinstance(words, list)) + self.assertEqual(len(words), num_words) + + for word in words: + self.assertTrue(isinstance(word, string_types)) + self.assertTrue(re.match(r'^[a-z].*$', word)) + + def test_words_ext_word_list(self): + fake = Faker() + + my_word_list = [ + 'danish', + 'cheesecake', + 'sugar', + 'Lollipop', + 'wafer', + 'Gummies', + 'Jelly', + 'pie', + ] + + num_words = 5 + words = fake.words(5, ext_word_list=my_word_list) + self.assertTrue(isinstance(words, list)) + self.assertEqual(len(words), num_words) + + for word in words: + self.assertTrue(isinstance(word, string_types)) + self.assertIn(word, my_word_list) + + def test_words_ext_word_list_unique(self): + fake = Faker() + + my_word_list = [ + 'danish', + 'cheesecake', + 'sugar', + 'Lollipop', + 'wafer', + 'Gummies', + 'Jelly', + 'pie', + ] + + num_words = 5 + words = fake.words(5, ext_word_list=my_word_list) + self.assertTrue(isinstance(words, list)) + self.assertEqual(len(words), num_words) + + checked_words = [] + for word in words: + self.assertTrue(isinstance(word, string_types)) + self.assertIn(word, my_word_list) + # Check that word is unique + self.assertTrue(word not in checked_words) + checked_words.append(word) + + def test_unique_words(self): + fake = Faker() + + num_words = 20 + words = fake.words(num_words, unique=True) + self.assertTrue(isinstance(words, list)) + self.assertEqual(len(words), num_words) + + checked_words = [] + for word in words: + self.assertTrue(isinstance(word, string_types)) + # Check that word is lowercase letters. No numbers, symbols, etc + self.assertTrue(re.match(r'^[a-z].*$', word)) + # Check that word list is unique + self.assertTrue(word not in checked_words) + checked_words.append(word) + def test_random_pystr_characters(self): from faker.providers.python import Provider provider = Provider(self.generator)
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 1 }
0.9
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": [ "tests/requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
dnspython==2.7.0 email-validator==1.0.3 exceptiongroup==1.2.2 -e git+https://github.com/joke2k/faker.git@fc546bca9b4f411e8565aeb5ddd9ccee8de59494#egg=Faker idna==3.10 iniconfig==2.1.0 mock==2.0.0 packaging==24.2 pbr==6.1.1 pluggy==1.5.0 pytest==8.3.5 python-dateutil==2.9.0.post0 six==1.17.0 text-unidecode==1.2 tomli==2.2.1 UkPostcodeParser==1.1.2
name: faker channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - dnspython==2.7.0 - email-validator==1.0.3 - exceptiongroup==1.2.2 - idna==3.10 - iniconfig==2.1.0 - mock==2.0.0 - packaging==24.2 - pbr==6.1.1 - pluggy==1.5.0 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - six==1.17.0 - text-unidecode==1.2 - tomli==2.2.1 - ukpostcodeparser==1.1.2 prefix: /opt/conda/envs/faker
[ "tests/test_factory.py::FactoryTestCase::test_unique_words" ]
[ "tests/test_factory.py::FactoryTestCase::test_ipv4_public", "tests/test_factory.py::FactoryTestCase::test_ipv4_public_class_c", "tests/test_factory.py::FactoryTestCase::test_words_ext_word_list_unique" ]
[ "tests/test_factory.py::FactoryTestCase::test_add_provider_gives_priority_to_newly_added_provider", "tests/test_factory.py::FactoryTestCase::test_binary", "tests/test_factory.py::FactoryTestCase::test_cli_seed", "tests/test_factory.py::FactoryTestCase::test_cli_seed_with_repeat", "tests/test_factory.py::FactoryTestCase::test_cli_verbosity", "tests/test_factory.py::FactoryTestCase::test_command", "tests/test_factory.py::FactoryTestCase::test_command_custom_provider", "tests/test_factory.py::FactoryTestCase::test_documentor", "tests/test_factory.py::FactoryTestCase::test_email", "tests/test_factory.py::FactoryTestCase::test_ext_word_list", "tests/test_factory.py::FactoryTestCase::test_format_calls_formatter_on_provider", "tests/test_factory.py::FactoryTestCase::test_format_transfers_arguments_to_formatter", "tests/test_factory.py::FactoryTestCase::test_get_formatter_returns_callable", "tests/test_factory.py::FactoryTestCase::test_get_formatter_returns_correct_formatter", "tests/test_factory.py::FactoryTestCase::test_get_formatter_throws_exception_on_incorrect_formatter", "tests/test_factory.py::FactoryTestCase::test_instance_seed_chain", "tests/test_factory.py::FactoryTestCase::test_invalid_locale", "tests/test_factory.py::FactoryTestCase::test_ipv4", "tests/test_factory.py::FactoryTestCase::test_ipv4_network_class", "tests/test_factory.py::FactoryTestCase::test_ipv4_private", "tests/test_factory.py::FactoryTestCase::test_ipv4_private_class_a", "tests/test_factory.py::FactoryTestCase::test_ipv4_private_class_b", "tests/test_factory.py::FactoryTestCase::test_ipv4_private_class_c", "tests/test_factory.py::FactoryTestCase::test_ipv4_public_class_a", "tests/test_factory.py::FactoryTestCase::test_ipv4_public_class_b", "tests/test_factory.py::FactoryTestCase::test_ipv6", "tests/test_factory.py::FactoryTestCase::test_language_code", "tests/test_factory.py::FactoryTestCase::test_locale", "tests/test_factory.py::FactoryTestCase::test_magic_call_calls_format", "tests/test_factory.py::FactoryTestCase::test_magic_call_calls_format_with_arguments", "tests/test_factory.py::FactoryTestCase::test_nl_BE_ssn_valid", "tests/test_factory.py::FactoryTestCase::test_no_words", "tests/test_factory.py::FactoryTestCase::test_no_words_paragraph", "tests/test_factory.py::FactoryTestCase::test_no_words_sentence", "tests/test_factory.py::FactoryTestCase::test_parse_returns_same_string_when_it_contains_no_curly_braces", "tests/test_factory.py::FactoryTestCase::test_parse_returns_string_with_tokens_replaced_by_formatters", "tests/test_factory.py::FactoryTestCase::test_password", "tests/test_factory.py::FactoryTestCase::test_prefix_suffix_always_string", "tests/test_factory.py::FactoryTestCase::test_random_element", "tests/test_factory.py::FactoryTestCase::test_random_number", "tests/test_factory.py::FactoryTestCase::test_random_pyfloat", "tests/test_factory.py::FactoryTestCase::test_random_pystr_characters", "tests/test_factory.py::FactoryTestCase::test_random_sample_unique", "tests/test_factory.py::FactoryTestCase::test_slugify", "tests/test_factory.py::FactoryTestCase::test_some_words", "tests/test_factory.py::FactoryTestCase::test_us_ssn_valid", "tests/test_factory.py::FactoryTestCase::test_words_ext_word_list", "tests/test_factory.py::FactoryTestCase::test_words_valueerror" ]
[]
MIT License
3,043
[ "faker/providers/lorem/__init__.py" ]
[ "faker/providers/lorem/__init__.py" ]
joke2k__faker-809
fc546bca9b4f411e8565aeb5ddd9ccee8de59494
2018-09-09 13:34:13
d26db45eebb9dcd02eb73099bb98b660f0e03aad
diff --git a/faker/providers/address/en_US/__init__.py b/faker/providers/address/en_US/__init__.py index 451d5d72..4d466336 100644 --- a/faker/providers/address/en_US/__init__.py +++ b/faker/providers/address/en_US/__init__.py @@ -272,13 +272,19 @@ class Provider(AddressProvider): 'West Virginia', 'Wisconsin', 'Wyoming', ) states_abbr = ( - 'AL', 'AK', 'AS', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'DC', 'FM', 'FL', - 'GA', 'GU', 'HI', 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME', 'MH', - 'MD', 'MA', 'MI', 'MN', 'MS', 'MO', 'MT', 'NE', 'NV', 'NH', 'NJ', 'NM', - 'NY', 'NC', 'ND', 'MP', 'OH', 'OK', 'OR', 'PW', 'PA', 'PR', 'RI', 'SC', - 'SD', 'TN', 'TX', 'UT', 'VT', 'VI', 'VA', 'WA', 'WV', 'WI', 'WY', + 'AL', 'AK', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'DC', 'FL', 'GA', 'HI', + 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME', 'MD', 'MA', 'MI', 'MN', + 'MS', 'MO', 'MT', 'NE', 'NV', 'NH', 'NJ', 'NM', 'NY', 'NC', 'ND', 'OH', + 'OK', 'OR', 'PA', 'RI', 'SC', 'SD', 'TN', 'TX', 'UT', 'VT', 'VA', 'WA', + 'WV', 'WI', 'WY', ) + territories_abbr = ( + 'AS', 'FM', 'GU', 'MH', 'MP', 'PW', 'PR', 'VI', + ) + + states_and_territories_abbr = states_abbr + territories_abbr + military_state_abbr = ('AE', 'AA', 'AP') military_ship_prefix = ('USS', 'USNS', 'USNV', 'USCGC') @@ -329,7 +335,15 @@ class Provider(AddressProvider): def state(self): return self.random_element(self.states) - def state_abbr(self): + def state_abbr(self, include_territories=True): + """ + :returns: A random state or territory abbreviation. + + :param include_territories: If True, territories will be included. + If False, only states will be returned. + """ + if include_territories: + self.random_element(self.states_and_territories_abbr) return self.random_element(self.states_abbr) def postcode(self):
State Abbreviations for en_US have too many The output of the state provider in address only outputs the 50 states, but the state_abbr has 59 potential outputs ### Steps to reproduce Generate a value using the state_abbr provider ### Expected behavior The value should be one of the 50 US states abbreviations ### Actual behavior The value is one of the 50 US states, Washington DC (DC), American Samoa (AS), Micronesia (FM), Guam (GU), Marshall Islands (MH), Northern Marianas (MP), Palau (PW), Puerto Rico (PR), and Virgin Isles (VI). It appears the list was pulled from sources such as https://www.factmonster.com/us/postal-information/state-abbreviations-and-state-postal-codes that list every possible postal state abbreviation.
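A small sketch of how the patched provider is meant to be called. `include_territories`, `states_abbr` and `states_and_territories_abbr` come from the patch and tests above; the locale argument and variable names are illustrative.

```python
from faker import Faker
from faker.providers.address.en_US import Provider as EnUsProvider

fake = Faker("en_US")

# Default call: per the docstring in the patch, territories may be included.
abbr = fake.state_abbr()
assert abbr in EnUsProvider.states_and_territories_abbr

# Opting out of territories restricts the pool to states_abbr
# (the 50 states plus DC in this provider).
state_only = fake.state_abbr(include_territories=False)
assert state_only in EnUsProvider.states_abbr
```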
joke2k/faker
diff --git a/tests/providers/test_address.py b/tests/providers/test_address.py index 752b294e..7210f8fa 100644 --- a/tests/providers/test_address.py +++ b/tests/providers/test_address.py @@ -313,6 +313,12 @@ class TestEnUS(unittest.TestCase): def test_state_abbr(self): state_abbr = self.factory.state_abbr() assert isinstance(state_abbr, string_types) + states_and_territories = EnUsProvider.states_and_territories_abbr + assert state_abbr in states_and_territories + + def test_state_abbr_no_territories(self): + state_abbr = self.factory.state_abbr(include_territories=False) + assert isinstance(state_abbr, string_types) assert state_abbr in EnUsProvider.states_abbr def test_postcode(self):
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 3 }, "num_modified_files": 1 }
0.9
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio" ], "pre_install": [], "python": "3.6", "reqs_path": [ "tests/requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 coverage==6.2 dnspython==2.2.1 email-validator==1.0.3 execnet==1.9.0 -e git+https://github.com/joke2k/faker.git@fc546bca9b4f411e8565aeb5ddd9ccee8de59494#egg=Faker idna==3.10 importlib-metadata==4.8.3 iniconfig==1.1.1 mock==2.0.0 packaging==21.3 pbr==6.1.1 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 pytest-asyncio==0.16.0 pytest-cov==4.0.0 pytest-mock==3.6.1 pytest-xdist==3.0.2 python-dateutil==2.9.0.post0 six==1.17.0 text-unidecode==1.2 tomli==1.2.3 typing_extensions==4.1.1 UkPostcodeParser==1.1.2 zipp==3.6.0
name: faker channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - coverage==6.2 - dnspython==2.2.1 - email-validator==1.0.3 - execnet==1.9.0 - idna==3.10 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - mock==2.0.0 - packaging==21.3 - pbr==6.1.1 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-asyncio==0.16.0 - pytest-cov==4.0.0 - pytest-mock==3.6.1 - pytest-xdist==3.0.2 - python-dateutil==2.9.0.post0 - six==1.17.0 - text-unidecode==1.2 - tomli==1.2.3 - typing-extensions==4.1.1 - ukpostcodeparser==1.1.2 - zipp==3.6.0 prefix: /opt/conda/envs/faker
[ "tests/providers/test_address.py::TestEnUS::test_state_abbr", "tests/providers/test_address.py::TestEnUS::test_state_abbr_no_territories" ]
[]
[ "tests/providers/test_address.py::TestBaseProvider::test_alpha_2_country_codes", "tests/providers/test_address.py::TestBaseProvider::test_alpha_2_country_codes_as_default", "tests/providers/test_address.py::TestBaseProvider::test_alpha_3_country_codes", "tests/providers/test_address.py::TestBaseProvider::test_bad_country_code_representation", "tests/providers/test_address.py::TestAr_AA::test_alpha_2_country_codes", "tests/providers/test_address.py::TestAr_AA::test_alpha_2_country_codes_as_default", "tests/providers/test_address.py::TestAr_AA::test_alpha_3_country_codes", "tests/providers/test_address.py::TestAr_AA::test_bad_country_code_representation", "tests/providers/test_address.py::TestDeAT::test_city", "tests/providers/test_address.py::TestDeAT::test_country", "tests/providers/test_address.py::TestDeAT::test_latitude", "tests/providers/test_address.py::TestDeAT::test_longitude", "tests/providers/test_address.py::TestDeAT::test_postcode", "tests/providers/test_address.py::TestDeAT::test_state", "tests/providers/test_address.py::TestDeAT::test_street_suffix_long", "tests/providers/test_address.py::TestDeAT::test_street_suffix_short", "tests/providers/test_address.py::TestDeDE::test_city", "tests/providers/test_address.py::TestDeDE::test_country", "tests/providers/test_address.py::TestDeDE::test_state", "tests/providers/test_address.py::TestDeDE::test_street_suffix_long", "tests/providers/test_address.py::TestDeDE::test_street_suffix_short", "tests/providers/test_address.py::TestFiFI::test_city", "tests/providers/test_address.py::TestFiFI::test_street_suffix", "tests/providers/test_address.py::TestElGR::test_city", "tests/providers/test_address.py::TestElGR::test_latlng", "tests/providers/test_address.py::TestElGR::test_line_address", "tests/providers/test_address.py::TestElGR::test_region", "tests/providers/test_address.py::TestEnAU::test_city_prefix", "tests/providers/test_address.py::TestEnAU::test_postcode", "tests/providers/test_address.py::TestEnAU::test_state", "tests/providers/test_address.py::TestEnAU::test_state_abbr", "tests/providers/test_address.py::TestEnNZ::test_postcode", "tests/providers/test_address.py::TestEnNZ::test_state", "tests/providers/test_address.py::TestEnCA::test_city_prefix", "tests/providers/test_address.py::TestEnCA::test_postal_code_letter", "tests/providers/test_address.py::TestEnCA::test_postalcode", "tests/providers/test_address.py::TestEnCA::test_postcode", "tests/providers/test_address.py::TestEnCA::test_province", "tests/providers/test_address.py::TestEnCA::test_province_abbr", "tests/providers/test_address.py::TestEnCA::test_secondary_address", "tests/providers/test_address.py::TestEnGB::test_postcode", "tests/providers/test_address.py::TestEnUS::test_city_prefix", "tests/providers/test_address.py::TestEnUS::test_military_apo", "tests/providers/test_address.py::TestEnUS::test_military_dpo", "tests/providers/test_address.py::TestEnUS::test_military_ship", "tests/providers/test_address.py::TestEnUS::test_military_state", "tests/providers/test_address.py::TestEnUS::test_postcode", "tests/providers/test_address.py::TestEnUS::test_state", "tests/providers/test_address.py::TestEnUS::test_zipcode", "tests/providers/test_address.py::TestEnUS::test_zipcode_plus4", "tests/providers/test_address.py::TestHuHU::test_address", "tests/providers/test_address.py::TestHuHU::test_postcode_first_digit", "tests/providers/test_address.py::TestHuHU::test_street_address", "tests/providers/test_address.py::TestHuHU::test_street_address_with_county", 
"tests/providers/test_address.py::TestJaJP::test_address", "tests/providers/test_address.py::TestKoKR::test_address", "tests/providers/test_address.py::TestNeNP::test_address", "tests/providers/test_address.py::TestNoNO::test_address", "tests/providers/test_address.py::TestNoNO::test_city_suffix", "tests/providers/test_address.py::TestNoNO::test_postcode", "tests/providers/test_address.py::TestNoNO::test_street_suffix", "tests/providers/test_address.py::TestZhTW::test_address", "tests/providers/test_address.py::TestZhCN::test_address", "tests/providers/test_address.py::TestPtBr::test_address", "tests/providers/test_address.py::TestPtPT::test_distrito", "tests/providers/test_address.py::TestPtPT::test_freguesia" ]
[]
MIT License
3,044
[ "faker/providers/address/en_US/__init__.py" ]
[ "faker/providers/address/en_US/__init__.py" ]
JonathonReinhart__scuba-120
94a5f67bae600298e969373b66d527f7d02308c2
2018-09-09 19:05:41
94a5f67bae600298e969373b66d527f7d02308c2
diff --git a/CHANGELOG.md b/CHANGELOG.md index 3eefb35..3b9ff89 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ This project adheres to [Semantic Versioning](http://semver.org/). ## [Unreleased] ### Added - Add -e/--env command-line option (#111) +- Add support for setting environment in .scuba.yml (#120) ### Changed - Implemented auto-versioning using Git and Travis (#112) diff --git a/doc/yaml-reference.md b/doc/yaml-reference.md index 95826e8..996f554 100644 --- a/doc/yaml-reference.md +++ b/doc/yaml-reference.md @@ -18,6 +18,25 @@ Example: image: debian:8.2 ``` +### `environment` + +The optional `environment` node allows environment variables to be specified. +This can be either a mapping (dictionary), or a list of `KEY=VALUE` pairs. +If a value is not specified, the value is taken from the external environment. + +Examples: +```yaml +environment: + FOO: "This is foo" + SECRET: +``` +```yaml +environment: + - FOO=This is foo + - SECRET +``` + + ### `aliases` The optional `aliases` node is a mapping (dictionary) of bash-like aliases, @@ -53,6 +72,21 @@ aliases: - cat /etc/os-release ``` +Aliases can add to the top-level `environment` and override its values using +the same syntax: +```yaml +environment: + FOO: "Top-level" +aliases: + example: + environment: + FOO: "Override" + BAR: "New" + script: + - echo $FOO $BAR +``` + + ### `hooks` The optional `hooks` node is a mapping (dictionary) of "hook" scripts that run diff --git a/example/env_vars/.scuba.yml b/example/env_vars/.scuba.yml new file mode 100644 index 0000000..b28ca8a --- /dev/null +++ b/example/env_vars/.scuba.yml @@ -0,0 +1,21 @@ +image: !from_yaml ../common.yml image + +environment: + FOO: Top-level + BAR: 42 + EMPTY: "" + EXTERNAL_1: + +aliases: + example: + environment: + FOO: Overridden by alias + EXTERNAL_2: + script: + - echo "FOO=\"$FOO\"" + - echo "BAR=\"$BAR\"" + - echo "EMPTY=\"$EMPTY\"" + - echo "EXTERNAL_1=\"$EXTERNAL_1\"" + - echo "EXTERNAL_2=\"$EXTERNAL_2\"" + - echo "CMDLINE=\"$CMDLINE\"" + diff --git a/example/env_vars/run_example.sh b/example/env_vars/run_example.sh new file mode 100755 index 0000000..bc742d1 --- /dev/null +++ b/example/env_vars/run_example.sh @@ -0,0 +1,7 @@ +#!/bin/bash +cd $(dirname $0) + +export EXTERNAL_1="Value 1 taken from external environment" +export EXTERNAL_2="Value 2 taken from external environment" + +scuba -e CMDLINE="This comes from the cmdline" example diff --git a/scuba/__main__.py b/scuba/__main__.py index 72c89d6..c3eb3e6 100644 --- a/scuba/__main__.py +++ b/scuba/__main__.py @@ -132,6 +132,9 @@ class ScubaDive(object): # Docker is running natively self.__setup_native_run() + # Apply environment vars from .scuba.yml + self.env_vars.update(self.context.environment) + def __str__(self): s = StringIO() writeln(s, 'ScubaDive') diff --git a/scuba/config.py b/scuba/config.py index b8e0417..9b79ffd 100644 --- a/scuba/config.py +++ b/scuba/config.py @@ -129,17 +129,50 @@ def _process_script_node(node, name): raise ConfigError("{0}: must be string or dict".format(name)) +def _process_environment(node, name): + # Environment can be either a list of strings ("KEY=VALUE") or a mapping + # Environment keys and values are always strings + result = {} + + if not node: + pass + elif isinstance(node, dict): + for k, v in node.items(): + if v is None: + v = os.getenv(k, '') + result[k] = str(v) + elif isinstance(node, list): + for e in node: + k, v = parse_env_var(e) + result[k] = v + else: + raise ConfigError("'{0}' must be list or mapping, not {1}".format( + name, 
type(node).__name__)) + + return result + + + class ScubaAlias(object): - def __init__(self, name, script, image): + def __init__(self, name, script, image, environment): self.name = name self.script = script self.image = image + self.environment = environment @classmethod def from_dict(cls, name, node): script = _process_script_node(node, name) - image = node.get('image') if isinstance(node, dict) else None - return cls(name, script, image) + image = None + environment = None + + if isinstance(node, dict): # Rich alias + image = node.get('image') + environment = _process_environment( + node.get('environment'), + '{0}.{1}'.format(name, 'environment')) + + return cls(name, script, image, environment) class ScubaContext(object): pass @@ -147,7 +180,7 @@ class ScubaContext(object): class ScubaConfig(object): def __init__(self, **data): required_nodes = ('image',) - optional_nodes = ('aliases','hooks',) + optional_nodes = ('aliases','hooks','environment') # Check for missing required nodes missing = [n for n in required_nodes if not n in data] @@ -165,6 +198,7 @@ class ScubaConfig(object): self._load_aliases(data) self._load_hooks(data) + self._environment = self._load_environment(data) @@ -187,6 +221,9 @@ class ScubaConfig(object): hook = _process_script_node(node, name) self._hooks[name] = hook + def _load_environment(self, data): + return _process_environment(data.get('environment'), 'environment') + @property def image(self): @@ -200,6 +237,10 @@ class ScubaConfig(object): def hooks(self): return self._hooks + @property + def environment(self): + return self._environment + def process_command(self, command): '''Processes a user command using aliases @@ -214,6 +255,7 @@ class ScubaConfig(object): result = ScubaContext() result.script = None result.image = self.image + result.environment = self.environment.copy() if command: alias = self.aliases.get(command[0]) @@ -226,6 +268,10 @@ class ScubaConfig(object): if alias.image: result.image = alias.image + # Merge/override the environment + if alias.environment: + result.environment.update(alias.environment) + if len(alias.script) > 1: # Alias is a multiline script; no additional # arguments are allowed in the scuba invocation. diff --git a/scuba/utils.py b/scuba/utils.py index f1555fc..1af6f45 100644 --- a/scuba/utils.py +++ b/scuba/utils.py @@ -74,4 +74,4 @@ def parse_env_var(s): return (k, v) k = parts[0] - return (k, os.getenv(k)) + return (k, os.getenv(k, ''))
Using environment variables in .scuba.yml or passing them Is it already possible – or do you consider it a valuable addition – to use environment variables in the `.scuba.yml` file? For one, these could be already present in the shell where `scuba` is run. Additionally, I think some variables added by `scuba` might be helpful. (Is there a list of already used variables available somewhere, like the directory where `scuba` was started, path and directory to `.scubafile.yml` etc.)? The goal would be do define an alias like `build: ./do-build.sh ${SCUBA_HOME} ${SOME_ENV}`. With regard to passing environment variables into the container, what about a new configuration setting, either global or per-alias, that lists env vars to be passed? ``` image: ... aliases: build: command: ./do-build.sh ${TARGET} env: - THIS_VAR - THAT_VAR env: - TARGET_ARCH - COMPILE_MODE ``` Note that we'd probably need to change the alias syntax somewhat or allow another structure to place the `env` key. This would add `-e THIS_VAR -e THAT_VAR` to the `docker run` command when using the `build` alias, and add `-e TARGET_ARCH -e COMPILE_MODE` in any case. Does that make sense?
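The merge semantics the accompanying patch settles on (an alias-level `environment` mapping overriding the top-level block, and keys given without a value being read from the external environment) can be illustrated with plain dictionaries. This is only a sketch of the precedence, not scuba's actual code; the key/value strings loosely mirror the example config and tests in the patch.

```python
import os

# Hypothetical values mirroring the patch's example .scuba.yml.
top_level = {"FOO": "Top-level", "BAR": "42",
             "EXTERNAL_2": os.getenv("EXTERNAL_2", "")}
alias_env = {"FOO": "Overridden by alias", "MORE": "Hello world"}

effective = dict(top_level)
effective.update(alias_env)   # alias values win over the top level

print(effective["FOO"])       # "Overridden by alias"
print(effective["BAR"])       # "42"
```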
JonathonReinhart/scuba
diff --git a/tests/test_config.py b/tests/test_config.py index 118379e..5d8a5b4 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -372,3 +372,110 @@ class TestConfig(TmpDirTestCase): ''') assert_raises(scuba.config.ConfigError, scuba.config.load_config, '.scuba.yml') + + + ############################################################################ + # Env + + def test_env_top_dict(self): + '''Top-level environment can be loaded (dict)''' + with open('.scuba.yml', 'w') as f: + f.write(r''' + image: na + environment: + FOO: This is foo + FOO_WITH_QUOTES: "\"Quoted foo\"" # Quotes included in value + BAR: "This is bar" + MAGIC: 42 + SWITCH_1: true # YAML boolean + SWITCH_2: "true" # YAML string + EMPTY: "" + EXTERNAL: # Comes from os env + EXTERNAL_NOTSET: # Missing in os env + ''') + + with mocked_os_env(EXTERNAL='Outside world'): + config = scuba.config.load_config('.scuba.yml') + + expect = dict( + FOO = "This is foo", + FOO_WITH_QUOTES = "\"Quoted foo\"", + BAR = "This is bar", + MAGIC = "42", # N.B. string + SWITCH_1 = "True", # Unfortunately this is due to str(bool(1)) + SWITCH_2 = "true", + EMPTY = "", + EXTERNAL = "Outside world", + EXTERNAL_NOTSET = "", + ) + self.assertEqual(expect, config.environment) + + + def test_env_top_list(self): + '''Top-level environment can be loaded (list)''' + with open('.scuba.yml', 'w') as f: + f.write(r''' + image: na + environment: + - FOO=This is foo # No quotes + - FOO_WITH_QUOTES="Quoted foo" # Quotes included in value + - BAR=This is bar + - MAGIC=42 + - SWITCH_2=true + - EMPTY= + - EXTERNAL # Comes from os env + - EXTERNAL_NOTSET # Missing in os env + ''') + + with mocked_os_env(EXTERNAL='Outside world'): + config = scuba.config.load_config('.scuba.yml') + + expect = dict( + FOO = "This is foo", + FOO_WITH_QUOTES = "\"Quoted foo\"", + BAR = "This is bar", + MAGIC = "42", # N.B. string + SWITCH_2 = "true", + EMPTY = "", + EXTERNAL = "Outside world", + EXTERNAL_NOTSET = "", + ) + self.assertEqual(expect, config.environment) + + + def test_env_alias(self): + '''Alias can have environment which overrides top-level''' + with open('.scuba.yml', 'w') as f: + f.write(r''' + image: na + environment: + FOO: Top-level + BAR: 42 + aliases: + al: + script: Don't care + environment: + FOO: Overridden + MORE: Hello world + ''') + + config = scuba.config.load_config('.scuba.yml') + + self.assertEqual(config.environment, dict( + FOO = "Top-level", + BAR = "42", + )) + + self.assertEqual(config.aliases['al'].environment, dict( + FOO = "Overridden", + MORE = "Hello world", + )) + + # Does the environment get overridden / merged? 
+ ctx = config.process_command(['al']) + + self.assertEqual(ctx.environment, dict( + FOO = "Overridden", + BAR = "42", + MORE = "Hello world", + )) diff --git a/tests/test_main.py b/tests/test_main.py index 0c192af..9e4641a 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -13,6 +13,7 @@ import os import sys from tempfile import TemporaryFile, NamedTemporaryFile import subprocess +import shlex import scuba.__main__ as main import scuba.constants @@ -356,32 +357,6 @@ class TestMain(TmpDirTestCase): assert_str_equalish(out, data) - def test_env_var_keyval(self): - '''Verify -e KEY=VAL works''' - with open('.scuba.yml', 'w') as f: - f.write('image: {0}\n'.format(DOCKER_IMAGE)) - args = [ - '-e', 'KEY=VAL', - '/bin/sh', '-c', 'echo $KEY', - ] - out, _ = self.run_scuba(args) - assert_str_equalish(out, 'VAL') - - def test_env_var_key_only(self): - '''Verify -e KEY works''' - with open('.scuba.yml', 'w') as f: - f.write('image: {0}\n'.format(DOCKER_IMAGE)) - args = [ - '-e', 'KEY', - '/bin/sh', '-c', 'echo $KEY', - ] - def mocked_getenv(key): - self.assertEqual(key, 'KEY') - return 'mockedvalue' - with mock.patch('os.getenv', side_effect=mocked_getenv): - out, _ = self.run_scuba(args) - assert_str_equalish(out, 'mockedvalue') - def test_image_entrypoint(self): '''Verify scuba doesn't interfere with the configured image ENTRYPOINT''' @@ -493,6 +468,86 @@ class TestMain(TmpDirTestCase): self._test_one_hook('root', 0, 0) + ############################################################################ + # Environment + + def test_env_var_keyval(self): + '''Verify -e KEY=VAL works''' + with open('.scuba.yml', 'w') as f: + f.write('image: {0}\n'.format(DOCKER_IMAGE)) + args = [ + '-e', 'KEY=VAL', + '/bin/sh', '-c', 'echo $KEY', + ] + out, _ = self.run_scuba(args) + assert_str_equalish(out, 'VAL') + + def test_env_var_key_only(self): + '''Verify -e KEY works''' + with open('.scuba.yml', 'w') as f: + f.write('image: {0}\n'.format(DOCKER_IMAGE)) + args = [ + '-e', 'KEY', + '/bin/sh', '-c', 'echo $KEY', + ] + with mocked_os_env(KEY='mockedvalue'): + out, _ = self.run_scuba(args) + assert_str_equalish(out, 'mockedvalue') + + + def test_env_var_sources(self): + '''Verify scuba handles all possible environment variable sources''' + with open('.scuba.yml', 'w') as f: + f.write(r''' + image: {image} + environment: + FOO: Top-level + BAR: 42 + EXTERNAL_2: + aliases: + al: + script: + - echo "FOO=\"$FOO\"" + - echo "BAR=\"$BAR\"" + - echo "MORE=\"$MORE\"" + - echo "EXTERNAL_1=\"$EXTERNAL_1\"" + - echo "EXTERNAL_2=\"$EXTERNAL_2\"" + - echo "EXTERNAL_3=\"$EXTERNAL_3\"" + - echo "BAZ=\"$BAZ\"" + environment: + FOO: Overridden + MORE: Hello world + EXTERNAL_3: + '''.format(image=DOCKER_IMAGE)) + + args = [ + '-e', 'EXTERNAL_1', + '-e', 'BAZ=From the command line', + 'al', + ] + + m = mocked_os_env( + EXTERNAL_1 = "External value 1", + EXTERNAL_2 = "External value 2", + EXTERNAL_3 = "External value 3", + ) + with m: + out, _ = self.run_scuba(args) + + # Convert key/pair output to dictionary + result = dict( pair.split('=', 1) for pair in shlex.split(out) ) + + self.assertEqual(result, dict( + FOO = "Overridden", + BAR = "42", + MORE = "Hello world", + EXTERNAL_1 = "External value 1", + EXTERNAL_2 = "External value 2", + EXTERNAL_3 = "External value 3", + BAZ = "From the command line", + )) + + ############################################################################ # Misc def test_list_aliases(self): diff --git a/tests/test_utils.py b/tests/test_utils.py index 0e6c130..02c3aaf 100644 --- 
a/tests/test_utils.py +++ b/tests/test_utils.py @@ -89,10 +89,12 @@ class TestUtils(TestCase): def test_parse_env_var_no_equals(self): '''parse_env_var handles no equals and gets value from environment''' - def mocked_getenv(key): - self.assertEqual(key, 'KEY') - return 'mockedvalue' - - with mock.patch('os.getenv', side_effect=mocked_getenv): + with mocked_os_env(KEY='mockedvalue'): result = scuba.utils.parse_env_var('KEY') self.assertEqual(result, ('KEY', 'mockedvalue')) + + def test_parse_env_var_not_set(self): + '''parse_env_var returns an empty string if not set''' + with mocked_os_env(): + result = scuba.utils.parse_env_var('NOTSET') + self.assertEqual(result, ('NOTSET', '')) diff --git a/tests/utils.py b/tests/utils.py index 1444b3d..7ca2024 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -6,6 +6,11 @@ import tempfile import shutil import unittest import logging +try: + from unittest import mock +except ImportError: + import mock + def assert_set_equal(a, b): assert_equal(set(a), set(b)) @@ -49,6 +54,9 @@ def make_executable(path): mode |= (mode & 0o444) >> 2 # copy R bits to X os.chmod(path, mode) +def mocked_os_env(**env): + return mock.patch('os.getenv', side_effect=env.get) + # http://stackoverflow.com/a/8389373/119527 class PseudoTTY(object):
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 0, "test_score": 3 }, "num_modified_files": 5 }
2.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.5", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 coverage==3.7.1 importlib-metadata==4.8.3 iniconfig==1.1.1 nose==1.3.7 packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 PyYAML==6.0.1 -e git+https://github.com/JonathonReinhart/scuba.git@94a5f67bae600298e969373b66d527f7d02308c2#egg=scuba tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: scuba channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - coverage==3.7.1 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - nose==1.3.7 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - pyyaml==6.0.1 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/scuba
[ "tests/test_config.py::TestConfig::test_env_alias", "tests/test_config.py::TestConfig::test_env_top_dict", "tests/test_config.py::TestConfig::test_env_top_list", "tests/test_utils.py::TestUtils::test_parse_env_var_not_set" ]
[ "tests/test_main.py::TestMain::test_arbitrary_docker_args", "tests/test_main.py::TestMain::test_args", "tests/test_main.py::TestMain::test_basic", "tests/test_main.py::TestMain::test_complex_commands_in_alias", "tests/test_main.py::TestMain::test_created_file_ownership", "tests/test_main.py::TestMain::test_dry_run", "tests/test_main.py::TestMain::test_env_var_key_only", "tests/test_main.py::TestMain::test_env_var_keyval", "tests/test_main.py::TestMain::test_env_var_sources", "tests/test_main.py::TestMain::test_home_writable_root", "tests/test_main.py::TestMain::test_home_writable_scubauser", "tests/test_main.py::TestMain::test_image_entrypoint", "tests/test_main.py::TestMain::test_image_override", "tests/test_main.py::TestMain::test_image_override_with_alias", "tests/test_main.py::TestMain::test_list_aliases", "tests/test_main.py::TestMain::test_no_cmd", "tests/test_main.py::TestMain::test_no_docker", "tests/test_main.py::TestMain::test_redirect_stdin", "tests/test_main.py::TestMain::test_root_hook", "tests/test_main.py::TestMain::test_user_hook", "tests/test_main.py::TestMain::test_user_root", "tests/test_main.py::TestMain::test_user_scubauser", "tests/test_main.py::TestMain::test_with_tty", "tests/test_main.py::TestMain::test_without_tty", "tests/test_main.py::TestMain::test_yml_not_needed_with_image_override" ]
[ "tests/test_config.py::TestCommonScriptSchema::test_script_key_list", "tests/test_config.py::TestCommonScriptSchema::test_script_key_mapping_invalid", "tests/test_config.py::TestCommonScriptSchema::test_script_key_string", "tests/test_config.py::TestCommonScriptSchema::test_simple", "tests/test_config.py::TestConfig::test_find_config_cur_dir", "tests/test_config.py::TestConfig::test_find_config_nonexist", "tests/test_config.py::TestConfig::test_find_config_parent_dir", "tests/test_config.py::TestConfig::test_find_config_way_up", "tests/test_config.py::TestConfig::test_hooks_invalid_list", "tests/test_config.py::TestConfig::test_hooks_missing_script", "tests/test_config.py::TestConfig::test_hooks_mixed", "tests/test_config.py::TestConfig::test_load_config__no_spaces_in_aliases", "tests/test_config.py::TestConfig::test_load_config_empty", "tests/test_config.py::TestConfig::test_load_config_image_from_yaml", "tests/test_config.py::TestConfig::test_load_config_image_from_yaml_missing_arg", "tests/test_config.py::TestConfig::test_load_config_image_from_yaml_missing_file", "tests/test_config.py::TestConfig::test_load_config_image_from_yaml_nested_key_missing", "tests/test_config.py::TestConfig::test_load_config_image_from_yaml_nested_keys", "tests/test_config.py::TestConfig::test_load_config_image_from_yaml_unicode_args", "tests/test_config.py::TestConfig::test_load_config_minimal", "tests/test_config.py::TestConfig::test_load_config_with_aliases", "tests/test_config.py::TestConfig::test_load_unexpected_node", "tests/test_config.py::TestConfig::test_process_command_alias_overrides_image", "tests/test_config.py::TestConfig::test_process_command_aliases_unused", "tests/test_config.py::TestConfig::test_process_command_aliases_used_noargs", "tests/test_config.py::TestConfig::test_process_command_aliases_used_withargs", "tests/test_config.py::TestConfig::test_process_command_empty", "tests/test_config.py::TestConfig::test_process_command_multiline_aliases_forbid_user_args", "tests/test_config.py::TestConfig::test_process_command_multiline_aliases_used", "tests/test_config.py::TestConfig::test_process_command_no_aliases", "tests/test_main.py::TestMain::test_config_error", "tests/test_main.py::TestMain::test_handle_get_image_command_error", "tests/test_main.py::TestMain::test_multiline_alias_no_args_error", "tests/test_main.py::TestMain::test_no_image_cmd", "tests/test_main.py::TestMain::test_version", "tests/test_utils.py::TestUtils::test_format_cmdline", "tests/test_utils.py::TestUtils::test_mkdir_p", "tests/test_utils.py::TestUtils::test_mkdir_p_fail_exist", "tests/test_utils.py::TestUtils::test_parse_env_var", "tests/test_utils.py::TestUtils::test_parse_env_var_more_equals", "tests/test_utils.py::TestUtils::test_parse_env_var_no_equals", "tests/test_utils.py::TestUtils::test_shell_quote_cmd" ]
[]
MIT License
3,045
[ "scuba/utils.py", "CHANGELOG.md", "scuba/__main__.py", "doc/yaml-reference.md", "scuba/config.py", "example/env_vars/.scuba.yml", "example/env_vars/run_example.sh" ]
[ "scuba/utils.py", "CHANGELOG.md", "scuba/__main__.py", "doc/yaml-reference.md", "scuba/config.py", "example/env_vars/.scuba.yml", "example/env_vars/run_example.sh" ]
cwacek__python-jsonschema-objects-149
e5788a6eadb4bee5a28aadae351f218da0fbb4bf
2018-09-10 02:37:31
a1367dd0fb29f8d218f759bd2baf86e2ae48484d
diff --git a/development.txt b/development.txt index d0653b0..f4ede09 100644 --- a/development.txt +++ b/development.txt @@ -6,3 +6,4 @@ sphinx sphinx-autobuild recommonmark pytest +pytest-mock diff --git a/python_jsonschema_objects/classbuilder.py b/python_jsonschema_objects/classbuilder.py index 75ad2eb..65dbfad 100644 --- a/python_jsonschema_objects/classbuilder.py +++ b/python_jsonschema_objects/classbuilder.py @@ -197,7 +197,7 @@ class ProtocolBase(collections.MutableMapping): # run its setter. We get it from the class definition and call # it directly. XXX Heinous. prop = getattr(self.__class__, self.__prop_names__[name]) - prop.fset(self, val) + prop.__set__(self, val) else: # This is an additional property of some kind try: @@ -629,12 +629,15 @@ class ClassBuilder(object): self.resolver.resolution_scope, detail['items']['$ref']) typ = self.construct(uri, detail['items']) + constraints = copy.copy(detail) + constraints['strict'] = kw.get('strict') propdata = { 'type': 'array', 'validator': python_jsonschema_objects.wrapper_types.ArrayWrapper.create( uri, item_constraint=typ, - **detail)} + **constraints)} + else: uri = "{0}/{1}_{2}".format(nm, prop, "<anonymous_field>") @@ -652,15 +655,24 @@ class ClassBuilder(object): ) else: typ = self.construct(uri, detail['items']) + + constraints = copy.copy(detail) + constraints['strict'] = kw.get('strict') propdata = {'type': 'array', - 'validator': python_jsonschema_objects.wrapper_types.ArrayWrapper.create(uri, item_constraint=typ, - **detail)} + 'validator': python_jsonschema_objects.wrapper_types.ArrayWrapper.create( + uri, + item_constraint=typ, + **constraints)} + except NotImplementedError: typ = detail['items'] + constraints = copy.copy(detail) + constraints['strict'] = kw.get('strict') propdata = {'type': 'array', - 'validator': python_jsonschema_objects.wrapper_types.ArrayWrapper.create(uri, - item_constraint=typ, - **detail)} + 'validator': python_jsonschema_objects.wrapper_types.ArrayWrapper.create( + uri, + item_constraint=typ, + **constraints)} props[prop] = make_property(prop, propdata, @@ -725,125 +737,7 @@ class ClassBuilder(object): def make_property(prop, info, desc=""): + from . 
import descriptors - def getprop(self): - try: - return self._properties[prop] - except KeyError: - raise AttributeError("No such attribute") - - def setprop(self, val): - if isinstance(info['type'], (list, tuple)): - ok = False - errors = [] - type_checks = [] - - for typ in info['type']: - if not isinstance(typ, dict): - type_checks.append(typ) - continue - typ = next(t - for n, t in validators.SCHEMA_TYPE_MAPPING - if typ['type'] == n) - if typ is None: - typ = type(None) - if isinstance(typ, (list, tuple)): - type_checks.extend(typ) - else: - type_checks.append(typ) - - for typ in type_checks: - if isinstance(val, typ): - ok = True - break - elif hasattr(typ, 'isLiteralClass'): - try: - validator = typ(val) - except Exception as e: - errors.append( - "Failed to coerce to '{0}': {1}".format(typ, e)) - pass - else: - validator.validate() - ok = True - break - elif util.safe_issubclass(typ, ProtocolBase): - # force conversion- thus the val rather than validator assignment - try: - val = typ(**util.coerce_for_expansion(val)) - except Exception as e: - errors.append( - "Failed to coerce to '{0}': {1}".format(typ, e)) - pass - else: - val.validate() - ok = True - break - elif util.safe_issubclass(typ, python_jsonschema_objects.wrapper_types.ArrayWrapper): - try: - val = typ(val) - except Exception as e: - errors.append( - "Failed to coerce to '{0}': {1}".format(typ, e)) - pass - else: - val.validate() - ok = True - break - - if not ok: - errstr = "\n".join(errors) - raise validators.ValidationError( - "Object must be one of {0}: \n{1}".format(info['type'], errstr)) - - elif info['type'] == 'array': - val = info['validator'](val) - val.validate() - - elif util.safe_issubclass(info['type'], - python_jsonschema_objects.wrapper_types.ArrayWrapper): - # An array type may have already been converted into an ArrayValidator - val = info['type'](val) - val.validate() - - elif getattr(info['type'], 'isLiteralClass', False) is True: - if not isinstance(val, info['type']): - validator = info['type'](val) - validator.validate() - if validator._value is not None: - # This allows setting of default Literal values - val = validator - - elif util.safe_issubclass(info['type'], ProtocolBase): - if not isinstance(val, info['type']): - val = info['type'](**util.coerce_for_expansion(val)) - - val.validate() - - elif isinstance(info['type'], TypeProxy): - val = info['type'](val) - - elif isinstance(info['type'], TypeRef): - if not isinstance(val, info['type'].ref_class): - val = info['type'](**val) - - val.validate() - - elif info['type'] is None: - # This is the null value - if val is not None: - raise validators.ValidationError( - "None is only valid value for null") - - else: - raise TypeError("Unknown object type: '{0}'".format(info['type'])) - - self._properties[prop] = val - - def delprop(self): - if prop in self.__required__: - raise AttributeError("'%s' is required" % prop) - else: - del self._properties[prop] - - return property(getprop, setprop, delprop, desc) + prop = descriptors.AttributeDescriptor(prop, info, desc) + return prop diff --git a/python_jsonschema_objects/descriptors.py b/python_jsonschema_objects/descriptors.py new file mode 100644 index 0000000..a53bcbc --- /dev/null +++ b/python_jsonschema_objects/descriptors.py @@ -0,0 +1,135 @@ +from . 
import validators, util, wrapper_types +from .classbuilder import ProtocolBase, TypeProxy, TypeRef + + +class AttributeDescriptor(object): + """ Provides property access for constructed class properties """ + + def __init__(self, prop, info, desc=""): + self.prop = prop + self.info = info + self.desc = desc + + def __doc__(self): + return self.desc + + def __get__(self, obj, owner=None): + if obj is None and owner is not None: + return self + + try: + return obj._properties[self.prop] + except KeyError: + raise AttributeError("No such attribute") + + def __set__(self, obj, val): + info = self.info + if isinstance(info["type"], (list, tuple)): + ok = False + errors = [] + type_checks = [] + + for typ in info["type"]: + if not isinstance(typ, dict): + type_checks.append(typ) + continue + typ = next( + t for n, t in validators.SCHEMA_TYPE_MAPPING if typ["type"] == n + ) + if typ is None: + typ = type(None) + if isinstance(typ, (list, tuple)): + type_checks.extend(typ) + else: + type_checks.append(typ) + + for typ in type_checks: + if isinstance(val, typ): + ok = True + break + elif hasattr(typ, "isLiteralClass"): + try: + validator = typ(val) + except Exception as e: + errors.append("Failed to coerce to '{0}': {1}".format(typ, e)) + pass + else: + validator.validate() + ok = True + break + elif util.safe_issubclass(typ, ProtocolBase): + # force conversion- thus the val rather than validator assignment + try: + val = typ(**util.coerce_for_expansion(val)) + except Exception as e: + errors.append("Failed to coerce to '{0}': {1}".format(typ, e)) + pass + else: + val.validate() + ok = True + break + elif util.safe_issubclass(typ, wrapper_types.ArrayWrapper): + try: + val = typ(val) + except Exception as e: + errors.append("Failed to coerce to '{0}': {1}".format(typ, e)) + pass + else: + val.validate() + ok = True + break + + if not ok: + errstr = "\n".join(errors) + raise validators.ValidationError( + "Object must be one of {0}: \n{1}".format(info["type"], errstr) + ) + + elif info["type"] == "array": + val = info["validator"](val) + val.validate() + + elif util.safe_issubclass(info["type"], wrapper_types.ArrayWrapper): + # An array type may have already been converted into an ArrayValidator + val = info["type"](val) + val.validate() + + elif getattr(info["type"], "isLiteralClass", False) is True: + if not isinstance(val, info["type"]): + validator = info["type"](val) + validator.validate() + if validator._value is not None: + # This allows setting of default Literal values + val = validator + + elif util.safe_issubclass(info["type"], ProtocolBase): + if not isinstance(val, info["type"]): + val = info["type"](**util.coerce_for_expansion(val)) + + val.validate() + + elif isinstance(info["type"], TypeProxy): + val = info["type"](val) + + elif isinstance(info["type"], TypeRef): + if not isinstance(val, info["type"].ref_class): + val = info["type"](**val) + + val.validate() + + elif info["type"] is None: + # This is the null value + if val is not None: + raise validators.ValidationError("None is only valid value for null") + + else: + raise TypeError("Unknown object type: '{0}'".format(info["type"])) + + obj._properties[self.prop] = val + + def __delete__(self, obj): + prop = self.prop + if prop in obj.__required__: + raise AttributeError("'%s' is required" % prop) + else: + del obj._properties[prop] diff --git a/python_jsonschema_objects/wrapper_types.py b/python_jsonschema_objects/wrapper_types.py index 4f5aafa..c733700 100644 --- a/python_jsonschema_objects/wrapper_types.py +++ 
b/python_jsonschema_objects/wrapper_types.py @@ -5,6 +5,7 @@ import six from python_jsonschema_objects import util from python_jsonschema_objects.validators import registry, ValidationError +from python_jsonschema_objects.util import lazy_format as fmt logger = logging.getLogger(__name__) @@ -16,20 +17,30 @@ class ArrayWrapper(collections.MutableSequence): with a dirty-tracking mechanism to avoid constant validation costs. """ + @property + def strict(self): + return getattr(self, '_strict_', False) + def __len__(self): return len(self.data) + def mark_or_revalidate(self): + if self.strict: + self.validate() + else: + self._dirty = True + def __delitem__(self, index): - self.data.remove(index) - self._dirty = True + self.data.pop(index) + self.mark_or_revalidate() def insert(self, index, value): self.data.insert(index, value) - self._dirty = True + self.mark_or_revalidate() def __setitem__(self, index, value): self.data[index] = value - self._dirty = True + self.mark_or_revalidate() def __getitem__(self, idx): return self.typed_elems[idx] @@ -41,23 +52,33 @@ class ArrayWrapper(collections.MutableSequence): return self.for_json() == other def __init__(self, ary): + """ Initialize a wrapper for the array + + Args: + ary: (list-like, or ArrayWrapper) + """ + + """ Marks whether or not the underlying array has been modified """ + self._dirty = True + + """ Holds a typed copy of the array """ + self._typed = None + if isinstance(ary, (list, tuple, collections.Sequence)): self.data = ary - self._dirty = True - self._typed = None elif isinstance(ary, ArrayWrapper): self.data = ary.data - self._dirty = True - self._typed = None else: raise TypeError("Invalid value given to array validator: {0}" .format(ary)) + logger.debug(fmt("Initializing ArrayWrapper {} with {}", self, ary)) + @property def typed_elems(self): + logger.debug(fmt("Accessing typed_elems of ArrayWrapper {} ", self)) if self._typed is None or self._dirty is True: - self._typed = self.validate_items() - self._dirty = False + self.validate() return self._typed @@ -77,9 +98,8 @@ class ArrayWrapper(collections.MutableSequence): return obj def serialize(self): - d = self.validate_items() enc = util.ProtocolJSONEncoder() - return enc.encode(d) + return enc.encode(self.typed_elems) def for_json(self): from python_jsonschema_objects import classbuilder @@ -97,13 +117,13 @@ class ArrayWrapper(collections.MutableSequence): return out def validate(self): - self.validate_items() - self.validate_length() - self.validate_uniqueness() + if self.strict or self._dirty: + self.validate_items() + self.validate_length() + self.validate_uniqueness() return True def validate_uniqueness(self): - from python_jsonschema_objects import classbuilder if getattr(self, 'uniqueItems', None) is not None: testset = set(self.data) @@ -113,7 +133,6 @@ class ArrayWrapper(collections.MutableSequence): .format(self.data)) def validate_length(self): - from python_jsonschema_objects import classbuilder if getattr(self, 'minItems', None) is not None: if len(self.data) < self.minItems: @@ -128,6 +147,15 @@ class ArrayWrapper(collections.MutableSequence): .format(self.maxItems, self.data)) def validate_items(self): + """ Validates the items in the backing array, including + performing type validation. 
+ + Sets the _typed property and clears the dirty flag as a side effect + + Returns: + The typed array + """ + logger.debug(fmt("Validating {}", self)) from python_jsonschema_objects import classbuilder if self.__itemtype__ is None: @@ -188,6 +216,8 @@ class ArrayWrapper(collections.MutableSequence): val.validate() typed_elems.append(val) + self._dirty = False + self._typed = typed_elems return typed_elems @staticmethod @@ -202,6 +232,7 @@ class ArrayWrapper(collections.MutableSequence): addl_constraints is expected to be key-value pairs of any of the other constraints permitted by JSON Schema v4. """ + logger.debug(fmt("Constructing ArrayValidator with {} and {}", item_constraint, addl_constraints)) from python_jsonschema_objects import classbuilder klassbuilder = addl_constraints.pop("classbuilder", None) props = {} @@ -283,6 +314,8 @@ class ArrayWrapper(collections.MutableSequence): props['__itemtype__'] = item_constraint + strict = addl_constraints.pop('strict', False) + props['_strict_'] = strict props.update(addl_constraints) validator = type(str(name), (ArrayWrapper,), props) diff --git a/tox.ini b/tox.ini index 4a88451..dd08d32 100644 --- a/tox.ini +++ b/tox.ini @@ -10,4 +10,5 @@ deps = . coverage pytest + pytest-mock
repeated validation of arrays and nested objects I'm dealing with big objects, some made of nested objects and arrays of objects. Initialization of such objects can get very long, and it seems that many values are validated multiple times. Each literal value is validated when set, each array re-validates its elements, each object re-validates its properties and nested objects. When initializing an object with a big nested dictionary, it can take a very long time, mostly spent re-doing validation that has already been done. How could we change the validation mechanism to avoid this? Constraints I see: + for the moment, some construction steps are done during validation (typed_elems are set during validation) Ideas: + add a "validated/dirty" flag in literals to avoid validating a value that has already been validated + only validate dirty properties Any other ideas? Any side effects to consider?
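One way to avoid the repeated work described here is the dirty-flag scheme the accompanying patch adopts for `ArrayWrapper`: mark on mutation, validate lazily on the next read, with an optional strict mode that keeps the old validate-on-every-change behaviour. Below is a toy, self-contained sketch of that idea; the class and function names are illustrative and are not part of the python-jsonschema-objects API.

```python
class LazyValidatedList:
    """Toy sketch of the dirty-flag approach: mutations only mark the
    wrapper dirty, and validation runs once on the next read, unless
    strict mode forces it on every change."""

    def __init__(self, data, validate_item, strict=False):
        self._data = list(data)
        self._validate_item = validate_item
        self._strict = strict
        self._dirty = True

    def _mark_or_revalidate(self):
        if self._strict:
            self.validate()
        else:
            self._dirty = True

    def append(self, value):
        self._data.append(value)
        self._mark_or_revalidate()

    def validate(self):
        for item in self._data:
            self._validate_item(item)   # raises on invalid items
        self._dirty = False

    def __getitem__(self, idx):
        if self._dirty:
            self.validate()             # lazy revalidation on first read
        return self._data[idx]


def must_be_str(value):
    if not isinstance(value, str):
        raise ValueError("not a string: %r" % (value,))


items = LazyValidatedList(["foo"], must_be_str)
items.append("bar")      # no validation yet, just marked dirty
items.append("baz")
print(items[0])          # a single validation pass happens here
```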
cwacek/python-jsonschema-objects
diff --git a/test/test_regression_143.py b/test/test_regression_143.py new file mode 100644 index 0000000..6822a9a --- /dev/null +++ b/test/test_regression_143.py @@ -0,0 +1,82 @@ +import python_jsonschema_objects as pjs +import python_jsonschema_objects.wrapper_types + + +def test_limited_validation(mocker): + schema = { + "title": "Example Schema", + "type": "object", + "properties": { + "a": {"type": "array", "items": {"type": "string"}, "default": []} + }, + } + + ob = pjs.ObjectBuilder(schema) + ns1 = ob.build_classes() + validator = ns1.ExampleSchema.a.info["validator"]([]) + validate_items = mocker.patch.object( + validator, "validate_items", side_effect=validator.validate_items + ) + validate = mocker.patch.object( + validator, "validate", side_effect=validator.validate + ) + mocker.patch.dict( + ns1.ExampleSchema.a.info, + {"validator": mocker.MagicMock(return_value=validator)}, + ) + + foo = ns1.ExampleSchema() + # We expect validation to be called on creation + assert validate_items.call_count == 1 + + # We expect manipulation to not revalidate immediately without strict + foo.a.append("foo") + foo.a.append("bar") + assert validate_items.call_count == 1 + + # We expect accessing data elements to cause a revalidate, but only the first time + print(foo.a[0]) + assert validate_items.call_count == 2 + + print(foo.a) + assert foo.a == ["foo", "bar"] + assert validate_items.call_count == 2 + + +def test_strict_validation(mocker): + """ Validate that when specified as strict, validation still occurs on every change""" + schema = { + "title": "Example Schema", + "type": "object", + "properties": { + "a": {"type": "array", "items": {"type": "string"}, "default": []} + }, + } + + ob = pjs.ObjectBuilder(schema) + ns1 = ob.build_classes(strict=True) + validator = ns1.ExampleSchema.a.info["validator"]([]) + validate_items = mocker.patch.object( + validator, "validate_items", side_effect=validator.validate_items + ) + validate = mocker.patch.object( + validator, "validate", side_effect=validator.validate + ) + mocker.patch.dict( + ns1.ExampleSchema.a.info, + {"validator": mocker.MagicMock(return_value=validator)}, + ) + + foo = ns1.ExampleSchema() + # We expect validation to be called on creation + assert validate_items.call_count == 1 + + # We expect manipulation to revalidate immediately with strict + foo.a.append("foo") + foo.a.append("bar") + assert validate_items.call_count == 3 + + # We expect accessing data elements to not revalidate because strict would have revalidated on load + print(foo.a[0]) + print(foo.a) + assert foo.a == ["foo", "bar"]
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 4 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio", "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0
certifi==2021.5.30
coverage==6.2
execnet==1.9.0
importlib-metadata==4.8.3
inflection==0.2.0
iniconfig==1.1.1
jsonschema==2.6.0
Markdown==2.6.11
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
-e git+https://github.com/cwacek/python-jsonschema-objects.git@e5788a6eadb4bee5a28aadae351f218da0fbb4bf#egg=python_jsonschema_objects
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
name: python-jsonschema-objects
channels:
  - defaults
  - https://repo.anaconda.com/pkgs/main
  - https://repo.anaconda.com/pkgs/r
  - conda-forge
dependencies:
  - _libgcc_mutex=0.1=main
  - _openmp_mutex=5.1=1_gnu
  - ca-certificates=2025.2.25=h06a4308_0
  - certifi=2021.5.30=py36h06a4308_0
  - ld_impl_linux-64=2.40=h12ee557_0
  - libffi=3.3=he6710b0_2
  - libgcc-ng=11.2.0=h1234567_1
  - libgomp=11.2.0=h1234567_1
  - libstdcxx-ng=11.2.0=h1234567_1
  - ncurses=6.4=h6a678d5_0
  - openssl=1.1.1w=h7f8727e_0
  - pip=21.2.2=py36h06a4308_0
  - python=3.6.13=h12debd9_1
  - readline=8.2=h5eee18b_0
  - setuptools=58.0.4=py36h06a4308_0
  - sqlite=3.45.3=h5eee18b_0
  - tk=8.6.14=h39e8969_0
  - wheel=0.37.1=pyhd3eb1b0_0
  - xz=5.6.4=h5eee18b_1
  - zlib=1.2.13=h5eee18b_1
  - pip:
      - attrs==22.2.0
      - coverage==6.2
      - execnet==1.9.0
      - importlib-metadata==4.8.3
      - inflection==0.2.0
      - iniconfig==1.1.1
      - jsonschema==2.6.0
      - markdown==2.6.11
      - packaging==21.3
      - pluggy==1.0.0
      - py==1.11.0
      - pyparsing==3.1.4
      - pyrsistent==0.18.0
      - pytest==7.0.1
      - pytest-asyncio==0.16.0
      - pytest-cov==4.0.0
      - pytest-mock==3.6.1
      - pytest-xdist==3.0.2
      - six==1.17.0
      - tomli==1.2.3
      - typing-extensions==4.1.1
      - zipp==3.6.0
prefix: /opt/conda/envs/python-jsonschema-objects
[ "test/test_regression_143.py::test_limited_validation", "test/test_regression_143.py::test_strict_validation" ]
[]
[]
[]
MIT License
3,046
[ "python_jsonschema_objects/descriptors.py", "python_jsonschema_objects/wrapper_types.py", "development.txt", "tox.ini", "python_jsonschema_objects/classbuilder.py" ]
[ "python_jsonschema_objects/descriptors.py", "python_jsonschema_objects/wrapper_types.py", "development.txt", "tox.ini", "python_jsonschema_objects/classbuilder.py" ]
pydicom__pydicom-734
1feec586221a54db5d3d832711de14216dd361f0
2018-09-10 11:55:46
0721bdc0b5797f40984cc55b5408e273328dc528
pep8speaks: Hello @scaramallion! Thanks for submitting the PR. - In the file [`pydicom/config.py`](https://github.com/scaramallion/pydicom/blob/8550e610f5b50088fab25e82e2cea1e2f786b00c/pydicom/config.py), following are the PEP8 issues : > [Line 71:1](https://github.com/scaramallion/pydicom/blob/8550e610f5b50088fab25e82e2cea1e2f786b00c/pydicom/config.py#L71): [E402](https://duckduckgo.com/?q=pep8%20E402) module level import not at top of file > [Line 72:1](https://github.com/scaramallion/pydicom/blob/8550e610f5b50088fab25e82e2cea1e2f786b00c/pydicom/config.py#L72): [E402](https://duckduckgo.com/?q=pep8%20E402) module level import not at top of file > [Line 73:1](https://github.com/scaramallion/pydicom/blob/8550e610f5b50088fab25e82e2cea1e2f786b00c/pydicom/config.py#L73): [E402](https://duckduckgo.com/?q=pep8%20E402) module level import not at top of file > [Line 74:1](https://github.com/scaramallion/pydicom/blob/8550e610f5b50088fab25e82e2cea1e2f786b00c/pydicom/config.py#L74): [E402](https://duckduckgo.com/?q=pep8%20E402) module level import not at top of file > [Line 75:1](https://github.com/scaramallion/pydicom/blob/8550e610f5b50088fab25e82e2cea1e2f786b00c/pydicom/config.py#L75): [E402](https://duckduckgo.com/?q=pep8%20E402) module level import not at top of file > [Line 114:1](https://github.com/scaramallion/pydicom/blob/8550e610f5b50088fab25e82e2cea1e2f786b00c/pydicom/config.py#L114): [E303](https://duckduckgo.com/?q=pep8%20E303) too many blank lines (3) - In the file [`pydicom/dataset.py`](https://github.com/scaramallion/pydicom/blob/8550e610f5b50088fab25e82e2cea1e2f786b00c/pydicom/dataset.py), following are the PEP8 issues : > [Line 819:62](https://github.com/scaramallion/pydicom/blob/8550e610f5b50088fab25e82e2cea1e2f786b00c/pydicom/dataset.py#L819): [E127](https://duckduckgo.com/?q=pep8%20E127) continuation line over-indented for visual indent - In the file [`pydicom/pixel_data_handlers/gdcm_handler.py`](https://github.com/scaramallion/pydicom/blob/8550e610f5b50088fab25e82e2cea1e2f786b00c/pydicom/pixel_data_handlers/gdcm_handler.py), following are the PEP8 issues : > [Line 25:12](https://github.com/scaramallion/pydicom/blob/8550e610f5b50088fab25e82e2cea1e2f786b00c/pydicom/pixel_data_handlers/gdcm_handler.py#L25): [E203](https://duckduckgo.com/?q=pep8%20E203) whitespace before ':' > [Line 26:11](https://github.com/scaramallion/pydicom/blob/8550e610f5b50088fab25e82e2cea1e2f786b00c/pydicom/pixel_data_handlers/gdcm_handler.py#L26): [E203](https://duckduckgo.com/?q=pep8%20E203) whitespace before ':' - In the file [`pydicom/pixel_data_handlers/jpeg_ls_handler.py`](https://github.com/scaramallion/pydicom/blob/8550e610f5b50088fab25e82e2cea1e2f786b00c/pydicom/pixel_data_handlers/jpeg_ls_handler.py), following are the PEP8 issues : > [Line 26:12](https://github.com/scaramallion/pydicom/blob/8550e610f5b50088fab25e82e2cea1e2f786b00c/pydicom/pixel_data_handlers/jpeg_ls_handler.py#L26): [E203](https://duckduckgo.com/?q=pep8%20E203) whitespace before ':' > [Line 27:14](https://github.com/scaramallion/pydicom/blob/8550e610f5b50088fab25e82e2cea1e2f786b00c/pydicom/pixel_data_handlers/jpeg_ls_handler.py#L27): [E203](https://duckduckgo.com/?q=pep8%20E203) whitespace before ':' - In the file [`pydicom/pixel_data_handlers/numpy_handler.py`](https://github.com/scaramallion/pydicom/blob/8550e610f5b50088fab25e82e2cea1e2f786b00c/pydicom/pixel_data_handlers/numpy_handler.py), following are the PEP8 issues : > [Line 
55:12](https://github.com/scaramallion/pydicom/blob/8550e610f5b50088fab25e82e2cea1e2f786b00c/pydicom/pixel_data_handlers/numpy_handler.py#L55): [E203](https://duckduckgo.com/?q=pep8%20E203) whitespace before ':' - In the file [`pydicom/pixel_data_handlers/pillow_handler.py`](https://github.com/scaramallion/pydicom/blob/8550e610f5b50088fab25e82e2cea1e2f786b00c/pydicom/pixel_data_handlers/pillow_handler.py), following are the PEP8 issues : > [Line 51:12](https://github.com/scaramallion/pydicom/blob/8550e610f5b50088fab25e82e2cea1e2f786b00c/pydicom/pixel_data_handlers/pillow_handler.py#L51): [E203](https://duckduckgo.com/?q=pep8%20E203) whitespace before ':' > [Line 52:13](https://github.com/scaramallion/pydicom/blob/8550e610f5b50088fab25e82e2cea1e2f786b00c/pydicom/pixel_data_handlers/pillow_handler.py#L52): [E203](https://duckduckgo.com/?q=pep8%20E203) whitespace before ':' - In the file [`pydicom/pixel_data_handlers/rle_handler.py`](https://github.com/scaramallion/pydicom/blob/8550e610f5b50088fab25e82e2cea1e2f786b00c/pydicom/pixel_data_handlers/rle_handler.py), following are the PEP8 issues : > [Line 52:12](https://github.com/scaramallion/pydicom/blob/8550e610f5b50088fab25e82e2cea1e2f786b00c/pydicom/pixel_data_handlers/rle_handler.py#L52): [E203](https://duckduckgo.com/?q=pep8%20E203) whitespace before ':' - There are no PEP8 issues in the file [`pydicom/tests/test_JPEG_LS_transfer_syntax.py`](https://github.com/scaramallion/pydicom/blob/8550e610f5b50088fab25e82e2cea1e2f786b00c/pydicom/tests/test_JPEG_LS_transfer_syntax.py) ! - There are no PEP8 issues in the file [`pydicom/tests/test_dataset.py`](https://github.com/scaramallion/pydicom/blob/8550e610f5b50088fab25e82e2cea1e2f786b00c/pydicom/tests/test_dataset.py) ! - There are no PEP8 issues in the file [`pydicom/tests/test_gdcm_pixel_data.py`](https://github.com/scaramallion/pydicom/blob/8550e610f5b50088fab25e82e2cea1e2f786b00c/pydicom/tests/test_gdcm_pixel_data.py) ! - There are no PEP8 issues in the file [`pydicom/tests/test_jpeg_ls_pixel_data.py`](https://github.com/scaramallion/pydicom/blob/8550e610f5b50088fab25e82e2cea1e2f786b00c/pydicom/tests/test_jpeg_ls_pixel_data.py) ! - There are no PEP8 issues in the file [`pydicom/tests/test_numpy_pixel_data.py`](https://github.com/scaramallion/pydicom/blob/8550e610f5b50088fab25e82e2cea1e2f786b00c/pydicom/tests/test_numpy_pixel_data.py) ! - There are no PEP8 issues in the file [`pydicom/tests/test_pillow_pixel_data.py`](https://github.com/scaramallion/pydicom/blob/8550e610f5b50088fab25e82e2cea1e2f786b00c/pydicom/tests/test_pillow_pixel_data.py) ! - There are no PEP8 issues in the file [`pydicom/tests/test_rle_pixel_data.py`](https://github.com/scaramallion/pydicom/blob/8550e610f5b50088fab25e82e2cea1e2f786b00c/pydicom/tests/test_rle_pixel_data.py) ! scaramallion: Not sure why travis isn't showing up, its [here](https://travis-ci.org/pydicom/pydicom/builds/426992555) scaramallion: I've changed it so that `image_handlers` is still compatible with the current use. scaramallion: I can't think of a way to show a deprecation warning on using the module attribute. darcymason: ...and if there were a way it would probably be deep black magic and quite different for python 3 and 2. Not worth it. I just wanted to make sure my recollection was correct.
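On the deprecation question in the last two comments: on Python 3.7+ a module-level `__getattr__` (PEP 562) can emit a warning when the old `config.image_handlers` attribute is accessed, although it offers nothing for Python 2, which is consistent with the maintainers' conclusion. A hedged sketch, not something this PR implements:

```python
# Hypothetical config-module sketch (Python 3.7+ only, PEP 562).
# Accessing config.image_handlers would warn and fall back to the new list.
import warnings

pixel_data_handlers = []  # the new, preferred attribute

def __getattr__(name):
    if name == "image_handlers":
        warnings.warn(
            "config.image_handlers is deprecated; use config.pixel_data_handlers",
            DeprecationWarning,
            stacklevel=2,
        )
        return [hh for hh in pixel_data_handlers if hh.is_available()]
    raise AttributeError("module {!r} has no attribute {!r}".format(__name__, name))
```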
diff --git a/doc/whatsnew/v1.2.0.rst b/doc/whatsnew/v1.2.0.rst index 45c12390d..c130343b9 100644 --- a/doc/whatsnew/v1.2.0.rst +++ b/doc/whatsnew/v1.2.0.rst @@ -16,6 +16,15 @@ Changes * ``uid.JPEG2000Lossy`` deprecated and will be removed in v1.3. Use ``uid.JPEG2000`` instead. (:issue:`726`) * Equality and inequality operator overrides removed from ``UID``. +* ``config.image_handlers`` deprecated and will be removed in v1.3. use + ``config.pixel_data_handlers`` instead. There is also a change in behaviour + in that ``image_handlers`` previously used to only contain the pixel data + handlers that had their dependencies met. ``pixel_data_handlers`` contains + all handlers no matter whether or not their dependencies are met. To check + if a handler is available for use (it has its dependency met) then use the + handler's ``is_available`` method. +* ``DeferredDataElement`` class deprecated and will be removed in v1.3 + (:issue:`291`) Enhancements @@ -23,8 +32,6 @@ Enhancements * Added possibility to set byte strings as value for VRs that use only the default character set (:issue:`624`) -* ``DeferredDataElement`` class deprecated and will be removed in v1.3 - (:issue:`291`) * Functions for encapsulating frames added to encaps module (:pull_request:`696`) * Added ``Dataset.fix_meta_info()`` (:issue:`584`) * Add new function for bit packing ``pack_bits`` for use with BitsAllocated @@ -33,6 +40,11 @@ Enhancements * Added ``uid.JPEGLosslessP14`` for UID 1.2.840.10008.1.2.4.57 * Added ``uid.JPEG2000MultiComponentLossless`` for UID 1.2.840.10008.1.2.4.92 * Added ``uid.JPEG2000MultiComponent`` for UID 1.2.840.10008.1.2.4.93 +* Added full support for Planar Configuration (:issue:`713`) +* Added support for single frame pixel data where BitsAllocated >8 and + SamplesPerPixel > 1 (:issue:`713`) +* Small improvement in RLE decoding speed (~10%). +* Add support for non-conformant RLE segment ordering (:pull_request:`729`) Fixes @@ -45,3 +57,6 @@ Fixes * Improve performance of bit unpacking for non-PyPy2 interpreters (:pull_request:`715`) * First character set no longer removed (:issue:`707`) +* Fixed RLE decoded data having the wrong byte order (:pull_request:`729`) +* Fixed RLE decoded data having the wrong planar configuration + (:pull_request:`729`) diff --git a/pydicom/config.py b/pydicom/config.py index 3466c1b41..54449db3c 100644 --- a/pydicom/config.py +++ b/pydicom/config.py @@ -68,20 +68,32 @@ handler.setFormatter(formatter) logger.addHandler(handler) -image_handlers = [] -"""Image handlers for converting pixel data. -This is an ordered list that the dataset._get_pixel_array() +import pydicom.pixel_data_handlers.numpy_handler as np_handler # noqa +import pydicom.pixel_data_handlers.rle_handler as rle_handler # noqa +import pydicom.pixel_data_handlers.pillow_handler as pillow_handler # noqa +import pydicom.pixel_data_handlers.jpeg_ls_handler as jpegls_handler # noqa +import pydicom.pixel_data_handlers.gdcm_handler as gdcm_handler # noqa + +pixel_data_handlers = [ + np_handler, + rle_handler, + gdcm_handler, + pillow_handler, + jpegls_handler, +] +image_handlers = [hh for hh in pixel_data_handlers if hh.is_available()] +"""Handlers for converting (7fe0,0010) Pixel Data. +This is an ordered list that the dataset.convert_pixel_data() method will try to extract a correctly sized numpy array from the -PixelData attribute. -If a handler lacks required dependencies or can not otherwise be loaded, -it shall throw an ImportError. +PixelData element. 
+ Handers shall have two methods: -supports_transfer_syntax(dicom_dataset) +def supports_transfer_syntax(ds) This returns True if the handler might support the transfer syntax indicated in the dicom_dataset -def get_pixeldata(dicom_dataset): +def get_pixeldata(ds): This shall either throw an exception or return a correctly sized numpy array derived from the PixelData. Reshaping the array to the correct dimensions is handled outside the image handler @@ -98,42 +110,6 @@ If no one throws an exception, but they all refuse to support the transfer syntax, then this fact is announced in a NotImplementedError exception. """ -have_numpy = True -try: - import pydicom.pixel_data_handlers.numpy_handler as numpy_handler - image_handlers.append(numpy_handler) - - import pydicom.pixel_data_handlers.rle_handler as rle_handler - image_handlers.append(rle_handler) - -except ImportError as e: - logger.debug("Could not import numpy") - have_numpy = False - -have_pillow = True -try: - import pydicom.pixel_data_handlers.pillow_handler as pillow_handler - image_handlers.append(pillow_handler) -except ImportError as e: - logger.debug("Could not import pillow") - have_pillow = False - -have_jpeg_ls = True -try: - import pydicom.pixel_data_handlers.jpeg_ls_handler as jpeg_ls_handler - image_handlers.append(jpeg_ls_handler) -except ImportError as e: - logger.debug("Could not import jpeg_ls") - have_jpeg_ls = False - -have_gdcm = True -try: - import pydicom.pixel_data_handlers.gdcm_handler as gdcm_handler - image_handlers.append(gdcm_handler) -except ImportError as e: - logger.debug("Could not import gdcm") - have_gdcm = False - def debug(debug_on=True): """Turn debugging of DICOM file reading and writing on or off. diff --git a/pydicom/dataset.py b/pydicom/dataset.py index b37a5e399..a2e762a02 100644 --- a/pydicom/dataset.py +++ b/pydicom/dataset.py @@ -40,6 +40,11 @@ from pydicom.tag import Tag, BaseTag, tag_in_exception from pydicom.uid import (ExplicitVRLittleEndian, ImplicitVRLittleEndian, ExplicitVRBigEndian, PYDICOM_IMPLEMENTATION_UID) +if compat.in_py2: + from pkgutil import find_loader as have_package +else: + from importlib.util import find_spec as have_package + have_numpy = True try: import numpy @@ -759,24 +764,52 @@ class Dataset(dict): if already_have: return - # Find all pixel data handlers that support the transfer syntax - suitable_handlers = [hh for hh in pydicom.config.image_handlers - if hh and hh.supports_transfer_syntax(self)] + # Find all possible handlers that support the transfer syntax + transfer_syntax = self.file_meta.TransferSyntaxUID + possible_handlers = [hh for hh in pydicom.config.pixel_data_handlers + if hh.supports_transfer_syntax(transfer_syntax)] - # No suitable handlers are available - if not suitable_handlers: + # No handlers support the transfer syntax + if not possible_handlers: raise NotImplementedError( "Unable to decode pixel data with a transfer syntax UID of " - "'{0}' ({1}) as there are no suitable pixel data handlers " - "available. Please see the list of supported Transfer " - "Syntaxes in the pydicom documentation for information on " - "which additional packages might be required" + "'{0}' ({1}) as there are no pixel data handlers " + "available that support it. 
Please see the pydicom " + "documentation for information on supported transfer syntaxes " .format(self.file_meta.TransferSyntaxUID, self.file_meta.TransferSyntaxUID.name) ) + # Handlers that both support the transfer syntax and have their + # dependencies met + available_handlers = [hh for hh in possible_handlers if + hh.is_available()] + + # There are handlers that support the transfer syntax but none of them + # can be used as missing dependencies + if not available_handlers: + # For each of the possible handlers we want to find which + # dependencies are missing + msg = ( + "The following handlers are available to decode the pixel " + "data however they are missing required dependencies: " + ) + pkg_msg = [] + for hh in possible_handlers: + hh_deps = hh.DEPENDENCIES + # Missing packages + missing = [dd for dd in hh_deps if have_package(dd) is None] + # Package names + names = [hh_deps[name][1] for name in missing] + pkg_msg.append( + "{} (req. {})" + .format(hh.HANDLER_NAME, ', '.join(names)) + ) + + raise RuntimeError(msg + ', '.join(pkg_msg)) + last_exception = None - for handler in suitable_handlers: + for handler in available_handlers: try: # Use the handler to get a 1D numpy array of the pixel data arr = handler.get_pixeldata(self) @@ -786,8 +819,8 @@ class Dataset(dict): # convert the color space from YCbCr to RGB if handler.needs_to_convert_to_RGB(self): self._pixel_array = convert_color_space(self._pixel_array, - 'YBR_FULL', - 'RGB') + 'YBR_FULL', + 'RGB') self._pixel_id = id(self.PixelData) @@ -808,7 +841,7 @@ class Dataset(dict): "Please see the list of supported Transfer Syntaxes in the " "pydicom documentation for alternative packages that might " "be able to decode the data" - .format(", ".join([str(hh) for hh in suitable_handlers])) + .format(", ".join([str(hh) for hh in available_handlers])) ) raise last_exception diff --git a/pydicom/pixel_data_handlers/gdcm_handler.py b/pydicom/pixel_data_handlers/gdcm_handler.py index b662005c5..fc747716c 100644 --- a/pydicom/pixel_data_handlers/gdcm_handler.py +++ b/pydicom/pixel_data_handlers/gdcm_handler.py @@ -2,28 +2,51 @@ """Use the gdcm python package to decode pixel transfer syntaxes.""" import sys -from pydicom import compat -import pydicom -have_numpy = True + try: import numpy + HAVE_NP = True except ImportError: - have_numpy = False - raise + HAVE_NP = False -have_gdcm = True try: import gdcm + HAVE_GDCM = True except ImportError: - have_gdcm = False - raise -can_use_gdcm = have_gdcm and have_numpy + HAVE_GDCM = False + +import pydicom +from pydicom import compat +HANDLER_NAME = 'GDCM' + +DEPENDENCIES = { + 'numpy': ('http://www.numpy.org/', 'NumPy'), + 'gdcm': ('http://gdcm.sourceforge.net/wiki/index.php/Main_Page', 'GDCM'), +} + +SUPPORTED_TRANSFER_SYNTAXES = [ + pydicom.uid.JPEGBaseline, + pydicom.uid.JPEGExtended, + pydicom.uid.JPEGLosslessP14, + pydicom.uid.JPEGLossless, + pydicom.uid.JPEGLSLossless, + pydicom.uid.JPEGLSLossy, + pydicom.uid.JPEG2000Lossless, + pydicom.uid.JPEG2000, +] + should_convert_these_syntaxes_to_RGB = [ pydicom.uid.JPEGBaseline, ] +def is_available(): + """Return True if the handler has its dependencies met.""" + return HAVE_NP and HAVE_GDCM + + + def needs_to_convert_to_RGB(dicom_dataset): should_convert = (dicom_dataset.file_meta.TransferSyntaxUID in should_convert_these_syntaxes_to_RGB) @@ -38,7 +61,7 @@ def should_change_PhotometricInterpretation_to_RGB(dicom_dataset): return False -def supports_transfer_syntax(dicom_dataset): +def supports_transfer_syntax(transfer_syntax): """ Returns 
------- @@ -48,7 +71,7 @@ def supports_transfer_syntax(dicom_dataset): False to prevent any attempt to try to use this handler to decode the given transfer syntax """ - return True + return transfer_syntax in SUPPORTED_TRANSFER_SYNTAXES def get_pixeldata(dicom_dataset): @@ -79,7 +102,7 @@ def get_pixeldata(dicom_dataset): # FIXME this should just use dicom_dataset.PixelData # instead of dicom_dataset.filename # but it is unclear how this should be achieved using GDCM - if not can_use_gdcm: + if not HAVE_GDCM: msg = ("GDCM requires both the gdcm package and numpy " "and one or more could not be imported") raise ImportError(msg) diff --git a/pydicom/pixel_data_handlers/jpeg_ls_handler.py b/pydicom/pixel_data_handlers/jpeg_ls_handler.py index 33c512aed..29c919dcc 100644 --- a/pydicom/pixel_data_handlers/jpeg_ls_handler.py +++ b/pydicom/pixel_data_handlers/jpeg_ls_handler.py @@ -1,33 +1,43 @@ # Copyright 2008-2018 pydicom authors. See LICENSE file for details. """ -Use the jpeg_ls (CharPyLS) python package -to decode pixel transfer syntaxes. +Use the jpeg_ls (CharPyLS) python package to decode pixel transfer syntaxes. """ -import pydicom -import pydicom.uid -from pydicom.pixel_data_handlers.util import dtype_corrected_for_endianness - -have_numpy = True try: import numpy + HAVE_NP = True except ImportError: - have_numpy = False - raise + HAVE_NP = False -have_jpeg_ls = True try: import jpeg_ls + HAVE_JPEGLS = True except ImportError: - have_jpeg_ls = False - raise + HAVE_JPEGLS = False -JPEGLSSupportedTransferSyntaxes = [ +import pydicom +from pydicom.pixel_data_handlers.util import dtype_corrected_for_endianness +import pydicom.uid + + +HANDLER_NAME = 'JPEG-LS' + +DEPENDENCIES = { + 'numpy': ('http://www.numpy.org/', 'NumPy'), + 'jpeg_ls': ('https://github.com/Who8MyLunch/CharPyLS', 'CharPyLS'), +} + +SUPPORTED_TRANSFER_SYNTAXES = [ pydicom.uid.JPEGLSLossless, pydicom.uid.JPEGLSLossy, ] +def is_available(): + """Return True if the handler has its dependencies met.""" + return HAVE_NP and HAVE_JPEGLS + + def needs_to_convert_to_RGB(dicom_dataset): return False @@ -37,7 +47,7 @@ def should_change_PhotometricInterpretation_to_RGB(dicom_dataset): return False -def supports_transfer_syntax(dicom_dataset): +def supports_transfer_syntax(transfer_syntax): """ Returns ------- @@ -47,8 +57,7 @@ def supports_transfer_syntax(dicom_dataset): False to prevent any attempt to try to use this handler to decode the given transfer syntax """ - return (dicom_dataset.file_meta.TransferSyntaxUID - in JPEGLSSupportedTransferSyntaxes) + return transfer_syntax in SUPPORTED_TRANSFER_SYNTAXES def get_pixeldata(dicom_dataset): @@ -74,13 +83,13 @@ def get_pixeldata(dicom_dataset): if the pixel data type is unsupported """ if (dicom_dataset.file_meta.TransferSyntaxUID - not in JPEGLSSupportedTransferSyntaxes): + not in SUPPORTED_TRANSFER_SYNTAXES): msg = ("The jpeg_ls does not support " "this transfer syntax {0}.".format( dicom_dataset.file_meta.TransferSyntaxUID.name)) raise NotImplementedError(msg) - if not have_jpeg_ls: + if not HAVE_JPEGLS: msg = ("The jpeg_ls package is required to use pixel_array " "for this transfer syntax {0}, and jpeg_ls could not " "be imported.".format( diff --git a/pydicom/pixel_data_handlers/numpy_handler.py b/pydicom/pixel_data_handlers/numpy_handler.py index ecfe1970d..10508aa5f 100644 --- a/pydicom/pixel_data_handlers/numpy_handler.py +++ b/pydicom/pixel_data_handlers/numpy_handler.py @@ -39,29 +39,45 @@ elements have values given in the table below. 
from platform import python_implementation from sys import byteorder -import numpy as np +try: + import numpy as np + HAVE_NP = True +except ImportError: + HAVE_NP = False from pydicom.compat import in_py2 as IN_PYTHON2 from pydicom.pixel_data_handlers.util import pixel_dtype -from pydicom.uid import ( - ExplicitVRLittleEndian, - ImplicitVRLittleEndian, - DeflatedExplicitVRLittleEndian, - ExplicitVRBigEndian, -) +import pydicom.uid +HANDLER_NAME = 'Numpy' + +DEPENDENCIES = { + 'numpy': ('http://www.numpy.org/', 'NumPy'), +} SUPPORTED_TRANSFER_SYNTAXES = [ - ExplicitVRLittleEndian, - ImplicitVRLittleEndian, - DeflatedExplicitVRLittleEndian, - ExplicitVRBigEndian, + pydicom.uid.ExplicitVRLittleEndian, + pydicom.uid.ImplicitVRLittleEndian, + pydicom.uid.DeflatedExplicitVRLittleEndian, + pydicom.uid.ExplicitVRBigEndian, ] -def supports_transfer_syntax(ds): - """Return True if the handler supports the transfer syntax used in `ds`.""" - return ds.file_meta.TransferSyntaxUID in SUPPORTED_TRANSFER_SYNTAXES +def is_available(): + """Return True if the handler has its dependencies met.""" + return HAVE_NP + + +def supports_transfer_syntax(transfer_syntax): + """Return True if the handler supports the `transfer_syntax`. + + Parameters + ---------- + transfer_syntax : UID + The Transfer Syntax UID of the Pixel Data that is to be used with + the handler. + """ + return transfer_syntax in SUPPORTED_TRANSFER_SYNTAXES def needs_to_convert_to_RGB(ds): diff --git a/pydicom/pixel_data_handlers/pillow_handler.py b/pydicom/pixel_data_handlers/pillow_handler.py index 78f716624..ba78c1b71 100644 --- a/pydicom/pixel_data_handlers/pillow_handler.py +++ b/pydicom/pixel_data_handlers/pillow_handler.py @@ -2,25 +2,34 @@ """Use the pillow python package to decode pixel transfer syntaxes.""" import io -import pydicom.encaps -import pydicom.uid import logging -from pydicom.pixel_data_handlers.util import dtype_corrected_for_endianness - -have_numpy = True -logger = logging.getLogger('pydicom') try: import numpy + HAVE_NP = True +except ImportError: + HAVE_NP = False + +try: + from PIL import Image + HAVE_PIL = True except ImportError: - have_numpy = False - raise + HAVE_PIL = False -have_pillow = True try: - from PIL import Image as PILImg + from PIL import _imaging + HAVE_JPEG = getattr(_imaging, "jpeg_decoder", False) + HAVE_JPEG2K = getattr(_imaging, "jpeg2k_decoder", False) except ImportError: - have_pillow = False + HAVE_JPEG = False + HAVE_JPEG2K = False + +import pydicom.encaps +from pydicom.pixel_data_handlers.util import dtype_corrected_for_endianness +import pydicom.uid + + +logger = logging.getLogger('pydicom') PillowSupportedTransferSyntaxes = [ pydicom.uid.JPEGBaseline, @@ -36,17 +45,20 @@ PillowJPEGTransferSyntaxes = [ pydicom.uid.JPEGExtended, ] -have_pillow_jpeg_plugin = False -have_pillow_jpeg2000_plugin = False -try: - from PIL import _imaging as pillow_core - have_pillow_jpeg_plugin = hasattr(pillow_core, "jpeg_decoder") - have_pillow_jpeg2000_plugin = hasattr(pillow_core, "jpeg2k_decoder") -except Exception: - pass +HANDLER_NAME = 'Pillow' + +DEPENDENCIES = { + 'numpy': ('http://www.numpy.org/', 'NumPy'), + 'PIL': ('https://python-pillow.org/', 'Pillow'), +} + +def is_available(): + """Return True if the handler has its dependencies met.""" + return HAVE_NP and HAVE_PIL -def supports_transfer_syntax(dicom_dataset): + +def supports_transfer_syntax(transfer_syntax): """ Returns ------- @@ -56,15 +68,7 @@ def supports_transfer_syntax(dicom_dataset): False to prevent any attempt to try to use this 
handler to decode the given transfer syntax """ - if (have_pillow_jpeg_plugin and - (dicom_dataset.file_meta.TransferSyntaxUID in - PillowJPEGTransferSyntaxes)): - return True - if (have_pillow_jpeg2000_plugin and - (dicom_dataset.file_meta.TransferSyntaxUID in - PillowJPEG2000TransferSyntaxes)): - return True - return False + return transfer_syntax in PillowSupportedTransferSyntaxes def needs_to_convert_to_RGB(dicom_dataset): @@ -96,32 +100,30 @@ def get_pixeldata(dicom_dataset): if the pixel data type is unsupported """ logger.debug("Trying to use Pillow to read pixel array " - "(has pillow = %s)", have_pillow) - if not have_pillow: + "(has pillow = %s)", HAVE_PIL) + transfer_syntax = dicom_dataset.file_meta.TransferSyntaxUID + if not HAVE_PIL: msg = ("The pillow package is required to use pixel_array for " "this transfer syntax {0}, and pillow could not be " - "imported.".format( - dicom_dataset.file_meta.TransferSyntaxUID.name)) + "imported.".format(transfer_syntax.name)) raise ImportError(msg) - if (not have_pillow_jpeg_plugin and - dicom_dataset.file_meta.TransferSyntaxUID in - PillowJPEGTransferSyntaxes): + + if not HAVE_JPEG and transfer_syntax in PillowJPEGTransferSyntaxes: msg = ("this transfer syntax {0}, can not be read because " - "Pillow lacks the jpeg decoder plugin".format( - dicom_dataset.file_meta.TransferSyntaxUID.name)) + "Pillow lacks the jpeg decoder plugin" + .format(transfer_syntax.name)) raise NotImplementedError(msg) - if (not have_pillow_jpeg2000_plugin and - dicom_dataset.file_meta.TransferSyntaxUID in - PillowJPEG2000TransferSyntaxes): + + if not HAVE_JPEG2K and transfer_syntax in PillowJPEG2000TransferSyntaxes: msg = ("this transfer syntax {0}, can not be read because " - "Pillow lacks the jpeg 2000 decoder plugin".format( - dicom_dataset.file_meta.TransferSyntaxUID.name)) + "Pillow lacks the jpeg 2000 decoder plugin" + .format(transfer_syntax.name)) raise NotImplementedError(msg) - if (dicom_dataset.file_meta.TransferSyntaxUID not in - PillowSupportedTransferSyntaxes): + + if transfer_syntax not in PillowSupportedTransferSyntaxes: msg = ("this transfer syntax {0}, can not be read because " - "Pillow does not support this syntax".format( - dicom_dataset.file_meta.TransferSyntaxUID.name)) + "Pillow does not support this syntax" + .format(transfer_syntax.name)) raise NotImplementedError(msg) # Make NumPy format code, e.g. 
"uint16", "int32" etc @@ -149,16 +151,14 @@ def get_pixeldata(dicom_dataset): dicom_dataset.is_little_endian, numpy_format) # decompress here - if (dicom_dataset.file_meta.TransferSyntaxUID in - PillowJPEGTransferSyntaxes): + if transfer_syntax in PillowJPEGTransferSyntaxes: logger.debug("This is a JPEG lossy format") if dicom_dataset.BitsAllocated > 8: raise NotImplementedError("JPEG Lossy only supported if " "Bits Allocated = 8") generic_jpeg_file_header = b'' frame_start_from = 0 - elif (dicom_dataset.file_meta.TransferSyntaxUID in - PillowJPEG2000TransferSyntaxes): + elif transfer_syntax in PillowJPEG2000TransferSyntaxes: logger.debug("This is a JPEG 2000 format") generic_jpeg_file_header = b'' # generic_jpeg_file_header = b'\x00\x00\x00\x0C\x6A' @@ -182,7 +182,7 @@ def get_pixeldata(dicom_dataset): frame[frame_start_from:] fio = io.BytesIO(data) try: - decompressed_image = PILImg.open(fio) + decompressed_image = Image.open(fio) except IOError as e: raise NotImplementedError(e.strerror) UncompressedPixelData.extend(decompressed_image.tobytes()) @@ -194,7 +194,7 @@ def get_pixeldata(dicom_dataset): UncompressedPixelData[frame_start_from:] try: fio = io.BytesIO(UncompressedPixelData) - decompressed_image = PILImg.open(fio) + decompressed_image = Image.open(fio) except IOError as e: raise NotImplementedError(e.strerror) UncompressedPixelData = decompressed_image.tobytes() @@ -205,7 +205,7 @@ def get_pixeldata(dicom_dataset): len(UncompressedPixelData)) pixel_array = numpy.copy( numpy.frombuffer(UncompressedPixelData, numpy_format)) - if (dicom_dataset.file_meta.TransferSyntaxUID in + if (transfer_syntax in PillowJPEG2000TransferSyntaxes and dicom_dataset.BitsStored == 16): # WHY IS THIS EVEN NECESSARY?? diff --git a/pydicom/pixel_data_handlers/rle_handler.py b/pydicom/pixel_data_handlers/rle_handler.py index d1617f52f..7cd81fc49 100644 --- a/pydicom/pixel_data_handlers/rle_handler.py +++ b/pydicom/pixel_data_handlers/rle_handler.py @@ -35,21 +35,36 @@ elements have values given in the table below. from struct import unpack -import numpy as np +try: + import numpy as np + HAVE_RLE = True +except ImportError: + HAVE_RLE = False from pydicom.encaps import decode_data_sequence, defragment_data from pydicom.pixel_data_handlers.util import pixel_dtype -from pydicom.uid import RLELossless +import pydicom.uid +HANDLER_NAME = 'RLE Lossless' + +DEPENDENCIES = { + 'numpy': ('http://www.numpy.org/', 'NumPy'), +} + SUPPORTED_TRANSFER_SYNTAXES = [ - RLELossless + pydicom.uid.RLELossless ] -def supports_transfer_syntax(ds): - """Return True if the handler supports the transfer syntax used in `ds`.""" - return ds.file_meta.TransferSyntaxUID in SUPPORTED_TRANSFER_SYNTAXES +def is_available(): + """Return True if the handler has its dependencies met.""" + return HAVE_RLE + + +def supports_transfer_syntax(transfer_syntax): + """Return True if the handler supports the `transfer_syntax`.""" + return transfer_syntax in SUPPORTED_TRANSFER_SYNTAXES def needs_to_convert_to_RGB(ds):
More helpful message if numpy not installed

#### Description

When numpy is not installed, the error raised while accessing pixel data should say that numpy is needed rather than that no image handler could decode the transfer syntax.

#### Steps/Code to Reproduce

Install pydicom in a venv with no numpy. Try to read data\test_files\MR_small.dcm. The error message states that no image handler could decode the transfer syntax, when all that is needed is numpy. I think the error message could be more direct in this case.

#### Expected Results

A clear error message that numpy is needed.

#### Actual Results

Traceback (most recent call last):
  File "downsize.py", line 11, in <module>
    data = ds.pixel_array
  File "C:\env\liverc1\lib\site-packages\pydicom\dataset.py", line 800, in pixel_array
    return self._get_pixel_array()
  File "C:\env\liverc1\lib\site-packages\pydicom\dataset.py", line 692, in _get_pixel_array
    self.convert_pixel_data()
  File "C:\env\liverc1\lib\site-packages\pydicom\dataset.py", line 743, in convert_pixel_data
    raise NotImplementedError(msg)
NotImplementedError: No available image handler could decode this transfer syntax Explicit VR Little Endian

#### Versions

Python 3.6.4 (v3.6.4:d48eceb, Dec 19 2017, 06:54:40) [MSC v.1900 64 bit (AMD64)]
pydicom 1.0.1rc1
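The patch above distinguishes "no handler supports this transfer syntax" from "a handler exists but its dependencies are missing" and names the missing packages. A simplified sketch of that dependency check follows; the handler dictionaries and function name are illustrative, not pydicom's real API:

```python
# Illustrative sketch: name the missing packages instead of a generic
# "no image handler could decode this transfer syntax" error.
from importlib.util import find_spec

# Each handler advertises the modules it imports and a display name for each.
HANDLERS = [
    {"name": "Numpy", "requires": {"numpy": "NumPy"}},
    {"name": "Pillow", "requires": {"numpy": "NumPy", "PIL": "Pillow"}},
]

def missing_dependency_message(handlers):
    """Return an error message listing missing packages, or None if all are met."""
    parts = []
    for handler in handlers:
        missing = [display for module, display in handler["requires"].items()
                   if find_spec(module) is None]
        if missing:
            parts.append("{} (req. {})".format(handler["name"], ", ".join(missing)))
    if parts:
        return ("The following handlers could decode the pixel data but are "
                "missing required dependencies: " + ", ".join(parts))
    return None

# Without numpy installed, this prints a message that names NumPy directly.
print(missing_dependency_message(HANDLERS))
```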
pydicom/pydicom
diff --git a/pydicom/tests/test_JPEG_LS_transfer_syntax.py b/pydicom/tests/test_JPEG_LS_transfer_syntax.py index d0f7bed47..b9c821043 100644 --- a/pydicom/tests/test_JPEG_LS_transfer_syntax.py +++ b/pydicom/tests/test_JPEG_LS_transfer_syntax.py @@ -16,32 +16,39 @@ numpy_missing_message = ("numpy is not available " "in this test environment") jpeg_ls_missing_message = ("jpeg_ls is not available " "in this test environment") -pillow_handler = None -numpy_handler = None -gdcm_handler = None -jpeg_ls_handler = None -have_pillow_jpeg_plugin = False -have_pillow_jpeg2000_plugin = False + try: import pydicom.pixel_data_handlers.numpy_handler as numpy_handler + HAVE_NP = numpy_handler.HAVE_NP except ImportError: - have_numpy_handler = False + HAVE_NP = False + numpy_handler = None + try: import pydicom.pixel_data_handlers.pillow_handler as pillow_handler - have_pillow_jpeg_plugin = pillow_handler.have_pillow_jpeg_plugin - have_pillow_jpeg2000_plugin = \ - pillow_handler.have_pillow_jpeg2000_plugin + HAVE_PIL = pillow_handler.HAVE_PIL + HAVE_JPEG = pillow_handler.HAVE_JPEG + HAVE_JPEG2K = pillow_handler.HAVE_JPEG2K except ImportError: - have_pillow_handler = False + HAVE_PIL = False + pillow_handler = None + HAVE_JPEG = False + HAVE_JPEG2K = False + try: import pydicom.pixel_data_handlers.jpeg_ls_handler as jpeg_ls_handler + HAVE_JPEGLS = jpeg_ls_handler.HAVE_JPEGLS except ImportError: jpeg_ls_handler = None + HAVE_JPEGLS = False + try: import pydicom.pixel_data_handlers.gdcm_handler as gdcm_handler + HAVE_GDCM = gdcm_handler.HAVE_GDCM except ImportError: gdcm_handler = None + HAVE_GDCM = False mr_name = get_testdata_files("MR_small.dcm")[0] jpeg_ls_lossless_name = get_testdata_files("MR_small_jpeg_ls_lossless.dcm")[0] @@ -58,89 +65,85 @@ class Test_JPEG_LS_Lossless_transfer_syntax(): self.mr_small = dcmread(mr_name) self.emri_jpeg_ls_lossless = dcmread(emri_jpeg_ls_lossless) self.emri_small = dcmread(emri_name) - self.original_handlers = pydicom.config.image_handlers + self.original_handlers = pydicom.config.pixel_data_handlers def teardown_method(self, method): - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers - @pytest.mark.skipif(numpy_handler is None, reason=numpy_missing_message) + @pytest.mark.skipif(not HAVE_NP, reason=numpy_missing_message) def test_read_mr_with_numpy(self): - pydicom.config.image_handlers = [numpy_handler] + pydicom.config.pixel_data_handlers = [numpy_handler] msg = ( r"Unable to decode pixel data with a transfer syntax UID of " r"'1.2.840.10008.1.2.4.80' \(JPEG-LS Lossless Image Compression\) " - r"as there are no suitable pixel data handlers available." + r"as there are no pixel data handlers available." ) with pytest.raises(NotImplementedError, match=msg): self.jpeg_ls_lossless.pixel_array - @pytest.mark.skipif(numpy_handler is None, reason=numpy_missing_message) + @pytest.mark.skipif(not HAVE_NP, reason=numpy_missing_message) def test_read_emri_with_numpy(self): - pydicom.config.image_handlers = [numpy_handler] + pydicom.config.pixel_data_handlers = [numpy_handler] msg = ( r"Unable to decode pixel data with a transfer syntax UID of " r"'1.2.840.10008.1.2.4.80' \(JPEG-LS Lossless Image Compression\) " - r"as there are no suitable pixel data handlers available." + r"as there are no pixel data handlers available." 
) with pytest.raises(NotImplementedError, match=msg): self.emri_jpeg_ls_lossless.pixel_array - @pytest.mark.skipif(pillow_handler is None, reason=pillow_missing_message) + @pytest.mark.skipif(not HAVE_PIL, reason=pillow_missing_message) def test_read_mr_with_pillow(self): - pydicom.config.image_handlers = [pillow_handler] + pydicom.config.pixel_data_handlers = [pillow_handler] msg = ( r"Unable to decode pixel data with a transfer syntax UID of " r"'1.2.840.10008.1.2.4.80' \(JPEG-LS Lossless Image Compression\) " - r"as there are no suitable pixel data handlers available." + r"as there are no pixel data handlers available." ) with pytest.raises(NotImplementedError, match=msg): self.jpeg_ls_lossless.pixel_array - @pytest.mark.skipif(pillow_handler is None, reason=pillow_missing_message) + @pytest.mark.skipif(not HAVE_PIL, reason=pillow_missing_message) def test_read_emri_with_pillow(self): - pydicom.config.image_handlers = [pillow_handler] + pydicom.config.pixel_data_handlers = [pillow_handler] msg = ( r"Unable to decode pixel data with a transfer syntax UID of " r"'1.2.840.10008.1.2.4.80' \(JPEG-LS Lossless Image Compression\) " - r"as there are no suitable pixel data handlers available." + r"as there are no pixel data handlers available." ) with pytest.raises(NotImplementedError, match=msg): self.emri_jpeg_ls_lossless.pixel_array - @pytest.mark.skipif(gdcm_handler is None, reason=gdcm_missing_message) + @pytest.mark.skipif(not HAVE_GDCM, reason=gdcm_missing_message) def test_read_mr_with_gdcm(self): - pydicom.config.image_handlers = [numpy_handler, gdcm_handler] + pydicom.config.pixel_data_handlers = [numpy_handler, gdcm_handler] a = self.jpeg_ls_lossless.pixel_array b = self.mr_small.pixel_array assert a.mean() == b.mean(), \ "using GDCM Decoded pixel data is not " \ "all {0} (mean == {1})".format(b.mean(), a.mean()) - @pytest.mark.skipif(gdcm_handler is None, reason=gdcm_missing_message) + @pytest.mark.skipif(not HAVE_GDCM, reason=gdcm_missing_message) def test_read_emri_with_gdcm(self): - pydicom.config.image_handlers = [numpy_handler, gdcm_handler] + pydicom.config.pixel_data_handlers = [numpy_handler, gdcm_handler] a = self.emri_jpeg_ls_lossless.pixel_array b = self.emri_small.pixel_array assert a.mean() == b.mean(), \ "using GDCM Decoded pixel data is not " \ "all {0} (mean == {1})".format(b.mean(), a.mean()) - @pytest.mark.skipif( - jpeg_ls_handler is None, - reason=jpeg_ls_missing_message) + @pytest.mark.skipif(not HAVE_JPEGLS, reason=jpeg_ls_missing_message) def test_read_mr_with_jpeg_ls(self): - pydicom.config.image_handlers = [numpy_handler, jpeg_ls_handler] + pydicom.config.pixel_data_handlers = [numpy_handler, jpeg_ls_handler] a = self.jpeg_ls_lossless.pixel_array b = self.mr_small.pixel_array assert a.mean() == b.mean(), \ "using jpeg_ls decoded pixel data is not " \ "all {0} (mean == {1})".format(b.mean(), a.mean()) - @pytest.mark.skipif( - jpeg_ls_handler is None, - reason=jpeg_ls_missing_message) + @pytest.mark.skipif(not HAVE_JPEGLS, reason=jpeg_ls_missing_message) def test_read_emri_with_jpeg_ls(self): - pydicom.config.image_handlers = [numpy_handler, jpeg_ls_handler] + pydicom.config.pixel_data_handlers = [numpy_handler, jpeg_ls_handler] a = self.emri_jpeg_ls_lossless.pixel_array b = self.emri_small.pixel_array assert a.mean() == b.mean(), \ @@ -148,21 +151,21 @@ class Test_JPEG_LS_Lossless_transfer_syntax(): "(mean == {1})".format(b.mean(), a.mean()) def test_read_mr_without_any_handler(self): - pydicom.config.image_handlers = [] + pydicom.config.pixel_data_handlers = 
[] msg = ( r"Unable to decode pixel data with a transfer syntax UID of " r"'1.2.840.10008.1.2.4.80' \(JPEG-LS Lossless Image Compression\) " - r"as there are no suitable pixel data handlers available." + r"as there are no pixel data handlers available." ) with pytest.raises(NotImplementedError, match=msg): self.jpeg_ls_lossless.pixel_array def test_read_emri_without_any_handler(self): - pydicom.config.image_handlers = [] + pydicom.config.pixel_data_handlers = [] msg = ( r"Unable to decode pixel data with a transfer syntax UID of " r"'1.2.840.10008.1.2.4.80' \(JPEG-LS Lossless Image Compression\) " - r"as there are no suitable pixel data handlers available." + r"as there are no pixel data handlers available." ) with pytest.raises(NotImplementedError, match=msg): self.emri_jpeg_ls_lossless.pixel_array diff --git a/pydicom/tests/test_dataset.py b/pydicom/tests/test_dataset.py index ae2c0ea4c..b2e200bf3 100644 --- a/pydicom/tests/test_dataset.py +++ b/pydicom/tests/test_dataset.py @@ -1018,16 +1018,29 @@ class DatasetTests(unittest.TestCase): """Test that we try to get new pixel data if the id has changed.""" fpath = get_testdata_files("CT_small.dcm")[0] ds = dcmread(fpath) + ds.file_meta.TransferSyntaxUID = '1.2.3.4' ds._pixel_id = 1234 assert ds._pixel_id != id(ds.PixelData) ds._pixel_array = 'Test Value' # If _pixel_id doesn't match then attempt to get new pixel data - orig_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [] + orig_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [] with pytest.raises(NotImplementedError): ds.convert_pixel_data() - pydicom.config.image_handlers = orig_handlers + pydicom.config.pixel_data_handlers = orig_handlers + + def test_pixel_array_unknown_syntax(self): + """Test that pixel_array for an unknown syntax raises exception.""" + ds = dcmread(get_testdata_files("CT_small.dcm")[0]) + ds.file_meta.TransferSyntaxUID = '1.2.3.4' + msg = ( + r"Unable to decode pixel data with a transfer syntax UID of " + r"'1.2.3.4' \(1.2.3.4\) as there are no pixel data handlers " + r"available that support it" + ) + with pytest.raises(NotImplementedError, match=msg): + ds.pixel_array def test_formatted_lines(self): """Test Dataset.formatted_lines""" diff --git a/pydicom/tests/test_gdcm_pixel_data.py b/pydicom/tests/test_gdcm_pixel_data.py index fbacd869c..5c87f7e52 100644 --- a/pydicom/tests/test_gdcm_pixel_data.py +++ b/pydicom/tests/test_gdcm_pixel_data.py @@ -28,19 +28,20 @@ try: except AttributeError: have_pytest_param = False -gdcm_handler = None -have_gdcm_handler = True try: import pydicom.pixel_data_handlers.gdcm_handler as gdcm_handler + HAVE_GDCM = gdcm_handler.HAVE_GDCM except ImportError as e: - have_gdcm_handler = False -numpy_handler = None -have_numpy_handler = True + HAVE_GDCM = False + gdcm_handler = None + try: import pydicom.pixel_data_handlers.numpy_handler as numpy_handler + HAVE_NP = numpy_handler.HAVE_NP except ImportError: - have_numpy_handler = False -test_gdcm_decoder = have_gdcm_handler + HAVE_NP = False + numpy_handler = None + empty_number_tags_name = get_testdata_files( "reportsi_with_empty_number_tags.dcm")[0] @@ -117,11 +118,11 @@ class GDCM_JPEG_LS_Tests_no_gdcm(unittest.TestCase): self.mr_small = dcmread(mr_name) self.emri_jpeg_ls_lossless = dcmread(emri_jpeg_ls_lossless) self.emri_small = dcmread(emri_name) - self.original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [None] + self.original_handlers = pydicom.config.pixel_data_handlers + 
pydicom.config.pixel_data_handlers = [] def tearDown(self): - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers os.remove(self.unicode_filename) def test_JPEG_LS_PixelArray(self): @@ -141,11 +142,11 @@ class GDCM_JPEG2000Tests_no_gdcm(unittest.TestCase): self.emri_jpeg_2k_lossless = dcmread(emri_jpeg_2k_lossless) self.emri_small = dcmread(emri_name) self.sc_rgb_jpeg2k_gdcm_KY = dcmread(sc_rgb_jpeg2k_gdcm_KY) - self.original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [None] + self.original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [] def tearDown(self): - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers def test_JPEG2000(self): """JPEG2000: Returns correct values for sample data elements""" @@ -184,11 +185,11 @@ class GDCM_JPEGlossyTests_no_gdcm(unittest.TestCase): def setUp(self): self.jpeg_lossy = dcmread(jpeg_lossy_name) self.color_3d_jpeg = dcmread(color_3d_jpeg_baseline) - self.original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [None] + self.original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [] def tearDown(self): - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers def test_JPEGlossy(self): """JPEG-lossy: Returns correct values for sample data elements""" @@ -212,11 +213,11 @@ class GDCM_JPEGlossyTests_no_gdcm(unittest.TestCase): class GDCM_JPEGlosslessTests_no_gdcm(unittest.TestCase): def setUp(self): self.jpeg_lossless = dcmread(jpeg_lossless_name) - self.original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [None] + self.original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [] def tearDown(self): - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers def testJPEGlossless(self): """JPEGlossless: Returns correct values for sample data elements""" @@ -237,7 +238,7 @@ class GDCM_JPEGlosslessTests_no_gdcm(unittest.TestCase): _ = self.jpeg_lossless.pixel_array [email protected](not test_gdcm_decoder, reason=gdcm_missing_message) [email protected](not HAVE_GDCM, reason=gdcm_missing_message) class GDCM_JPEG_LS_Tests_with_gdcm(unittest.TestCase): def setUp(self): if compat.in_py2: @@ -254,11 +255,11 @@ class GDCM_JPEG_LS_Tests_with_gdcm(unittest.TestCase): self.mr_small = dcmread(mr_name) self.emri_jpeg_ls_lossless = dcmread(emri_jpeg_ls_lossless) self.emri_small = dcmread(emri_name) - self.original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [numpy_handler, gdcm_handler] + self.original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [numpy_handler, gdcm_handler] def tearDown(self): - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers os.remove(self.unicode_filename) def test_JPEG_LS_PixelArray(self): @@ -280,7 +281,7 @@ class GDCM_JPEG_LS_Tests_with_gdcm(unittest.TestCase): "(mean == {1})".format(b.mean(), a.mean())) [email protected](not test_gdcm_decoder, reason=gdcm_missing_message) [email protected](not HAVE_GDCM, reason=gdcm_missing_message) class GDCM_JPEG2000Tests_with_gdcm(unittest.TestCase): def setUp(self): self.jpeg_2k = dcmread(jpeg2000_name) @@ -291,11 +292,11 @@ 
class GDCM_JPEG2000Tests_with_gdcm(unittest.TestCase): self.sc_rgb_jpeg2k_gdcm_KY = dcmread(sc_rgb_jpeg2k_gdcm_KY) self.ground_truth_sc_rgb_jpeg2k_gdcm_KY_gdcm = dcmread( ground_truth_sc_rgb_jpeg2k_gdcm_KY_gdcm) - self.original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [numpy_handler, gdcm_handler] + self.original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [numpy_handler, gdcm_handler] def tearDown(self): - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers def test_JPEG2000(self): """JPEG2000: Returns correct values for sample data elements""" @@ -347,17 +348,17 @@ class GDCM_JPEG2000Tests_with_gdcm(unittest.TestCase): "(mean == {1})".format(b.mean(), a.mean())) [email protected](not test_gdcm_decoder, reason=gdcm_missing_message) [email protected](not HAVE_GDCM, reason=gdcm_missing_message) class GDCM_JPEGlossyTests_with_gdcm(unittest.TestCase): def setUp(self): self.jpeg_lossy = dcmread(jpeg_lossy_name) self.color_3d_jpeg = dcmread(color_3d_jpeg_baseline) - self.original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [numpy_handler, gdcm_handler] + self.original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [numpy_handler, gdcm_handler] def tearDown(self): - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers def testJPEGlossless_odd_data_size(self): test_file = get_testdata_files('SC_rgb_small_odd_jpeg.dcm')[0] @@ -400,10 +401,10 @@ class GDCM_JPEGlossyTests_with_gdcm(unittest.TestCase): @pytest.fixture(scope="module") def test_with_gdcm(): - original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [numpy_handler, gdcm_handler] + original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [numpy_handler, gdcm_handler] yield original_handlers - pydicom.config.image_handlers = original_handlers + pydicom.config.pixel_data_handlers = original_handlers if have_pytest_param: @@ -552,7 +553,7 @@ else: @pytest.mark.skipif( - not test_gdcm_decoder, + not HAVE_GDCM, reason=gdcm_missing_message) @pytest.mark.parametrize( "image,PhotometricInterpretation,results,convert_yuv_to_rgb", @@ -583,15 +584,15 @@ def test_PI_RGB(test_with_gdcm, assert t.PhotometricInterpretation == PhotometricInterpretation [email protected](not test_gdcm_decoder, reason=gdcm_missing_message) [email protected](not HAVE_GDCM, reason=gdcm_missing_message) class GDCM_JPEGlosslessTests_with_gdcm(unittest.TestCase): def setUp(self): self.jpeg_lossless = dcmread(jpeg_lossless_name) - self.original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [numpy_handler, gdcm_handler] + self.original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [numpy_handler, gdcm_handler] def tearDown(self): - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers def testJPEGlossless(self): """JPEGlossless: Returns correct values for sample data elements""" diff --git a/pydicom/tests/test_jpeg_ls_pixel_data.py b/pydicom/tests/test_jpeg_ls_pixel_data.py index f23d1eae9..512acf1a5 100644 --- a/pydicom/tests/test_jpeg_ls_pixel_data.py +++ b/pydicom/tests/test_jpeg_ls_pixel_data.py @@ -10,18 +10,20 @@ from pydicom.data import get_testdata_files jpeg_ls_missing_message = ("jpeg_ls is not available " "in 
this test environment") jpeg_ls_present_message = "jpeg_ls is being tested" -jpeg_ls_handler = None -have_jpeg_ls_handler = True -numpy_handler = None -have_numpy_handler = True + try: import pydicom.pixel_data_handlers.numpy_handler as numpy_handler + have_numpy_handler = numpy_handler.HAVE_NP except ImportError: have_numpy_handler = False + numpy_handler = None + try: import pydicom.pixel_data_handlers.jpeg_ls_handler as jpeg_ls_handler + have_jpeg_ls_handler = jpeg_ls_handler.HAVE_JPEGLS except ImportError: have_jpeg_ls_handler = False + jpeg_ls_handler = None test_jpeg_ls_decoder = have_numpy_handler and have_jpeg_ls_handler @@ -73,14 +75,14 @@ class jpeg_ls_JPEG_LS_Tests_no_jpeg_ls(unittest.TestCase): self.mr_small = dcmread(mr_name) self.emri_jpeg_ls_lossless = dcmread(emri_jpeg_ls_lossless) self.emri_small = dcmread(emri_name) - self.original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [None, numpy_handler] + self.original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [numpy_handler] def tearDown(self): - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers def test_JPEG_LS_PixelArray(self): - with self.assertRaises((NotImplementedError, )): + with self.assertRaises((RuntimeError, NotImplementedError)): _ = self.jpeg_ls_lossless.pixel_array @@ -91,11 +93,11 @@ class jpeg_ls_JPEG2000Tests_no_jpeg_ls(unittest.TestCase): self.mr_small = dcmread(mr_name) self.emri_jpeg_2k_lossless = dcmread(emri_jpeg_2k_lossless) self.emri_small = dcmread(emri_name) - self.original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [None, numpy_handler] + self.original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [numpy_handler] def tearDown(self): - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers def test_JPEG2000PixelArray(self): """JPEG2000: Now works""" @@ -113,11 +115,11 @@ class jpeg_ls_JPEGlossyTests_no_jpeg_ls(unittest.TestCase): def setUp(self): self.jpeg_lossy = dcmread(jpeg_lossy_name) self.color_3d_jpeg = dcmread(color_3d_jpeg_baseline) - self.original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [None, numpy_handler] + self.original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [numpy_handler] def tearDown(self): - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers def testJPEGlossy(self): """JPEG-lossy: Returns correct values for sample data elements""" @@ -142,11 +144,11 @@ class jpeg_ls_JPEGlossyTests_no_jpeg_ls(unittest.TestCase): class jpeg_ls_JPEGlosslessTests_no_jpeg_ls(unittest.TestCase): def setUp(self): self.jpeg_lossless = dcmread(jpeg_lossless_name) - self.original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [None, numpy_handler] + self.original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [numpy_handler] def tearDown(self): - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers def testJPEGlossless(self): """JPEGlossless: Returns correct values for sample data elements""" @@ -176,11 +178,11 @@ class jpeg_ls_JPEG_LS_Tests_with_jpeg_ls(unittest.TestCase): self.mr_small = dcmread(mr_name) self.emri_jpeg_ls_lossless = dcmread(emri_jpeg_ls_lossless) 
self.emri_small = dcmread(emri_name) - self.original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [jpeg_ls_handler, numpy_handler] + self.original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [jpeg_ls_handler, numpy_handler] def tearDown(self): - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers def test_raises_if_endianess_not_set(self): self.jpeg_ls_lossless.is_little_endian = None @@ -216,11 +218,11 @@ class jpeg_ls_JPEG2000Tests_with_jpeg_ls(unittest.TestCase): self.mr_small = dcmread(mr_name) self.emri_jpeg_2k_lossless = dcmread(emri_jpeg_2k_lossless) self.emri_small = dcmread(emri_name) - self.original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [jpeg_ls_handler, numpy_handler] + self.original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [jpeg_ls_handler, numpy_handler] def tearDown(self): - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers def test_JPEG2000PixelArray(self): with self.assertRaises((NotImplementedError, )): @@ -239,11 +241,11 @@ class jpeg_ls_JPEGlossyTests_with_jpeg_ls(unittest.TestCase): def setUp(self): self.jpeg_lossy = dcmread(jpeg_lossy_name) self.color_3d_jpeg = dcmread(color_3d_jpeg_baseline) - self.original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [jpeg_ls_handler, numpy_handler] + self.original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [jpeg_ls_handler, numpy_handler] def tearDown(self): - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers def testJPEGlossy(self): """JPEG-lossy: Returns correct values for sample data elements""" @@ -270,11 +272,11 @@ class jpeg_ls_JPEGlossyTests_with_jpeg_ls(unittest.TestCase): class jpeg_ls_JPEGlosslessTests_with_jpeg_ls(unittest.TestCase): def setUp(self): self.jpeg_lossless = dcmread(jpeg_lossless_name) - self.original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [jpeg_ls_handler, numpy_handler] + self.original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [jpeg_ls_handler, numpy_handler] def tearDown(self): - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers def testJPEGlossless(self): """JPEGlossless: Returns correct values for sample data elements""" diff --git a/pydicom/tests/test_numpy_pixel_data.py b/pydicom/tests/test_numpy_pixel_data.py index 1746283f5..09bd11813 100644 --- a/pydicom/tests/test_numpy_pixel_data.py +++ b/pydicom/tests/test_numpy_pixel_data.py @@ -3,11 +3,12 @@ There are the following possibilities: -* numpy is not available and the numpy handler is not available +* numpy is not available and + * the numpy handler is not available + * the numpy handler is available * numpy is available and - - * The numpy handler is not available - * The numpy handler is available + * the numpy handler is not available + * the numpy handler is available **Supported transfer syntaxes** @@ -45,6 +46,11 @@ from pydicom.uid import ( try: import numpy as np + HAVE_NP = True +except ImportError: + HAVE_NP = False + +try: from pydicom.pixel_data_handlers import numpy_handler as NP_HANDLER from pydicom.pixel_data_handlers.numpy_handler import ( get_pixeldata, @@ -53,9 +59,7 @@ 
try: get_expected_length, pixel_dtype, ) - HAVE_NP = True except ImportError: - HAVE_NP = False NP_HANDLER = None @@ -174,17 +178,17 @@ class TestNoNumpy_NoNumpyHandler(object): """Tests for handling datasets without numpy and the handler.""" def setup(self): """Setup the environment.""" - self.original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [] + self.original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [] def teardown(self): """Restore the environment.""" - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers def test_environment(self): """Check that the testing environment is as expected.""" assert not HAVE_NP - assert NP_HANDLER is None + assert NP_HANDLER is not None def test_can_access_supported_dataset(self): """Test that we can read and access elements in dataset.""" @@ -225,18 +229,88 @@ class TestNoNumpy_NoNumpyHandler(object): ds.pixel_array +# Numpy unavailable and the numpy handler is available [email protected](HAVE_NP, reason='Numpy is available') +class TestNoNumpy_NumpyHandler(object): + """Tests for handling datasets without numpy and the handler.""" + def setup(self): + """Setup the environment.""" + self.original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [NP_HANDLER] + + def teardown(self): + """Restore the environment.""" + pydicom.config.pixel_data_handlers = self.original_handlers + + def test_environment(self): + """Check that the testing environment is as expected.""" + assert not HAVE_NP + assert NP_HANDLER is not None + + def test_can_access_supported_dataset(self): + """Test that we can read and access elements in dataset.""" + # Explicit little + ds = dcmread(EXPL_16_1_1F) + assert 'CompressedSamples^MR1' == ds.PatientName + assert 8192 == len(ds.PixelData) + + # Implicit little + ds = dcmread(IMPL_16_1_1F) + assert 'CompressedSamples^MR1' == ds.PatientName + assert 8192 == len(ds.PixelData) + + # Deflated little + ds = dcmread(DEFL_8_1_1F) + assert '^^^^' == ds.PatientName + assert 262144 == len(ds.PixelData) + + # Explicit big + ds = dcmread(EXPB_16_1_1F) + assert 'CompressedSamples^MR1' == ds.PatientName + assert 8192 == len(ds.PixelData) + + @pytest.mark.parametrize("fpath,data", REFERENCE_DATA_UNSUPPORTED) + def test_can_access_unsupported_dataset(self, fpath, data): + """Test can read and access elements in unsupported datasets.""" + ds = dcmread(fpath) + assert data[0] == ds.file_meta.TransferSyntaxUID + assert data[1] == ds.PatientName + + def test_unsupported_pixel_array_raises(self): + """Test pixel_array raises exception for unsupported syntaxes.""" + ds = dcmread(EXPL_16_1_1F) + for uid in UNSUPPORTED_SYNTAXES: + ds.file_meta.TransferSyntaxUID = uid + with pytest.raises(NotImplementedError, + match="UID of '{}'".format(uid)): + ds.pixel_array + + def test_supported_pixel_array_raises(self): + """Test pixel_array raises exception for supported syntaxes.""" + ds = dcmread(EXPL_16_1_1F) + for uid in SUPPORTED_SYNTAXES: + ds.file_meta.TransferSyntaxUID = uid + exc_msg = ( + r"The following handlers are available to decode the pixel " + r"data however they are missing required dependencies: " + r"Numpy \(req. 
NumPy\)" + ) + with pytest.raises(RuntimeError, match=exc_msg): + ds.pixel_array + + # Numpy is available, the numpy handler is unavailable @pytest.mark.skipif(not HAVE_NP, reason='Numpy is unavailable') class TestNumpy_NoNumpyHandler(object): """Tests for handling datasets without the handler.""" def setup(self): """Setup the environment.""" - self.original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [] + self.original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [] def teardown(self): """Restore the environment.""" - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers def test_environment(self): """Check that the testing environment is as expected.""" @@ -278,8 +352,7 @@ class TestNumpy_NoNumpyHandler(object): ds = dcmread(EXPL_16_1_1F) for uid in ALL_TRANSFER_SYNTAXES: ds.file_meta.TransferSyntaxUID = uid - with pytest.raises(NotImplementedError, - match="UID of '{}'".format(uid)): + with pytest.raises((NotImplementedError, RuntimeError)): ds.pixel_array @@ -328,12 +401,12 @@ class TestNumpy_NumpyHandler(object): """Tests for handling Pixel Data with the handler.""" def setup(self): """Setup the test datasets and the environment.""" - self.original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [NP_HANDLER] + self.original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [NP_HANDLER] def teardown(self): """Restore the environment.""" - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers def test_environment(self): """Check that the testing environment is as expected.""" @@ -346,8 +419,7 @@ class TestNumpy_NumpyHandler(object): for uid in UNSUPPORTED_SYNTAXES: ds.file_meta.TransferSyntaxUID = uid - with pytest.raises(NotImplementedError, - match="UID of '{0}' ".format(uid)): + with pytest.raises((NotImplementedError, RuntimeError)): ds.pixel_array def test_dataset_pixel_array_handler_needs_convert(self): diff --git a/pydicom/tests/test_pillow_pixel_data.py b/pydicom/tests/test_pillow_pixel_data.py index 8a85e6341..25e39cff0 100644 --- a/pydicom/tests/test_pillow_pixel_data.py +++ b/pydicom/tests/test_pillow_pixel_data.py @@ -15,32 +15,30 @@ have_pytest_param = hasattr(pytest, 'param') try: from pydicom.pixel_data_handlers import numpy_handler - have_numpy_handler = True + HAVE_NP = numpy_handler.HAVE_NP except ImportError: - have_numpy_handler = False + HAVE_NP = False numpy_handler = None try: from pydicom.pixel_data_handlers import pillow_handler - have_pillow_handler = True - have_pillow_jpeg_plugin = pillow_handler.have_pillow_jpeg_plugin - have_pillow_jpeg2000_plugin = pillow_handler.have_pillow_jpeg2000_plugin + HAVE_PIL = pillow_handler.HAVE_PIL + HAVE_JPEG = pillow_handler.HAVE_JPEG + HAVE_JPEG2K = pillow_handler.HAVE_JPEG2K import numpy as np except ImportError: pillow_handler = None - have_pillow_handler = False - have_pillow_jpeg_plugin = False - have_pillow_jpeg2000_plugin = False + HAVE_PIL = False + HAVE_JPEG = False + HAVE_JPEG2K = False pillow_missing_message = ("pillow is not available " "in this test environment") -test_pillow_decoder = have_numpy_handler and have_pillow_handler -test_pillow_jpeg_decoder = (test_pillow_decoder and - have_pillow_jpeg_plugin) -test_pillow_jpeg2000_decoder = (test_pillow_decoder and - have_pillow_jpeg2000_plugin) +TEST_PIL = HAVE_NP and HAVE_PIL +TEST_JPEG = TEST_PIL and 
HAVE_JPEG +TEST_JPEG2K = TEST_PIL and HAVE_JPEG2K empty_number_tags_name = get_testdata_files( "reportsi_with_empty_number_tags.dcm")[0] @@ -129,12 +127,12 @@ class Test_JPEGLS_no_pillow(object): """Setup the test datasets.""" self.jpeg_ls_lossless = dcmread(jpeg_ls_lossless_name) self.emri_jpeg_ls_lossless = dcmread(emri_jpeg_ls_lossless) - self.original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [None, numpy_handler] + self.original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [numpy_handler] def teardown(self): """Reset the pixel data handlers.""" - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers def test_JPEG_LS_PixelArray(self): """Test decoding JPEG LS with only numpy fails.""" @@ -155,12 +153,12 @@ class Test_JPEG2000Tests_no_pillow(object): self.jpeg_2k_lossless = dcmread(jpeg2000_lossless_name) self.emri_jpeg_2k_lossless = dcmread(emri_jpeg_2k_lossless) self.sc_rgb_jpeg2k_gdcm_KY = dcmread(sc_rgb_jpeg2k_gdcm_KY) - self.original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [None, numpy_handler] + self.original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [numpy_handler] def teardown(self): """Reset the pixel data handlers.""" - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers def testJPEG2000(self): """Test reading element values works OK without Pillow.""" @@ -193,12 +191,12 @@ class Test_JPEGlossyTests_no_pillow(object): """Setup the test datasets.""" self.jpeg_lossy = dcmread(jpeg_lossy_name) self.color_3d_jpeg = dcmread(color_3d_jpeg_baseline) - self.original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [None, numpy_handler] + self.original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [numpy_handler] def teardown(self): """Reset the pixel data handlers.""" - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers def testJPEGlossy(self): """Test reading element values works OK without Pillow.""" @@ -221,12 +219,12 @@ class Test_JPEGlosslessTests_no_pillow(object): def setup(self): """Setup the test datasets.""" self.jpeg_lossless = dcmread(jpeg_lossless_name) - self.original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [None, numpy_handler] + self.original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [numpy_handler] def teardown(self): """Reset the pixel data handlers.""" - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers def testJPEGlossless(self): """Test reading element values works OK without Pillow.""" @@ -241,7 +239,7 @@ class Test_JPEGlosslessTests_no_pillow(object): @pytest.mark.skipif( - not test_pillow_decoder, + not TEST_PIL, reason=pillow_missing_message) class Test_JPEG_LS_with_pillow(object): """Tests for decoding JPEG LS if pillow pixel handler is available.""" @@ -249,12 +247,12 @@ class Test_JPEG_LS_with_pillow(object): """Setup the test datasets.""" self.jpeg_ls_lossless = dcmread(jpeg_ls_lossless_name) self.emri_jpeg_ls_lossless = dcmread(emri_jpeg_ls_lossless) - self.original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [pillow_handler, numpy_handler] + self.original_handlers = 
pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [pillow_handler, numpy_handler] def teardown(self): """Reset the pixel data handlers.""" - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers def test_JPEG_LS_PixelArray(self): """Test decoding JPEG LS with pillow handler fails.""" @@ -268,7 +266,7 @@ class Test_JPEG_LS_with_pillow(object): @pytest.mark.skipif( - not test_pillow_jpeg2000_decoder, + not TEST_JPEG2K, reason=pillow_missing_message) class Test_JPEG2000Tests_with_pillow(object): """Test decoding JPEG2K if pillow JPEG2K plugin is available.""" @@ -282,12 +280,12 @@ class Test_JPEG2000Tests_with_pillow(object): self.sc_rgb_jpeg2k_gdcm_KY = dcmread(sc_rgb_jpeg2k_gdcm_KY) self.ground_truth_sc_rgb_jpeg2k_gdcm_KY_gdcm = dcmread( ground_truth_sc_rgb_jpeg2k_gdcm_KY_gdcm) - self.original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [pillow_handler, numpy_handler] + self.original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [pillow_handler, numpy_handler] def teardown(self): """Reset the pixel data handlers.""" - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers def test_raises_if_endianess_not_set(self): self.jpeg_2k_lossless.is_little_endian = None @@ -322,7 +320,7 @@ class Test_JPEG2000Tests_with_pillow(object): @pytest.mark.skipif( - not test_pillow_jpeg_decoder, + not TEST_JPEG, reason=pillow_missing_message) class Test_JPEGlossyTests_with_pillow(object): """Test decoding JPEG if pillow JPEG plugin is available.""" @@ -330,12 +328,12 @@ class Test_JPEGlossyTests_with_pillow(object): """Setup the test datasets.""" self.jpeg_lossy = dcmread(jpeg_lossy_name) self.color_3d_jpeg = dcmread(color_3d_jpeg_baseline) - self.original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [pillow_handler, numpy_handler] + self.original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [pillow_handler, numpy_handler] def teardown(self): """Reset the pixel data handlers.""" - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers def testJPEGlossless_odd_data_size(self): """Test decoding JPEG with pillow handler succeeds.""" @@ -370,10 +368,10 @@ class Test_JPEGlossyTests_with_pillow(object): @pytest.fixture(scope="module") def test_with_pillow(): - original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [pillow_handler, numpy_handler] + original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [pillow_handler, numpy_handler] yield original_handlers - pydicom.config.image_handlers = original_handlers + pydicom.config.pixel_data_handlers = original_handlers if have_pytest_param: @@ -539,7 +537,7 @@ else: @pytest.mark.skipif( - not test_pillow_jpeg_decoder, + not TEST_JPEG, reason=pillow_missing_message) @pytest.mark.parametrize( "image,PhotometricInterpretation,results,ground_truth", @@ -579,19 +577,19 @@ def test_PI_RGB(test_with_pillow, @pytest.mark.skipif( - not test_pillow_jpeg_decoder, + not TEST_JPEG, reason=pillow_missing_message) class Test_JPEGlosslessTests_with_pillow(object): """Test decoding JPEG lossless if pillow JPEG plugin is available.""" def setup(self): """Setup the test datasets.""" self.jpeg_lossless = dcmread(jpeg_lossless_name) - self.original_handlers = 
pydicom.config.image_handlers - pydicom.config.image_handlers = [pillow_handler, numpy_handler] + self.original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [pillow_handler, numpy_handler] def teardown(self): """Reset the pixel data handlers.""" - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers def testJPEGlossless(self): """Test reading element values works OK with pillow pixel handler.""" @@ -601,5 +599,5 @@ class Test_JPEGlosslessTests_with_pillow(object): def testJPEGlosslessPixelArray(self): """Test decoding JPEG lossless with pillow handler fails.""" - with pytest.raises(NotImplementedError): + with pytest.raises(RuntimeError): self.jpeg_lossless.pixel_array diff --git a/pydicom/tests/test_rle_pixel_data.py b/pydicom/tests/test_rle_pixel_data.py index 095b8ad11..b94f0f430 100644 --- a/pydicom/tests/test_rle_pixel_data.py +++ b/pydicom/tests/test_rle_pixel_data.py @@ -3,9 +3,10 @@ There are the following possibilities: -* numpy is not available and the RLE handler is not available +* numpy is not available and + * the RLE handler is not available + * the RLE handler is available * numpy is available and - * The RLE handler is not available * The RLE handler is available @@ -36,7 +37,7 @@ try: import numpy as np from pydicom.pixel_data_handlers import numpy_handler as NP_HANDLER from pydicom.pixel_data_handlers.util import reshape_pixel_array - HAVE_NP = True + HAVE_NP = NP_HANDLER.HAVE_NP except ImportError: NP_HANDLER = None HAVE_NP = False @@ -49,7 +50,9 @@ try: _rle_decode_segment, _parse_rle_header, ) + HAVE_RLE = RLE_HANDLER.HAVE_RLE except ImportError: + HAVE_RLE = False RLE_HANDLER = None @@ -144,18 +147,18 @@ def _get_pixel_array(fpath): ------- numpy.ndarray """ - if NP_HANDLER is None: + if not HAVE_NP: raise RuntimeError( 'Function only usable if the numpy handler is available' ) - original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [NP_HANDLER] + original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [NP_HANDLER] ds = dcmread(fpath) arr = ds.pixel_array - pydicom.config.image_handlers = original_handlers + pydicom.config.pixel_data_handlers = original_handlers return arr @@ -182,17 +185,18 @@ class TestNoNumpy_NoRLEHandler(object): """Tests for handling datasets without numpy and the handler.""" def setup(self): """Setup the environment.""" - self.original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [] + self.original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [] def teardown(self): """Restore the environment.""" - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers def test_environment(self): """Check that the testing environment is as expected.""" assert not HAVE_NP - assert RLE_HANDLER is None + # The RLE handler should still be available + assert RLE_HANDLER is not None def test_can_access_supported_dataset(self): """Test that we can read and access elements in an RLE dataset.""" @@ -220,18 +224,76 @@ class TestNoNumpy_NoRLEHandler(object): ds.pixel_array +# Numpy unavailable and the RLE handler is available [email protected](HAVE_NP, reason='Numpy is available') +class TestNoNumpy_RLEHandler(object): + """Tests for handling datasets without numpy and the handler.""" + def setup(self): + """Setup the environment.""" + self.original_handlers = 
pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [RLE_HANDLER] + + def teardown(self): + """Restore the environment.""" + pydicom.config.pixel_data_handlers = self.original_handlers + + def test_environment(self): + """Check that the testing environment is as expected.""" + assert not HAVE_NP + # The RLE handler should still be available + assert RLE_HANDLER is not None + + def test_can_access_supported_dataset(self): + """Test that we can read and access elements in an RLE dataset.""" + ds = dcmread(MR_RLE_1F) + assert 'CompressedSamples^MR1' == ds.PatientName + assert 6128 == len(ds.PixelData) + + @pytest.mark.parametrize("fpath,data", REFERENCE_DATA_UNSUPPORTED) + def test_can_access_unsupported_dataset(self, fpath, data): + """Test can read and access elements in unsupported datasets.""" + ds = dcmread(fpath) + assert data[0] == ds.file_meta.TransferSyntaxUID + assert data[1] == ds.PatientName + + def test_unsupported_pixel_array_raises(self): + """Test pixel_array raises exception for unsupported syntaxes.""" + ds = dcmread(MR_EXPL_LITTLE_1F) + for uid in UNSUPPORTED_SYNTAXES: + ds.file_meta.TransferSyntaxUID = uid + exc_msg = ( + r"Unable to decode pixel data with a transfer syntax UID of " + r"'{}'".format(uid) + ) + with pytest.raises(RuntimeError, match=exc_msg): + ds.pixel_array + + def test_supported_pixel_array_raises(self): + """Test pixel_array raises exception for supported syntaxes.""" + ds = dcmread(MR_EXPL_LITTLE_1F) + for uid in SUPPORTED_SYNTAXES: + ds.file_meta.TransferSyntaxUID = uid + exc_msg = ( + r"The following handlers are available to decode the pixel " + r"data however they are missing required dependencies: " + r"RLE Lossless \(req. NumPy\)" + ) + with pytest.raises(RuntimeError, match=exc_msg): + ds.pixel_array + + # Numpy is available, the RLE handler is unavailable @pytest.mark.skipif(not HAVE_NP, reason='Numpy is not available') class TestNumpy_NoRLEHandler(object): """Tests for handling datasets with no handler.""" def setup(self): """Setup the environment.""" - self.original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [] + self.original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [] def teardown(self): """Restore the environment.""" - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers def test_environment(self): """Check that the testing environment is as expected.""" @@ -271,12 +333,12 @@ class TestNumpy_RLEHandler(object): """Tests for handling datasets with the handler.""" def setup(self): """Setup the environment.""" - self.original_handlers = pydicom.config.image_handlers - pydicom.config.image_handlers = [RLE_HANDLER] + self.original_handlers = pydicom.config.pixel_data_handlers + pydicom.config.pixel_data_handlers = [RLE_HANDLER] def teardown(self): """Restore the environment.""" - pydicom.config.image_handlers = self.original_handlers + pydicom.config.pixel_data_handlers = self.original_handlers def test_environment(self): """Check that the testing environment is as expected."""
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 3, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 8 }
1.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "numpy>=1.16.0", "pillow", "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
coverage==6.2
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
numpy==1.19.5
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
Pillow==8.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
-e git+https://github.com/pydicom/pydicom.git@1feec586221a54db5d3d832711de14216dd361f0#egg=pydicom
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: pydicom
channels:
  - defaults
  - https://repo.anaconda.com/pkgs/main
  - https://repo.anaconda.com/pkgs/r
  - conda-forge
dependencies:
  - _libgcc_mutex=0.1=main
  - _openmp_mutex=5.1=1_gnu
  - attrs=21.4.0=pyhd3eb1b0_0
  - ca-certificates=2025.2.25=h06a4308_0
  - certifi=2021.5.30=py36h06a4308_0
  - importlib-metadata=4.8.1=py36h06a4308_0
  - importlib_metadata=4.8.1=hd3eb1b0_0
  - iniconfig=1.1.1=pyhd3eb1b0_0
  - ld_impl_linux-64=2.40=h12ee557_0
  - libffi=3.3=he6710b0_2
  - libgcc-ng=11.2.0=h1234567_1
  - libgomp=11.2.0=h1234567_1
  - libstdcxx-ng=11.2.0=h1234567_1
  - more-itertools=8.12.0=pyhd3eb1b0_0
  - ncurses=6.4=h6a678d5_0
  - openssl=1.1.1w=h7f8727e_0
  - packaging=21.3=pyhd3eb1b0_0
  - pip=21.2.2=py36h06a4308_0
  - pluggy=0.13.1=py36h06a4308_0
  - py=1.11.0=pyhd3eb1b0_0
  - pyparsing=3.0.4=pyhd3eb1b0_0
  - pytest=6.2.4=py36h06a4308_2
  - python=3.6.13=h12debd9_1
  - readline=8.2=h5eee18b_0
  - setuptools=58.0.4=py36h06a4308_0
  - sqlite=3.45.3=h5eee18b_0
  - tk=8.6.14=h39e8969_0
  - toml=0.10.2=pyhd3eb1b0_0
  - typing_extensions=4.1.1=pyh06a4308_0
  - wheel=0.37.1=pyhd3eb1b0_0
  - xz=5.6.4=h5eee18b_1
  - zipp=3.6.0=pyhd3eb1b0_0
  - zlib=1.2.13=h5eee18b_1
  - pip:
      - coverage==6.2
      - numpy==1.19.5
      - pillow==8.4.0
      - pytest-cov==4.0.0
      - tomli==1.2.3
prefix: /opt/conda/envs/pydicom
[ "pydicom/tests/test_JPEG_LS_transfer_syntax.py::Test_JPEG_LS_Lossless_transfer_syntax::test_read_mr_with_numpy", "pydicom/tests/test_JPEG_LS_transfer_syntax.py::Test_JPEG_LS_Lossless_transfer_syntax::test_read_emri_with_numpy", "pydicom/tests/test_JPEG_LS_transfer_syntax.py::Test_JPEG_LS_Lossless_transfer_syntax::test_read_mr_with_pillow", "pydicom/tests/test_JPEG_LS_transfer_syntax.py::Test_JPEG_LS_Lossless_transfer_syntax::test_read_emri_with_pillow", "pydicom/tests/test_JPEG_LS_transfer_syntax.py::Test_JPEG_LS_Lossless_transfer_syntax::test_read_mr_without_any_handler", "pydicom/tests/test_JPEG_LS_transfer_syntax.py::Test_JPEG_LS_Lossless_transfer_syntax::test_read_emri_without_any_handler", "pydicom/tests/test_dataset.py::DatasetTests::testAttributeErrorInProperty", "pydicom/tests/test_dataset.py::DatasetTests::testDeleteDicomAttr", "pydicom/tests/test_dataset.py::DatasetTests::testDeleteDicomAttrWeDontHave", "pydicom/tests/test_dataset.py::DatasetTests::testDeleteDicomCommandGroupLength", "pydicom/tests/test_dataset.py::DatasetTests::testDeleteItemLong", "pydicom/tests/test_dataset.py::DatasetTests::testDeleteItemTuple", "pydicom/tests/test_dataset.py::DatasetTests::testDeleteNonExistingItem", "pydicom/tests/test_dataset.py::DatasetTests::testDeleteOtherAttr", "pydicom/tests/test_dataset.py::DatasetTests::testEqualityInheritance", "pydicom/tests/test_dataset.py::DatasetTests::testEqualityNoSequence", "pydicom/tests/test_dataset.py::DatasetTests::testEqualityNotDataset", "pydicom/tests/test_dataset.py::DatasetTests::testEqualityPrivate", "pydicom/tests/test_dataset.py::DatasetTests::testEqualitySequence", "pydicom/tests/test_dataset.py::DatasetTests::testEqualityUnknown", "pydicom/tests/test_dataset.py::DatasetTests::testGetDefault1", "pydicom/tests/test_dataset.py::DatasetTests::testGetDefault2", "pydicom/tests/test_dataset.py::DatasetTests::testGetDefault3", "pydicom/tests/test_dataset.py::DatasetTests::testGetDefault4", "pydicom/tests/test_dataset.py::DatasetTests::testGetExists1", "pydicom/tests/test_dataset.py::DatasetTests::testGetExists2", "pydicom/tests/test_dataset.py::DatasetTests::testGetExists3", "pydicom/tests/test_dataset.py::DatasetTests::testGetExists4", "pydicom/tests/test_dataset.py::DatasetTests::testGetFromRaw", "pydicom/tests/test_dataset.py::DatasetTests::testHash", "pydicom/tests/test_dataset.py::DatasetTests::testMembership", "pydicom/tests/test_dataset.py::DatasetTests::testSetExistingDataElementByName", "pydicom/tests/test_dataset.py::DatasetTests::testSetNewDataElementByName", "pydicom/tests/test_dataset.py::DatasetTests::testSetNonDicom", "pydicom/tests/test_dataset.py::DatasetTests::testTagExceptionPrint", "pydicom/tests/test_dataset.py::DatasetTests::testTagExceptionWalk", "pydicom/tests/test_dataset.py::DatasetTests::testUpdate", "pydicom/tests/test_dataset.py::DatasetTests::test_NamedMemberUpdated", "pydicom/tests/test_dataset.py::DatasetTests::test__setitem__", "pydicom/tests/test_dataset.py::DatasetTests::test_add_repeater_elem_by_keyword", "pydicom/tests/test_dataset.py::DatasetTests::test_attribute_error_in_property_correct_debug", "pydicom/tests/test_dataset.py::DatasetTests::test_contains", "pydicom/tests/test_dataset.py::DatasetTests::test_data_element", "pydicom/tests/test_dataset.py::DatasetTests::test_delitem_slice", "pydicom/tests/test_dataset.py::DatasetTests::test_dir", "pydicom/tests/test_dataset.py::DatasetTests::test_dir_filter", "pydicom/tests/test_dataset.py::DatasetTests::test_dir_subclass", 
"pydicom/tests/test_dataset.py::DatasetTests::test_empty_slice", "pydicom/tests/test_dataset.py::DatasetTests::test_equality_elements", "pydicom/tests/test_dataset.py::DatasetTests::test_exit_exception", "pydicom/tests/test_dataset.py::DatasetTests::test_formatted_lines", "pydicom/tests/test_dataset.py::DatasetTests::test_formatted_lines_known_uid", "pydicom/tests/test_dataset.py::DatasetTests::test_get_item_slice", "pydicom/tests/test_dataset.py::DatasetTests::test_get_raises", "pydicom/tests/test_dataset.py::DatasetTests::test_getitem_slice", "pydicom/tests/test_dataset.py::DatasetTests::test_getitem_slice_ffff", "pydicom/tests/test_dataset.py::DatasetTests::test_getitem_slice_raises", "pydicom/tests/test_dataset.py::DatasetTests::test_group_dataset", "pydicom/tests/test_dataset.py::DatasetTests::test_inequality", "pydicom/tests/test_dataset.py::DatasetTests::test_is_original_encoding", "pydicom/tests/test_dataset.py::DatasetTests::test_iterall", "pydicom/tests/test_dataset.py::DatasetTests::test_matching_tags", "pydicom/tests/test_dataset.py::DatasetTests::test_pixel_array_already_have", "pydicom/tests/test_dataset.py::DatasetTests::test_pixel_array_id_changed", "pydicom/tests/test_dataset.py::DatasetTests::test_pixel_array_unknown_syntax", "pydicom/tests/test_dataset.py::DatasetTests::test_property", "pydicom/tests/test_dataset.py::DatasetTests::test_remove_private_tags", "pydicom/tests/test_dataset.py::DatasetTests::test_save_as", "pydicom/tests/test_dataset.py::DatasetTests::test_set_convert_private_elem_from_raw", "pydicom/tests/test_dataset.py::DatasetTests::test_setitem_slice_raises", "pydicom/tests/test_dataset.py::DatasetTests::test_top", "pydicom/tests/test_dataset.py::DatasetTests::test_trait_names", "pydicom/tests/test_dataset.py::DatasetTests::test_walk", "pydicom/tests/test_dataset.py::DatasetTests::test_with", "pydicom/tests/test_dataset.py::DatasetElementsTests::testSequenceAssignment", "pydicom/tests/test_dataset.py::DatasetElementsTests::test_ensure_file_meta", "pydicom/tests/test_dataset.py::DatasetElementsTests::test_fix_meta_info", "pydicom/tests/test_dataset.py::DatasetElementsTests::test_validate_and_correct_file_meta", "pydicom/tests/test_dataset.py::FileDatasetTests::test_creation_with_container", "pydicom/tests/test_dataset.py::FileDatasetTests::test_equality_file_meta", "pydicom/tests/test_gdcm_pixel_data.py::GDCM_JPEG_LS_Tests_no_gdcm::test_JPEG_LS_PixelArray", "pydicom/tests/test_gdcm_pixel_data.py::GDCM_JPEG_LS_Tests_no_gdcm::test_emri_JPEG_LS_PixelArray", "pydicom/tests/test_gdcm_pixel_data.py::GDCM_JPEG2000Tests_no_gdcm::test_JPEG2000", "pydicom/tests/test_gdcm_pixel_data.py::GDCM_JPEG2000Tests_no_gdcm::test_JPEG2000PixelArray", "pydicom/tests/test_gdcm_pixel_data.py::GDCM_JPEG2000Tests_no_gdcm::test_emri_JPEG2000PixelArray", "pydicom/tests/test_gdcm_pixel_data.py::GDCM_JPEG2000Tests_no_gdcm::test_jpeg2000_lossy", "pydicom/tests/test_gdcm_pixel_data.py::GDCM_JPEGlossyTests_no_gdcm::test_JPEGBaselineColor3DPixelArray", "pydicom/tests/test_gdcm_pixel_data.py::GDCM_JPEGlossyTests_no_gdcm::test_JPEGlossy", "pydicom/tests/test_gdcm_pixel_data.py::GDCM_JPEGlossyTests_no_gdcm::test_JPEGlossyPixelArray", "pydicom/tests/test_gdcm_pixel_data.py::GDCM_JPEGlosslessTests_no_gdcm::testJPEGlossless", "pydicom/tests/test_gdcm_pixel_data.py::GDCM_JPEGlosslessTests_no_gdcm::testJPEGlosslessPixelArray", "pydicom/tests/test_jpeg_ls_pixel_data.py::jpeg_ls_JPEG_LS_Tests_no_jpeg_ls::test_JPEG_LS_PixelArray", 
"pydicom/tests/test_jpeg_ls_pixel_data.py::jpeg_ls_JPEG2000Tests_no_jpeg_ls::test_JPEG2000PixelArray", "pydicom/tests/test_jpeg_ls_pixel_data.py::jpeg_ls_JPEG2000Tests_no_jpeg_ls::test_emri_JPEG2000PixelArray", "pydicom/tests/test_jpeg_ls_pixel_data.py::jpeg_ls_JPEGlossyTests_no_jpeg_ls::testJPEGBaselineColor3DPixelArray", "pydicom/tests/test_jpeg_ls_pixel_data.py::jpeg_ls_JPEGlossyTests_no_jpeg_ls::testJPEGlossy", "pydicom/tests/test_jpeg_ls_pixel_data.py::jpeg_ls_JPEGlossyTests_no_jpeg_ls::testJPEGlossyPixelArray", "pydicom/tests/test_jpeg_ls_pixel_data.py::jpeg_ls_JPEGlosslessTests_no_jpeg_ls::testJPEGlossless", "pydicom/tests/test_jpeg_ls_pixel_data.py::jpeg_ls_JPEGlosslessTests_no_jpeg_ls::testJPEGlosslessPixelArray", "pydicom/tests/test_numpy_pixel_data.py::test_unsupported_syntaxes", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NoNumpyHandler::test_environment", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NoNumpyHandler::test_can_access_supported_dataset", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NoNumpyHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/SC_rgb_jpeg_dcmtk.dcm-data0]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NoNumpyHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/JPEG-lossy.dcm-data1]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NoNumpyHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/SC_rgb_jpeg_gdcm.dcm-data2]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NoNumpyHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/MR_small_jpeg_ls_lossless.dcm-data3]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NoNumpyHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/emri_small_jpeg_2k_lossless.dcm-data4]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NoNumpyHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/JPEG2000.dcm-data5]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NoNumpyHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/MR_small_RLE.dcm-data6]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NoNumpyHandler::test_pixel_array_raises", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_environment", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_unsupported_syntax_raises", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_dataset_pixel_array_handler_needs_convert", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/SC_rgb_jpeg_dcmtk.dcm-data0]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/JPEG-lossy.dcm-data1]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/SC_rgb_jpeg_gdcm.dcm-data2]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/MR_small_jpeg_ls_lossless.dcm-data3]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/emri_small_jpeg_2k_lossless.dcm-data4]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/JPEG2000.dcm-data5]", 
"pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/MR_small_RLE.dcm-data6]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_pixel_array_8bit_un_signed", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_pixel_array_16bit_un_signed", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_pixel_array_32bit_un_signed", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_8bit_1sample_1frame", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_8bit_1sample_2frame", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_8bit_3sample_1frame_odd_size", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_8bit_3sample_1frame", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_8bit_3sample_2frame", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_properties[/pydicom/pydicom/data/test_files/liver_1frame.dcm-data0]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_properties[/pydicom/pydicom/data/test_files/liver.dcm-data1]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_properties[/pydicom/pydicom/data/test_files/OBXXXX1A.dcm-data2]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_properties[/pydicom/pydicom/data/test_files/SC_rgb_small_odd.dcm-data3]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_properties[/pydicom/pydicom/data/test_files/OBXXXX1A_2frame.dcm-data4]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_properties[/pydicom/pydicom/data/test_files/SC_rgb.dcm-data5]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_properties[/pydicom/pydicom/data/test_files/SC_rgb_2frame.dcm-data6]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_properties[/pydicom/pydicom/data/test_files/MR_small.dcm-data7]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_properties[/pydicom/pydicom/data/test_files/emri_small.dcm-data8]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_properties[/pydicom/pydicom/data/test_files/SC_rgb_16bit_2frame.dcm-data9]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_properties[/pydicom/pydicom/data/test_files/rtdose_1frame.dcm-data10]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_properties[/pydicom/pydicom/data/test_files/rtdose.dcm-data11]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_properties[/pydicom/pydicom/data/test_files/SC_rgb_32bit_2frame.dcm-data12]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_little_1bit_1sample_1frame", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_little_1bit_1sample_3frame", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_little_16bit_1sample_1frame", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_little_16bit_1sample_10frame", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_little_16bit_3sample_1frame", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_little_16bit_3sample_2frame", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_little_32bit_1sample_1frame", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_little_32bit_1sample_15frame", 
"pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_little_32bit_3sample_1frame", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_little_32bit_3sample_2frame", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_big_endian_datasets[/pydicom/pydicom/data/test_files/liver_1frame.dcm-/pydicom/pydicom/data/test_files/liver_expb_1frame.dcm]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_big_endian_datasets[/pydicom/pydicom/data/test_files/liver.dcm-/pydicom/pydicom/data/test_files/liver_expb.dcm]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_big_endian_datasets[/pydicom/pydicom/data/test_files/OBXXXX1A.dcm-/pydicom/pydicom/data/test_files/OBXXXX1A_expb.dcm]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_big_endian_datasets[/pydicom/pydicom/data/test_files/OBXXXX1A_2frame.dcm-/pydicom/pydicom/data/test_files/OBXXXX1A_expb_2frame.dcm]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_big_endian_datasets[/pydicom/pydicom/data/test_files/SC_rgb.dcm-/pydicom/pydicom/data/test_files/SC_rgb_expb.dcm]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_big_endian_datasets[/pydicom/pydicom/data/test_files/SC_rgb_2frame.dcm-/pydicom/pydicom/data/test_files/SC_rgb_expb_2frame.dcm]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_big_endian_datasets[/pydicom/pydicom/data/test_files/MR_small.dcm-/pydicom/pydicom/data/test_files/MR_small_expb.dcm]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_big_endian_datasets[/pydicom/pydicom/data/test_files/emri_small.dcm-/pydicom/pydicom/data/test_files/emri_small_big_endian.dcm]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_big_endian_datasets[/pydicom/pydicom/data/test_files/SC_rgb_16bit_2frame.dcm-/pydicom/pydicom/data/test_files/SC_rgb_expb_16bit_2frame.dcm]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_big_endian_datasets[/pydicom/pydicom/data/test_files/rtdose_1frame.dcm-/pydicom/pydicom/data/test_files/rtdose_expb_1frame.dcm]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_big_endian_datasets[/pydicom/pydicom/data/test_files/rtdose.dcm-/pydicom/pydicom/data/test_files/rtdose_expb.dcm]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_big_endian_datasets[/pydicom/pydicom/data/test_files/SC_rgb_32bit_2frame.dcm-/pydicom/pydicom/data/test_files/SC_rgb_expb_32bit_2frame.dcm]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_endianness_not_set", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetPixelData::test_no_pixel_data_raises", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetPixelData::test_unknown_pixel_representation_raises", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetPixelData::test_unsupported_syntaxes_raises", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetPixelData::test_change_photometric_interpretation", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[-output0]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\x00-output1]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\x01-output2]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\x02-output3]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\x04-output4]", 
"pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\x08-output5]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\x10-output6]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[@-output8]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\x80-output9]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\xaa-output10]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\xf0-output11]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\x0f-output12]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\xff-output13]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\x00\\x00-output14]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\x00\\x01-output15]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\x00\\x80-output16]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\x00\\xff-output17]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\x01\\x80-output18]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\x80\\x80-output19]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\xff\\x80-output20]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[-input0]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\x00-input1]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\x01-input2]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\x02-input3]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\x04-input4]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\x08-input5]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\x10-input6]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[@-input8]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\x80-input9]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\xaa-input10]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\xf0-input11]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\x0f-input12]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\xff-input13]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\x00\\x00-input14]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\x00\\x01-input15]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\x00\\x80-input16]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\x00\\xff-input17]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\x01\\x80-input18]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\x80\\x80-input19]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\xff\\x80-input20]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_non_binary_input", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_non_array_input", 
"pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[\\x00@-input0]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[\\x00", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[\\x00\\x10-input2]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[\\x00\\x08-input3]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[\\x00\\x04-input4]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[\\x00\\x02-input5]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[\\x00\\x01-input6]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[\\x80-input7]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[@-input8]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[\\x10-input10]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[\\x08-input11]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[\\x04-input12]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[\\x02-input13]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[\\x01-input14]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[-input15]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_functional", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape0-1-length0]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape1-1-length1]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape2-1-length2]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape3-1-length3]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape4-1-length4]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape5-1-length5]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape6-1-length6]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape7-1-length7]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape8-1-length8]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape9-8-length9]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape10-8-length10]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape11-8-length11]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape12-8-length12]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape13-8-length13]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape14-8-length14]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape15-16-length15]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape16-16-length16]", 
"pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape17-16-length17]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape18-16-length18]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape19-16-length19]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape20-32-length20]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape21-32-length21]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape22-32-length22]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape23-32-length23]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape24-32-length24]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape25-1-length25]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape26-1-length26]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape27-1-length27]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape28-1-length28]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape29-1-length29]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape30-1-length30]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape31-1-length31]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape32-1-length32]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape33-1-length33]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape34-8-length34]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape35-8-length35]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape36-8-length36]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape37-8-length37]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape38-8-length38]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape39-8-length39]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape40-16-length40]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape41-16-length41]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape42-16-length42]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape43-32-length43]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape44-32-length44]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape45-32-length45]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape46-1-length46]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape47-1-length47]", 
"pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape48-1-length48]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape49-1-length49]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape50-1-length50]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape51-1-length51]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape52-1-length52]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape53-1-length53]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape54-1-length54]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape55-8-length55]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape56-8-length56]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape57-8-length57]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape58-16-length58]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape59-16-length59]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape60-16-length60]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape61-32-length61]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape62-32-length62]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape63-32-length63]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape0-1-length0]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape1-1-length1]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape2-1-length2]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape3-1-length3]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape4-1-length4]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape5-1-length5]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape6-1-length6]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape7-1-length7]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape8-1-length8]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape9-8-length9]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape10-8-length10]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape11-8-length11]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape12-8-length12]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape13-8-length13]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape14-8-length14]", 
"pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape15-16-length15]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape16-16-length16]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape17-16-length17]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape18-16-length18]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape19-16-length19]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape20-32-length20]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape21-32-length21]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape22-32-length22]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape23-32-length23]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape24-32-length24]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape25-1-length25]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape26-1-length26]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape27-1-length27]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape28-1-length28]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape29-1-length29]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape30-1-length30]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape31-1-length31]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape32-1-length32]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape33-1-length33]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape34-8-length34]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape35-8-length35]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape36-8-length36]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape37-8-length37]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape38-8-length38]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape39-8-length39]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape40-16-length40]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape41-16-length41]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape42-16-length42]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape43-32-length43]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape44-32-length44]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape45-32-length45]", 
"pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape46-1-length46]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape47-1-length47]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape48-1-length48]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape49-1-length49]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape50-1-length50]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape51-1-length51]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape52-1-length52]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape53-1-length53]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape54-1-length54]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape55-8-length55]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape56-8-length56]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape57-8-length57]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape58-16-length58]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape59-16-length59]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape60-16-length60]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape61-32-length61]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape62-32-length62]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape63-32-length63]", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEGLS_no_pillow::test_JPEG_LS_PixelArray", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEGLS_no_pillow::test_emri_JPEG_LS_PixelArray", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEG2000Tests_no_pillow::testJPEG2000", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEG2000Tests_no_pillow::testJPEG2000PixelArray", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEG2000Tests_no_pillow::test_emri_JPEG2000PixelArray", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEG2000Tests_no_pillow::test_jpeg2000_lossy", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEGlossyTests_no_pillow::testJPEGlossy", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEGlossyTests_no_pillow::testJPEGlossyPixelArray", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEGlossyTests_no_pillow::testJPEGBaselineColor3DPixelArray", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEGlosslessTests_no_pillow::testJPEGlossless", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEGlosslessTests_no_pillow::testJPEGlosslessPixelArray", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEG_LS_with_pillow::test_JPEG_LS_PixelArray", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEG_LS_with_pillow::test_emri_JPEG_LS_PixelArray", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEG2000Tests_with_pillow::test_raises_if_endianess_not_set", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEG2000Tests_with_pillow::testJPEG2000", 
"pydicom/tests/test_pillow_pixel_data.py::Test_JPEG2000Tests_with_pillow::testJPEG2000PixelArray", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEG2000Tests_with_pillow::test_emri_JPEG2000PixelArray", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEG2000Tests_with_pillow::test_jpeg2000_lossy", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEGlossyTests_with_pillow::testJPEGlossless_odd_data_size", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEGlossyTests_with_pillow::testJPEGlossy", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEGlossyTests_with_pillow::testJPEGlossyPixelArray", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEGlossyTests_with_pillow::testJPEGBaselineColor3DPixelArray", "pydicom/tests/test_pillow_pixel_data.py::test_PI_RGB[JPEG_RGB_RGB]", "pydicom/tests/test_pillow_pixel_data.py::test_PI_RGB[JPEG_RGB_422_AS_YBR_FULL]", "pydicom/tests/test_pillow_pixel_data.py::test_PI_RGB[JPEG_RGB_422_AS_YBR_FULL_422]", "pydicom/tests/test_pillow_pixel_data.py::test_PI_RGB[JPEG_RGB_444_AS_YBR_FULL]", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEGlosslessTests_with_pillow::testJPEGlossless", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEGlosslessTests_with_pillow::testJPEGlosslessPixelArray", "pydicom/tests/test_rle_pixel_data.py::test_unsupported_syntaxes", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_NoRLEHandler::test_environment", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_NoRLEHandler::test_can_access_supported_dataset", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_NoRLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/rtdose_1frame.dcm-data0]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_NoRLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/SC_rgb.dcm-data1]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_NoRLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/image_dfl.dcm-data2]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_NoRLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/SC_rgb_expb_2frame.dcm-data3]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_NoRLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/SC_rgb_jpeg_dcmtk.dcm-data4]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_NoRLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/JPEG-lossy.dcm-data5]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_NoRLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/SC_rgb_jpeg_gdcm.dcm-data6]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_NoRLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/MR_small_jpeg_ls_lossless.dcm-data7]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_NoRLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/emri_small_jpeg_2k_lossless.dcm-data8]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_NoRLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/JPEG2000.dcm-data9]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_NoRLEHandler::test_pixel_array_raises", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_environment", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_unsupported_syntax_raises", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/rtdose_1frame.dcm-data0]", 
"pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/SC_rgb.dcm-data1]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/image_dfl.dcm-data2]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/SC_rgb_expb_2frame.dcm-data3]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/SC_rgb_jpeg_dcmtk.dcm-data4]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/JPEG-lossy.dcm-data5]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/SC_rgb_jpeg_gdcm.dcm-data6]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/MR_small_jpeg_ls_lossless.dcm-data7]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/emri_small_jpeg_2k_lossless.dcm-data8]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/JPEG2000.dcm-data9]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_pixel_array_signed", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_pixel_array_1bit_raises", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_pixel_array_8bit_1sample_1f", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_pixel_array_8bit_1sample_2f", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_pixel_array_8bit_3sample_1f", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_pixel_array_8bit_3sample_2f", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_pixel_array_16bit_1sample_1f", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_pixel_array_16bit_1sample_10f", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_pixel_array_16bit_3sample_1f", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_pixel_array_16bit_3sample_2f", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_pixel_array_32bit_1sample_1f", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_pixel_array_32bit_1sample_15f", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_pixel_array_32bit_3sample_1f", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_pixel_array_32bit_3sample_2f", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_GetPixelData::test_no_pixel_data_raises", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_GetPixelData::test_unknown_pixel_representation_raises", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_GetPixelData::test_unsupported_syntaxes_raises", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_GetPixelData::test_change_photometric_interpretation", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_GetPixelData::test_little_endian_segment_order", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEParseHeader::test_invalid_header_length", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEParseHeader::test_invalid_nr_segments_raises", 
"pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEParseHeader::test_parse_header[0-offsets0]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEParseHeader::test_parse_header[1-offsets1]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEParseHeader::test_parse_header[2-offsets2]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEParseHeader::test_parse_header[8-offsets3]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEParseHeader::test_parse_header[14-offsets4]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEParseHeader::test_parse_header[15-offsets5]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_unsupported_bits_allocated_raises", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_nr_segments_raises[\\x00\\x00\\x00\\x00-1-8]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_nr_segments_raises[\\x02\\x00\\x00\\x00-1-8]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_nr_segments_raises[\\x02\\x00\\x00\\x00-3-8]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_nr_segments_raises[\\x04\\x00\\x00\\x00-3-8]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_nr_segments_raises[\\x01\\x00\\x00\\x00-1-16]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_nr_segments_raises[\\x03\\x00\\x00\\x00-1-16]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_nr_segments_raises[\\x05\\x00\\x00\\x00-3-16]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_nr_segments_raises[\\x07\\x00\\x00\\x00-3-16]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_nr_segments_raises[\\x03\\x00\\x00\\x00-1-32]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_nr_segments_raises[\\x05\\x00\\x00\\x00-1-32]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_nr_segments_raises[\\x0b\\x00\\x00\\x00-3-32]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_nr_segments_raises[\\r\\x00\\x00\\x00-3-32]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_nr_segments_raises[\\x07\\x00\\x00\\x00-1-64]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_nr_segments_raises[\\t\\x00\\x00\\x00-1-64]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_frame_data_raises", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_8bit_1sample", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_8bit_3sample", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_16bit_1sample", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_16bit_3sample", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_32bit_1sample", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_32bit_3sample", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeSegment::test_noop", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeSegment::test_literal", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeSegment::test_copy" ]
[ "pydicom/tests/test_dataset.py::DatasetTests::test_get_item" ]
[]
[]
MIT License
3,047
[ "pydicom/pixel_data_handlers/numpy_handler.py", "pydicom/config.py", "pydicom/pixel_data_handlers/rle_handler.py", "pydicom/pixel_data_handlers/jpeg_ls_handler.py", "doc/whatsnew/v1.2.0.rst", "pydicom/pixel_data_handlers/pillow_handler.py", "pydicom/pixel_data_handlers/gdcm_handler.py", "pydicom/dataset.py" ]
[ "pydicom/pixel_data_handlers/numpy_handler.py", "pydicom/config.py", "pydicom/pixel_data_handlers/rle_handler.py", "pydicom/pixel_data_handlers/jpeg_ls_handler.py", "doc/whatsnew/v1.2.0.rst", "pydicom/pixel_data_handlers/pillow_handler.py", "pydicom/pixel_data_handlers/gdcm_handler.py", "pydicom/dataset.py" ]
marshmallow-code__marshmallow-945
cdcede15926f90448c2b532c78f1d158ae22eed5
2018-09-10 14:05:58
8e217c8d6fefb7049ab3389f31a8d35824fa2d96
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index bfe5bf52..d531cad7 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -6,12 +6,17 @@ Changelog Features: -- Add ``fields.Pluck`` for serializing a single field from a nested object (:issue:`800`). Thanks :user:`timc13` for the - feedback and :user:`deckar01` for the implementation. -- *Backwards-incompatible*: Passing a string argument as ``only`` to ``fields.Nested`` is no longer supported. - Use ``fields.Pluck`` instead (:issue:`800`). +- Add ``fields.Pluck`` for serializing a single field from a nested object + (:issue:`800`). Thanks :user:`timc13` for the feedback and :user:`deckar01` + for the implementation. +- *Backwards-incompatible*: Passing a string argument as ``only`` to + ``fields.Nested`` is no longer supported. Use ``fields.Pluck`` instead + (:issue:`800`). - Raise a `StringNotCollectionError` if ``only`` or ``exclude`` is passed as a string to ``fields.Nested`` (:pr:`931`). +- *Backwards-incompatible*: ``Float`` takes an ``allow_nan`` parameter to + explicitly allow serializing and deserializing special values (``nan``, + ``inf`` and ``-inf``). ``allow_nan`` defaults to ``False``. Other changes: diff --git a/docs/upgrading.rst b/docs/upgrading.rst index 71e8eb20..7e576520 100644 --- a/docs/upgrading.rst +++ b/docs/upgrading.rst @@ -569,7 +569,7 @@ Processors that mutate the data should be updated to also return it. in_data['slug'] = in_data['slug'].lower().strip().replace(' ', '-') return in_data -The Nested field no longer supports plucking +``Nested`` field no longer supports plucking ******************************************** In marshmallow 2.x, when a string was passed to a ``Nested`` field's ```only`` parameter, the field would be plucked. In marshmallow 3.x, the ``Pluck`` field must be used instead. @@ -587,7 +587,31 @@ In marshmallow 2.x, when a string was passed to a ``Nested`` field's ```only`` p name = fields.Str() friends = fields.Pluck('self', 'name', many=True) +``Float`` field takes a new ``allow_nan`` parameter +*************************************************** +In marshmallow 2.x, ``Float`` field would serialize and deserialize special values such as ``nan``, ``inf`` or ``-inf``. In marshmallow 3, those values trigger a ``ValidationError`` unless ``allow_nan`` is ``True``. ``allow_nan`` defaults to ``False``. + + +.. code-block:: python + + # 2.x + class MySchema(Schema): + x = fields.Float() + + MySchema().load({'x': 'nan'}) + # => {{'x': nan}} + + # 3.x + class MySchema(Schema): + x = fields.Float() + y = fields.Float(allow_nan=True) + + MySchema().load({'x': 12, 'y': 'nan'}) + # => {{'x': 12.0, 'y': nan}} + + MySchema().load({'x': 'nan'}) + # marshmallow.exceptions.ValidationError: {'x': ['Special numeric values (nan or infinity) are not permitted.']} Upgrading to 2.3 ++++++++++++++++ diff --git a/marshmallow/fields.py b/marshmallow/fields.py index 13bad238..b0580fd3 100755 --- a/marshmallow/fields.py +++ b/marshmallow/fields.py @@ -9,6 +9,7 @@ import numbers import uuid import warnings import decimal +import math from marshmallow import validate, utils, class_registry from marshmallow.base import FieldABC, SchemaABC @@ -667,18 +668,15 @@ class Number(Field): def _format_num(self, value): """Return the number value for value, given this field's `num_type`.""" - if value is None: - return None # (value is True or value is False) is ~5x faster than isinstance(value, bool) if value is True or value is False: - raise TypeError( - 'value must be a Number, not a boolean. 
value is ' - '{}'.format(value), - ) + raise TypeError('value must be a Number, not a boolean.') return self.num_type(value) def _validated(self, value): """Format the value or raise a :exc:`ValidationError` if an error occurs.""" + if value is None: + return None try: return self._format_num(value) except (TypeError, ValueError): @@ -720,6 +718,33 @@ class Integer(Number): return super(Integer, self)._format_num(value) +class Float(Number): + """ + A double as IEEE-754 double precision string. + + :param bool allow_nan: If `True`, `NaN`, `Infinity` and `-Infinity` are allowed, + even though they are illegal according to the JSON specification. + :param bool as_string: If True, format the value as a string. + :param kwargs: The same keyword arguments that :class:`Number` receives. + """ + + num_type = float + default_error_messages = { + 'special': 'Special numeric values (nan or infinity) are not permitted.', + } + + def __init__(self, allow_nan=False, as_string=False, **kwargs): + self.allow_nan = allow_nan + super(Float, self).__init__(as_string=as_string, **kwargs) + + def _format_num(self, value): + num = super(Float, self)._format_num(value) + if self.allow_nan is False: + if math.isnan(num) or num == float('inf') or num == float('-inf'): + self.fail('special') + return num + + class Decimal(Number): """A field that (de)serializes to the Python ``decimal.Decimal`` type. It's safe to use when dealing with money values, percentages, ratios @@ -760,7 +785,7 @@ class Decimal(Number): num_type = decimal.Decimal default_error_messages = { - 'special': 'Special numeric values are not permitted.', + 'special': 'Special numeric values (nan or infinity) are not permitted.', } def __init__(self, places=None, rounding=None, allow_nan=False, as_string=False, **kwargs): @@ -771,9 +796,6 @@ class Decimal(Number): # override Number def _format_num(self, value): - if value is None: - return None - num = decimal.Decimal(str(value)) if self.allow_nan: @@ -894,17 +916,6 @@ class FormattedString(Field): self.fail('format') -class Float(Number): - """ - A double as IEEE-754 double precision string. - - :param bool as_string: If True, format the value as a string. - :param kwargs: The same keyword arguments that :class:`Number` receives. - """ - - num_type = float - - class DateTime(Field): """A formatted datetime string in UTC.
RFC: extend allow_nan parameter to all number fields `Decimal` field has an `allow_nan` parameter (`False` by default): > If `True`, `NaN`, `Infinity` and `-Infinity` are allowed, even though they are illegal according to the JSON specification. Any objection to extending it to all numbers? Currently, `Int` will reject those as they are not integers, but they pass `Float`, which just bit me. Should we add the same validation to `Float`? Move it up to `Number`? Even if it is useless for `Int`, I'd be tempted to extend it to `Number`. - Those are special values with a special meaning, so a different error message makes sense. - Any `Number` subclass would inherit it -> less duplication. This is a breaking change as the default would be `False`, so `Float` would need to be explicitly passed `allow_nan=True`.
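A minimal sketch of the behavior proposed above, assuming `allow_nan` is added to `fields.Float` with a default of `False`; the schema and field names are illustrative only:

```python
import math

from marshmallow import Schema, ValidationError, fields


class MeasurementSchema(Schema):
    # Illustrative schema: `x` uses the proposed default (allow_nan=False),
    # while `y` explicitly opts in to special values.
    x = fields.Float()
    y = fields.Float(allow_nan=True)


# 'nan' deserializes to float('nan') on the opted-in field.
data = MeasurementSchema().load({'x': 12, 'y': 'nan'})
assert data['x'] == 12.0 and math.isnan(data['y'])

try:
    # With allow_nan left at False, special values trigger a ValidationError.
    MeasurementSchema().load({'x': 'inf'})
except ValidationError as err:
    print(err.messages)
```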
marshmallow-code/marshmallow
diff --git a/tests/test_deserialization.py b/tests/test_deserialization.py index a0eb9c7f..f7a1f672 100644 --- a/tests/test_deserialization.py +++ b/tests/test_deserialization.py @@ -2,6 +2,7 @@ import datetime as dt import uuid import decimal +import math import pytest @@ -229,7 +230,9 @@ class TestFieldDeserialization: with pytest.raises(ValidationError) as excinfo: field.deserialize(m1) - assert str(excinfo.value.args[0]) == 'Special numeric values are not permitted.' + assert str(excinfo.value.args[0]) == ( + 'Special numeric values (nan or infinity) are not permitted.' + ) with pytest.raises(ValidationError): field.deserialize(m2) with pytest.raises(ValidationError): @@ -245,6 +248,30 @@ class TestFieldDeserialization: assert isinstance(m7d, decimal.Decimal) assert m7d.is_zero() and m7d.is_signed() + @pytest.mark.parametrize('allow_nan', (None, False, True)) + @pytest.mark.parametrize('value', ('nan', '-nan', 'inf', '-inf')) + def test_float_field_allow_nan(self, value, allow_nan): + + if allow_nan is None: + # Test default case is False + field = fields.Float() + else: + field = fields.Float(allow_nan=allow_nan) + + if allow_nan is True: + res = field.deserialize(value) + assert isinstance(res, float) + if value.endswith('nan'): + assert math.isnan(res) + else: + assert res == float(value) + else: + with pytest.raises(ValidationError) as excinfo: + field.deserialize(value) + assert str(excinfo.value.args[0]) == ( + 'Special numeric values (nan or infinity) are not permitted.' + ) + def test_string_field_deserialization(self): field = fields.String() assert field.deserialize('foo') == 'foo' diff --git a/tests/test_serialization.py b/tests/test_serialization.py index cb515b8d..5b977a36 100644 --- a/tests/test_serialization.py +++ b/tests/test_serialization.py @@ -5,6 +5,7 @@ import datetime as dt import itertools import decimal import uuid +import math import pytest @@ -312,6 +313,32 @@ class TestFieldSerialization: assert isinstance(m7s, decimal.Decimal) assert m7s.is_zero() and m7s.is_signed() + @pytest.mark.parametrize('allow_nan', (None, False, True)) + @pytest.mark.parametrize('value', ('nan', '-nan', 'inf', '-inf')) + def test_float_field_allow_nan(self, value, allow_nan, user): + + user.key = value + + if allow_nan is None: + # Test default case is False + field = fields.Float() + else: + field = fields.Float(allow_nan=allow_nan) + + if allow_nan is True: + res = field.serialize('key', user) + assert isinstance(res, float) + if value.endswith('nan'): + assert math.isnan(res) + else: + assert res == float(value) + else: + with pytest.raises(ValidationError) as excinfo: + field.serialize('key', user) + assert str(excinfo.value.args[0]) == ( + 'Special numeric values (nan or infinity) are not permitted.' + ) + def test_decimal_field_fixed_point_representation(self, user): """ Test we get fixed-point string representation for a Decimal number that would normally
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 3 }
3.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[reco]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio", "pytest" ], "pre_install": [], "python": "3.9", "reqs_path": [ "dev-requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
aspy.yaml==1.3.0 atomicwrites==1.4.1 attrs==25.3.0 cached-property==2.0.1 cachetools==5.5.2 cfgv==3.4.0 chardet==5.2.0 colorama==0.4.6 coverage==7.8.0 distlib==0.3.9 exceptiongroup==1.2.2 execnet==2.1.1 filelock==3.18.0 flake8==3.5.0 identify==2.6.9 iniconfig==2.1.0 invoke==1.1.1 -e git+https://github.com/marshmallow-code/marshmallow.git@cdcede15926f90448c2b532c78f1d158ae22eed5#egg=marshmallow mccabe==0.6.1 more-itertools==10.6.0 nodeenv==1.9.1 packaging==24.2 platformdirs==4.3.7 pluggy==1.5.0 pre-commit==1.11.0 py==1.11.0 pycodestyle==2.3.1 pyflakes==1.6.0 pyproject-api==1.9.0 pytest==8.3.5 pytest-asyncio==0.26.0 pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-xdist==3.6.1 python-dateutil==2.7.3 pytz==2018.5 PyYAML==6.0.2 simplejson==3.16.0 six==1.17.0 -e git+ssh://[email protected]/nebius/swebench_matterhorn.git@ae4d15b4472bd322342107dd10c47d793189f5b2#egg=swebench_matterhorn toml==0.10.2 tomli==2.2.1 tox==4.25.0 typing_extensions==4.13.0 virtualenv==20.29.3
name: marshmallow channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - aspy-yaml==1.3.0 - atomicwrites==1.4.1 - attrs==25.3.0 - cached-property==2.0.1 - cachetools==5.5.2 - cfgv==3.4.0 - chardet==5.2.0 - colorama==0.4.6 - coverage==7.8.0 - distlib==0.3.9 - exceptiongroup==1.2.2 - execnet==2.1.1 - filelock==3.18.0 - flake8==3.5.0 - identify==2.6.9 - iniconfig==2.1.0 - invoke==1.1.1 - mccabe==0.6.1 - more-itertools==10.6.0 - nodeenv==1.9.1 - packaging==24.2 - platformdirs==4.3.7 - pluggy==1.5.0 - pre-commit==1.11.0 - py==1.11.0 - pycodestyle==2.3.1 - pyflakes==1.6.0 - pyproject-api==1.9.0 - pytest==8.3.5 - pytest-asyncio==0.26.0 - pytest-cov==6.0.0 - pytest-mock==3.14.0 - pytest-xdist==3.6.1 - python-dateutil==2.7.3 - pytz==2018.5 - pyyaml==6.0.2 - simplejson==3.16.0 - six==1.17.0 - toml==0.10.2 - tomli==2.2.1 - tox==4.25.0 - typing-extensions==4.13.0 - virtualenv==20.29.3 prefix: /opt/conda/envs/marshmallow
[ "tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_special_values_not_permitted", "tests/test_deserialization.py::TestFieldDeserialization::test_float_field_allow_nan[nan-None]", "tests/test_deserialization.py::TestFieldDeserialization::test_float_field_allow_nan[nan-False]", "tests/test_deserialization.py::TestFieldDeserialization::test_float_field_allow_nan[-nan-None]", "tests/test_deserialization.py::TestFieldDeserialization::test_float_field_allow_nan[-nan-False]", "tests/test_deserialization.py::TestFieldDeserialization::test_float_field_allow_nan[inf-None]", "tests/test_deserialization.py::TestFieldDeserialization::test_float_field_allow_nan[inf-False]", "tests/test_deserialization.py::TestFieldDeserialization::test_float_field_allow_nan[-inf-None]", "tests/test_deserialization.py::TestFieldDeserialization::test_float_field_allow_nan[-inf-False]", "tests/test_serialization.py::TestFieldSerialization::test_float_field_allow_nan[nan-None]", "tests/test_serialization.py::TestFieldSerialization::test_float_field_allow_nan[nan-False]", "tests/test_serialization.py::TestFieldSerialization::test_float_field_allow_nan[-nan-None]", "tests/test_serialization.py::TestFieldSerialization::test_float_field_allow_nan[-nan-False]", "tests/test_serialization.py::TestFieldSerialization::test_float_field_allow_nan[inf-None]", "tests/test_serialization.py::TestFieldSerialization::test_float_field_allow_nan[inf-False]", "tests/test_serialization.py::TestFieldSerialization::test_float_field_allow_nan[-inf-None]", "tests/test_serialization.py::TestFieldSerialization::test_float_field_allow_nan[-inf-False]" ]
[]
[ "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[String]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Integer]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Boolean]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Float]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Number]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[DateTime]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[LocalDateTime]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Time]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Date]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[TimeDelta]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Dict]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Url]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Email]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[FormattedString]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[UUID]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Decimal]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[String]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Integer]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Boolean]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Float]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Number]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[DateTime]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[LocalDateTime]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Time]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Date]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[TimeDelta]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Dict]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Url]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Email]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[FormattedString]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[UUID]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Decimal]", "tests/test_deserialization.py::TestDeserializingNone::test_allow_none_is_true_if_missing_is_true", 
"tests/test_deserialization.py::TestDeserializingNone::test_list_field_deserialize_none_to_empty_list", "tests/test_deserialization.py::TestFieldDeserialization::test_float_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_float_field_deserialization[bad]", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_float_field_deserialization[]", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_float_field_deserialization[in_val2]", "tests/test_deserialization.py::TestFieldDeserialization::test_integer_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_strict_integer_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_with_places", "tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_with_places_and_rounding", "tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_deserialization_string", "tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_special_values", "tests/test_deserialization.py::TestFieldDeserialization::test_float_field_allow_nan[nan-True]", "tests/test_deserialization.py::TestFieldDeserialization::test_float_field_allow_nan[-nan-True]", "tests/test_deserialization.py::TestFieldDeserialization::test_float_field_allow_nan[inf-True]", "tests/test_deserialization.py::TestFieldDeserialization::test_float_field_allow_nan[-inf-True]", "tests/test_deserialization.py::TestFieldDeserialization::test_string_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization_with_custom_truthy_values", "tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization_with_custom_truthy_values_invalid[notvalid]", "tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization_with_custom_truthy_values_invalid[123]", "tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization_with_empty_truthy", "tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization_with_custom_falsy_values", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_datetime_deserialization[not-a-datetime]", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_datetime_deserialization[42]", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_datetime_deserialization[]", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_datetime_deserialization[in_value3]", "tests/test_deserialization.py::TestFieldDeserialization::test_datetime_passed_year_is_invalid", "tests/test_deserialization.py::TestFieldDeserialization::test_datetime_passed_date_is_invalid", "tests/test_deserialization.py::TestFieldDeserialization::test_custom_date_format_datetime_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_rfc_datetime_field_deserialization[rfc]", "tests/test_deserialization.py::TestFieldDeserialization::test_rfc_datetime_field_deserialization[rfc822]", "tests/test_deserialization.py::TestFieldDeserialization::test_iso_datetime_field_deserialization[iso]", 
"tests/test_deserialization.py::TestFieldDeserialization::test_iso_datetime_field_deserialization[iso8601]", "tests/test_deserialization.py::TestFieldDeserialization::test_localdatetime_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_time_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_time_field_deserialization[badvalue]", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_time_field_deserialization[]", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_time_field_deserialization[in_data2]", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_time_field_deserialization[42]", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_timedelta_precision", "tests/test_deserialization.py::TestFieldDeserialization::test_timedelta_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_timedelta_field_deserialization[]", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_timedelta_field_deserialization[badvalue]", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_timedelta_field_deserialization[in_value2]", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_timedelta_field_deserialization[9999999999]", "tests/test_deserialization.py::TestFieldDeserialization::test_date_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_date_field_deserialization[]", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_date_field_deserialization[123]", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_date_field_deserialization[in_value2]", "tests/test_deserialization.py::TestFieldDeserialization::test_dict_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_structured_dict_value_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_structured_dict_key_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_structured_dict_key_value_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_url_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_relative_url_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_url_field_schemes_argument", "tests/test_deserialization.py::TestFieldDeserialization::test_email_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_function_field_deserialization_is_noop_by_default", "tests/test_deserialization.py::TestFieldDeserialization::test_function_field_deserialization_with_callable", "tests/test_deserialization.py::TestFieldDeserialization::test_function_field_deserialization_with_context", "tests/test_deserialization.py::TestFieldDeserialization::test_function_field_passed_deserialize_only_is_load_only", "tests/test_deserialization.py::TestFieldDeserialization::test_function_field_passed_deserialize_and_serialize_is_not_load_only", "tests/test_deserialization.py::TestFieldDeserialization::test_uuid_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_uuid_deserialization[malformed]", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_uuid_deserialization[123]", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_uuid_deserialization[in_value2]", 
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_uuid_deserialization[tooshort]", "tests/test_deserialization.py::TestFieldDeserialization::test_deserialization_function_must_be_callable", "tests/test_deserialization.py::TestFieldDeserialization::test_method_field_deserialization_is_noop_by_default", "tests/test_deserialization.py::TestFieldDeserialization::test_deserialization_method", "tests/test_deserialization.py::TestFieldDeserialization::test_deserialization_method_must_be_a_method", "tests/test_deserialization.py::TestFieldDeserialization::test_method_field_deserialize_only", "tests/test_deserialization.py::TestFieldDeserialization::test_datetime_list_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_list_field_deserialize_invalid_item", "tests/test_deserialization.py::TestFieldDeserialization::test_list_field_deserialize_multiple_invalid_items", "tests/test_deserialization.py::TestFieldDeserialization::test_list_field_deserialize_value_that_is_not_a_list[notalist]", "tests/test_deserialization.py::TestFieldDeserialization::test_list_field_deserialize_value_that_is_not_a_list[42]", "tests/test_deserialization.py::TestFieldDeserialization::test_list_field_deserialize_value_that_is_not_a_list[value2]", "tests/test_deserialization.py::TestFieldDeserialization::test_constant_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_constant_is_always_included_in_deserialized_data", "tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_user_validator_function", "tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_user_validator_class_that_returns_bool", "tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_user_validator_that_raises_error_with_list", "tests/test_deserialization.py::TestFieldDeserialization::test_validator_must_return_false_to_raise_error", "tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_validator_with_nonascii_input", "tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_user_validators", "tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_custom_error_message", "tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_non_utf8_value", "tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_to_dict", "tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_missing_values", "tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_many", "tests/test_deserialization.py::TestSchemaDeserialization::test_exclude", "tests/test_deserialization.py::TestSchemaDeserialization::test_nested_single_deserialization_to_dict", "tests/test_deserialization.py::TestSchemaDeserialization::test_nested_list_deserialization_to_dict", "tests/test_deserialization.py::TestSchemaDeserialization::test_nested_single_none_not_allowed", "tests/test_deserialization.py::TestSchemaDeserialization::test_nested_many_non_not_allowed", "tests/test_deserialization.py::TestSchemaDeserialization::test_nested_single_required_missing", "tests/test_deserialization.py::TestSchemaDeserialization::test_nested_many_required_missing", "tests/test_deserialization.py::TestSchemaDeserialization::test_nested_only_basestring", 
"tests/test_deserialization.py::TestSchemaDeserialization::test_nested_only_basestring_with_list_data", "tests/test_deserialization.py::TestSchemaDeserialization::test_nested_none_deserialization", "tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_attribute_param", "tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_attribute_param_symmetry", "tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_attribute_param_error_returns_field_name_not_attribute_name", "tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_attribute_param_error_returns_data_key_not_attribute_name", "tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_data_key_param", "tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_dump_only_param", "tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_missing_param_value", "tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_missing_param_callable", "tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_missing_param_none", "tests/test_deserialization.py::TestSchemaDeserialization::test_deserialization_raises_with_errors", "tests/test_deserialization.py::TestSchemaDeserialization::test_deserialization_raises_with_errors_with_multiple_validators", "tests/test_deserialization.py::TestSchemaDeserialization::test_deserialization_many_raises_errors", "tests/test_deserialization.py::TestSchemaDeserialization::test_validation_errors_are_stored", "tests/test_deserialization.py::TestSchemaDeserialization::test_multiple_errors_can_be_stored_for_a_field", "tests/test_deserialization.py::TestSchemaDeserialization::test_multiple_errors_can_be_stored_for_an_email_field", "tests/test_deserialization.py::TestSchemaDeserialization::test_multiple_errors_can_be_stored_for_a_url_field", "tests/test_deserialization.py::TestSchemaDeserialization::test_required_value_only_passed_to_validators_if_provided", "tests/test_deserialization.py::TestSchemaDeserialization::test_partial_deserialization[True]", "tests/test_deserialization.py::TestSchemaDeserialization::test_partial_deserialization[False]", "tests/test_deserialization.py::TestSchemaDeserialization::test_partial_fields_deserialization", "tests/test_deserialization.py::TestSchemaDeserialization::test_partial_fields_validation", "tests/test_deserialization.py::TestSchemaDeserialization::test_unknown_fields_deserialization", "tests/test_deserialization.py::TestSchemaDeserialization::test_unknown_fields_deserialization_precedence", "tests/test_deserialization.py::TestSchemaDeserialization::test_unknown_fields_deserialization_with_data_key", "tests/test_deserialization.py::TestSchemaDeserialization::test_unknown_fields_deserialization_with_index_errors_false", "tests/test_deserialization.py::TestSchemaDeserialization::test_dump_only_fields_considered_unknown", "tests/test_deserialization.py::TestValidation::test_integer_with_validator", "tests/test_deserialization.py::TestValidation::test_integer_with_validators[field0]", "tests/test_deserialization.py::TestValidation::test_integer_with_validators[field1]", "tests/test_deserialization.py::TestValidation::test_integer_with_validators[field2]", "tests/test_deserialization.py::TestValidation::test_float_with_validators[field0]", "tests/test_deserialization.py::TestValidation::test_float_with_validators[field1]", 
"tests/test_deserialization.py::TestValidation::test_float_with_validators[field2]", "tests/test_deserialization.py::TestValidation::test_string_validator", "tests/test_deserialization.py::TestValidation::test_function_validator", "tests/test_deserialization.py::TestValidation::test_function_validators[field0]", "tests/test_deserialization.py::TestValidation::test_function_validators[field1]", "tests/test_deserialization.py::TestValidation::test_function_validators[field2]", "tests/test_deserialization.py::TestValidation::test_method_validator", "tests/test_deserialization.py::TestValidation::test_nested_data_is_stored_when_validation_fails", "tests/test_deserialization.py::TestValidation::test_false_value_validation", "tests/test_deserialization.py::test_required_field_failure[String]", "tests/test_deserialization.py::test_required_field_failure[Integer]", "tests/test_deserialization.py::test_required_field_failure[Boolean]", "tests/test_deserialization.py::test_required_field_failure[Float]", "tests/test_deserialization.py::test_required_field_failure[Number]", "tests/test_deserialization.py::test_required_field_failure[DateTime]", "tests/test_deserialization.py::test_required_field_failure[LocalDateTime]", "tests/test_deserialization.py::test_required_field_failure[Time]", "tests/test_deserialization.py::test_required_field_failure[Date]", "tests/test_deserialization.py::test_required_field_failure[TimeDelta]", "tests/test_deserialization.py::test_required_field_failure[Dict]", "tests/test_deserialization.py::test_required_field_failure[Url]", "tests/test_deserialization.py::test_required_field_failure[Email]", "tests/test_deserialization.py::test_required_field_failure[UUID]", "tests/test_deserialization.py::test_required_field_failure[Decimal]", "tests/test_deserialization.py::test_required_message_can_be_changed[My", "tests/test_deserialization.py::test_required_message_can_be_changed[message1]", "tests/test_deserialization.py::test_required_message_can_be_changed[message2]", "tests/test_deserialization.py::test_deserialize_raises_exception_if_input_type_is_incorrect[True-exclude]", "tests/test_deserialization.py::test_deserialize_raises_exception_if_input_type_is_incorrect[True-include]", "tests/test_deserialization.py::test_deserialize_raises_exception_if_input_type_is_incorrect[True-raise]", "tests/test_deserialization.py::test_deserialize_raises_exception_if_input_type_is_incorrect[False-exclude]", "tests/test_deserialization.py::test_deserialize_raises_exception_if_input_type_is_incorrect[False-include]", "tests/test_deserialization.py::test_deserialize_raises_exception_if_input_type_is_incorrect[False-raise]", "tests/test_deserialization.py::test_deserialize_raises_exception_if_input_type_is_incorrect[42-exclude]", "tests/test_deserialization.py::test_deserialize_raises_exception_if_input_type_is_incorrect[42-include]", "tests/test_deserialization.py::test_deserialize_raises_exception_if_input_type_is_incorrect[42-raise]", "tests/test_deserialization.py::test_deserialize_raises_exception_if_input_type_is_incorrect[None-exclude]", "tests/test_deserialization.py::test_deserialize_raises_exception_if_input_type_is_incorrect[None-include]", "tests/test_deserialization.py::test_deserialize_raises_exception_if_input_type_is_incorrect[None-raise]", "tests/test_deserialization.py::test_deserialize_raises_exception_if_input_type_is_incorrect[data4-exclude]", "tests/test_deserialization.py::test_deserialize_raises_exception_if_input_type_is_incorrect[data4-include]", 
"tests/test_deserialization.py::test_deserialize_raises_exception_if_input_type_is_incorrect[data4-raise]", "tests/test_serialization.py::TestFieldSerialization::test_number[42-42.0]", "tests/test_serialization.py::TestFieldSerialization::test_number[0-0.0]", "tests/test_serialization.py::TestFieldSerialization::test_number[None-None]", "tests/test_serialization.py::TestFieldSerialization::test_number_as_string", "tests/test_serialization.py::TestFieldSerialization::test_number_as_string_passed_none", "tests/test_serialization.py::TestFieldSerialization::test_function_field_passed_func", "tests/test_serialization.py::TestFieldSerialization::test_function_field_passed_serialize_only_is_dump_only", "tests/test_serialization.py::TestFieldSerialization::test_function_field_passed_deserialize_and_serialize_is_not_dump_only", "tests/test_serialization.py::TestFieldSerialization::test_function_field_passed_serialize", "tests/test_serialization.py::TestFieldSerialization::test_function_field_does_not_swallow_attribute_error", "tests/test_serialization.py::TestFieldSerialization::test_function_field_load_only", "tests/test_serialization.py::TestFieldSerialization::test_function_field_passed_serialize_with_context", "tests/test_serialization.py::TestFieldSerialization::test_function_field_passed_uncallable_object", "tests/test_serialization.py::TestFieldSerialization::test_integer_field", "tests/test_serialization.py::TestFieldSerialization::test_integer_as_string_field", "tests/test_serialization.py::TestFieldSerialization::test_integer_field_default", "tests/test_serialization.py::TestFieldSerialization::test_integer_field_default_set_to_none", "tests/test_serialization.py::TestFieldSerialization::test_uuid_field", "tests/test_serialization.py::TestFieldSerialization::test_decimal_field", "tests/test_serialization.py::TestFieldSerialization::test_decimal_field_string", "tests/test_serialization.py::TestFieldSerialization::test_decimal_field_special_values", "tests/test_serialization.py::TestFieldSerialization::test_decimal_field_special_values_not_permitted", "tests/test_serialization.py::TestFieldSerialization::test_float_field_allow_nan[nan-True]", "tests/test_serialization.py::TestFieldSerialization::test_float_field_allow_nan[-nan-True]", "tests/test_serialization.py::TestFieldSerialization::test_float_field_allow_nan[inf-True]", "tests/test_serialization.py::TestFieldSerialization::test_float_field_allow_nan[-inf-True]", "tests/test_serialization.py::TestFieldSerialization::test_decimal_field_fixed_point_representation", "tests/test_serialization.py::TestFieldSerialization::test_boolean_field_serialization", "tests/test_serialization.py::TestFieldSerialization::test_function_with_uncallable_param", "tests/test_serialization.py::TestFieldSerialization::test_email_field_serialize_none", "tests/test_serialization.py::TestFieldSerialization::test_dict_field_serialize_none", "tests/test_serialization.py::TestFieldSerialization::test_dict_field_invalid_dict_but_okay", "tests/test_serialization.py::TestFieldSerialization::test_dict_field_serialize", "tests/test_serialization.py::TestFieldSerialization::test_dict_field_serialize_ordereddict", "tests/test_serialization.py::TestFieldSerialization::test_structured_dict_value_serialize", "tests/test_serialization.py::TestFieldSerialization::test_structured_dict_key_serialize", "tests/test_serialization.py::TestFieldSerialization::test_structured_dict_key_value_serialize", 
"tests/test_serialization.py::TestFieldSerialization::test_structured_dict_validates", "tests/test_serialization.py::TestFieldSerialization::test_url_field_serialize_none", "tests/test_serialization.py::TestFieldSerialization::test_method_field_with_method_missing", "tests/test_serialization.py::TestFieldSerialization::test_method_field_passed_serialize_only_is_dump_only", "tests/test_serialization.py::TestFieldSerialization::test_method_field_passed_deserialize_only_is_load_only", "tests/test_serialization.py::TestFieldSerialization::test_method_field_with_uncallable_attribute", "tests/test_serialization.py::TestFieldSerialization::test_method_field_does_not_swallow_attribute_error", "tests/test_serialization.py::TestFieldSerialization::test_method_with_no_serialize_is_missing", "tests/test_serialization.py::TestFieldSerialization::test_serialize_with_data_key_param", "tests/test_serialization.py::TestFieldSerialization::test_serialize_with_attribute_and_data_key_uses_data_key", "tests/test_serialization.py::TestFieldSerialization::test_datetime_serializes_to_iso_by_default", "tests/test_serialization.py::TestFieldSerialization::test_datetime_invalid_serialization[invalid]", "tests/test_serialization.py::TestFieldSerialization::test_datetime_invalid_serialization[value1]", "tests/test_serialization.py::TestFieldSerialization::test_datetime_invalid_serialization[24]", "tests/test_serialization.py::TestFieldSerialization::test_datetime_field_rfc822[rfc]", "tests/test_serialization.py::TestFieldSerialization::test_datetime_field_rfc822[rfc822]", "tests/test_serialization.py::TestFieldSerialization::test_localdatetime_rfc_field", "tests/test_serialization.py::TestFieldSerialization::test_datetime_iso8601[iso]", "tests/test_serialization.py::TestFieldSerialization::test_datetime_iso8601[iso8601]", "tests/test_serialization.py::TestFieldSerialization::test_localdatetime_iso", "tests/test_serialization.py::TestFieldSerialization::test_datetime_format", "tests/test_serialization.py::TestFieldSerialization::test_string_field", "tests/test_serialization.py::TestFieldSerialization::test_formattedstring_field", "tests/test_serialization.py::TestFieldSerialization::test_formattedstring_field_on_schema", "tests/test_serialization.py::TestFieldSerialization::test_string_field_default_to_empty_string", "tests/test_serialization.py::TestFieldSerialization::test_time_field", "tests/test_serialization.py::TestFieldSerialization::test_invalid_time_field_serialization[badvalue]", "tests/test_serialization.py::TestFieldSerialization::test_invalid_time_field_serialization[]", "tests/test_serialization.py::TestFieldSerialization::test_invalid_time_field_serialization[in_data2]", "tests/test_serialization.py::TestFieldSerialization::test_invalid_time_field_serialization[42]", "tests/test_serialization.py::TestFieldSerialization::test_date_field", "tests/test_serialization.py::TestFieldSerialization::test_invalid_date_field_serialization[badvalue]", "tests/test_serialization.py::TestFieldSerialization::test_invalid_date_field_serialization[]", "tests/test_serialization.py::TestFieldSerialization::test_invalid_date_field_serialization[in_data2]", "tests/test_serialization.py::TestFieldSerialization::test_invalid_date_field_serialization[42]", "tests/test_serialization.py::TestFieldSerialization::test_timedelta_field", "tests/test_serialization.py::TestFieldSerialization::test_datetime_list_field", "tests/test_serialization.py::TestFieldSerialization::test_list_field_with_error", 
"tests/test_serialization.py::TestFieldSerialization::test_datetime_list_serialize_single_value", "tests/test_serialization.py::TestFieldSerialization::test_list_field_serialize_none_returns_none", "tests/test_serialization.py::TestFieldSerialization::test_list_field_respect_inner_attribute", "tests/test_serialization.py::TestFieldSerialization::test_list_field_respect_inner_attribute_single_value", "tests/test_serialization.py::TestFieldSerialization::test_list_field_work_with_generator_single_value", "tests/test_serialization.py::TestFieldSerialization::test_list_field_work_with_generators_multiple_values", "tests/test_serialization.py::TestFieldSerialization::test_list_field_work_with_generators_error", "tests/test_serialization.py::TestFieldSerialization::test_list_field_work_with_generators_empty_generator_returns_none_for_every_non_returning_yield_statement", "tests/test_serialization.py::TestFieldSerialization::test_list_field_work_with_set", "tests/test_serialization.py::TestFieldSerialization::test_list_field_work_with_custom_class_with_iterator_protocol", "tests/test_serialization.py::TestFieldSerialization::test_bad_list_field", "tests/test_serialization.py::TestFieldSerialization::test_serialize_does_not_apply_validators", "tests/test_serialization.py::TestFieldSerialization::test_constant_field_serialization", "tests/test_serialization.py::TestFieldSerialization::test_constant_is_always_included_in_serialized_data", "tests/test_serialization.py::TestFieldSerialization::test_constant_field_serialize_when_omitted", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[String]", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Integer]", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Boolean]", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Float]", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Number]", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[DateTime]", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[LocalDateTime]", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Time]", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Date]", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[TimeDelta]", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Dict]", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Url]", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Email]", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[FormattedString]", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[UUID]", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Decimal]", "tests/test_serialization.py::TestSchemaSerialization::test_serialize_with_missing_param_value", "tests/test_serialization.py::TestSchemaSerialization::test_serialize_with_missing_param_callable", "tests/test_serialization.py::test_serializing_named_tuple", "tests/test_serialization.py::test_serializing_named_tuple_with_meta", 
"tests/test_serialization.py::test_serializing_slice" ]
[]
MIT License
3,048
[ "CHANGELOG.rst", "docs/upgrading.rst", "marshmallow/fields.py" ]
[ "CHANGELOG.rst", "docs/upgrading.rst", "marshmallow/fields.py" ]
aio-libs__aiohttp-session-317
cecdc7282e3c2bdd0fd4931ce2a562c8812a82a6
2018-09-10 16:30:39
cecdc7282e3c2bdd0fd4931ce2a562c8812a82a6
codecov[bot]: # [Codecov](https://codecov.io/gh/aio-libs/aiohttp-session/pull/317?src=pr&el=h1) Report > Merging [#317](https://codecov.io/gh/aio-libs/aiohttp-session/pull/317?src=pr&el=desc) into [master](https://codecov.io/gh/aio-libs/aiohttp-session/commit/cecdc7282e3c2bdd0fd4931ce2a562c8812a82a6?src=pr&el=desc) will **increase** coverage by `0.12%`. > The diff coverage is `100%`. [![Impacted file tree graph](https://codecov.io/gh/aio-libs/aiohttp-session/pull/317/graphs/tree.svg?width=650&token=M1jTmwOZ5C&height=150&src=pr)](https://codecov.io/gh/aio-libs/aiohttp-session/pull/317?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #317 +/- ## ========================================= + Coverage 97.08% 97.2% +0.12% ========================================= Files 4 4 Lines 206 215 +9 Branches 25 25 ========================================= + Hits 200 209 +9 Misses 4 4 Partials 2 2 ``` | [Impacted Files](https://codecov.io/gh/aio-libs/aiohttp-session/pull/317?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [aiohttp\_session/nacl\_storage.py](https://codecov.io/gh/aio-libs/aiohttp-session/pull/317/diff?src=pr&el=tree#diff-YWlvaHR0cF9zZXNzaW9uL25hY2xfc3RvcmFnZS5weQ==) | `100% <100%> (ø)` | :arrow_up: | ------ [Continue to review full report at Codecov](https://codecov.io/gh/aio-libs/aiohttp-session/pull/317?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/aio-libs/aiohttp-session/pull/317?src=pr&el=footer). Last update [cecdc72...7240128](https://codecov.io/gh/aio-libs/aiohttp-session/pull/317?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
diff --git a/aiohttp_session/nacl_storage.py b/aiohttp_session/nacl_storage.py index eb7105d..c855ff8 100644 --- a/aiohttp_session/nacl_storage.py +++ b/aiohttp_session/nacl_storage.py @@ -1,10 +1,13 @@ +import binascii import json import nacl.secret import nacl.utils +import nacl.exceptions from nacl.encoding import Base64Encoder from . import AbstractStorage, Session +from .log import log class NaClCookieStorage(AbstractStorage): @@ -22,16 +25,26 @@ class NaClCookieStorage(AbstractStorage): self._secretbox = nacl.secret.SecretBox(secret_key) + def empty_session(self): + return Session(None, data=None, new=True, max_age=self.max_age) + async def load_session(self, request): cookie = self.load_cookie(request) if cookie is None: - return Session(None, data=None, new=True, max_age=self.max_age) + return self.empty_session() else: - data = self._decoder( - self._secretbox.decrypt(cookie.encode('utf-8'), - encoder=Base64Encoder).decode('utf-8') - ) - return Session(None, data=data, new=False, max_age=self.max_age) + try: + data = self._decoder( + self._secretbox.decrypt( + cookie.encode('utf-8'), + encoder=Base64Encoder).decode('utf-8') + ) + return Session(None, data=data, new=False, + max_age=self.max_age) + except (binascii.Error, nacl.exceptions.CryptoError): + log.warning("Cannot decrypt cookie value, " + "create a new fresh session") + return self.empty_session() async def save_session(self, request, response, session): if session.empty: diff --git a/docs/reference.rst b/docs/reference.rst index 267c882..a22970b 100644 --- a/docs/reference.rst +++ b/docs/reference.rst @@ -348,7 +348,7 @@ To use the storage you should push it into :func:`~aiohttp_session.session_middleware`:: app = aiohttp.web.Application(middlewares=[ - aiohttp_session.cookie_storage.NaClCookieStorage( + aiohttp_session.nacl_storage.NaClCookieStorage( b'Thirty two length bytes key.']) .. class:: NaClCookieStorage(secret_key, *, \
Error 500 when switching from EncryptedCookieStorage to NaClCookieStorage ```Traceback (most recent call last): File "[..]/site-packages/aiohttp/web_protocol.py", line 390, in start resp = await self._request_handler(request) File "[..]/site-packages/aiohttp/web_app.py", line 366, in _handle resp = await handler(request) File "[..]/site-packages/aiohttp/web_middlewares.py", line 106, in impl return await handler(request) File "[..]/site-packages/aiohttp_session/__init__.py", line 149, in factory response = await handler(request) File "/Users/az02096/dev/nestor/elasticsearch_authorizations/elasticsearch_authorizations/app.py", line 122, in index_handler session = await get_session(request) File "[..]/site-packages/aiohttp_session/__init__.py", line 113, in get_session session = await storage.load_session(request) File "[..]/site-packages/aiohttp_session/nacl_storage.py", line 32, in load_session encoder=Base64Encoder).decode('utf-8') File "[..]/site-packages/nacl/secret.py", line 112, in decrypt ciphertext = encoder.decode(ciphertext) File "[..]/site-packages/nacl/encoding.py", line 73, in decode return base64.b64decode(data) File "[..]/base64.py", line 87, in b64decode return binascii.a2b_base64(s) binascii.Error: Incorrect padding INFO:aiohttp.access:::1 [10/Sep/2018:14:27:23 +0000] "GET / HTTP/1.1" 500 330 "http://localhost:8000/" ``` Same issue when changing the secret. probably we need to port this patch to NaClCookieStorage: https://github.com/aio-libs/aiohttp-session/commit/0741f5a5380094ff6a970194870b2c41451747cf Not sure what is the benefit of NaClCookieStorage over EncryptedCookieStorage.
aio-libs/aiohttp-session
diff --git a/tests/test_nacl_storage.py b/tests/test_nacl_storage.py index 66bd814..836bfe6 100644 --- a/tests/test_nacl_storage.py +++ b/tests/test_nacl_storage.py @@ -179,3 +179,36 @@ async def test_load_session_dont_load_expired_session(aiohttp_client, resp = await client.get('/?exp=yes') assert resp.status == 200 + + +async def test_load_corrupted_session(aiohttp_client, key): + + async def handler(request): + session = await get_session(request) + assert isinstance(session, Session) + assert session.new + assert {} == session + return web.Response(body=b'OK') + + client = await aiohttp_client(create_app(handler, key)) + client.session.cookie_jar.update_cookies({'AIOHTTP_SESSION': 'bad key'}) + resp = await client.get('/') + assert resp.status == 200 + + +async def test_load_session_different_key(aiohttp_client, key): + + async def handler(request): + session = await get_session(request) + assert isinstance(session, Session) + assert session.new + assert {} == session + return web.Response(body=b'OK') + + client = await aiohttp_client(create_app(handler, key)) + # create another box with another key + key = nacl.utils.random(nacl.secret.SecretBox.KEY_SIZE) + secretbox = nacl.secret.SecretBox(key) + make_cookie(client, secretbox, {'a': 1, 'b': 12}) + resp = await client.get('/') + assert resp.status == 200
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 1 }, "num_modified_files": 2 }
2.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-asyncio", "pytest-bdd", "pytest-benchmark", "pytest-randomly", "responses", "mock", "hypothesis", "freezegun", "trustme", "requests-mock", "requests", "tomlkit" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements-dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
aiohttp==3.3.2 -e git+https://github.com/aio-libs/aiohttp-session.git@cecdc7282e3c2bdd0fd4931ce2a562c8812a82a6#egg=aiohttp_session aiomcache==0.6.0 aioredis==1.1.0 alabaster==0.7.16 asn1crypto==1.5.1 async-timeout==3.0.1 atomicwrites==1.4.1 attrs==25.3.0 babel==2.17.0 certifi==2025.1.31 cffi==1.17.1 chardet==3.0.4 charset-normalizer==3.4.1 coverage==7.8.0 cryptography==44.0.2 docker==3.4.1 docker-pycreds==0.4.0 docutils==0.21.2 exceptiongroup==1.2.2 flake8==3.5.0 freezegun==1.5.1 gherkin-official==29.0.0 hiredis==3.1.0 hypothesis==6.130.5 idna==3.10 imagesize==1.4.1 importlib_metadata==8.6.1 iniconfig==2.1.0 Jinja2==3.1.6 Mako==1.3.9 MarkupSafe==3.0.2 mccabe==0.6.1 mock==5.2.0 more-itertools==10.6.0 multidict==4.3.0 packaging==24.2 parse==1.20.2 parse_type==0.6.4 pep257==0.7.0 pluggy==1.5.0 py==1.11.0 py-cpuinfo==9.0.0 pycodestyle==2.3.1 pycparser==2.22 pyflakes==1.6.0 Pygments==2.19.1 PyNaCl==1.2.1 pytest==8.3.5 pytest-aiohttp==0.3.0 pytest-asyncio==0.26.0 pytest-bdd==8.1.0 pytest-benchmark==5.1.0 pytest-cov==2.5.1 pytest-mock==1.10.0 pytest-randomly==3.16.0 pytest-sugar==0.9.1 python-dateutil==2.9.0.post0 PyYAML==6.0.2 requests==2.32.3 requests-mock==1.12.1 responses==0.25.7 six==1.17.0 snowballstemmer==2.2.0 sortedcontainers==2.4.0 Sphinx==1.7.6 sphinxcontrib-serializinghtml==2.0.0 sphinxcontrib-websupport==1.2.4 termcolor==3.0.0 tomli==2.2.1 tomlkit==0.13.2 trustme==1.2.1 typing_extensions==4.13.0 urllib3==2.3.0 websocket-client==1.8.0 yarl==1.2.6 zipp==3.21.0
name: aiohttp-session channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - aiohttp==3.3.2 - aiomcache==0.6.0 - aioredis==1.1.0 - alabaster==0.7.16 - asn1crypto==1.5.1 - async-timeout==3.0.1 - atomicwrites==1.4.1 - attrs==25.3.0 - babel==2.17.0 - certifi==2025.1.31 - cffi==1.17.1 - chardet==3.0.4 - charset-normalizer==3.4.1 - coverage==7.8.0 - cryptography==44.0.2 - docker==3.4.1 - docker-pycreds==0.4.0 - docutils==0.21.2 - exceptiongroup==1.2.2 - flake8==3.5.0 - freezegun==1.5.1 - gherkin-official==29.0.0 - hiredis==3.1.0 - hypothesis==6.130.5 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - jinja2==3.1.6 - mako==1.3.9 - markupsafe==3.0.2 - mccabe==0.6.1 - mock==5.2.0 - more-itertools==10.6.0 - multidict==4.3.0 - packaging==24.2 - parse==1.20.2 - parse-type==0.6.4 - pep257==0.7.0 - pluggy==1.5.0 - py==1.11.0 - py-cpuinfo==9.0.0 - pycodestyle==2.3.1 - pycparser==2.22 - pyflakes==1.6.0 - pygments==2.19.1 - pynacl==1.2.1 - pytest==8.3.5 - pytest-aiohttp==0.3.0 - pytest-asyncio==0.26.0 - pytest-bdd==8.1.0 - pytest-benchmark==5.1.0 - pytest-cov==2.5.1 - pytest-mock==1.10.0 - pytest-randomly==3.16.0 - pytest-sugar==0.9.1 - python-dateutil==2.9.0.post0 - pyyaml==6.0.2 - requests==2.32.3 - requests-mock==1.12.1 - responses==0.25.7 - six==1.17.0 - snowballstemmer==2.2.0 - sortedcontainers==2.4.0 - sphinx==1.7.6 - sphinxcontrib-serializinghtml==2.0.0 - sphinxcontrib-websupport==1.2.4 - termcolor==3.0.0 - tomli==2.2.1 - tomlkit==0.13.2 - trustme==1.2.1 - typing-extensions==4.13.0 - urllib3==2.3.0 - websocket-client==1.8.0 - yarl==1.2.6 - zipp==3.21.0 prefix: /opt/conda/envs/aiohttp-session
[ "tests/test_nacl_storage.py::test_load_session_different_key", "tests/test_nacl_storage.py::test_load_corrupted_session" ]
[]
[ "tests/test_nacl_storage.py::test_change_session", "tests/test_nacl_storage.py::test_create_new_session", "tests/test_nacl_storage.py::test_del_cookie_on_session_invalidation", "tests/test_nacl_storage.py::test_nacl_session_fixation", "tests/test_nacl_storage.py::test_load_session_dont_load_expired_session", "tests/test_nacl_storage.py::test_invalid_key", "tests/test_nacl_storage.py::test_load_existing_session" ]
[]
Apache License 2.0
3,049
[ "aiohttp_session/nacl_storage.py", "docs/reference.rst" ]
[ "aiohttp_session/nacl_storage.py", "docs/reference.rst" ]
zopefoundation__zope.schema-67
d7d91931293294952a26aec2f7047fdda50822e0
2018-09-10 18:22:47
b6cc7d2cff904129c36e867f369c577d85c1ba2e
diff --git a/CHANGES.rst b/CHANGES.rst index dd41b9c..4ae52fb 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -5,7 +5,10 @@ 4.6.2 (unreleased) ================== -- Nothing changed yet. +- Fix checking a field's constraint to set the ``field`` and ``value`` + properties if the constraint raises a ``ValidationError``. See + `issue 66 + <https://github.com/zopefoundation/zope.schema/issues/66>`_. 4.6.1 (2018-09-10) diff --git a/src/zope/schema/_bootstrapfields.py b/src/zope/schema/_bootstrapfields.py index ab6aaed..622ef8e 100644 --- a/src/zope/schema/_bootstrapfields.py +++ b/src/zope/schema/_bootstrapfields.py @@ -331,7 +331,15 @@ class Field(Attribute): if self._type is not None and not isinstance(value, self._type): raise WrongType(value, self._type, self.__name__).with_field_and_value(self, value) - if not self.constraint(value): + try: + constraint = self.constraint(value) + except ValidationError as e: + if e.field is None: + e.field = self + if e.value is None: + e.value = value + raise + if not constraint: raise ConstraintNotSatisfied(value, self.__name__).with_field_and_value(self, value) def get(self, object):
Checking constraints should set `field` and `value` for ValidationErrors Sometimes custom `constraint` functions raise a `ValidationError` subclass instead of returning a boolean, usually because they have a better error message. But if the constraint is a function, it doesn't have access to the field instance, so the `ValidationError` probably doesn't have a value set for `field`.
zopefoundation/zope.schema
diff --git a/src/zope/schema/tests/test__bootstrapfields.py b/src/zope/schema/tests/test__bootstrapfields.py index 837db92..2e89563 100644 --- a/src/zope/schema/tests/test__bootstrapfields.py +++ b/src/zope/schema/tests/test__bootstrapfields.py @@ -527,6 +527,30 @@ class FieldTests(EqualityTestsMixin, field._type = int field.validate(1) # doesn't raise + def test_validate_constraint_raises_custom_exception(self): + from zope.schema._bootstrapinterfaces import ValidationError + + def _fail(value): + raise ValidationError + field = self._makeOne(constraint=_fail) + with self.assertRaises(ValidationError) as exc: + field.validate(1) + + self.assertIs(exc.exception.field, field) + self.assertEqual(exc.exception.value, 1) + + def test_validate_constraint_raises_custom_exception_no_overwrite(self): + from zope.schema._bootstrapinterfaces import ValidationError + + def _fail(value): + raise ValidationError(value).with_field_and_value(self, self) + field = self._makeOne(constraint=_fail) + with self.assertRaises(ValidationError) as exc: + field.validate(1) + + self.assertIs(exc.exception.field, self) + self.assertIs(exc.exception.value, self) + def test_get_miss(self): field = self._makeOne(__name__='nonesuch') inst = DummyInst()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 2 }
4.6
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[test]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "coverage", "sphinx" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.16 babel==2.17.0 certifi==2025.1.31 charset-normalizer==3.4.1 coverage==7.8.0 docutils==0.21.2 exceptiongroup==1.2.2 idna==3.10 imagesize==1.4.1 importlib_metadata==8.6.1 iniconfig==2.1.0 Jinja2==3.1.6 MarkupSafe==3.0.2 packaging==24.2 pluggy==1.5.0 Pygments==2.19.1 pytest==8.3.5 requests==2.32.3 snowballstemmer==2.2.0 Sphinx==7.4.7 sphinxcontrib-applehelp==2.0.0 sphinxcontrib-devhelp==2.0.0 sphinxcontrib-htmlhelp==2.1.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==2.0.0 sphinxcontrib-serializinghtml==2.0.0 tomli==2.2.1 urllib3==2.3.0 zipp==3.21.0 zope.event==5.0 zope.exceptions==5.2 zope.i18nmessageid==7.0 zope.interface==7.2 -e git+https://github.com/zopefoundation/zope.schema.git@d7d91931293294952a26aec2f7047fdda50822e0#egg=zope.schema zope.testing==5.1 zope.testrunner==7.2
name: zope.schema channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.16 - babel==2.17.0 - certifi==2025.1.31 - charset-normalizer==3.4.1 - coverage==7.8.0 - docutils==0.21.2 - exceptiongroup==1.2.2 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - jinja2==3.1.6 - markupsafe==3.0.2 - packaging==24.2 - pluggy==1.5.0 - pygments==2.19.1 - pytest==8.3.5 - requests==2.32.3 - snowballstemmer==2.2.0 - sphinx==7.4.7 - sphinxcontrib-applehelp==2.0.0 - sphinxcontrib-devhelp==2.0.0 - sphinxcontrib-htmlhelp==2.1.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==2.0.0 - sphinxcontrib-serializinghtml==2.0.0 - tomli==2.2.1 - urllib3==2.3.0 - zipp==3.21.0 - zope-event==5.0 - zope-exceptions==5.2 - zope-i18nmessageid==7.0 - zope-interface==7.2 - zope-testing==5.1 - zope-testrunner==7.2 prefix: /opt/conda/envs/zope.schema
[ "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_constraint_raises_custom_exception" ]
[ "src/zope/schema/tests/test__bootstrapfields.py::test_suite" ]
[ "src/zope/schema/tests/test__bootstrapfields.py::ValidatedPropertyTests::test___get__", "src/zope/schema/tests/test__bootstrapfields.py::ValidatedPropertyTests::test___set___not_missing_w_check", "src/zope/schema/tests/test__bootstrapfields.py::ValidatedPropertyTests::test___set___not_missing_wo_check", "src/zope/schema/tests/test__bootstrapfields.py::ValidatedPropertyTests::test___set___w_missing_wo_check", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test___get___w_defaultFactory_not_ICAF_no_check", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test___get___w_defaultFactory_w_ICAF_w_check", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test___get___wo_defaultFactory_hit", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test___get___wo_defaultFactory_miss", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test__get___wo_defaultFactory_in_dict", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_bind", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_description_preserved", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_order_madness", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_w_both_title_and_description", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_w_title_wo_description", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_wo_title_w_description", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_explicit_constraint_default", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_explicit_defaultFactory", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_explicit_defaultFactory_returning_missing_value", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_explicit_required_readonly_missingValue", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_getDoc", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_get_hit", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_get_miss", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_query_hit", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_query_miss_no_default", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_query_miss_w_default", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_set_hit", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_set_readonly", 
"src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_constraint_fails", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_constraint_raises_StopValidation", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_constraint_raises_custom_exception_no_overwrite", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_missing_and_required", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_missing_not_required", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_wrong_type", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test__validate_collection_but_not_iterable", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test__validate_not_collection_but_iterable", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test__validate_not_collection_not_iterable", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test__validate_w_collections", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test__validate_collection_but_not_iterable", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test__validate_not_collection_but_iterable", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test__validate_not_collection_not_iterable", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test__validate_w_collections", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_validate_not_required", 
"src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::OrderableTests::test_ctor_default_too_large", "src/zope/schema/tests/test__bootstrapfields.py::OrderableTests::test_ctor_default_too_small", "src/zope/schema/tests/test__bootstrapfields.py::OrderableTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::MinMaxLenTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::MinMaxLenTests::test_validate_too_long", "src/zope/schema/tests/test__bootstrapfields.py::MinMaxLenTests::test_validate_too_short", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_fromUnicode_hit", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_fromUnicode_miss", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_validate_w_invalid_default", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_validate_wrong_types", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_constraint", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_validate_wrong_types", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test___eq___same_type_different_attrs", 
"src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_constraint", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_set_normal", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_set_unchanged", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_validate_unchanged_already_set", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_validate_unchanged_not_already_set", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test__validate_w_int", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_fromUnicode_hit", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_fromUnicode_miss", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_set_w_int", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_is_hashable", 
"src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_ctor_real_min_max", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_class_conforms_to_iface", 
"src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_fromUnicode_hit", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_fromUnicode_miss", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_validate_max", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_validate_min", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_validate_min_and_max", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_fromUnicode_hit", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_fromUnicode_miss", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_max", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_min", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_min_and_max", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test__validate_w_empty_schema", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test__validate_w_value_not_providing_schema", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test__validate_w_value_providing_schema", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test__validate_w_value_providing_schema_but_invalid_fields", 
"src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test__validate_w_value_providing_schema_but_missing_fields", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_bound_field_of_collection_with_choice", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_class_conforms_to_IObject", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_ctor_w_bad_schema", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_getDoc", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_instance_conforms_to_IObject", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_set_allows_IBOAE_subscr_to_replace_value", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_set_emits_IBOAE", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_validate_w_cycles", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_validate_w_cycles_collection_not_valid", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_validate_w_cycles_object_not_valid", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_validates_invariants_by_default" ]
[]
Zope Public License 2.1
3,050
[ "src/zope/schema/_bootstrapfields.py", "CHANGES.rst" ]
[ "src/zope/schema/_bootstrapfields.py", "CHANGES.rst" ]
zopefoundation__zope.schema-68
b6cc7d2cff904129c36e867f369c577d85c1ba2e
2018-09-10 22:21:23
b6cc7d2cff904129c36e867f369c577d85c1ba2e
diff --git a/.gitignore b/.gitignore index b90c7ed..a063dff 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,7 @@ eggs develop-eggs build docs/_build +docs/.doctrees parts *.egg-info .tox diff --git a/CHANGES.rst b/CHANGES.rst index f7abd23..ed38933 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -2,11 +2,23 @@ Changes ========= -4.6.3 (unreleased) +4.7.0 (unreleased) ================== -- Nothing changed yet. +- Make ``WrongType`` have an ``expected_type`` field. +- Add ``NotAnInterface``, an exception derived from ``WrongType`` and + ``SchemaNotProvided`` and raised by the constructor of ``Object`` + and when validation fails for ``InterfaceField``. + +- Give ``SchemaNotProvided`` a ``schema`` field. + +- Give ``WrongContainedType`` an ``errors`` list. + +- Give ``TooShort``, ``TooLong``, ``TooBig`` and ``TooSmall`` a + ``bound`` field and the common superclasses ``LenOutOfBounds``, + ``OrderableOutOfBounds``, respectively, both of which inherit from + ``OutOfBounds``. 4.6.2 (2018-09-10) ================== diff --git a/docs/api.rst b/docs/api.rst index 7877e76..c866ff7 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -123,11 +123,19 @@ Exceptions .. autoexception:: zope.schema.interfaces.ConstraintNotSatisfied .. autoexception:: zope.schema.interfaces.NotAContainer .. autoexception:: zope.schema.interfaces.NotAnIterator +.. autoexception:: zope.schema.interfaces.NotAnInterface +Bounds +~~~~~~ + +.. autoexception:: zope.schema.interfaces.OutOfBounds +.. autoexception:: zope.schema.interfaces.OrderableOutOfBounds +.. autoexception:: zope.schema.interfaces.LenOutOfBounds .. autoexception:: zope.schema.interfaces.TooSmall .. autoexception:: zope.schema.interfaces.TooBig .. autoexception:: zope.schema.interfaces.TooLong .. autoexception:: zope.schema.interfaces.TooShort + .. autoexception:: zope.schema.interfaces.InvalidValue .. autoexception:: zope.schema.interfaces.WrongContainedType .. autoexception:: zope.schema.interfaces.NotUnique @@ -146,7 +154,9 @@ Schema APIs .. autofunction:: zope.schema.getFieldNames .. autofunction:: zope.schema.getFieldNamesInOrder .. autofunction:: zope.schema.getValidationErrors + :noindex: .. autofunction:: zope.schema.getSchemaValidationErrors + :noindex: Fields ====== diff --git a/src/zope/schema/_bootstrapfields.py b/src/zope/schema/_bootstrapfields.py index 622ef8e..12cc2ec 100644 --- a/src/zope/schema/_bootstrapfields.py +++ b/src/zope/schema/_bootstrapfields.py @@ -40,6 +40,7 @@ from zope.schema._bootstrapinterfaces import IFromUnicode from zope.schema._bootstrapinterfaces import IValidatable from zope.schema._bootstrapinterfaces import NotAContainer from zope.schema._bootstrapinterfaces import NotAnIterator +from zope.schema._bootstrapinterfaces import NotAnInterface from zope.schema._bootstrapinterfaces import RequiredMissing from zope.schema._bootstrapinterfaces import SchemaNotCorrectlyImplemented from zope.schema._bootstrapinterfaces import SchemaNotFullyImplemented @@ -918,7 +919,10 @@ class Object(Field): schema = self.schema if not IInterface.providedBy(schema): - raise WrongType + # Note that we don't provide 'self' as the 'field' + # by calling with_field_and_value(): We're not fully constructed, + # we don't want this instance to escape. 
+ raise NotAnInterface(schema, self.__name__) self.schema = schema self.validate_invariants = kw.pop('validate_invariants', True) @@ -947,10 +951,11 @@ class Object(Field): errors = list(schema_error_dict.values()) + invariant_errors exception = SchemaNotCorrectlyImplemented( errors, - self.__name__ + self.__name__, + schema_error_dict, + invariant_errors ).with_field_and_value(self, value) - exception.schema_errors = schema_error_dict - exception.invariant_errors = invariant_errors + try: raise exception finally: diff --git a/src/zope/schema/_bootstrapinterfaces.py b/src/zope/schema/_bootstrapinterfaces.py index eeb4f0e..110aa4f 100644 --- a/src/zope/schema/_bootstrapinterfaces.py +++ b/src/zope/schema/_bootstrapinterfaces.py @@ -17,9 +17,12 @@ from functools import total_ordering import zope.interface from zope.interface import Attribute +from zope.interface.interfaces import IInterface from zope.schema._messageid import _ +# pylint:disable=inherit-non-class,keyword-arg-before-vararg, +# pylint:disable=no-self-argument class StopValidation(Exception): """Raised if the validation is completed early. @@ -76,22 +79,98 @@ class RequiredMissing(ValidationError): class WrongType(ValidationError): __doc__ = _("""Object is of wrong type.""") + #: The type or tuple of types that was expected. + #: + #: .. versionadded:: 4.7.0 + expected_type = None -class TooBig(ValidationError): + def __init__(self, value=None, expected_type=None, name=None, *args): + """ + WrongType(value, expected_type, name) + + .. versionchanged:: 4.7.0 + Added named arguments to the constructor and the `expected_type` + field. + """ + ValidationError.__init__(self, value, expected_type, name, *args) + self.expected_type = expected_type + self.value = value + + +class OutOfBounds(ValidationError): + """ + A value was out of the allowed bounds. + + This is the common superclass for `OrderableOutOfBounds` and + `LenOutOfBounds`, which in turn are the superclasses for `TooBig` + and `TooSmall`, and `TooLong` and `TooShort`, respectively. + + .. versionadded:: 4.7.0 + """ + + #: The value that was exceeded + bound = None + + #: A constant for `violation_direction`. + TOO_LARGE = type('TOO_LARGE', (), {'__slots__': ()})() + + #: A constant for `violation_direction`. + TOO_SMALL = type('TOO_SMALL', (), {'__slots__': ()})() + + #: Whether the value was too large or + #: not large enough. One of the values + #: defined by the constants `TOO_LARGE` + #: or `TOO_SMALL` + violation_direction = None + + def __init__(self, value=None, bound=None, *args): + """ + OutOfBounds(value, bound) + """ + super(OutOfBounds, self).__init__(value, bound, *args) + self.value = value + self.bound = bound + + +class OrderableOutOfBounds(OutOfBounds): + """ + A value was too big or too small in comparison to another value. + + .. versionadded:: 4.7.0 + """ + + +class TooBig(OrderableOutOfBounds): __doc__ = _("""Value is too big""") + violation_direction = OutOfBounds.TOO_LARGE -class TooSmall(ValidationError): + +class TooSmall(OrderableOutOfBounds): __doc__ = _("""Value is too small""") + violation_direction = OutOfBounds.TOO_SMALL + + +class LenOutOfBounds(OutOfBounds): + """ + The length of the value was out of bounds. + + .. 
versionadded:: 4.7.0 + """ + -class TooLong(ValidationError): +class TooLong(LenOutOfBounds): __doc__ = _("""Value is too long""") + violation_direction = OutOfBounds.TOO_LARGE -class TooShort(ValidationError): + +class TooShort(LenOutOfBounds): __doc__ = _("""Value is too short""") + violation_direction = OutOfBounds.TOO_SMALL + class InvalidValue(ValidationError): __doc__ = _("""Invalid value""") @@ -109,10 +188,25 @@ class NotAnIterator(ValidationError): __doc__ = _("""Not an iterator""") - class WrongContainedType(ValidationError): __doc__ = _("""Wrong contained type""") + #: A collection of exceptions raised when validating + #: the *value*. + #: + #: .. versionadded:: 4.7.0 + errors = () + + def __init__(self, errors=None, name=None, *args): + """ + WrongContainedType(errors, name) + + .. versionchanged:: 4.7.0 + Added named arguments to the constructor, and the `errors` property. + """ + super(WrongContainedType, self).__init__(errors, name, *args) + self.errors = errors + class SchemaNotCorrectlyImplemented(WrongContainedType): __doc__ = _("""An object failed schema or invariant validation.""") @@ -125,6 +219,17 @@ class SchemaNotCorrectlyImplemented(WrongContainedType): #: of the schema. invariant_errors = () + def __init__(self, errors=None, name=None, schema_errors=None, invariant_errors=(), *args): + """ + SchemaNotCorrectlyImplemented(errors, name, schema_errors, invariant_errors) + + .. versionchanged:: 4.7.0 + Added named arguments to the constructor. + """ + super(SchemaNotCorrectlyImplemented, self).__init__(errors, name, *args) + self.schema_errors = schema_errors + self.invariant_errors = invariant_errors + class SchemaNotFullyImplemented(ValidationError): __doc__ = _("""Schema not fully implemented""") @@ -133,6 +238,41 @@ class SchemaNotFullyImplemented(ValidationError): class SchemaNotProvided(ValidationError): __doc__ = _("""Schema not provided""") + #: The interface that the *value* was supposed to provide, + #: but does not. + schema = None + + def __init__(self, schema=None, value=None, *args): + """ + SchemaNotProvided(schema, value) + + .. versionchanged:: 4.7.0 + Added named arguments to the constructor and the `schema` property. + """ + super(SchemaNotProvided, self).__init__(schema, value, *args) + self.schema = schema + self.value = value + + +class NotAnInterface(WrongType, SchemaNotProvided): + """ + Object is not an interface. + + This is a `WrongType` exception for backwards compatibility with + existing ``except`` clauses, but it is raised when + ``IInterface.providedBy`` is not true, so it's also a + `SchemaNotProvided`. The ``expected_type`` field is filled in as + ``IInterface``; this is not actually a `type`, and + ``isinstance(thing, IInterface)`` is always false. + + .. versionadded:: 4.7.0 + """ + + expected_type = IInterface + + def __init__(self, value, name): + super(NotAnInterface, self).__init__(value, IInterface, name) + class IFromUnicode(zope.interface.Interface): """Parse a unicode string to a value @@ -188,8 +328,10 @@ class IValidatable(zope.interface.Interface): with the additional constraint. 
""" + class NO_VALUE(object): def __repr__(self): # pragma: no cover return '<NO_VALUE>' + NO_VALUE = NO_VALUE() diff --git a/src/zope/schema/_field.py b/src/zope/schema/_field.py index f659877..17e270a 100644 --- a/src/zope/schema/_field.py +++ b/src/zope/schema/_field.py @@ -85,6 +85,7 @@ from zope.schema.interfaces import InvalidValue from zope.schema.interfaces import WrongType from zope.schema.interfaces import WrongContainedType from zope.schema.interfaces import NotUnique +from zope.schema.interfaces import NotAnInterface from zope.schema.interfaces import InvalidURI from zope.schema.interfaces import InvalidId from zope.schema.interfaces import InvalidDottedName @@ -552,8 +553,7 @@ class InterfaceField(Field): def _validate(self, value): super(InterfaceField, self)._validate(value) if not IInterface.providedBy(value): - raise WrongType( - "An interface is required", + raise NotAnInterface( value, self.__name__ ).with_field_and_value(self, value) diff --git a/src/zope/schema/interfaces.py b/src/zope/schema/interfaces.py index 26ddf5a..434b2ca 100644 --- a/src/zope/schema/interfaces.py +++ b/src/zope/schema/interfaces.py @@ -41,8 +41,12 @@ from zope.schema._bootstrapinterfaces import IContextAwareDefaultFactory from zope.schema._bootstrapinterfaces import IFromUnicode from zope.schema._bootstrapinterfaces import IValidatable from zope.schema._bootstrapinterfaces import InvalidValue +from zope.schema._bootstrapinterfaces import LenOutOfBounds from zope.schema._bootstrapinterfaces import NotAContainer +from zope.schema._bootstrapinterfaces import NotAnInterface from zope.schema._bootstrapinterfaces import NotAnIterator +from zope.schema._bootstrapinterfaces import OrderableOutOfBounds +from zope.schema._bootstrapinterfaces import OutOfBounds from zope.schema._bootstrapinterfaces import RequiredMissing from zope.schema._bootstrapinterfaces import SchemaNotCorrectlyImplemented from zope.schema._bootstrapinterfaces import SchemaNotFullyImplemented @@ -67,9 +71,13 @@ __all__ = [ 'InvalidId', 'InvalidURI', 'InvalidValue', + 'LenOutOfBounds', 'NotAContainer', + 'NotAnInterface', 'NotAnIterator', 'NotUnique', + 'OrderableOutOfBounds', + 'OutOfBounds', 'RequiredMissing', 'SchemaNotCorrectlyImplemented', 'SchemaNotFullyImplemented', @@ -84,6 +92,7 @@ __all__ = [ 'WrongContainedType', 'WrongType', + # Interfaces 'IASCII', 'IASCIILine',
Exceptions could use some named attributes

In our validation code using zope.schema fields, we have lots of things that look like this:

```python
try:
    field.validate(value)
except WrongContainedType as e:
    if e.args and all(isinstance(v, SchemaNotProvided) for v in e.args[0]):
        pass  # try to adapt
except WrongType as e:
    if len(e.args) == 3:
        expected_type = e.args[1]
        # try to adapt
except TooLong as e:
    if len(e.args) == 2:
        max_size = e.args[1]
        # Produce a pretty error message
```

All this argument parsing feels fragile. With named attributes, we could document what data each exception is expected to carry. We could use keyword arguments (with defaults) in the constructors to maintain compatibility with code that raises bare exceptions or uses different arguments.
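For contrast, here is a minimal sketch (not part of the original report) of what the same checks could look like once the exceptions expose the named attributes added by the patch above (`errors`, `schema`, `expected_type`, `bound`); the helper name `explain_validation_error` and the returned messages are hypothetical:

```python
from zope.schema.interfaces import (
    SchemaNotProvided,
    TooLong,
    WrongContainedType,
    WrongType,
)


def explain_validation_error(field, value):
    """Illustrative only; assumes the named attributes from the patch."""
    try:
        field.validate(value)
    except WrongContainedType as e:
        # e.errors replaces e.args[0]: the per-item validation errors.
        if e.errors and all(isinstance(v, SchemaNotProvided) for v in e.errors):
            return "adapt each contained value to %s" % (e.errors[0].schema,)
        raise
    except WrongType as e:
        # e.expected_type replaces e.args[1].
        return "expected an instance of %s" % (e.expected_type,)
    except TooLong as e:
        # e.bound replaces e.args[1]: the violated max_length.
        return "value is too long; the maximum length is %s" % (e.bound,)
    return None
```

With the attributes in place, none of the `len(e.args)` guessing is needed, and the error-handling code documents itself.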
zopefoundation/zope.schema
diff --git a/src/zope/schema/tests/test__bootstrapfields.py b/src/zope/schema/tests/test__bootstrapfields.py index 2e89563..11ed308 100644 --- a/src/zope/schema/tests/test__bootstrapfields.py +++ b/src/zope/schema/tests/test__bootstrapfields.py @@ -138,6 +138,111 @@ class OrderableMissingValueMixin(object): self.assertEqual(self.mvm_missing_value, field.missing_value) +class OrderableTestsMixin(object): + + def assertRaisesTooBig(self, field, value): + from zope.schema.interfaces import TooBig + with self.assertRaises(TooBig) as exc: + field.validate(value) + + ex = exc.exception + self.assertEqual(value, ex.value) + self.assertEqual(field.max, ex.bound) + self.assertEqual(TooBig.TOO_LARGE, ex.violation_direction) + + def assertRaisesTooSmall(self, field, value): + from zope.schema.interfaces import TooSmall + with self.assertRaises(TooSmall) as exc: + field.validate(value) + + ex = exc.exception + self.assertEqual(value, ex.value) + self.assertEqual(field.min, ex.bound) + self.assertEqual(TooSmall.TOO_SMALL, ex.violation_direction) + + MIN = 10 + MAX = 20 + VALID = (10, 11, 19, 20) + TOO_SMALL = (9, -10) + TOO_BIG = (21, 22) + + def test_validate_min(self): + field = self._makeOne(min=self.MIN) + for value in self.VALID + self.TOO_BIG: + field.validate(value) + for value in self.TOO_SMALL: + self.assertRaisesTooSmall(field, value) + + def test_validate_max(self): + field = self._makeOne(max=self.MAX) + for value in self.VALID + self.TOO_SMALL: + field.validate(value) + for value in self.TOO_BIG: + self.assertRaisesTooBig(field, value) + + def test_validate_min_and_max(self): + field = self._makeOne(min=self.MIN, max=self.MAX) + for value in self.TOO_SMALL: + self.assertRaisesTooSmall(field, value) + for value in self.VALID: + field.validate(value) + for value in self.TOO_BIG: + self.assertRaisesTooBig(field, value) + + +class LenTestsMixin(object): + + def assertRaisesTooLong(self, field, value): + from zope.schema.interfaces import TooLong + with self.assertRaises(TooLong) as exc: + field.validate(value) + + ex = exc.exception + self.assertEqual(value, ex.value) + self.assertEqual(field.max_length, ex.bound) + self.assertEqual(TooLong.TOO_LARGE, ex.violation_direction) + + def assertRaisesTooShort(self, field, value): + from zope.schema.interfaces import TooShort + with self.assertRaises(TooShort) as exc: + field.validate(value) + + ex = exc.exception + self.assertEqual(value, ex.value) + self.assertEqual(field.min_length, ex.bound) + self.assertEqual(TooShort.TOO_SMALL, ex.violation_direction) + + +class WrongTypeTestsMixin(object): + + def assertRaisesWrongType(self, field_or_meth, expected_type, *args, **kwargs): + from zope.schema.interfaces import WrongType + field = None + with self.assertRaises(WrongType) as exc: + if hasattr(field_or_meth, 'validate'): + field = field_or_meth + field.validate(*args, **kwargs) + else: + field_or_meth(*args, **kwargs) + + ex = exc.exception + self.assertIs(ex.expected_type, expected_type) + if field is not None: + self.assertIs(ex.field, field) + if len(args) == 1 and not kwargs: + # Just a value + self.assertIs(ex.value, args[0]) + if not args and len(kwargs) == 1: + # A single keyword argument + self.assertIs(ex.value, kwargs.popitem()[1]) + + + def assertAllRaiseWrongType(self, field, expected_type, *values): + for value in values: + __traceback_info__ = value + self.assertRaisesWrongType(field, expected_type, value) + + class ValidatedPropertyTests(unittest.TestCase): def _getTargetClass(self): @@ -267,6 +372,7 @@ class 
DefaultPropertyTests(unittest.TestCase): class FieldTests(EqualityTestsMixin, + WrongTypeTestsMixin, unittest.TestCase): def _getTargetClass(self): @@ -505,11 +611,10 @@ class FieldTests(EqualityTestsMixin, self.assertRaises(RequiredMissing, field.validate, missing) def test_validate_wrong_type(self): - from zope.schema._bootstrapinterfaces import WrongType field = self._makeOne(required=True, constraint=lambda x: False) field._type = str - self.assertRaises(WrongType, field.validate, 1) + self.assertRaisesWrongType(field, str, 1) def test_validate_constraint_fails(self): from zope.schema._bootstrapinterfaces import ConstraintNotSatisfied @@ -703,7 +808,8 @@ class OrderableTests(unittest.TestCase): self.assertRaises(TooBig, self._makeOne, max=10, default=11) -class MinMaxLenTests(unittest.TestCase): +class MinMaxLenTests(LenTestsMixin, + unittest.TestCase): def _getTargetClass(self): from zope.schema._bootstrapfields import MinMaxLen @@ -723,17 +829,16 @@ class MinMaxLenTests(unittest.TestCase): self.assertEqual(mml.max_length, None) def test_validate_too_short(self): - from zope.schema._bootstrapinterfaces import TooShort mml = self._makeOne(min_length=1) - self.assertRaises(TooShort, mml._validate, ()) + self.assertRaisesTooShort(mml, ()) def test_validate_too_long(self): - from zope.schema._bootstrapinterfaces import TooLong mml = self._makeOne(max_length=2) - self.assertRaises(TooLong, mml._validate, (0, 1, 2)) + self.assertRaisesTooLong(mml, (0, 1, 2)) class TextTests(EqualityTestsMixin, + WrongTypeTestsMixin, unittest.TestCase): def _getTargetClass(self): @@ -750,18 +855,19 @@ class TextTests(EqualityTestsMixin, self.assertEqual(txt._type, text_type) def test_validate_wrong_types(self): - from zope.schema.interfaces import WrongType - field = self._makeOne() - self.assertRaises(WrongType, field.validate, b'') - self.assertRaises(WrongType, field.validate, 1) - self.assertRaises(WrongType, field.validate, 1.0) - self.assertRaises(WrongType, field.validate, ()) - self.assertRaises(WrongType, field.validate, []) - self.assertRaises(WrongType, field.validate, {}) - self.assertRaises(WrongType, field.validate, set()) - self.assertRaises(WrongType, field.validate, frozenset()) - self.assertRaises(WrongType, field.validate, object()) + self.assertAllRaiseWrongType( + field, + field._type, + b'', + 1, + 1.0, + (), + [], + {}, + set(), + frozenset(), + object()) def test_validate_w_invalid_default(self): @@ -786,11 +892,9 @@ class TextTests(EqualityTestsMixin, self.assertRaises(RequiredMissing, field.validate, None) def test_fromUnicode_miss(self): - from zope.schema._bootstrapinterfaces import WrongType - deadbeef = b'DEADBEEF' txt = self._makeOne() - self.assertRaises(WrongType, txt.fromUnicode, deadbeef) + self.assertRaisesWrongType(txt.fromUnicode, txt._type, deadbeef) def test_fromUnicode_hit(self): @@ -800,6 +904,7 @@ class TextTests(EqualityTestsMixin, class TextLineTests(EqualityTestsMixin, + WrongTypeTestsMixin, unittest.TestCase): def _getTargetClass(self): @@ -811,18 +916,19 @@ class TextLineTests(EqualityTestsMixin, return ITextLine def test_validate_wrong_types(self): - from zope.schema.interfaces import WrongType - field = self._makeOne() - self.assertRaises(WrongType, field.validate, b'') - self.assertRaises(WrongType, field.validate, 1) - self.assertRaises(WrongType, field.validate, 1.0) - self.assertRaises(WrongType, field.validate, ()) - self.assertRaises(WrongType, field.validate, []) - self.assertRaises(WrongType, field.validate, {}) - self.assertRaises(WrongType, 
field.validate, set()) - self.assertRaises(WrongType, field.validate, frozenset()) - self.assertRaises(WrongType, field.validate, object()) + self.assertAllRaiseWrongType( + field, + field._type, + b'', + 1, + 1.0, + (), + [], + {}, + set(), + frozenset(), + object()) def test_validate_not_required(self): @@ -848,6 +954,7 @@ class TextLineTests(EqualityTestsMixin, class PasswordTests(EqualityTestsMixin, + WrongTypeTestsMixin, unittest.TestCase): def _getTargetClass(self): @@ -889,12 +996,10 @@ class PasswordTests(EqualityTestsMixin, self.assertRaises(RequiredMissing, field.validate, None) def test_validate_unchanged_not_already_set(self): - from zope.schema._bootstrapinterfaces import WrongType klass = self._getTargetClass() inst = DummyInst() pw = self._makeOne(__name__='password').bind(inst) - self.assertRaises(WrongType, - pw.validate, klass.UNCHANGED_PASSWORD) + self.assertRaisesWrongType(pw, pw._type, klass.UNCHANGED_PASSWORD) def test_validate_unchanged_already_set(self): klass = self._getTargetClass() @@ -957,6 +1062,7 @@ class BoolTests(EqualityTestsMixin, class NumberTests(EqualityTestsMixin, OrderableMissingValueMixin, + OrderableTestsMixin, unittest.TestCase): def _getTargetClass(self): @@ -988,7 +1094,8 @@ class ComplexTests(NumberTests): from zope.schema.interfaces import IComplex return IComplex -class RealTests(NumberTests): +class RealTests(WrongTypeTestsMixin, + NumberTests): def _getTargetClass(self): from zope.schema._bootstrapfields import Real @@ -999,22 +1106,18 @@ class RealTests(NumberTests): return IReal def test_ctor_real_min_max(self): - from zope.schema.interfaces import WrongType - from zope.schema.interfaces import TooSmall - from zope.schema.interfaces import TooBig from fractions import Fraction - with self.assertRaises(WrongType): - self._makeOne(min='') - with self.assertRaises(WrongType): - self._makeOne(max='') + self.assertRaisesWrongType(self._makeOne, self._getTargetClass()._type, min='') + self.assertRaisesWrongType(self._makeOne, self._getTargetClass()._type, max='') field = self._makeOne(min=Fraction(1, 2), max=2) field.validate(1.0) field.validate(2.0) - self.assertRaises(TooSmall, field.validate, 0) - self.assertRaises(TooSmall, field.validate, 0.4) - self.assertRaises(TooBig, field.validate, 2.1) + self.assertRaisesTooSmall(field, 0) + self.assertRaisesTooSmall(field, 0.4) + self.assertRaisesTooBig(field, 2.1) + class RationalTests(NumberTests): @@ -1052,34 +1155,7 @@ class IntegralTests(RationalTests): field.validate(-1) self.assertRaises(RequiredMissing, field.validate, None) - def test_validate_min(self): - from zope.schema.interfaces import TooSmall - field = self._makeOne(min=10) - field.validate(10) - field.validate(20) - self.assertRaises(TooSmall, field.validate, 9) - self.assertRaises(TooSmall, field.validate, -10) - - def test_validate_max(self): - from zope.schema.interfaces import TooBig - field = self._makeOne(max=10) - field.validate(5) - field.validate(9) - field.validate(10) - self.assertRaises(TooBig, field.validate, 11) - self.assertRaises(TooBig, field.validate, 20) - def test_validate_min_and_max(self): - from zope.schema.interfaces import TooBig - from zope.schema.interfaces import TooSmall - field = self._makeOne(min=0, max=10) - field.validate(0) - field.validate(5) - field.validate(10) - self.assertRaises(TooSmall, field.validate, -10) - self.assertRaises(TooSmall, field.validate, -1) - self.assertRaises(TooBig, field.validate, 11) - self.assertRaises(TooBig, field.validate, 20) def test_fromUnicode_miss(self): @@ -1113,6 
+1189,7 @@ class IntTests(IntegralTests): class ObjectTests(EqualityTestsMixin, + WrongTypeTestsMixin, unittest.TestCase): def setUp(self): @@ -1142,10 +1219,10 @@ class ObjectTests(EqualityTestsMixin, return InterfaceClass('ISchema', (Interface,), kw) def _getErrors(self, f, *args, **kw): - from zope.schema.interfaces import WrongContainedType - with self.assertRaises(WrongContainedType) as e: + from zope.schema.interfaces import SchemaNotCorrectlyImplemented + with self.assertRaises(SchemaNotCorrectlyImplemented) as e: f(*args, **kw) - return e.exception.args[0] + return e.exception.errors def _makeCycles(self): from zope.interface import Interface @@ -1205,8 +1282,8 @@ class ObjectTests(EqualityTestsMixin, verifyObject(IObject, self._makeOne()) def test_ctor_w_bad_schema(self): - from zope.schema.interfaces import WrongType - self.assertRaises(WrongType, self._makeOne, object()) + from zope.interface.interfaces import IInterface + self.assertRaisesWrongType(self._makeOne, IInterface, object()) def test_validate_not_required(self): schema = self._makeSchema() @@ -1355,19 +1432,39 @@ class ObjectTests(EqualityTestsMixin, field.validate(unit) # doesn't raise def test_validate_w_cycles_object_not_valid(self): - from zope.schema.interfaces import WrongContainedType + from zope.schema.interfaces import SchemaNotCorrectlyImplemented + from zope.schema.interfaces import SchemaNotProvided IUnit, Person, Unit = self._makeCycles() field = self._makeOne(schema=IUnit) person1 = Person(None) person2 = Person(None) - person3 = Person(object()) - unit = Unit(person3, [person1, person2]) + boss_unit = object() + boss = Person(boss_unit) + unit = Unit(boss, [person1, person2]) person1.unit = unit person2.unit = unit - self.assertRaises(WrongContainedType, field.validate, unit) + with self.assertRaises(SchemaNotCorrectlyImplemented) as exc: + field.validate(unit) + + ex = exc.exception + self.assertEqual(1, len(ex.schema_errors)) + self.assertEqual(1, len(ex.errors)) + self.assertEqual(0, len(ex.invariant_errors)) + + boss_error = ex.schema_errors['boss'] + self.assertIsInstance(boss_error, SchemaNotCorrectlyImplemented) + + self.assertEqual(1, len(boss_error.schema_errors)) + self.assertEqual(1, len(boss_error.errors)) + self.assertEqual(0, len(boss_error.invariant_errors)) + + unit_error = boss_error.schema_errors['unit'] + self.assertIsInstance(unit_error, SchemaNotProvided) + self.assertIs(IUnit, unit_error.schema) + self.assertIs(boss_unit, unit_error.value) def test_validate_w_cycles_collection_not_valid(self): - from zope.schema.interfaces import WrongContainedType + from zope.schema.interfaces import SchemaNotCorrectlyImplemented IUnit, Person, Unit = self._makeCycles() field = self._makeOne(schema=IUnit) person1 = Person(None) @@ -1376,7 +1473,7 @@ class ObjectTests(EqualityTestsMixin, unit = Unit(person1, [person2, person3]) person1.unit = unit person2.unit = unit - self.assertRaises(WrongContainedType, field.validate, unit) + self.assertRaises(SchemaNotCorrectlyImplemented, field.validate, unit) def test_set_emits_IBOAE(self): from zope.event import subscribers @@ -1517,7 +1614,10 @@ class ObjectTests(EqualityTestsMixin, self.assertIs(field.schema, IValueType) # Non implementation is bad - self.assertRaises(SchemaNotProvided, field.validate, object()) + with self.assertRaises(SchemaNotProvided) as exc: + field.validate(object()) + + self.assertIs(IValueType, exc.exception.schema) # Actual implementation works @interface.implementer(IValueType) @@ -1536,6 +1636,7 @@ class 
ObjectTests(EqualityTestsMixin, from zope.schema.fieldproperty import FieldProperty from zope.schema.interfaces import IContextSourceBinder from zope.schema.interfaces import WrongContainedType + from zope.schema.interfaces import ConstraintNotSatisfied from zope.schema.interfaces import SchemaNotCorrectlyImplemented from zope.schema.vocabulary import SimpleVocabulary @@ -1577,12 +1678,28 @@ class ObjectTests(EqualityTestsMixin, # Ranges outside the context fail bad_choices = Choices({1, 8}) - with self.assertRaises(WrongContainedType) as exc: + with self.assertRaises(SchemaNotCorrectlyImplemented) as exc: IFavorites['fav'].validate(bad_choices) e = exc.exception self.assertEqual(IFavorites['fav'], e.field) self.assertEqual(bad_choices, e.value) + self.assertEqual(1, len(e.schema_errors)) + self.assertEqual(0, len(e.invariant_errors)) + self.assertEqual(1, len(e.errors)) + + fav_error = e.schema_errors['choices'] + self.assertIs(fav_error, e.errors[0]) + self.assertIsInstance(fav_error, WrongContainedType) + self.assertNotIsInstance(fav_error, SchemaNotCorrectlyImplemented) + # The field is not actually equal to the one in the interface + # anymore because its bound. + self.assertEqual('choices', fav_error.field.__name__) + self.assertEqual(bad_choices, fav_error.field.context) + self.assertEqual({1, 8}, fav_error.value) + self.assertEqual(1, len(fav_error.errors)) + + self.assertIsInstance(fav_error.errors[0], ConstraintNotSatisfied) # Validation through field property favorites = Favorites() diff --git a/src/zope/schema/tests/test__bootstrapinterfaces.py b/src/zope/schema/tests/test__bootstrapinterfaces.py index 73e4dd1..77815c0 100644 --- a/src/zope/schema/tests/test__bootstrapinterfaces.py +++ b/src/zope/schema/tests/test__bootstrapinterfaces.py @@ -48,11 +48,26 @@ class ValidationErrorTests(unittest.TestCase): def test___eq___no_args(self): ve = self._makeOne() - self.assertEqual(ve == object(), False) + self.assertNotEqual(ve, object()) + self.assertNotEqual(object(), ve) def test___eq___w_args(self): left = self._makeOne('abc') right = self._makeOne('def') - self.assertEqual(left == right, False) - self.assertEqual(left == left, True) - self.assertEqual(right == right, True) + self.assertNotEqual(left, right) + self.assertNotEqual(right, left) + self.assertEqual(left, left) + self.assertEqual(right, right) + + +class TestOutOfBounds(unittest.TestCase): + + def _getTargetClass(self): + from zope.schema._bootstrapinterfaces import OutOfBounds + return OutOfBounds + + def test_TOO_LARGE_repr(self): + self.assertIn('TOO_LARGE', repr(self._getTargetClass().TOO_LARGE)) + + def test_TOO_SMALL_repr(self): + self.assertIn('TOO_SMALL', repr(self._getTargetClass().TOO_SMALL)) diff --git a/src/zope/schema/tests/test__field.py b/src/zope/schema/tests/test__field.py index c176326..a5b2c7b 100644 --- a/src/zope/schema/tests/test__field.py +++ b/src/zope/schema/tests/test__field.py @@ -16,9 +16,13 @@ import decimal import doctest import unittest -from zope.schema.tests.test__bootstrapfields import OrderableMissingValueMixin from zope.schema.tests.test__bootstrapfields import EqualityTestsMixin +from zope.schema.tests.test__bootstrapfields import LenTestsMixin +from zope.schema.tests.test__bootstrapfields import OrderableMissingValueMixin +from zope.schema.tests.test__bootstrapfields import OrderableTestsMixin +from zope.schema.tests.test__bootstrapfields import WrongTypeTestsMixin +from zope.schema.tests.test__bootstrapfields import NumberTests # pylint:disable=protected-access # 
pylint:disable=too-many-lines @@ -26,7 +30,9 @@ from zope.schema.tests.test__bootstrapfields import EqualityTestsMixin # pylint:disable=no-member # pylint:disable=blacklisted-name -class BytesTests(EqualityTestsMixin, unittest.TestCase): +class BytesTests(EqualityTestsMixin, + WrongTypeTestsMixin, + unittest.TestCase): def _getTargetClass(self): from zope.schema._field import Bytes @@ -37,18 +43,19 @@ class BytesTests(EqualityTestsMixin, unittest.TestCase): return IBytes def test_validate_wrong_types(self): - from zope.schema.interfaces import WrongType - field = self._makeOne() - self.assertRaises(WrongType, field.validate, u'') - self.assertRaises(WrongType, field.validate, 1) - self.assertRaises(WrongType, field.validate, 1.0) - self.assertRaises(WrongType, field.validate, ()) - self.assertRaises(WrongType, field.validate, []) - self.assertRaises(WrongType, field.validate, {}) - self.assertRaises(WrongType, field.validate, set()) - self.assertRaises(WrongType, field.validate, frozenset()) - self.assertRaises(WrongType, field.validate, object()) + self.assertAllRaiseWrongType( + field, + field._type, + u'', + 1, + 1.0, + (), + [], + {}, + set(), + frozenset(), + object()) def test_validate_w_invalid_default(self): @@ -84,6 +91,7 @@ class BytesTests(EqualityTestsMixin, unittest.TestCase): class ASCIITests(EqualityTestsMixin, + WrongTypeTestsMixin, unittest.TestCase): def _getTargetClass(self): @@ -95,18 +103,20 @@ class ASCIITests(EqualityTestsMixin, return IASCII def test_validate_wrong_types(self): - from zope.schema.interfaces import WrongType from zope.schema._compat import non_native_string field = self._makeOne() - self.assertRaises(WrongType, field.validate, non_native_string('')) - self.assertRaises(WrongType, field.validate, 1) - self.assertRaises(WrongType, field.validate, 1.0) - self.assertRaises(WrongType, field.validate, ()) - self.assertRaises(WrongType, field.validate, []) - self.assertRaises(WrongType, field.validate, {}) - self.assertRaises(WrongType, field.validate, set()) - self.assertRaises(WrongType, field.validate, frozenset()) - self.assertRaises(WrongType, field.validate, object()) + self.assertAllRaiseWrongType( + field, + field._type, + non_native_string(''), + 1, + 1.0, + (), + [], + {}, + set(), + frozenset(), + object()) def test__validate_empty(self): asc = self._makeOne() @@ -129,6 +139,7 @@ class ASCIITests(EqualityTestsMixin, class BytesLineTests(EqualityTestsMixin, + WrongTypeTestsMixin, unittest.TestCase): def _getTargetClass(self): @@ -140,18 +151,19 @@ class BytesLineTests(EqualityTestsMixin, return IBytesLine def test_validate_wrong_types(self): - from zope.schema.interfaces import WrongType - field = self._makeOne() - self.assertRaises(WrongType, field.validate, u'') - self.assertRaises(WrongType, field.validate, 1) - self.assertRaises(WrongType, field.validate, 1.0) - self.assertRaises(WrongType, field.validate, ()) - self.assertRaises(WrongType, field.validate, []) - self.assertRaises(WrongType, field.validate, {}) - self.assertRaises(WrongType, field.validate, set()) - self.assertRaises(WrongType, field.validate, frozenset()) - self.assertRaises(WrongType, field.validate, object()) + self.assertAllRaiseWrongType( + field, + field._type, + u'', + 1, + 1.0, + (), + [], + {}, + set(), + frozenset(), + object()) def test_validate_not_required(self): @@ -181,6 +193,7 @@ class BytesLineTests(EqualityTestsMixin, class ASCIILineTests(EqualityTestsMixin, + WrongTypeTestsMixin, unittest.TestCase): def _getTargetClass(self): @@ -192,18 +205,20 @@ class 
ASCIILineTests(EqualityTestsMixin, return IASCIILine def test_validate_wrong_types(self): - from zope.schema.interfaces import WrongType from zope.schema._compat import non_native_string field = self._makeOne() - self.assertRaises(WrongType, field.validate, non_native_string('')) - self.assertRaises(WrongType, field.validate, 1) - self.assertRaises(WrongType, field.validate, 1.0) - self.assertRaises(WrongType, field.validate, ()) - self.assertRaises(WrongType, field.validate, []) - self.assertRaises(WrongType, field.validate, {}) - self.assertRaises(WrongType, field.validate, set()) - self.assertRaises(WrongType, field.validate, frozenset()) - self.assertRaises(WrongType, field.validate, object()) + self.assertAllRaiseWrongType( + field, + field._type, + non_native_string(''), + 1, + 1.0, + (), + [], + {}, + set(), + frozenset(), + object()) def test_validate_not_required(self): from zope.schema.interfaces import InvalidValue @@ -232,12 +247,18 @@ class ASCIILineTests(EqualityTestsMixin, self.assertEqual(field.constraint('abc\ndef'), False) -class FloatTests(OrderableMissingValueMixin, EqualityTestsMixin, - unittest.TestCase): +class FloatTests(NumberTests): mvm_missing_value = -1.0 mvm_default = 0.0 + MIN = float(NumberTests.MIN) + MAX = float(NumberTests.MAX) + VALID = tuple(float(x) for x in NumberTests.VALID) + TOO_SMALL = tuple(float(x) for x in NumberTests.TOO_SMALL) + TOO_BIG = tuple(float(x) for x in NumberTests.TOO_BIG) + + def _getTargetClass(self): from zope.schema._field import Float return Float @@ -261,34 +282,6 @@ class FloatTests(OrderableMissingValueMixin, EqualityTestsMixin, field.validate(1000.0003) self.assertRaises(RequiredMissing, field.validate, None) - def test_validate_min(self): - from zope.schema.interfaces import TooSmall - field = self._makeOne(min=10.5) - field.validate(10.6) - field.validate(20.2) - self.assertRaises(TooSmall, field.validate, -9.0) - self.assertRaises(TooSmall, field.validate, 10.4) - - def test_validate_max(self): - from zope.schema.interfaces import TooBig - field = self._makeOne(max=10.5) - field.validate(5.3) - field.validate(-9.1) - self.assertRaises(TooBig, field.validate, 10.51) - self.assertRaises(TooBig, field.validate, 20.7) - - def test_validate_min_and_max(self): - from zope.schema.interfaces import TooBig - from zope.schema.interfaces import TooSmall - field = self._makeOne(min=-0.6, max=10.1) - field.validate(0.0) - field.validate(-0.03) - field.validate(10.0001) - self.assertRaises(TooSmall, field.validate, -10.0) - self.assertRaises(TooSmall, field.validate, -1.6) - self.assertRaises(TooBig, field.validate, 11.45) - self.assertRaises(TooBig, field.validate, 20.02) - def test_fromUnicode_miss(self): flt = self._makeOne() @@ -304,12 +297,17 @@ class FloatTests(OrderableMissingValueMixin, EqualityTestsMixin, self.assertEqual(flt.fromUnicode(u'1.23e6'), 1230000.0) -class DecimalTests(OrderableMissingValueMixin, EqualityTestsMixin, - unittest.TestCase): +class DecimalTests(NumberTests): mvm_missing_value = decimal.Decimal("-1") mvm_default = decimal.Decimal("0") + MIN = decimal.Decimal(NumberTests.MIN) + MAX = decimal.Decimal(NumberTests.MAX) + VALID = tuple(decimal.Decimal(x) for x in NumberTests.VALID) + TOO_SMALL = tuple(decimal.Decimal(x) for x in NumberTests.TOO_SMALL) + TOO_BIG = tuple(decimal.Decimal(x) for x in NumberTests.TOO_BIG) + def _getTargetClass(self): from zope.schema._field import Decimal return Decimal @@ -333,46 +331,6 @@ class DecimalTests(OrderableMissingValueMixin, EqualityTestsMixin, 
field.validate(decimal.Decimal("1000.0003")) self.assertRaises(RequiredMissing, field.validate, None) - def test_validate_min(self): - from zope.schema.interfaces import TooSmall - field = self._makeOne(min=decimal.Decimal("10.5")) - field.validate(decimal.Decimal("10.6")) - field.validate(decimal.Decimal("20.2")) - self.assertRaises(TooSmall, field.validate, decimal.Decimal("-9.0")) - self.assertRaises(TooSmall, field.validate, decimal.Decimal("10.4")) - - def test_validate_max(self): - from zope.schema.interfaces import TooBig - field = self._makeOne(max=decimal.Decimal("10.5")) - field.validate(decimal.Decimal("5.3")) - field.validate(decimal.Decimal("-9.1")) - self.assertRaises(TooBig, field.validate, decimal.Decimal("10.51")) - self.assertRaises(TooBig, field.validate, decimal.Decimal("20.7")) - - def test_validate_min_and_max(self): - from zope.schema.interfaces import TooBig - from zope.schema.interfaces import TooSmall - field = self._makeOne(min=decimal.Decimal("-0.6"), - max=decimal.Decimal("10.1")) - field.validate(decimal.Decimal("0.0")) - field.validate(decimal.Decimal("-0.03")) - field.validate(decimal.Decimal("10.0001")) - self.assertRaises(TooSmall, field.validate, decimal.Decimal("-10.0")) - with self.assertRaises(TooSmall) as exc: - field.validate(decimal.Decimal("-1.6")) - - too_small = exc.exception - self.assertIs(too_small.field, field) - self.assertEqual(too_small.value, decimal.Decimal("-1.6")) - - self.assertRaises(TooBig, field.validate, decimal.Decimal("11.45")) - with self.assertRaises(TooBig) as exc: - field.validate(decimal.Decimal("20.02")) - - too_big = exc.exception - self.assertIs(too_big.field, field) - self.assertEqual(too_big.value, decimal.Decimal("20.02")) - def test_fromUnicode_miss(self): from zope.schema.interfaces import ValidationError flt = self._makeOne() @@ -395,7 +353,10 @@ class DecimalTests(OrderableMissingValueMixin, EqualityTestsMixin, self.assertEqual(flt.fromUnicode(u'12345.6'), Decimal('12345.6')) -class DatetimeTests(OrderableMissingValueMixin, EqualityTestsMixin, +class DatetimeTests(OrderableMissingValueMixin, + OrderableTestsMixin, + EqualityTestsMixin, + WrongTypeTestsMixin, unittest.TestCase): mvm_missing_value = datetime.datetime.now() @@ -411,21 +372,20 @@ class DatetimeTests(OrderableMissingValueMixin, EqualityTestsMixin, def test_validate_wrong_types(self): from datetime import date - from zope.schema.interfaces import WrongType - - field = self._makeOne() - self.assertRaises(WrongType, field.validate, u'') - self.assertRaises(WrongType, field.validate, u'') - self.assertRaises(WrongType, field.validate, 1) - self.assertRaises(WrongType, field.validate, 1.0) - self.assertRaises(WrongType, field.validate, ()) - self.assertRaises(WrongType, field.validate, []) - self.assertRaises(WrongType, field.validate, {}) - self.assertRaises(WrongType, field.validate, set()) - self.assertRaises(WrongType, field.validate, frozenset()) - self.assertRaises(WrongType, field.validate, object()) - self.assertRaises(WrongType, field.validate, date.today()) + self.assertAllRaiseWrongType( + field, + field._type, + u'', + 1, + 1.0, + (), + [], + {}, + set(), + frozenset(), + object(), + date.today()) def test_validate_not_required(self): field = self._makeOne(required=False) @@ -437,42 +397,17 @@ class DatetimeTests(OrderableMissingValueMixin, EqualityTestsMixin, field = self._makeOne(required=True) self.assertRaises(RequiredMissing, field.validate, None) - def test_validate_w_min(self): - from zope.schema.interfaces import TooSmall - d1 = 
datetime.datetime(2000, 10, 1) - d2 = datetime.datetime(2000, 10, 2) - field = self._makeOne(min=d1) - field.validate(d1) # doesn't raise - field.validate(d2) # doesn't raise - self.assertRaises(TooSmall, field.validate, datetime.datetime(2000, 9, 30)) - - def test_validate_w_max(self): - from zope.schema.interfaces import TooBig - d1 = datetime.datetime(2000, 10, 1) - d2 = datetime.datetime(2000, 10, 2) - d3 = datetime.datetime(2000, 10, 3) - field = self._makeOne(max=d2) - field.validate(d1) # doesn't raise - field.validate(d2) # doesn't raise - self.assertRaises(TooBig, field.validate, d3) - - def test_validate_w_min_and_max(self): - from zope.schema.interfaces import TooBig - from zope.schema.interfaces import TooSmall - d1 = datetime.datetime(2000, 10, 1) - d2 = datetime.datetime(2000, 10, 2) - d3 = datetime.datetime(2000, 10, 3) - d4 = datetime.datetime(2000, 10, 4) - d5 = datetime.datetime(2000, 10, 5) - field = self._makeOne(min=d2, max=d4) - field.validate(d2) # doesn't raise - field.validate(d3) # doesn't raise - field.validate(d4) # doesn't raise - self.assertRaises(TooSmall, field.validate, d1) - self.assertRaises(TooBig, field.validate, d5) - - -class DateTests(OrderableMissingValueMixin, EqualityTestsMixin, + MIN = datetime.datetime(2000, 10, 1) + MAX = datetime.datetime(2000, 10, 4) + TOO_BIG = tuple((datetime.datetime(2000, 10, x) for x in (5, 6))) + TOO_SMALL = tuple((datetime.datetime(2000, 9, x) for x in (5, 6))) + VALID = tuple((datetime.datetime(2000, 10, x) for x in (1, 2, 3, 4))) + + +class DateTests(OrderableMissingValueMixin, + OrderableTestsMixin, + EqualityTestsMixin, + WrongTypeTestsMixin, unittest.TestCase): mvm_missing_value = datetime.date.today() @@ -487,26 +422,20 @@ class DateTests(OrderableMissingValueMixin, EqualityTestsMixin, return IDate def test_validate_wrong_types(self): - from zope.schema.interfaces import WrongType - field = self._makeOne() - self.assertRaises(WrongType, field.validate, u'') - self.assertRaises(WrongType, field.validate, u'') - self.assertRaises(WrongType, field.validate, 1) - self.assertRaises(WrongType, field.validate, 1.0) - self.assertRaises(WrongType, field.validate, ()) - self.assertRaises(WrongType, field.validate, []) - self.assertRaises(WrongType, field.validate, {}) - self.assertRaises(WrongType, field.validate, set()) - self.assertRaises(WrongType, field.validate, frozenset()) - self.assertRaises(WrongType, field.validate, object()) - now = datetime.datetime.now() - with self.assertRaises(WrongType) as exc: - field.validate(now) - - wrong_type = exc.exception - self.assertIs(wrong_type.field, field) - self.assertIs(wrong_type.value, now) + self.assertAllRaiseWrongType( + field, + field._type, + u'', + 1, + 1.0, + (), + [], + {}, + set(), + frozenset(), + object(), + datetime.datetime.now()) def test_validate_not_required(self): from datetime import date @@ -520,46 +449,16 @@ class DateTests(OrderableMissingValueMixin, EqualityTestsMixin, field.validate(datetime.datetime.now().date()) self.assertRaises(RequiredMissing, field.validate, None) - def test_validate_w_min(self): - from datetime import date - from zope.schema.interfaces import TooSmall - d1 = date(2000, 10, 1) - d2 = date(2000, 10, 2) - field = self._makeOne(min=d1) - field.validate(d1) - field.validate(d2) - field.validate(datetime.datetime.now().date()) - self.assertRaises(TooSmall, field.validate, date(2000, 9, 30)) + MIN = datetime.date(2000, 10, 1) + MAX = datetime.date(2000, 10, 4) + TOO_BIG = tuple((datetime.date(2000, 10, x) for x in (5, 6))) + 
TOO_SMALL = tuple((datetime.date(2000, 9, x) for x in (5, 6))) + VALID = tuple((datetime.date(2000, 10, x) for x in (1, 2, 3, 4))) - def test_validate_w_max(self): - from datetime import date - from zope.schema.interfaces import TooBig - d1 = date(2000, 10, 1) - d2 = date(2000, 10, 2) - d3 = date(2000, 10, 3) - field = self._makeOne(max=d2) - field.validate(d1) - field.validate(d2) - self.assertRaises(TooBig, field.validate, d3) - - def test_validate_w_min_and_max(self): - from datetime import date - from zope.schema.interfaces import TooBig - from zope.schema.interfaces import TooSmall - d1 = date(2000, 10, 1) - d2 = date(2000, 10, 2) - d3 = date(2000, 10, 3) - d4 = date(2000, 10, 4) - d5 = date(2000, 10, 5) - field = self._makeOne(min=d2, max=d4) - field.validate(d2) - field.validate(d3) - field.validate(d4) - self.assertRaises(TooSmall, field.validate, d1) - self.assertRaises(TooBig, field.validate, d5) - - -class TimedeltaTests(OrderableMissingValueMixin, EqualityTestsMixin, + +class TimedeltaTests(OrderableMissingValueMixin, + OrderableTestsMixin, + EqualityTestsMixin, unittest.TestCase): mvm_missing_value = datetime.timedelta(minutes=15) @@ -586,45 +485,16 @@ class TimedeltaTests(OrderableMissingValueMixin, EqualityTestsMixin, field.validate(timedelta(minutes=15)) self.assertRaises(RequiredMissing, field.validate, None) - def test_validate_min(self): - from datetime import timedelta - from zope.schema.interfaces import TooSmall - t1 = timedelta(hours=2) - t2 = timedelta(hours=3) - field = self._makeOne(min=t1) - field.validate(t1) - field.validate(t2) - self.assertRaises(TooSmall, field.validate, timedelta(hours=1)) - - def test_validate_max(self): - from datetime import timedelta - from zope.schema.interfaces import TooBig - t1 = timedelta(minutes=1) - t2 = timedelta(minutes=2) - t3 = timedelta(minutes=3) - field = self._makeOne(max=t2) - field.validate(t1) - field.validate(t2) - self.assertRaises(TooBig, field.validate, t3) - - def test_validate_min_and_max(self): - from datetime import timedelta - from zope.schema.interfaces import TooBig - from zope.schema.interfaces import TooSmall - t1 = timedelta(days=1) - t2 = timedelta(days=2) - t3 = timedelta(days=3) - t4 = timedelta(days=4) - t5 = timedelta(days=5) - field = self._makeOne(min=t2, max=t4) - field.validate(t2) - field.validate(t3) - field.validate(t4) - self.assertRaises(TooSmall, field.validate, t1) - self.assertRaises(TooBig, field.validate, t5) - - -class TimeTests(OrderableMissingValueMixin, EqualityTestsMixin, + MIN = datetime.timedelta(minutes=NumberTests.MIN) + MAX = datetime.timedelta(minutes=NumberTests.MAX) + VALID = tuple(datetime.timedelta(minutes=x) for x in NumberTests.VALID) + TOO_SMALL = tuple(datetime.timedelta(minutes=x) for x in NumberTests.TOO_SMALL) + TOO_BIG = tuple(datetime.timedelta(x) for x in NumberTests.TOO_BIG) + + +class TimeTests(OrderableMissingValueMixin, + OrderableTestsMixin, + EqualityTestsMixin, unittest.TestCase): mvm_missing_value = datetime.time(12, 15, 37) @@ -651,43 +521,11 @@ class TimeTests(OrderableMissingValueMixin, EqualityTestsMixin, field.validate(time(12, 15, 37)) self.assertRaises(RequiredMissing, field.validate, None) - def test_validate_min(self): - from datetime import time - from zope.schema.interfaces import TooSmall - t1 = time(12, 15, 37) - t2 = time(12, 25, 18) - t3 = time(12, 42, 43) - field = self._makeOne(min=t2) - field.validate(t2) - field.validate(t3) - self.assertRaises(TooSmall, field.validate, t1) - - def test_validate_max(self): - from datetime import time - 
from zope.schema.interfaces import TooBig - t1 = time(12, 15, 37) - t2 = time(12, 25, 18) - t3 = time(12, 42, 43) - field = self._makeOne(max=t2) - field.validate(t1) - field.validate(t2) - self.assertRaises(TooBig, field.validate, t3) - - def test_validate_min_and_max(self): - from datetime import time - from zope.schema.interfaces import TooBig - from zope.schema.interfaces import TooSmall - t1 = time(12, 15, 37) - t2 = time(12, 25, 18) - t3 = time(12, 42, 43) - t4 = time(13, 7, 12) - t5 = time(14, 22, 9) - field = self._makeOne(min=t2, max=t4) - field.validate(t2) - field.validate(t3) - field.validate(t4) - self.assertRaises(TooSmall, field.validate, t1) - self.assertRaises(TooBig, field.validate, t5) + MIN = datetime.time(12, 10, 1) + MAX = datetime.time(12, 10, 4) + TOO_BIG = tuple((datetime.time(12, 10, x) for x in (5, 6))) + TOO_SMALL = tuple((datetime.time(12, 9, x) for x in (5, 6))) + VALID = tuple((datetime.time(12, 10, x) for x in (1, 2, 3, 4))) class ChoiceTests(EqualityTestsMixin, @@ -983,6 +821,7 @@ class ChoiceTests(EqualityTestsMixin, class URITests(EqualityTestsMixin, + WrongTypeTestsMixin, unittest.TestCase): def _getTargetClass(self): @@ -997,15 +836,18 @@ class URITests(EqualityTestsMixin, from zope.schema.interfaces import WrongType from zope.schema._compat import non_native_string field = self._makeOne() - self.assertRaises(WrongType, field.validate, non_native_string('')) - self.assertRaises(WrongType, field.validate, 1) - self.assertRaises(WrongType, field.validate, 1.0) - self.assertRaises(WrongType, field.validate, ()) - self.assertRaises(WrongType, field.validate, []) - self.assertRaises(WrongType, field.validate, {}) - self.assertRaises(WrongType, field.validate, set()) - self.assertRaises(WrongType, field.validate, frozenset()) - self.assertRaises(WrongType, field.validate, object()) + self.assertAllRaiseWrongType( + field, + field._type, + non_native_string(''), + 1, + 1.0, + (), + [], + {}, + set(), + frozenset(), + object()) def test_validate_not_required(self): field = self._makeOne(required=False) @@ -1052,6 +894,7 @@ class URITests(EqualityTestsMixin, class DottedNameTests(EqualityTestsMixin, + WrongTypeTestsMixin, unittest.TestCase): def _getTargetClass(self): @@ -1082,18 +925,20 @@ class DottedNameTests(EqualityTestsMixin, self.assertEqual(dotted.max_dots, 2) def test_validate_wrong_types(self): - from zope.schema.interfaces import WrongType from zope.schema._compat import non_native_string field = self._makeOne() - self.assertRaises(WrongType, field.validate, non_native_string('')) - self.assertRaises(WrongType, field.validate, 1) - self.assertRaises(WrongType, field.validate, 1.0) - self.assertRaises(WrongType, field.validate, ()) - self.assertRaises(WrongType, field.validate, []) - self.assertRaises(WrongType, field.validate, {}) - self.assertRaises(WrongType, field.validate, set()) - self.assertRaises(WrongType, field.validate, frozenset()) - self.assertRaises(WrongType, field.validate, object()) + self.assertAllRaiseWrongType( + field, + field._type, + non_native_string(''), + 1, + 1.0, + (), + [], + {}, + set(), + frozenset(), + object()) def test_validate_not_required(self): field = self._makeOne(required=False) @@ -1162,6 +1007,7 @@ class DottedNameTests(EqualityTestsMixin, class IdTests(EqualityTestsMixin, + WrongTypeTestsMixin, unittest.TestCase): def _getTargetClass(self): @@ -1176,21 +1022,18 @@ class IdTests(EqualityTestsMixin, from zope.schema.interfaces import WrongType from zope.schema._compat import non_native_string field = 
self._makeOne() - self.assertRaises(WrongType, field.validate, non_native_string('')) - self.assertRaises(WrongType, field.validate, 1) - self.assertRaises(WrongType, field.validate, 1.0) - self.assertRaises(WrongType, field.validate, ()) - self.assertRaises(WrongType, field.validate, []) - self.assertRaises(WrongType, field.validate, {}) - self.assertRaises(WrongType, field.validate, set()) - self.assertRaises(WrongType, field.validate, frozenset()) - bad_value = object() - with self.assertRaises(WrongType) as exc: - field.validate(bad_value) - - wrong = exc.exception - self.assertIs(wrong.field, field) - self.assertEqual(wrong.value, bad_value) + self.assertAllRaiseWrongType( + field, + field._type, + non_native_string(''), + 1, + 1.0, + (), + [], + {}, + set(), + frozenset(), + object()) def test_validate_not_required(self): field = self._makeOne(required=False) @@ -1245,6 +1088,7 @@ class IdTests(EqualityTestsMixin, class InterfaceFieldTests(EqualityTestsMixin, + WrongTypeTestsMixin, unittest.TestCase): def _getTargetClass(self): @@ -1256,28 +1100,24 @@ class InterfaceFieldTests(EqualityTestsMixin, return IInterfaceField def test_validate_wrong_types(self): + from zope.interface.interfaces import IInterface from datetime import date - from zope.schema.interfaces import WrongType - field = self._makeOne() - with self.assertRaises(WrongType) as exc: - field.validate(u'') - - wrong = exc.exception - self.assertIs(wrong.field, field) - self.assertEqual(wrong.value, u'') - - self.assertRaises(WrongType, field.validate, b'') - self.assertRaises(WrongType, field.validate, 1) - self.assertRaises(WrongType, field.validate, 1.0) - self.assertRaises(WrongType, field.validate, ()) - self.assertRaises(WrongType, field.validate, []) - self.assertRaises(WrongType, field.validate, {}) - self.assertRaises(WrongType, field.validate, set()) - self.assertRaises(WrongType, field.validate, frozenset()) - self.assertRaises(WrongType, field.validate, object()) - self.assertRaises(WrongType, field.validate, date.today()) + self.assertAllRaiseWrongType( + field, + IInterface, + u'', + b'', + 1, + 1.0, + (), + [], + {}, + set(), + frozenset(), + object(), + date.today()) def test_validate_not_required(self): from zope.interface import Interface @@ -1302,6 +1142,7 @@ class InterfaceFieldTests(EqualityTestsMixin, class CollectionTests(EqualityTestsMixin, + LenTestsMixin, unittest.TestCase): _DEFAULT_UNIQUE = False @@ -1320,6 +1161,7 @@ class CollectionTests(EqualityTestsMixin, def test_schema_defined_by_subclass(self): from zope import interface from zope.schema import Object + from zope.schema.interfaces import SchemaNotProvided from zope.schema.interfaces import WrongContainedType class IValueType(interface.Interface): @@ -1337,7 +1179,14 @@ class CollectionTests(EqualityTestsMixin, field.validate(self._makeCollection([])) # Collection with a non-implemented object is bad - self.assertRaises(WrongContainedType, field.validate, self._makeCollection([object()])) + with self.assertRaises(WrongContainedType) as exc: + field.validate(self._makeCollection([object()])) + + ex = exc.exception + self.assertIs(ex.__class__, WrongContainedType) + self.assertEqual(1, len(ex.errors)) + self.assertIsInstance(ex.errors[0], SchemaNotProvided) + self.assertIs(ex.errors[0].schema, IValueType) # Actual implementation works @interface.implementer(IValueType) @@ -1385,6 +1234,7 @@ class CollectionTests(EqualityTestsMixin, def test__validate_wrong_contained_type(self): from zope.schema.interfaces import WrongContainedType + from 
zope.schema.interfaces import WrongType from zope.schema._bootstrapfields import Text text = Text() absc = self._makeOne(text) @@ -1394,6 +1244,10 @@ class CollectionTests(EqualityTestsMixin, wct = exc.exception self.assertIs(wct.field, absc) self.assertEqual(wct.value, self._makeCollection([1])) + self.assertIs(wct.__class__, WrongContainedType) + self.assertEqual(1, len(wct.errors)) + self.assertIsInstance(wct.errors[0], WrongType) + self.assertIs(wct.errors[0].expected_type, text._type) def test__validate_miss_uniqueness(self): from zope.schema.interfaces import NotUnique @@ -1411,30 +1265,26 @@ class CollectionTests(EqualityTestsMixin, [u'a', u'a']) def test_validate_min_length(self): - from zope.schema.interfaces import TooShort field = self._makeOne(min_length=2) field.validate(self._makeCollection((1, 2))) field.validate(self._makeCollection((1, 2, 3))) - self.assertRaises(TooShort, field.validate, self._makeCollection()) - self.assertRaises(TooShort, field.validate, self._makeCollection((1,))) + self.assertRaisesTooShort(field, self._makeCollection()) + self.assertRaisesTooShort(field, self._makeCollection((1,))) def test_validate_max_length(self): - from zope.schema.interfaces import TooLong field = self._makeOne(max_length=2) field.validate(self._makeCollection()) field.validate(self._makeCollection((1,))) field.validate(self._makeCollection((1, 2))) - self.assertRaises(TooLong, field.validate, self._makeCollection((1, 2, 3, 4))) - self.assertRaises(TooLong, field.validate, self._makeCollection((1, 2, 3))) + self.assertRaisesTooLong(field, self._makeCollection((1, 2, 3, 4))) + self.assertRaisesTooLong(field, self._makeCollection((1, 2, 3))) def test_validate_min_length_and_max_length(self): - from zope.schema.interfaces import TooLong - from zope.schema.interfaces import TooShort field = self._makeOne(min_length=1, max_length=2) field.validate(self._makeCollection((1,))) field.validate(self._makeCollection((1, 2))) - self.assertRaises(TooShort, field.validate, self._makeCollection()) - self.assertRaises(TooLong, field.validate, self._makeCollection((1, 2, 3))) + self.assertRaisesTooShort(field, self._makeCollection()) + self.assertRaisesTooLong(field, self._makeCollection((1, 2, 3))) def test_validate_not_required(self): field = self._makeOne(required=False) @@ -1455,7 +1305,8 @@ class CollectionTests(EqualityTestsMixin, self.assertRaises(RequiredMissing, field.validate, None) -class SequenceTests(CollectionTests): +class SequenceTests(WrongTypeTestsMixin, + CollectionTests): def _getTargetClass(self): from zope.schema._field import Sequence @@ -1466,15 +1317,16 @@ class SequenceTests(CollectionTests): return ISequence def test_validate_wrong_types(self): - from zope.schema.interfaces import WrongType - field = self._makeOne() - self.assertRaises(WrongType, field.validate, 1) - self.assertRaises(WrongType, field.validate, 1.0) - self.assertRaises(WrongType, field.validate, {}) - self.assertRaises(WrongType, field.validate, set()) - self.assertRaises(WrongType, field.validate, frozenset()) - self.assertRaises(WrongType, field.validate, object()) + self.assertAllRaiseWrongType( + field, + field._type, + 1, + 1.0, + {}, + set(), + frozenset(), + object()) def test_sequence(self): from zope.schema._field import abc @@ -1534,11 +1386,13 @@ class TupleTests(SequenceTests): super(TupleTests, self).test_sequence() def test_validate_wrong_types(self): - from zope.schema.interfaces import WrongType field = self._makeOne() - self.assertRaises(WrongType, field.validate, u'') - 
self.assertRaises(WrongType, field.validate, b'') - self.assertRaises(WrongType, field.validate, []) + self.assertAllRaiseWrongType( + field, + field._type, + u'', + b'', + []) super(TupleTests, self).test_validate_wrong_types() @@ -1553,11 +1407,13 @@ class MutableSequenceTests(SequenceTests): return IMutableSequence def test_validate_wrong_types(self): - from zope.schema.interfaces import WrongType field = self._makeOne() - self.assertRaises(WrongType, field.validate, u'') - self.assertRaises(WrongType, field.validate, b'') - self.assertRaises(WrongType, field.validate, ()) + self.assertAllRaiseWrongType( + field, + field._type, + u'', + b'', + ()) super(MutableSequenceTests, self).test_validate_wrong_types() def test_sequence(self): @@ -1582,7 +1438,8 @@ class ListTests(MutableSequenceTests): super(ListTests, self).test_mutable_sequence() -class SetTests(CollectionTests): +class SetTests(WrongTypeTestsMixin, + CollectionTests): _DEFAULT_UNIQUE = True _makeCollection = set @@ -1605,15 +1462,18 @@ class SetTests(CollectionTests): from zope.schema.interfaces import WrongType field = self._makeOne() - self.assertRaises(WrongType, field.validate, u'') - self.assertRaises(WrongType, field.validate, b'') - self.assertRaises(WrongType, field.validate, 1) - self.assertRaises(WrongType, field.validate, 1.0) - self.assertRaises(WrongType, field.validate, ()) - self.assertRaises(WrongType, field.validate, []) - self.assertRaises(WrongType, field.validate, {}) - self.assertRaises(WrongType, field.validate, self._makeWrongSet()) - self.assertRaises(WrongType, field.validate, object()) + self.assertAllRaiseWrongType( + field, + field._type, + u'', + b'', + 1, + 1.0, + (), + [], + {}, + self._makeWrongSet(), + object()) class FrozenSetTests(SetTests): @@ -1631,6 +1491,8 @@ class FrozenSetTests(SetTests): class MappingTests(EqualityTestsMixin, + WrongTypeTestsMixin, + LenTestsMixin, unittest.TestCase): def _getTargetClass(self): @@ -1651,15 +1513,18 @@ class MappingTests(EqualityTestsMixin, from zope.schema.interfaces import WrongType field = self._makeOne() - self.assertRaises(WrongType, field.validate, u'') - self.assertRaises(WrongType, field.validate, u'') - self.assertRaises(WrongType, field.validate, 1) - self.assertRaises(WrongType, field.validate, 1.0) - self.assertRaises(WrongType, field.validate, ()) - self.assertRaises(WrongType, field.validate, []) - self.assertRaises(WrongType, field.validate, set()) - self.assertRaises(WrongType, field.validate, frozenset()) - self.assertRaises(WrongType, field.validate, object()) + self.assertAllRaiseWrongType( + field, + field._type, + u'', + b'', + 1, + 1.0, + (), + [], + set(), + frozenset(), + object()) def test_validate_not_required(self): field = self._makeOne(required=False) @@ -1678,6 +1543,7 @@ class MappingTests(EqualityTestsMixin, def test_validate_invalid_key_type(self): from zope.schema.interfaces import WrongContainedType + from zope.schema.interfaces import WrongType from zope.schema._bootstrapfields import Int field = self._makeOne(key_type=Int()) field.validate({}) @@ -1689,9 +1555,14 @@ class MappingTests(EqualityTestsMixin, wct = exc.exception self.assertIs(wct.field, field) self.assertEqual(wct.value, {'a': 1}) + self.assertIs(wct.__class__, WrongContainedType) + self.assertEqual(1, len(wct.errors)) + self.assertIsInstance(wct.errors[0], WrongType) + self.assertIs(field.key_type._type, wct.errors[0].expected_type) def test_validate_invalid_value_type(self): from zope.schema.interfaces import WrongContainedType + from 
zope.schema.interfaces import WrongType from zope.schema._bootstrapfields import Int field = self._makeOne(value_type=Int()) field.validate({}) @@ -1703,30 +1574,31 @@ class MappingTests(EqualityTestsMixin, wct = exc.exception self.assertIs(wct.field, field) self.assertEqual(wct.value, {1: 'a'}) + self.assertIs(wct.__class__, WrongContainedType) + self.assertEqual(1, len(wct.errors)) + self.assertIsInstance(wct.errors[0], WrongType) + self.assertIs(field.value_type._type, wct.errors[0].expected_type) + def test_validate_min_length(self): - from zope.schema.interfaces import TooShort field = self._makeOne(min_length=1) field.validate({1: 'a'}) field.validate({1: 'a', 2: 'b'}) - self.assertRaises(TooShort, field.validate, {}) + self.assertRaisesTooShort(field, {}) def test_validate_max_length(self): - from zope.schema.interfaces import TooLong field = self._makeOne(max_length=1) field.validate({}) field.validate({1: 'a'}) - self.assertRaises(TooLong, field.validate, {1: 'a', 2: 'b'}) - self.assertRaises(TooLong, field.validate, {1: 'a', 2: 'b', 3: 'c'}) + self.assertRaisesTooLong(field, {1: 'a', 2: 'b'}) + self.assertRaisesTooLong(field, {1: 'a', 2: 'b', 3: 'c'}) def test_validate_min_length_and_max_length(self): - from zope.schema.interfaces import TooLong - from zope.schema.interfaces import TooShort field = self._makeOne(min_length=1, max_length=2) field.validate({1: 'a'}) field.validate({1: 'a', 2: 'b'}) - self.assertRaises(TooShort, field.validate, {}) - self.assertRaises(TooLong, field.validate, {1: 'a', 2: 'b', 3: 'c'}) + self.assertRaisesTooShort(field, {}) + self.assertRaisesTooLong(field, {1: 'a', 2: 'b', 3: 'c'}) def test_bind_binds_key_and_value_types(self): from zope.schema import Int
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 7 }
4.6
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[test]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "coverage", "sphinx" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.16 babel==2.17.0 certifi==2025.1.31 charset-normalizer==3.4.1 coverage==7.8.0 docutils==0.21.2 exceptiongroup==1.2.2 idna==3.10 imagesize==1.4.1 importlib_metadata==8.6.1 iniconfig==2.1.0 Jinja2==3.1.6 MarkupSafe==3.0.2 packaging==24.2 pluggy==1.5.0 Pygments==2.19.1 pytest==8.3.5 requests==2.32.3 snowballstemmer==2.2.0 Sphinx==7.4.7 sphinxcontrib-applehelp==2.0.0 sphinxcontrib-devhelp==2.0.0 sphinxcontrib-htmlhelp==2.1.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==2.0.0 sphinxcontrib-serializinghtml==2.0.0 tomli==2.2.1 urllib3==2.3.0 zipp==3.21.0 zope.event==5.0 zope.exceptions==5.2 zope.i18nmessageid==7.0 zope.interface==7.2 -e git+https://github.com/zopefoundation/zope.schema.git@b6cc7d2cff904129c36e867f369c577d85c1ba2e#egg=zope.schema zope.testing==5.1 zope.testrunner==7.2
name: zope.schema channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.16 - babel==2.17.0 - certifi==2025.1.31 - charset-normalizer==3.4.1 - coverage==7.8.0 - docutils==0.21.2 - exceptiongroup==1.2.2 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - jinja2==3.1.6 - markupsafe==3.0.2 - packaging==24.2 - pluggy==1.5.0 - pygments==2.19.1 - pytest==8.3.5 - requests==2.32.3 - snowballstemmer==2.2.0 - sphinx==7.4.7 - sphinxcontrib-applehelp==2.0.0 - sphinxcontrib-devhelp==2.0.0 - sphinxcontrib-htmlhelp==2.1.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==2.0.0 - sphinxcontrib-serializinghtml==2.0.0 - tomli==2.2.1 - urllib3==2.3.0 - zipp==3.21.0 - zope-event==5.0 - zope-exceptions==5.2 - zope-i18nmessageid==7.0 - zope-interface==7.2 - zope-testing==5.1 - zope-testrunner==7.2 prefix: /opt/conda/envs/zope.schema
[ "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_wrong_type", "src/zope/schema/tests/test__bootstrapfields.py::MinMaxLenTests::test_validate_too_long", "src/zope/schema/tests/test__bootstrapfields.py::MinMaxLenTests::test_validate_too_short", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_fromUnicode_miss", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_validate_wrong_types", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_validate_wrong_types", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_validate_unchanged_not_already_set", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_validate_max", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_validate_min", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_validate_min_and_max", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_validate_max", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_validate_min", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_validate_min_and_max", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_ctor_real_min_max", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_validate_max", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_validate_min", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_validate_min_and_max", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_validate_max", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_validate_min", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_validate_min_and_max", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_validate_max", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_validate_min", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_validate_min_and_max", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_max", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_min", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_min_and_max", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test__validate_w_value_providing_schema_but_invalid_fields", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test__validate_w_value_providing_schema_but_missing_fields", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_bound_field_of_collection_with_choice", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_ctor_w_bad_schema", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_validate_w_cycles_object_not_valid", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_validates_invariants_by_default", "src/zope/schema/tests/test__bootstrapinterfaces.py::TestOutOfBounds::test_TOO_LARGE_repr", "src/zope/schema/tests/test__bootstrapinterfaces.py::TestOutOfBounds::test_TOO_SMALL_repr", "src/zope/schema/tests/test__field.py::NumberTests::test_validate_max", "src/zope/schema/tests/test__field.py::NumberTests::test_validate_min", "src/zope/schema/tests/test__field.py::NumberTests::test_validate_min_and_max", "src/zope/schema/tests/test__field.py::BytesTests::test_validate_wrong_types", 
"src/zope/schema/tests/test__field.py::ASCIITests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::BytesLineTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::FloatTests::test_validate_max", "src/zope/schema/tests/test__field.py::FloatTests::test_validate_min", "src/zope/schema/tests/test__field.py::FloatTests::test_validate_min_and_max", "src/zope/schema/tests/test__field.py::DecimalTests::test_validate_max", "src/zope/schema/tests/test__field.py::DecimalTests::test_validate_min", "src/zope/schema/tests/test__field.py::DecimalTests::test_validate_min_and_max", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_max", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_min", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_min_and_max", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::DateTests::test_validate_max", "src/zope/schema/tests/test__field.py::DateTests::test_validate_min", "src/zope/schema/tests/test__field.py::DateTests::test_validate_min_and_max", "src/zope/schema/tests/test__field.py::DateTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_validate_max", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_validate_min", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_validate_min_and_max", "src/zope/schema/tests/test__field.py::TimeTests::test_validate_max", "src/zope/schema/tests/test__field.py::TimeTests::test_validate_min", "src/zope/schema/tests/test__field.py::TimeTests::test_validate_min_and_max", "src/zope/schema/tests/test__field.py::URITests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::IdTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::CollectionTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::CollectionTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::CollectionTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::CollectionTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::CollectionTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::SequenceTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::SequenceTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::SequenceTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::SequenceTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::SequenceTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::SequenceTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::TupleTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::TupleTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::TupleTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::TupleTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::TupleTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::TupleTests::test_validate_wrong_types", 
"src/zope/schema/tests/test__field.py::MutableSequenceTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::ListTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::ListTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::ListTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::ListTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::ListTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::ListTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::SetTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::SetTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::SetTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::SetTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::SetTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::SetTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::FrozenSetTests::test__validate_wrong_contained_type", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_schema_defined_by_subclass", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_invalid_key_type", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_invalid_value_type", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_invalid_key_type", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_invalid_value_type", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_min_length_and_max_length", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_wrong_types", "src/zope/schema/tests/test__field.py::DictTests::test_validate_invalid_key_type", "src/zope/schema/tests/test__field.py::DictTests::test_validate_invalid_value_type", "src/zope/schema/tests/test__field.py::DictTests::test_validate_max_length", "src/zope/schema/tests/test__field.py::DictTests::test_validate_min_length", "src/zope/schema/tests/test__field.py::DictTests::test_validate_min_length_and_max_length", 
"src/zope/schema/tests/test__field.py::DictTests::test_validate_wrong_types" ]
[ "src/zope/schema/tests/test__bootstrapfields.py::test_suite" ]
[ "src/zope/schema/tests/test__bootstrapfields.py::ValidatedPropertyTests::test___get__", "src/zope/schema/tests/test__bootstrapfields.py::ValidatedPropertyTests::test___set___not_missing_w_check", "src/zope/schema/tests/test__bootstrapfields.py::ValidatedPropertyTests::test___set___not_missing_wo_check", "src/zope/schema/tests/test__bootstrapfields.py::ValidatedPropertyTests::test___set___w_missing_wo_check", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test___get___w_defaultFactory_not_ICAF_no_check", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test___get___w_defaultFactory_w_ICAF_w_check", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test___get___wo_defaultFactory_hit", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test___get___wo_defaultFactory_miss", "src/zope/schema/tests/test__bootstrapfields.py::DefaultPropertyTests::test__get___wo_defaultFactory_in_dict", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_bind", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_description_preserved", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_order_madness", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_w_both_title_and_description", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_w_title_wo_description", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_ctor_wo_title_w_description", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_explicit_constraint_default", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_explicit_defaultFactory", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_explicit_defaultFactory_returning_missing_value", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_explicit_required_readonly_missingValue", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_getDoc", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_get_hit", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_get_miss", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_query_hit", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_query_miss_no_default", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_query_miss_w_default", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_set_hit", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_set_readonly", 
"src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_constraint_fails", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_constraint_raises_StopValidation", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_constraint_raises_custom_exception", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_constraint_raises_custom_exception_no_overwrite", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_missing_and_required", "src/zope/schema/tests/test__bootstrapfields.py::FieldTests::test_validate_missing_not_required", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test__validate_collection_but_not_iterable", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test__validate_not_collection_but_iterable", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test__validate_not_collection_not_iterable", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test__validate_w_collections", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::ContainerTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test__validate_collection_but_not_iterable", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test__validate_not_collection_but_iterable", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test__validate_not_collection_not_iterable", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test__validate_w_collections", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_validate_not_required", 
"src/zope/schema/tests/test__bootstrapfields.py::IterableTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::OrderableTests::test_ctor_default_too_large", "src/zope/schema/tests/test__bootstrapfields.py::OrderableTests::test_ctor_default_too_small", "src/zope/schema/tests/test__bootstrapfields.py::OrderableTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::MinMaxLenTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_fromUnicode_hit", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::TextTests::test_validate_w_invalid_default", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_constraint", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::TextLineTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_constraint", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_equal_instances_have_same_hash", 
"src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_set_normal", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_set_unchanged", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::PasswordTests::test_validate_unchanged_already_set", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test__validate_w_int", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_fromUnicode_hit", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_fromUnicode_miss", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::BoolTests::test_set_w_int", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::NumberTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_class_conforms_to_iface", 
"src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::ComplexTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::RealTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::RationalTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_fromUnicode_hit", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_fromUnicode_miss", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_instances_in_different_interfaces_not_equal", 
"src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::IntegralTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_ctor_defaults", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_fromUnicode_hit", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_fromUnicode_miss", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::IntTests::test_validate_required", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test___eq___different_type", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test__validate_w_empty_schema", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test__validate_w_value_not_providing_schema", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test__validate_w_value_providing_schema", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_class_conforms_to_IObject", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_getDoc", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_instance_conforms_to_IObject", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_is_hashable", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_set_allows_IBOAE_subscr_to_replace_value", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_set_emits_IBOAE", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_validate_not_required", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_validate_required", 
"src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_validate_w_cycles", "src/zope/schema/tests/test__bootstrapfields.py::ObjectTests::test_validate_w_cycles_collection_not_valid", "src/zope/schema/tests/test__bootstrapinterfaces.py::ValidationErrorTests::test___cmp___hit", "src/zope/schema/tests/test__bootstrapinterfaces.py::ValidationErrorTests::test___cmp___no_args", "src/zope/schema/tests/test__bootstrapinterfaces.py::ValidationErrorTests::test___eq___no_args", "src/zope/schema/tests/test__bootstrapinterfaces.py::ValidationErrorTests::test___eq___w_args", "src/zope/schema/tests/test__bootstrapinterfaces.py::ValidationErrorTests::test_doc", "src/zope/schema/tests/test__field.py::NumberTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::NumberTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::NumberTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::NumberTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::NumberTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::NumberTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::NumberTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::NumberTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::NumberTests::test_is_hashable", "src/zope/schema/tests/test__field.py::NumberTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::BytesTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::BytesTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::BytesTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::BytesTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::BytesTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::BytesTests::test_fromUnicode_hit", "src/zope/schema/tests/test__field.py::BytesTests::test_fromUnicode_miss", "src/zope/schema/tests/test__field.py::BytesTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::BytesTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::BytesTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::BytesTests::test_is_hashable", "src/zope/schema/tests/test__field.py::BytesTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::BytesTests::test_validate_required", "src/zope/schema/tests/test__field.py::BytesTests::test_validate_w_invalid_default", "src/zope/schema/tests/test__field.py::ASCIITests::test___eq___different_type", "src/zope/schema/tests/test__field.py::ASCIITests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::ASCIITests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::ASCIITests::test__validate_empty", "src/zope/schema/tests/test__field.py::ASCIITests::test__validate_non_empty_hit", "src/zope/schema/tests/test__field.py::ASCIITests::test__validate_non_empty_miss", "src/zope/schema/tests/test__field.py::ASCIITests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::ASCIITests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::ASCIITests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::ASCIITests::test_instance_conforms_to_iface", 
"src/zope/schema/tests/test__field.py::ASCIITests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::ASCIITests::test_is_hashable", "src/zope/schema/tests/test__field.py::BytesLineTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::BytesLineTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::BytesLineTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::BytesLineTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::BytesLineTests::test_constraint", "src/zope/schema/tests/test__field.py::BytesLineTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::BytesLineTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::BytesLineTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::BytesLineTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::BytesLineTests::test_is_hashable", "src/zope/schema/tests/test__field.py::BytesLineTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::BytesLineTests::test_validate_required", "src/zope/schema/tests/test__field.py::ASCIILineTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::ASCIILineTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::ASCIILineTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_constraint", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_is_hashable", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::ASCIILineTests::test_validate_required", "src/zope/schema/tests/test__field.py::FloatTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::FloatTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::FloatTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::FloatTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::FloatTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::FloatTests::test_fromUnicode_hit", "src/zope/schema/tests/test__field.py::FloatTests::test_fromUnicode_miss", "src/zope/schema/tests/test__field.py::FloatTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::FloatTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::FloatTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::FloatTests::test_is_hashable", "src/zope/schema/tests/test__field.py::FloatTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::FloatTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::FloatTests::test_validate_required", "src/zope/schema/tests/test__field.py::DecimalTests::test___eq___different_type", 
"src/zope/schema/tests/test__field.py::DecimalTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::DecimalTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::DecimalTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::DecimalTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::DecimalTests::test_fromUnicode_hit", "src/zope/schema/tests/test__field.py::DecimalTests::test_fromUnicode_miss", "src/zope/schema/tests/test__field.py::DecimalTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::DecimalTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::DecimalTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::DecimalTests::test_is_hashable", "src/zope/schema/tests/test__field.py::DecimalTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::DecimalTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::DecimalTests::test_validate_required", "src/zope/schema/tests/test__field.py::DatetimeTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::DatetimeTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::DatetimeTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::DatetimeTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::DatetimeTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::DatetimeTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::DatetimeTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::DatetimeTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::DatetimeTests::test_is_hashable", "src/zope/schema/tests/test__field.py::DatetimeTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::DatetimeTests::test_validate_required", "src/zope/schema/tests/test__field.py::DateTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::DateTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::DateTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::DateTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::DateTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::DateTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::DateTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::DateTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::DateTests::test_is_hashable", "src/zope/schema/tests/test__field.py::DateTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::DateTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::DateTests::test_validate_required", "src/zope/schema/tests/test__field.py::TimedeltaTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::TimedeltaTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::TimedeltaTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_class_conforms_to_iface", 
"src/zope/schema/tests/test__field.py::TimedeltaTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_is_hashable", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::TimedeltaTests::test_validate_required", "src/zope/schema/tests/test__field.py::TimeTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::TimeTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::TimeTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::TimeTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::TimeTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::TimeTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::TimeTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::TimeTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::TimeTests::test_is_hashable", "src/zope/schema/tests/test__field.py::TimeTests::test_missing_value_no_min_or_max", "src/zope/schema/tests/test__field.py::TimeTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::TimeTests::test_validate_required", "src/zope/schema/tests/test__field.py::ChoiceTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::ChoiceTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::ChoiceTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_int", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_mixed", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_source_is_ICSB_bound", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_source_is_ICSB_unbound", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_string", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_tuple", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_w_named_vocabulary", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_w_named_vocabulary_invalid", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_w_named_vocabulary_passes_context", "src/zope/schema/tests/test__field.py::ChoiceTests::test__validate_w_named_vocabulary_raises_LookupError", "src/zope/schema/tests/test__field.py::ChoiceTests::test_bind_w_preconstructed_vocabulary", "src/zope/schema/tests/test__field.py::ChoiceTests::test_bind_w_voc_is_ICSB", "src/zope/schema/tests/test__field.py::ChoiceTests::test_bind_w_voc_is_ICSB_but_not_ISource", "src/zope/schema/tests/test__field.py::ChoiceTests::test_bind_w_voc_not_ICSB", "src/zope/schema/tests/test__field.py::ChoiceTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_both_vocabulary_and_source", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_both_vocabulary_and_values", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_invalid_source", 
"src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_invalid_vocabulary", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_w_named_vocabulary", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_w_preconstructed_vocabulary", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_w_unicode_non_ascii_values", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_w_values", "src/zope/schema/tests/test__field.py::ChoiceTests::test_ctor_wo_values_vocabulary_or_source", "src/zope/schema/tests/test__field.py::ChoiceTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::ChoiceTests::test_fromUnicode_hit", "src/zope/schema/tests/test__field.py::ChoiceTests::test_fromUnicode_miss", "src/zope/schema/tests/test__field.py::ChoiceTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::ChoiceTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::ChoiceTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::ChoiceTests::test_is_hashable", "src/zope/schema/tests/test__field.py::URITests::test___eq___different_type", "src/zope/schema/tests/test__field.py::URITests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::URITests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::URITests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::URITests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::URITests::test_fromUnicode_invalid", "src/zope/schema/tests/test__field.py::URITests::test_fromUnicode_ok", "src/zope/schema/tests/test__field.py::URITests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::URITests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::URITests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::URITests::test_is_hashable", "src/zope/schema/tests/test__field.py::URITests::test_validate_not_a_uri", "src/zope/schema/tests/test__field.py::URITests::test_validate_not_required", "src/zope/schema/tests/test__field.py::URITests::test_validate_required", "src/zope/schema/tests/test__field.py::DottedNameTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::DottedNameTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::DottedNameTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::DottedNameTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::DottedNameTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::DottedNameTests::test_ctor_max_dots_invalid", "src/zope/schema/tests/test__field.py::DottedNameTests::test_ctor_max_dots_valid", "src/zope/schema/tests/test__field.py::DottedNameTests::test_ctor_min_dots_invalid", "src/zope/schema/tests/test__field.py::DottedNameTests::test_ctor_min_dots_valid", "src/zope/schema/tests/test__field.py::DottedNameTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::DottedNameTests::test_fromUnicode_dotted_name_ok", "src/zope/schema/tests/test__field.py::DottedNameTests::test_fromUnicode_invalid", "src/zope/schema/tests/test__field.py::DottedNameTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::DottedNameTests::test_instance_conforms_to_iface", 
"src/zope/schema/tests/test__field.py::DottedNameTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::DottedNameTests::test_is_hashable", "src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_not_a_dotted_name", "src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_required", "src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_w_max_dots", "src/zope/schema/tests/test__field.py::DottedNameTests::test_validate_w_min_dots", "src/zope/schema/tests/test__field.py::IdTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::IdTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::IdTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::IdTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::IdTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::IdTests::test_fromUnicode_dotted_name_ok", "src/zope/schema/tests/test__field.py::IdTests::test_fromUnicode_invalid", "src/zope/schema/tests/test__field.py::IdTests::test_fromUnicode_url_ok", "src/zope/schema/tests/test__field.py::IdTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::IdTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::IdTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::IdTests::test_is_hashable", "src/zope/schema/tests/test__field.py::IdTests::test_validate_not_a_uri", "src/zope/schema/tests/test__field.py::IdTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::IdTests::test_validate_required", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_is_hashable", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::InterfaceFieldTests::test_validate_required", "src/zope/schema/tests/test__field.py::CollectionTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::CollectionTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::CollectionTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::CollectionTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::CollectionTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::CollectionTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::CollectionTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::CollectionTests::test_ctor_defaults", 
"src/zope/schema/tests/test__field.py::CollectionTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::CollectionTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::CollectionTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::CollectionTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::CollectionTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::CollectionTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::CollectionTests::test_is_hashable", "src/zope/schema/tests/test__field.py::CollectionTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::CollectionTests::test_validate_required", "src/zope/schema/tests/test__field.py::SequenceTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::SequenceTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::SequenceTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::SequenceTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::SequenceTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::SequenceTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::SequenceTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::SequenceTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::SequenceTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::SequenceTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::SequenceTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::SequenceTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::SequenceTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::SequenceTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::SequenceTests::test_is_hashable", "src/zope/schema/tests/test__field.py::SequenceTests::test_mutable_sequence", "src/zope/schema/tests/test__field.py::SequenceTests::test_sequence", "src/zope/schema/tests/test__field.py::SequenceTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::SequenceTests::test_validate_required", "src/zope/schema/tests/test__field.py::TupleTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::TupleTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::TupleTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::TupleTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::TupleTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::TupleTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::TupleTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::TupleTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::TupleTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::TupleTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::TupleTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::TupleTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::TupleTests::test_instance_conforms_to_iface", 
"src/zope/schema/tests/test__field.py::TupleTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::TupleTests::test_is_hashable", "src/zope/schema/tests/test__field.py::TupleTests::test_mutable_sequence", "src/zope/schema/tests/test__field.py::TupleTests::test_sequence", "src/zope/schema/tests/test__field.py::TupleTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::TupleTests::test_validate_required", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_is_hashable", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_mutable_sequence", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_sequence", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::MutableSequenceTests::test_validate_required", "src/zope/schema/tests/test__field.py::ListTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::ListTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::ListTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::ListTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::ListTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::ListTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::ListTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::ListTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::ListTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::ListTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::ListTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::ListTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::ListTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::ListTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::ListTests::test_is_hashable", "src/zope/schema/tests/test__field.py::ListTests::test_mutable_sequence", 
"src/zope/schema/tests/test__field.py::ListTests::test_sequence", "src/zope/schema/tests/test__field.py::ListTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::ListTests::test_validate_required", "src/zope/schema/tests/test__field.py::SetTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::SetTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::SetTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::SetTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::SetTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::SetTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::SetTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::SetTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::SetTests::test_ctor_disallows_unique", "src/zope/schema/tests/test__field.py::SetTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::SetTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::SetTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::SetTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::SetTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::SetTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::SetTests::test_is_hashable", "src/zope/schema/tests/test__field.py::SetTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::SetTests::test_validate_required", "src/zope/schema/tests/test__field.py::FrozenSetTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::FrozenSetTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::FrozenSetTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::FrozenSetTests::test__validate_miss_uniqueness", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_bind_w_value_Type", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_bind_wo_value_Type", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_ctor_defaults", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_ctor_disallows_unique", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_ctor_explicit", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_ctor_w_non_field_value_type", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_is_hashable", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::FrozenSetTests::test_validate_required", "src/zope/schema/tests/test__field.py::MappingTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::MappingTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::MappingTests::test___eq___same_type_same_attrs", 
"src/zope/schema/tests/test__field.py::MappingTests::test_bind_binds_key_and_value_types", "src/zope/schema/tests/test__field.py::MappingTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::MappingTests::test_ctor_key_type_not_IField", "src/zope/schema/tests/test__field.py::MappingTests::test_ctor_value_type_not_IField", "src/zope/schema/tests/test__field.py::MappingTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::MappingTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::MappingTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::MappingTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::MappingTests::test_is_hashable", "src/zope/schema/tests/test__field.py::MappingTests::test_mapping", "src/zope/schema/tests/test__field.py::MappingTests::test_mutable_mapping", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::MappingTests::test_validate_required", "src/zope/schema/tests/test__field.py::MutableMappingTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::MutableMappingTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::MutableMappingTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_bind_binds_key_and_value_types", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_ctor_key_type_not_IField", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_ctor_value_type_not_IField", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_instances_in_different_interfaces_not_equal", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_is_hashable", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_mapping", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_mutable_mapping", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::MutableMappingTests::test_validate_required", "src/zope/schema/tests/test__field.py::DictTests::test___eq___different_type", "src/zope/schema/tests/test__field.py::DictTests::test___eq___same_type_different_attrs", "src/zope/schema/tests/test__field.py::DictTests::test___eq___same_type_same_attrs", "src/zope/schema/tests/test__field.py::DictTests::test_bind_binds_key_and_value_types", "src/zope/schema/tests/test__field.py::DictTests::test_class_conforms_to_iface", "src/zope/schema/tests/test__field.py::DictTests::test_ctor_key_type_not_IField", "src/zope/schema/tests/test__field.py::DictTests::test_ctor_value_type_not_IField", "src/zope/schema/tests/test__field.py::DictTests::test_equal_instances_have_same_hash", "src/zope/schema/tests/test__field.py::DictTests::test_hash_across_unequal_instances", "src/zope/schema/tests/test__field.py::DictTests::test_instance_conforms_to_iface", "src/zope/schema/tests/test__field.py::DictTests::test_instances_in_different_interfaces_not_equal", 
"src/zope/schema/tests/test__field.py::DictTests::test_is_hashable", "src/zope/schema/tests/test__field.py::DictTests::test_mapping", "src/zope/schema/tests/test__field.py::DictTests::test_mutable_mapping", "src/zope/schema/tests/test__field.py::DictTests::test_validate_not_required", "src/zope/schema/tests/test__field.py::DictTests::test_validate_required", "src/zope/schema/tests/test__field.py::test_suite" ]
[]
Zope Public License 2.1
3,052
[ "src/zope/schema/_field.py", "src/zope/schema/_bootstrapfields.py", "src/zope/schema/_bootstrapinterfaces.py", ".gitignore", "CHANGES.rst", "docs/api.rst", "src/zope/schema/interfaces.py" ]
[ "src/zope/schema/_field.py", "src/zope/schema/_bootstrapfields.py", "src/zope/schema/_bootstrapinterfaces.py", ".gitignore", "CHANGES.rst", "docs/api.rst", "src/zope/schema/interfaces.py" ]
zopefoundation__zc.lockfile-16
c3ddaf120d390f3db2eb187b251968191d9ee2e1
2018-09-11 18:27:54
c3ddaf120d390f3db2eb187b251968191d9ee2e1
icemac: @jimfulton What do you think about this PR are you okay with it? jaraco: Bump icemac: @jaraco This PR is approved and can be merged after signing the contributor agreement. Did you already sign it? jaraco: Hi @icemac. I signed and submitted the contributor agreement on 2018-12-09. icemac: @jaraco Sorry for the inconvenience with the handling of your contributor agreement. In the process of merging the Zope foundation into the Plone foundation some requests to get access via a contributor agreement seem to have got dropped. There is now a new contributor agreement: https://www.zope.org/developer/becoming-a-committer.html May I ask you to resubmit your contributor agreement?
diff --git a/CHANGES.rst b/CHANGES.rst index 64a5788..8d81187 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,11 +1,16 @@ Change History *************** -1.5 (unreleased) +2.0 (unreleased) ================ -- Nothing changed yet. +- #15: Extracted new ``SimpleLockFile`` that removes implicit behavior + writing to the lock file, and instead allows a subclass to define + that behavior. +- ``SimpleLockFile`` and thus ``LockFile`` are now new-style classes. + Any clients relying on ``LockFile`` being an old-style class will + need to be adapted. 1.4 (2018-11-12) ================ diff --git a/setup.py b/setup.py index d2d97c1..0fe2746 100644 --- a/setup.py +++ b/setup.py @@ -44,7 +44,7 @@ setup( long_description=long_description, license = "ZPL 2.1", keywords = "lock", - url='http://www.python.org/pypi/zc.lockfile', + url='https://github.com/zopefoundation/zc.lockfile', packages = find_packages('src'), package_dir = {'': 'src'}, namespace_packages = ['zc'], diff --git a/src/zc/lockfile/__init__.py b/src/zc/lockfile/__init__.py index a0ac2ff..b541fa2 100644 --- a/src/zc/lockfile/__init__.py +++ b/src/zc/lockfile/__init__.py @@ -17,6 +17,8 @@ import errno import logging logger = logging.getLogger("zc.lockfile") +__metaclass__ = type + class LockError(Exception): """Couldn't get a lock """ @@ -61,18 +63,18 @@ else: def _unlock_file(file): fcntl.flock(file.fileno(), fcntl.LOCK_UN) -class LazyHostName(object): +class LazyHostName: """Avoid importing socket and calling gethostname() unnecessarily""" def __str__(self): import socket return socket.gethostname() -class LockFile: +class SimpleLockFile: _fp = None - def __init__(self, path, content_template='{pid}'): + def __init__(self, path): self._path = path try: # Try to open for writing without truncation: @@ -86,15 +88,13 @@ class LockFile: try: _lock_file(fp) + self._fp = fp except: fp.close() raise - # We got the lock, record info in the file. - self._fp = fp - fp.write(" %s\n" % content_template.format(pid=os.getpid(), - hostname=LazyHostName())) - fp.truncate() + # Lock acquired + self._on_lock() fp.flush() def close(self): @@ -102,3 +102,24 @@ class LockFile: _unlock_file(self._fp) self._fp.close() self._fp = None + + def _on_lock(self): + """ + Allow subclasses to supply behavior to occur following + lock acquisition. + """ + + +class LockFile(SimpleLockFile): + + def __init__(self, path, content_template='{pid}'): + self._content_template = content_template + super(LockFile, self).__init__(path) + + def _on_lock(self): + content = self._content_template.format( + pid=os.getpid(), + hostname=LazyHostName(), + ) + self._fp.write(" %s\n" % content) + self._fp.truncate()
Allow reading/writing with acquired lock file The current implementation of LockFile unconditionally writes some content and truncates the file upon acquiring the lock. Some use cases, such as in pytest-dev/pytest-services#23, require that the client be able to read the content of the file and write more content to the file while holding the lock. This use-case isn't possible with zc.lockfile because the content is truncated. I suggest instead the Lockfile class should allow clients to override this behavior.
zopefoundation/zc.lockfile
diff --git a/src/zc/lockfile/tests.py b/src/zc/lockfile/tests.py index e9fcbff..4c89053 100644 --- a/src/zc/lockfile/tests.py +++ b/src/zc/lockfile/tests.py @@ -179,6 +179,14 @@ class LockFileLogEntryTestCase(unittest.TestCase): lock.close() p.join() + def test_simple_lock(self): + assert isinstance(zc.lockfile.SimpleLockFile, type) + lock = zc.lockfile.SimpleLockFile('s') + with self.assertRaises(zc.lockfile.LockError): + zc.lockfile.SimpleLockFile('s') + lock.close() + zc.lockfile.SimpleLockFile('s').close() + def test_suite(): suite = unittest.TestSuite()
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 3 }
1.4
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[test]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "zope.testing" ], "pre_install": null, "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
certifi @ file:///croot/certifi_1671487769961/work/certifi exceptiongroup==1.2.2 importlib-metadata==6.7.0 iniconfig==2.0.0 packaging==24.0 pluggy==1.2.0 pytest==7.4.4 tomli==2.0.1 typing_extensions==4.7.1 -e git+https://github.com/zopefoundation/zc.lockfile.git@c3ddaf120d390f3db2eb187b251968191d9ee2e1#egg=zc.lockfile zipp==3.15.0 zope.testing==5.0.1
name: zc.lockfile channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - exceptiongroup==1.2.2 - importlib-metadata==6.7.0 - iniconfig==2.0.0 - packaging==24.0 - pluggy==1.2.0 - pytest==7.4.4 - tomli==2.0.1 - typing-extensions==4.7.1 - zipp==3.15.0 - zope-testing==5.0.1 prefix: /opt/conda/envs/zc.lockfile
[ "src/zc/lockfile/tests.py::LockFileLogEntryTestCase::test_simple_lock" ]
[]
[ "src/zc/lockfile/tests.py::LockFileLogEntryTestCase::test_log_formatting", "src/zc/lockfile/tests.py::LockFileLogEntryTestCase::test_unlock_and_lock_while_multiprocessing_process_running", "src/zc/lockfile/tests.py::test_suite" ]
[]
Zope Public License 2.1
3,053
[ "setup.py", "src/zc/lockfile/__init__.py", "CHANGES.rst" ]
[ "setup.py", "src/zc/lockfile/__init__.py", "CHANGES.rst" ]
horejsek__python-fastjsonschema-24
dc3ae94332ce901181120db8a2f9f8b6d5339622
2018-09-12 12:06:19
97d45db2d13c3a769d9805cca2a672643b17bb6b
diff --git a/fastjsonschema/draft04.py b/fastjsonschema/draft04.py index 3ee257b..6d79a1e 100644 --- a/fastjsonschema/draft04.py +++ b/fastjsonschema/draft04.py @@ -18,7 +18,7 @@ JSON_TYPE_TO_PYTHON_TYPE = { class CodeGeneratorDraft04(CodeGenerator): # pylint: disable=line-too-long FORMAT_REGEXS = { - 'date-time': r'^\d{4}-[01]\d-[0-3]\dT[0-2]\d:[0-5]\d:[0-5]\d\.\d+(?:[+-][0-2]\d:[0-5]\d|Z)?$', + 'date-time': r'^\d{4}-[01]\d-[0-3]\dT[0-2]\d:[0-5]\d:[0-5]\d(?:\.\d+)?(?:[+-][0-2]\d:[0-5]\d|Z)?$', 'email': r'^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$', 'hostname': ( r'^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*'
JsonSchemaException when using date-time with RFC 3339 compliant string Validation fails with this example: ``` import json import fastjsonschema schema = { "$schema": "http://json-schema.org/draft-07/schema#", "$id": "https://example.com/example.schema.json", "title": "Example", "description": "An example schema", "type": "object", "properties": { "date": { "type": "string", "description": "Some date", "format": "date-time" } } } validate = fastjsonschema.compile(schema) validate({"date": "2018-02-05T14:17:10Z"}) ``` The output is: ``` Traceback (most recent call last): File "validate_simple_example.py", line 22, in <module> validate({"date": "2018-02-05T14:17:10Z"}) File "<string>", line 16, in validate_https___example_com_example_schema_json fastjsonschema.exceptions.JsonSchemaException ``` According to the [json schema docs](http://json-schema.org/latest/json-schema-validation.html#rfc.section.7.3.1) all RFC 3339 timestamps should be valid. I think the problem is the milliseconds part. It should be optional if I'm not wrong. The above example runs fine with: `validate({"date": "2018-02-05T14:17:10.00Z"})` The current regex is: ``` ^\d{4}-[01]\d-[0-3]\dT[0-2]\d:[0-5]\d:[0-5]\d\.\d+(?:[+-][0-2]\d:[0-5]\d|Z)?$ ``` I suggest changing it to: ``` ^\d{4}-[01]\d-[0-3]\dT[0-2]\d:[0-5]\d:[0-5]\d(?:\.\d+)?(?:[+-][0-2]\d:[0-5]\d|Z)?$ ``` Also maybe it's worth thinking about not using regexes for format validation for some of the stuff (like ips, dates, etc.)
horejsek/python-fastjsonschema
diff --git a/tests/test_datetime.py b/tests/test_datetime.py new file mode 100644 index 0000000..2263e9f --- /dev/null +++ b/tests/test_datetime.py @@ -0,0 +1,15 @@ + +import pytest + +from fastjsonschema import JsonSchemaException + + +exc = JsonSchemaException('data must be date-time') [email protected]('value, expected', [ + ('', exc), + ('bla', exc), + ('2018-02-05T14:17:10.00Z', '2018-02-05T14:17:10.00Z'), + ('2018-02-05T14:17:10Z', '2018-02-05T14:17:10Z'), +]) +def test_datetime(asserter, value, expected): + asserter({'type': 'string', 'format': 'date-time'}, value, expected)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 1 }
2.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[devel]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-benchmark", "pytest-cache" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
astroid==3.3.9 attrs==25.3.0 certifi==2025.1.31 charset-normalizer==3.4.1 colorama==0.4.6 dill==0.3.9 exceptiongroup==1.2.2 execnet==2.1.1 -e git+https://github.com/horejsek/python-fastjsonschema.git@dc3ae94332ce901181120db8a2f9f8b6d5339622#egg=fastjsonschema idna==3.10 iniconfig==2.1.0 isort==6.0.1 json-spec==0.10.1 jsonschema==4.23.0 jsonschema-specifications==2024.10.1 mccabe==0.7.0 packaging==24.2 platformdirs==4.3.7 pluggy==1.5.0 py-cpuinfo==9.0.0 pylint==3.3.6 pytest==8.3.5 pytest-benchmark==5.1.0 pytest-cache==1.0 referencing==0.36.2 requests==2.32.3 rpds-py==0.24.0 six==1.17.0 tomli==2.2.1 tomlkit==0.13.2 typing_extensions==4.13.0 urllib3==2.3.0 validictory==1.1.3
name: python-fastjsonschema channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - astroid==3.3.9 - attrs==25.3.0 - certifi==2025.1.31 - charset-normalizer==3.4.1 - colorama==0.4.6 - dill==0.3.9 - exceptiongroup==1.2.2 - execnet==2.1.1 - idna==3.10 - iniconfig==2.1.0 - isort==6.0.1 - json-spec==0.10.1 - jsonschema==4.23.0 - jsonschema-specifications==2024.10.1 - mccabe==0.7.0 - packaging==24.2 - platformdirs==4.3.7 - pluggy==1.5.0 - py-cpuinfo==9.0.0 - pylint==3.3.6 - pytest==8.3.5 - pytest-benchmark==5.1.0 - pytest-cache==1.0 - referencing==0.36.2 - requests==2.32.3 - rpds-py==0.24.0 - six==1.17.0 - tomli==2.2.1 - tomlkit==0.13.2 - typing-extensions==4.13.0 - urllib3==2.3.0 - validictory==1.1.3 prefix: /opt/conda/envs/python-fastjsonschema
[ "tests/test_datetime.py::test_datetime[2018-02-05T14:17:10Z-2018-02-05T14:17:10Z]" ]
[]
[ "tests/test_datetime.py::test_datetime[-expected0]", "tests/test_datetime.py::test_datetime[bla-expected1]", "tests/test_datetime.py::test_datetime[2018-02-05T14:17:10.00Z-2018-02-05T14:17:10.00Z]" ]
[]
BSD 3-Clause "New" or "Revised" License
3,055
[ "fastjsonschema/draft04.py" ]
[ "fastjsonschema/draft04.py" ]
horejsek__python-fastjsonschema-25
97d45db2d13c3a769d9805cca2a672643b17bb6b
2018-09-12 12:38:34
97d45db2d13c3a769d9805cca2a672643b17bb6b
diff --git a/fastjsonschema/draft04.py b/fastjsonschema/draft04.py index 6d79a1e..3ee257b 100644 --- a/fastjsonschema/draft04.py +++ b/fastjsonschema/draft04.py @@ -18,7 +18,7 @@ JSON_TYPE_TO_PYTHON_TYPE = { class CodeGeneratorDraft04(CodeGenerator): # pylint: disable=line-too-long FORMAT_REGEXS = { - 'date-time': r'^\d{4}-[01]\d-[0-3]\dT[0-2]\d:[0-5]\d:[0-5]\d(?:\.\d+)?(?:[+-][0-2]\d:[0-5]\d|Z)?$', + 'date-time': r'^\d{4}-[01]\d-[0-3]\dT[0-2]\d:[0-5]\d:[0-5]\d\.\d+(?:[+-][0-2]\d:[0-5]\d|Z)?$', 'email': r'^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$', 'hostname': ( r'^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*' diff --git a/fastjsonschema/generator.py b/fastjsonschema/generator.py index 11358d2..28b9480 100644 --- a/fastjsonschema/generator.py +++ b/fastjsonschema/generator.py @@ -93,7 +93,7 @@ class CodeGenerator: '', ] ) - regexs = ['"{}": {}'.format(key, value) for key, value in self._compile_regexps.items()] + regexs = ['"{}": re.compile(r"{}")'.format(key, value.pattern) for key, value in self._compile_regexps.items()] return '\n'.join( [ 'import re',
ipv6 format leads to cut off string in generated code Thanks for the quick fix of #21 ! I just tried it, and indeed, the regex pattern is now there. But the code is still broken due to a different issue. When I generate the code for this schema: ``` { "$schema": "http://json-schema.org/draft-07/schema#", "$id": "https://example.com/example.schema.json", "title": "Example", "description": "An example schema", "type": "object", "properties": { "ip": { "type": "string", "description": "The IP of the future", "format": "ipv6" } }, "required": [ "ip" ] } ``` The resulting code contains the following dict: ``` REGEX_PATTERNS = { "ipv6_re_pattern": re.compile('^(?:(?:[0-9A-Fa-f]{1,4}:){6}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|::(?:[0-9A-Fa-f]{1,4) } ``` It looks like the line is just cut off at that point, so the code won't work. `SyntaxError: EOL while scanning string literal`
horejsek/python-fastjsonschema
diff --git a/tests/test_compile_to_code.py b/tests/test_compile_to_code.py index 394e648..7312f38 100644 --- a/tests/test_compile_to_code.py +++ b/tests/test_compile_to_code.py @@ -1,8 +1,19 @@ import os import pytest +import shutil from fastjsonschema import JsonSchemaException, compile_to_code [email protected]_fixture(autouse=True) +def run_around_tests(): + temp_dir = 'temp' + # Code that will run before your test, for example: + if not os.path.isdir(temp_dir): + os.makedirs(temp_dir) + # A test function will be run at this point + yield + # Code that will run after your test, for example: + shutil.rmtree(temp_dir) def test_compile_to_code(): code = compile_to_code({ @@ -12,11 +23,9 @@ def test_compile_to_code(): 'c': {'format': 'hostname'}, } }) - if not os.path.isdir('temp'): - os.makedirs('temp') - with open('temp/schema.py', 'w') as f: + with open('temp/schema_1.py', 'w') as f: f.write(code) - from temp.schema import validate + from temp.schema_1 import validate assert validate({ 'a': 'a', 'b': 1, @@ -26,3 +35,18 @@ def test_compile_to_code(): 'b': 1, 'c': 'example.com', } + +def test_compile_to_code_ipv6_regex(): + code = compile_to_code({ + 'properties': { + 'ip': {'format': 'ipv6'}, + } + }) + with open('temp/schema_2.py', 'w') as f: + f.write(code) + from temp.schema_2 import validate + assert validate({ + 'ip': '2001:0db8:85a3:0000:0000:8a2e:0370:7334' + }) == { + 'ip': '2001:0db8:85a3:0000:0000:8a2e:0370:7334' + } \ No newline at end of file diff --git a/tests/test_datetime.py b/tests/test_datetime.py deleted file mode 100644 index 2263e9f..0000000 --- a/tests/test_datetime.py +++ /dev/null @@ -1,15 +0,0 @@ - -import pytest - -from fastjsonschema import JsonSchemaException - - -exc = JsonSchemaException('data must be date-time') [email protected]('value, expected', [ - ('', exc), - ('bla', exc), - ('2018-02-05T14:17:10.00Z', '2018-02-05T14:17:10.00Z'), - ('2018-02-05T14:17:10Z', '2018-02-05T14:17:10Z'), -]) -def test_datetime(asserter, value, expected): - asserter({'type': 'string', 'format': 'date-time'}, value, expected)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_issue_reference", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 3 }, "num_modified_files": 2 }
2.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[devel]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-benchmark", "pytest-cache" ], "pre_install": [ "apt-get update", "apt-get install -y gcc", "git submodule init", "git submodule update" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
astroid==3.3.9 attrs==25.3.0 certifi==2025.1.31 charset-normalizer==3.4.1 colorama==0.4.6 dill==0.3.9 exceptiongroup==1.2.2 execnet==2.1.1 -e git+https://github.com/horejsek/python-fastjsonschema.git@97d45db2d13c3a769d9805cca2a672643b17bb6b#egg=fastjsonschema idna==3.10 iniconfig==2.1.0 isort==6.0.1 json-spec==0.10.1 jsonschema==4.23.0 jsonschema-specifications==2024.10.1 mccabe==0.7.0 packaging==24.2 platformdirs==4.3.7 pluggy==1.5.0 py-cpuinfo==9.0.0 pylint==3.3.6 pytest==8.3.5 pytest-benchmark==5.1.0 pytest-cache==1.0 referencing==0.36.2 requests==2.32.3 rpds-py==0.24.0 six==1.17.0 tomli==2.2.1 tomlkit==0.13.2 typing_extensions==4.13.0 urllib3==2.3.0 validictory==1.1.3
name: python-fastjsonschema channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - astroid==3.3.9 - attrs==25.3.0 - certifi==2025.1.31 - charset-normalizer==3.4.1 - colorama==0.4.6 - dill==0.3.9 - exceptiongroup==1.2.2 - execnet==2.1.1 - idna==3.10 - iniconfig==2.1.0 - isort==6.0.1 - json-spec==0.10.1 - jsonschema==4.23.0 - jsonschema-specifications==2024.10.1 - mccabe==0.7.0 - packaging==24.2 - platformdirs==4.3.7 - pluggy==1.5.0 - py-cpuinfo==9.0.0 - pylint==3.3.6 - pytest==8.3.5 - pytest-benchmark==5.1.0 - pytest-cache==1.0 - referencing==0.36.2 - requests==2.32.3 - rpds-py==0.24.0 - six==1.17.0 - tomli==2.2.1 - tomlkit==0.13.2 - typing-extensions==4.13.0 - urllib3==2.3.0 - validictory==1.1.3 prefix: /opt/conda/envs/python-fastjsonschema
[ "tests/test_compile_to_code.py::test_compile_to_code_ipv6_regex" ]
[]
[ "tests/test_compile_to_code.py::test_compile_to_code" ]
[]
BSD 3-Clause "New" or "Revised" License
3,056
[ "fastjsonschema/draft04.py", "fastjsonschema/generator.py" ]
[ "fastjsonschema/draft04.py", "fastjsonschema/generator.py" ]
pydata__sparse-182
b03b6b9a480a10a3cf59d7994292b9c5d3015cd5
2018-09-12 13:20:42
b03b6b9a480a10a3cf59d7994292b9c5d3015cd5
codecov[bot]: # [Codecov](https://codecov.io/gh/pydata/sparse/pull/182?src=pr&el=h1) Report > Merging [#182](https://codecov.io/gh/pydata/sparse/pull/182?src=pr&el=desc) into [master](https://codecov.io/gh/pydata/sparse/commit/b03b6b9a480a10a3cf59d7994292b9c5d3015cd5?src=pr&el=desc) will **decrease** coverage by `0.11%`. > The diff coverage is `100%`. [![Impacted file tree graph](https://codecov.io/gh/pydata/sparse/pull/182/graphs/tree.svg?width=650&token=H212u0Uxxw&height=150&src=pr)](https://codecov.io/gh/pydata/sparse/pull/182?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #182 +/- ## ========================================== - Coverage 97.3% 97.19% -0.12% ========================================== Files 11 11 Lines 1301 1319 +18 ========================================== + Hits 1266 1282 +16 - Misses 35 37 +2 ``` | [Impacted Files](https://codecov.io/gh/pydata/sparse/pull/182?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [sparse/utils.py](https://codecov.io/gh/pydata/sparse/pull/182/diff?src=pr&el=tree#diff-c3BhcnNlL3V0aWxzLnB5) | `100% <ø> (+1.59%)` | :arrow_up: | | [sparse/coo/umath.py](https://codecov.io/gh/pydata/sparse/pull/182/diff?src=pr&el=tree#diff-c3BhcnNlL2Nvby91bWF0aC5weQ==) | `97.32% <100%> (+0.49%)` | :arrow_up: | | [sparse/coo/core.py](https://codecov.io/gh/pydata/sparse/pull/182/diff?src=pr&el=tree#diff-c3BhcnNlL2Nvby9jb3JlLnB5) | `96.81% <100%> (+0.02%)` | :arrow_up: | | [sparse/compatibility.py](https://codecov.io/gh/pydata/sparse/pull/182/diff?src=pr&el=tree#diff-c3BhcnNlL2NvbXBhdGliaWxpdHkucHk=) | `50% <0%> (-33.34%)` | :arrow_down: | ------ [Continue to review full report at Codecov](https://codecov.io/gh/pydata/sparse/pull/182?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/pydata/sparse/pull/182?src=pr&el=footer). Last update [b03b6b9...a0ca82a](https://codecov.io/gh/pydata/sparse/pull/182?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
diff --git a/docs/changelog.rst b/docs/changelog.rst index 934a70c..1682517 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -1,6 +1,7 @@ Changelog ========= +* :feature:`124` (via :issue:`182`) Allow mixed :code:`ndarray`-:obj:`COO` operations if the result is sparse. * :feature:`179` (via :issue:`180`) Allow specifying a fill-value when converting from NumPy arrays. * :feature:`175` Added :code:`COO.any` and :code:`COO.all` methods. * :feature:`172` Indexing for :code:`COO` now accepts a single one-dimensional array index. diff --git a/docs/operations.rst b/docs/operations.rst index 524f2af..fbfa8e9 100644 --- a/docs/operations.rst +++ b/docs/operations.rst @@ -51,7 +51,12 @@ Auto-Densification ~~~~~~~~~~~~~~~~~~ Operations that would result in dense matrices, such as operations with :doc:`Numpy arrays <reference/generated/numpy.ndarray>` -objects a :obj:`ValueError`. +raises a :obj:`ValueError`. For example, the following will raise a +:obj:`ValueError` if :code:`x` is a :obj:`numpy.ndarray`: + +.. code-block:: python + + x + y However, all of the following are valid operations. @@ -77,9 +82,21 @@ If densification is needed, it must be explicit. In other words, you must call :obj:`COO.todense` on the :obj:`COO` object. If both operands are :obj:`COO`, both must be densified. -.. warning:: Previously, operations with Numpy arrays were sometimes supported. Now, - it is necessary to convert Numpy arrays to :obj:`COO` objects. +Operations with NumPy arrays +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +In certain situations, operations with NumPy arrays are also supported. For example, +the following will work if :code:`x` is :obj:`COO` and :code:`y` is a NumPy array: + +.. code-block:: python + + x * y + +The following conditions must be met when performing element-wise operations with +NumPy arrays: +* The operation must produce a consistent fill-values. In other words, the resulting + array must also be sparse. +* Operating on the NumPy arrays must not increase the size when broadcasting the arrays. Operations with :obj:`scipy.sparse.spmatrix` -------------------------------------------- diff --git a/sparse/coo/core.py b/sparse/coo/core.py index 3f5d47b..1131e18 100644 --- a/sparse/coo/core.py +++ b/sparse/coo/core.py @@ -1317,6 +1317,11 @@ class COO(SparseArray, NDArrayOperatorsMixin): [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) """ + if isinstance(shape, Iterable): + shape = tuple(shape) + else: + shape = (shape,) + if self.shape == shape: return self if any(d == -1 for d in shape): diff --git a/sparse/coo/umath.py b/sparse/coo/umath.py index 8b3e15f..dcb3e52 100644 --- a/sparse/coo/umath.py +++ b/sparse/coo/umath.py @@ -1,11 +1,11 @@ -from itertools import product +import itertools import numba import numpy as np import scipy.sparse from ..compatibility import range, zip, zip_longest -from ..utils import isscalar, PositinalArgumentPartial, equivalent +from ..utils import isscalar, equivalent, _zero_of_dtype def elemwise(func, *args, **kwargs): @@ -43,35 +43,8 @@ def elemwise(func, *args, **kwargs): Previously, operations with Numpy arrays were sometimes supported. Now, it is necessary to convert Numpy arrays to :obj:`COO` objects. """ - # Because we need to mutate args. 
- from .core import COO - from ..sparse_array import SparseArray - - args = list(args) - posargs = [] - pos = [] - for i, arg in enumerate(args): - if isinstance(arg, scipy.sparse.spmatrix): - args[i] = COO.from_scipy_sparse(arg) - elif isscalar(arg) or (isinstance(arg, np.ndarray) and not arg.shape): - # Faster and more reliable to pass ()-shaped ndarrays as scalars. - args[i] = np.asarray(arg)[()] - - pos.append(i) - posargs.append(args[i]) - elif isinstance(arg, SparseArray) and not isinstance(arg, COO): - args[i] = COO(arg) - elif not isinstance(arg, COO): - return NotImplemented - - # Filter out scalars as they are 'baked' into the function. - func = PositinalArgumentPartial(func, pos, posargs) - args = [arg for arg in args if not isscalar(arg)] - if len(args) == 0: - return func(**kwargs) - - return _elemwise_n_ary(func, *args, **kwargs) + return _Elemwise(func, *args, **kwargs).get_result() @numba.jit(nopython=True, nogil=True) @@ -116,207 +89,6 @@ def _match_arrays(a, b): # pragma: no cover return np.array(a_ind, dtype=np.uintp), np.array(b_ind, dtype=np.uintp) -def _elemwise_n_ary(func, *args, **kwargs): - """ - Apply a function to any number of arguments with broadcasting. - - Parameters - ---------- - func : Callable - The function to apply to arguments. Must support broadcasting. - args : list - Input :obj:`COO` or :obj:`numpy.ndarray`s. - kwargs : dict - Additional arguments to pass to the function. - - Returns - ------- - COO - The output array. - - Raises - ------ - ValueError - If the input shapes aren't compatible or the result will be dense. - """ - from .core import COO - - args = list(args) - - args_fill_values = tuple(arg.fill_value[...] for arg in args) - func_fill_value = func(*args_fill_values, **kwargs) - - data_list = [] - coords_list = [] - - cache = {} - for mask in product([True, False], repeat=len(args)): - if not any(mask): - continue - - ci, di = _unmatch_coo(func, args, mask, cache, func_fill_value, **kwargs) - - coords_list.extend(ci) - data_list.extend(di) - - result_shape = _get_nary_broadcast_shape(*[arg.shape for arg in args]) - - # Concatenate matches and mismatches - data = np.concatenate(data_list) if len(data_list) else np.empty((0,), dtype=func_fill_value.dtype) - coords = np.concatenate(coords_list, axis=1) if len(coords_list) else \ - np.empty((0, len(result_shape)), dtype=np.intp) - - return COO(coords, data, shape=result_shape, has_duplicates=False, fill_value=func_fill_value) - - -def _match_coo(*args, **kwargs): - """ - Matches the coordinates for any number of input :obj:`COO` arrays. - Equivalent to "sparse" broadcasting for all arrays. - - Parameters - ---------- - args : Tuple[COO] - The input :obj:`COO` arrays. - return_midx : bool - Whether to return matched indices or matched arrays. Matching - only supported for two arrays. ``False`` by default. - cache : dict - Cache of things already matched. No cache by default. - - Returns - ------- - matched_idx : List[ndarray] - The indices of matched elements in the original arrays. Only returned if - ``return_midx`` is ``True``. - matched_arrays : List[COO] - The expanded, matched :obj:`COO` objects. Only returned if - ``return_midx`` is ``False``. 
- """ - from .core import COO - from .common import linear_loc - - return_midx = kwargs.pop('return_midx', False) - cache = kwargs.pop('cache', None) - - if kwargs: - raise ValueError('Unknown kwargs %s' % kwargs.keys()) - - if return_midx and (len(args) != 2 or cache is not None): - raise NotImplementedError('Matching indices only supported for two args, and no cache.') - - matched_arrays = [args[0]] - cache_key = [id(args[0])] - for arg2 in args[1:]: - cache_key.append(id(arg2)) - key = tuple(cache_key) - if cache is not None and key in cache: - matched_arrays = cache[key] - continue - - cargs = [matched_arrays[0], arg2] - current_shape = _get_broadcast_shape(matched_arrays[0].shape, arg2.shape) - params = [_get_broadcast_parameters(arg.shape, current_shape) for arg in cargs] - reduced_params = [all(p) for p in zip(*params)] - reduced_shape = _get_reduced_shape(arg2.shape, - reduced_params[-arg2.ndim:]) - - reduced_coords = [_get_reduced_coords(arg.coords, reduced_params[-arg.ndim:]) - for arg in cargs] - - linear = [linear_loc(rc, reduced_shape) for rc in reduced_coords] - sorted_idx = [np.argsort(idx) for idx in linear] - linear = [idx[s] for idx, s in zip(linear, sorted_idx)] - matched_idx = _match_arrays(*linear) - - if return_midx: - matched_idx = [sidx[midx] for sidx, midx in zip(sorted_idx, matched_idx)] - return matched_idx - - coords = [arg.coords[:, s] for arg, s in zip(cargs, sorted_idx)] - mcoords = [c[:, idx] for c, idx in zip(coords, matched_idx)] - mcoords = _get_matching_coords(mcoords, params, current_shape) - mdata = [arg.data[sorted_idx[0]][matched_idx[0]] for arg in matched_arrays] - mdata.append(arg2.data[sorted_idx[1]][matched_idx[1]]) - # The coords aren't truly sorted, but we don't need them, so it's - # best to avoid the extra cost. - matched_arrays = [ - COO(mcoords, md, shape=current_shape, sorted=True, has_duplicates=False) - for md in mdata] - - if cache is not None: - cache[key] = matched_arrays - - return matched_arrays - - -def _unmatch_coo(func, args, mask, cache, func_fill_value, **kwargs): - """ - Matches the coordinates for any number of input :obj:`COO` arrays. - - First computes the matches, then filters out the non-matches. - - Parameters - ---------- - func : Callable - The function to compute matches - args : tuple[COO] - The input :obj:`COO` arrays. - mask : tuple[bool] - Specifies the inputs that are zero and the ones that are - nonzero. - kwargs: dict - Extra keyword arguments to pass to func. - - Returns - ------- - matched_coords : list[ndarray] - The matched coordinates. - matched_data : list[ndarray] - The matched data. - """ - from .core import COO - - matched_args = [a for a, m in zip(args, mask) if m] - unmatched_args = [a for a, m in zip(args, mask) if not m] - - matched_arrays = _match_coo(*matched_args, cache=cache) - - pos = tuple(i for i, m in enumerate(mask) if not m) - posargs = [arg.fill_value[...] for arg, m in zip(args, mask) if not m] - result_shape = _get_nary_broadcast_shape(*[arg.shape for arg in args]) - - partial = PositinalArgumentPartial(func, pos, posargs) - matched_func = partial(*[a.data for a in matched_arrays], **kwargs) - - unmatched_mask = ~equivalent(matched_func, func_fill_value) - - if not unmatched_mask.any(): - return [], [] - - func_data = matched_func[unmatched_mask] - func_coords = matched_arrays[0].coords[:, unmatched_mask] - - # The coords aren't truly sorted, but we don't need them, so it's - # best to avoid the extra cost. 
- func_array = COO(func_coords, func_data, shape=matched_arrays[0].shape, - sorted=True, has_duplicates=False).broadcast_to(result_shape) - - if all(mask): - return [func_array.coords], [func_array.data] - - unmatched_mask = np.ones(func_array.nnz, dtype=np.bool) - - for arg in unmatched_args: - matched_idx = _match_coo(func_array, arg, return_midx=True)[0] - unmatched_mask[matched_idx] = False - - coords = np.asarray(func_array.coords[:, unmatched_mask], order='C') - data = np.asarray(func_array.data[unmatched_mask], order='C') - - return [coords], [data] - - def _get_nary_broadcast_shape(*shapes): """ Broadcast any number of shapes to a result shape. @@ -376,7 +148,7 @@ def _get_broadcast_shape(shape1, shape2, is_result=False): raise ValueError('operands could not be broadcast together with shapes %s, %s' % (shape1, shape2)) - result_shape = tuple(max(l1, l2) for l1, l2 in + result_shape = tuple(l1 if l1 != 1 else l2 for l1, l2 in zip_longest(shape1[::-1], shape2[::-1], fillvalue=1))[::-1] return result_shape @@ -532,7 +304,7 @@ def _cartesian_product(*arrays): return out.reshape(cols, rows) -def _get_matching_coords(coords, params, shape): +def _get_matching_coords(coords, params): """ Get the matching coords across a number of broadcast operands. @@ -605,3 +377,290 @@ def broadcast_to(x, shape): return COO(coords, data, shape=result_shape, has_duplicates=False, sorted=sorted, fill_value=x.fill_value) + + +class _Elemwise(object): + def __init__(self, func, *args, **kwargs): + """ + Initialize the element-wise function calculator. + + Parameters + ---------- + func : types.Callable + The function to compute + args : tuple[Union[SparseArray, ndarray, scipy.sparse.spmatrix]] + The arguments to compute the function on. + kwargs : dict + Extra arguments to pass to the function. + """ + + from .core import COO + from ..sparse_array import SparseArray + + processed_args = [] + + for arg in args: + if isinstance(arg, scipy.sparse.spmatrix): + processed_args.append(COO.from_scipy_sparse(arg)) + elif isscalar(arg) or isinstance(arg, np.ndarray): + # Faster and more reliable to pass ()-shaped ndarrays as scalars. 
+ processed_args.append(np.asarray(arg)) + elif isinstance(arg, SparseArray) and not isinstance(arg, COO): + processed_args.append(COO(arg)) + elif not isinstance(arg, COO): + self.args = None + return + else: + processed_args.append(arg) + + self.args = tuple(processed_args) + self.func = func + self.kwargs = kwargs + self.cache = {} + + self._get_fill_value() + self._check_broadcast() + + def get_result(self): + from .core import COO + if self.args is None: + return NotImplemented + + if any(s == 0 for s in self.shape): + data = np.empty((0,), dtype=self.fill_value.dtype) + coords = np.empty((0, len(self.shape)), dtype=np.intp) + return COO(coords, data, shape=self.shape, has_duplicates=False, fill_value=self.fill_value) + + data_list = [] + coords_list = [] + + for mask in itertools.product(*[[True, False] if isinstance(arg, COO) + else [None] for arg in self.args]): + if not any(mask): + continue + + r = self._get_func_coords_data(mask) + + if r is not None: + coords_list.append(r[0]) + data_list.append(r[1]) + + # Concatenate matches and mismatches + data = np.concatenate(data_list) if len(data_list) else np.empty((0,), dtype=self.fill_value.dtype) + coords = np.concatenate(coords_list, axis=1) if len(coords_list) else \ + np.empty((0, len(self.shape)), dtype=np.intp) + + return COO(coords, data, shape=self.shape, has_duplicates=False, fill_value=self.fill_value) + + def _get_fill_value(self): + """ + A function that finds and returns the fill-value. + + Raises + ------ + ValueError + If the fill-value is inconsistent. + """ + from .core import COO + + zero_args = tuple(arg.fill_value[...] if isinstance(arg, COO) else arg for arg in self.args) + fill_value_array = self.func(*zero_args, **self.kwargs) + + try: + fill_value = fill_value_array[(0,) * fill_value_array.ndim] + except IndexError: + zero_args = tuple( + arg.fill_value if isinstance(arg, COO) else _zero_of_dtype(arg.dtype) for arg in self.args) + fill_value = self.func(*zero_args, **self.kwargs)[()] + + if not equivalent(fill_value, fill_value_array).all(): + raise ValueError('Inconsistent fill-values in the result array: operating on the ndarray with' + 'fill-values produces inconsistent results.') + + self.fill_value = fill_value + + def _check_broadcast(self): + """ + Checks if adding the ndarrays changes the broadcast shape. + + Raises + ------ + ValueError + If the check fails. + """ + from .core import COO + full_shape = _get_nary_broadcast_shape(*tuple(arg.shape for arg in self.args)) + non_ndarray_shape = _get_nary_broadcast_shape( + *tuple(arg.shape for arg in self.args if isinstance(arg, COO)) + ) + + if full_shape != non_ndarray_shape: + raise ValueError('All ndarrays must be broadcastable to the shape without ndarrays {}' + .format(non_ndarray_shape)) + + self.shape = full_shape + + def _get_func_coords_data(self, mask): + """ + Gets the coords/data for a certain mask + + Parameters + ---------- + mask : tuple[Union[bool, NoneType]] + The mask determining whether to match or unmatch. + + Returns + ------- + None or tuple + The coords/data tuple for the given mask. 
+ """ + from .core import COO + + matched_args = [arg for arg, m in zip(self.args, mask) if m is not None and m] + unmatched_args = [arg for arg, m in zip(self.args, mask) if m is not None and not m] + ndarray_args = [arg for arg, m in zip(self.args, mask) if m is None] + + matched_broadcast_shape = _get_nary_broadcast_shape( + *tuple(arg.shape for arg in itertools.chain(matched_args, ndarray_args)) + ) + + matched_arrays = self._match_coo(*matched_args, + cache=self.cache, + broadcast_shape=matched_broadcast_shape) + + func_args = [] + + m_arg = 0 + for arg, m in zip(self.args, mask): + if m is None: + func_args.append(np.broadcast_to(arg, matched_broadcast_shape)[tuple(matched_arrays[0].coords)]) + continue + + if m: + func_args.append(matched_arrays[m_arg].data) + m_arg += 1 + else: + func_args.append(arg.fill_value) + + func_data = self.func(*func_args, **self.kwargs) + unmatched_mask = ~equivalent(func_data, self.fill_value) + + if not unmatched_mask.any(): + return None + + func_coords = matched_arrays[0].coords[:, unmatched_mask] + func_data = func_data[unmatched_mask] + + if matched_arrays[0].shape != self.shape: + params = _get_broadcast_parameters(matched_arrays[0].shape, self.shape) + func_coords, func_data = _get_expanded_coords_data(func_coords, func_data, params, self.shape) + + if all(m is None or m for m in mask): + return func_coords, func_data + + # Not really sorted but we need the sortedness. + func_array = COO(func_coords, func_data, self.shape, has_duplicates=False, sorted=True) + + unmatched_mask = np.ones(func_array.nnz, dtype=np.bool) + + for arg in unmatched_args: + matched_idx = self._match_coo(func_array, arg, return_midx=True)[0] + unmatched_mask[matched_idx] = False + + coords = np.asarray(func_array.coords[:, unmatched_mask], order='C') + data = np.asarray(func_array.data[unmatched_mask], order='C') + + return coords, data + + @staticmethod + def _match_coo(*args, **kwargs): + """ + Matches the coordinates for any number of input :obj:`COO` arrays. + Equivalent to "sparse" broadcasting for all arrays. + + Parameters + ---------- + args : Tuple[COO] + The input :obj:`COO` arrays. + return_midx : bool + Whether to return matched indices or matched arrays. Matching + only supported for two arrays. ``False`` by default. + cache : dict + Cache of things already matched. No cache by default. + + Returns + ------- + matched_idx : List[ndarray] + The indices of matched elements in the original arrays. Only returned if + ``return_midx`` is ``True``. + matched_arrays : List[COO] + The expanded, matched :obj:`COO` objects. Only returned if + ``return_midx`` is ``False``. 
+ """ + from .core import COO + from .common import linear_loc + + cache = kwargs.pop('cache', None) + return_midx = kwargs.pop('return_midx', False) + broadcast_shape = kwargs.pop('broadcast_shape', None) + + if kwargs: + raise ValueError('Unknown kwargs: {}'.format(kwargs.keys())) + + if return_midx and (len(args) != 2 or cache is not None): + raise NotImplementedError('Matching indices only supported for two args, and no cache.') + + matched_arrays = [args[0]] + cache_key = [id(args[0])] + for arg2 in args[1:]: + cache_key.append(id(arg2)) + key = tuple(cache_key) + if cache is not None and key in cache: + matched_arrays = cache[key] + continue + + cargs = [matched_arrays[0], arg2] + current_shape = _get_broadcast_shape(matched_arrays[0].shape, arg2.shape) + params = [_get_broadcast_parameters(arg.shape, current_shape) for arg in cargs] + reduced_params = [all(p) for p in zip(*params)] + reduced_shape = _get_reduced_shape(arg2.shape, + reduced_params[-arg2.ndim:]) + + reduced_coords = [_get_reduced_coords(arg.coords, reduced_params[-arg.ndim:]) + for arg in cargs] + + linear = [linear_loc(rc, reduced_shape) for rc in reduced_coords] + sorted_idx = [np.argsort(idx) for idx in linear] + linear = [idx[s] for idx, s in zip(linear, sorted_idx)] + matched_idx = _match_arrays(*linear) + + if return_midx: + matched_idx = [sidx[midx] for sidx, midx in zip(sorted_idx, matched_idx)] + return matched_idx + + coords = [arg.coords[:, s] for arg, s in zip(cargs, sorted_idx)] + mcoords = [c[:, idx] for c, idx in zip(coords, matched_idx)] + mcoords = _get_matching_coords(mcoords, params) + mdata = [arg.data[sorted_idx[0]][matched_idx[0]] for arg in matched_arrays] + mdata.append(arg2.data[sorted_idx[1]][matched_idx[1]]) + # The coords aren't truly sorted, but we don't need them, so it's + # best to avoid the extra cost. + matched_arrays = [ + COO(mcoords, md, shape=current_shape, sorted=True, has_duplicates=False) + for md in mdata] + + if cache is not None: + cache[key] = matched_arrays + + if broadcast_shape is not None and matched_arrays[0].shape != broadcast_shape: + params = _get_broadcast_parameters(matched_arrays[0].shape, broadcast_shape) + coords, idx = _get_expanded_coords_data( + matched_arrays[0].coords, np.arange(matched_arrays[0].nnz), params, broadcast_shape + ) + + matched_arrays = [ + COO(coords, arr.data[idx], shape=broadcast_shape, sorted=True, has_duplicates=False) + for arr in matched_arrays + ] + + return matched_arrays diff --git a/sparse/utils.py b/sparse/utils.py index f7602ea..e759237 100644 --- a/sparse/utils.py +++ b/sparse/utils.py @@ -165,36 +165,6 @@ def isscalar(x): return not isinstance(x, SparseArray) and np.isscalar(x) -class PositinalArgumentPartial(object): - def __init__(self, func, pos, posargs): - self.pos = list(pos) - self.posargs = list(posargs) - self.func = func - - self.n = len(pos) - - self.__doc__ = func.__doc__ - - def __call__(self, *args, **kwargs): - j = 0 - totargs = [] - - for i in range(len(args) + self.n): - if j >= self.n or i != self.pos[j]: - totargs.append(args[i - j]) - else: - totargs.append(self.posargs[j]) - j += 1 - - return self.func(*totargs, **kwargs) - - def __str__(self): - return str(self.func) - - def __repr__(self): - return repr(self.func) - - def random_value_array(value, fraction): def replace_values(n): i = int(n * fraction)
Allow ndarray in elemwise again I was considering allowing `ndarray` under `elemwise` again, provided that `f(ndarrays, zeros_in_place_of_COO)` is all-zero. Upsides: - No conversion required - No matching/sorting required for the `ndarray` inputs at all. This makes it significantly faster for mixed sparse-dense `elemwise`. Downsides: - We'll have to broadcast all `ndarrays` against each other to actually test for densification. This could be huge. Or we can skip this step and risk wrong results. - Not so bad considering in almost all use-cases, the `ndarrays` will be tiny compared to `COO`. - Puts a slight (but not huge) roadblock for arbitrary fill-values.
pydata/sparse
diff --git a/sparse/tests/test_coo.py b/sparse/tests/test_coo.py index fdc474e..e692cb2 100644 --- a/sparse/tests/test_coo.py +++ b/sparse/tests/test_coo.py @@ -293,6 +293,59 @@ def test_elemwise_inplace(func): assert_eq(x, s) +def test_elemwise_mixed(): + s1 = sparse.random((2, 3, 4), density=0.5) + x2 = np.random.rand(4) + + x1 = s1.todense() + + assert_eq(s1 * x2, x1 * x2) + + +def test_elemwise_mixed_empty(): + s1 = sparse.random((2, 0, 4), density=0.5) + x2 = np.random.rand(2, 0, 4) + + x1 = s1.todense() + + assert_eq(s1 * x2, x1 * x2) + + +def test_ndarray_bigger_shape(): + s1 = sparse.random((2, 3, 4), density=0.5) + x2 = np.random.rand(5, 1, 1, 1) + + with pytest.raises(ValueError): + s1 * x2 + + +def test_elemwise_unsupported(): + class A(): + pass + + s1 = sparse.random((2, 3, 4), density=0.5) + x2 = A() + + with pytest.raises(TypeError): + s1 + x2 + + assert sparse.elemwise(operator.add, s1, x2) is NotImplemented + + +def test_elemwise_mixed_broadcast(): + s1 = sparse.random((2, 3, 4), density=0.5) + s2 = sparse.random(4, density=0.5) + x3 = np.random.rand(3, 4) + + x1 = s1.todense() + x2 = s2.todense() + + def func(x1, x2, x3): + return x1 * x2 * x3 + + assert_eq(sparse.elemwise(func, s1, s2, x3), func(x1, x2, x3)) + + @pytest.mark.parametrize('func', [ operator.mul, operator.add, operator.sub, operator.gt, operator.lt, operator.ne @@ -486,7 +539,7 @@ def test_trinary_broadcasting_pathological(shapes, func, value, fraction): def test_sparse_broadcasting(monkeypatch): - orig_unmatch_coo = sparse.coo.umath._unmatch_coo + orig_unmatch_coo = sparse.coo.umath._Elemwise._get_func_coords_data state = {'num_matches': 0} @@ -495,10 +548,11 @@ def test_sparse_broadcasting(monkeypatch): def mock_unmatch_coo(*args, **kwargs): result = orig_unmatch_coo(*args, **kwargs) - state['num_matches'] += len(result[0]) + if result is not None: + state['num_matches'] += 1 return result - monkeypatch.setattr(sparse.coo.umath, '_unmatch_coo', mock_unmatch_coo) + monkeypatch.setattr(sparse.coo.umath._Elemwise, '_get_func_coords_data', mock_unmatch_coo) xs * ys @@ -507,7 +561,7 @@ def test_sparse_broadcasting(monkeypatch): def test_dense_broadcasting(monkeypatch): - orig_unmatch_coo = sparse.coo.umath._unmatch_coo + orig_unmatch_coo = sparse.coo.umath._Elemwise._get_func_coords_data state = {'num_matches': 0} @@ -516,10 +570,11 @@ def test_dense_broadcasting(monkeypatch): def mock_unmatch_coo(*args, **kwargs): result = orig_unmatch_coo(*args, **kwargs) - state['num_matches'] += len(result[0]) + if result is not None: + state['num_matches'] += 1 return result - monkeypatch.setattr(sparse.coo.umath, '_unmatch_coo', mock_unmatch_coo) + monkeypatch.setattr(sparse.coo.umath._Elemwise, '_get_func_coords_data', mock_unmatch_coo) xs + ys @@ -545,7 +600,7 @@ def test_ndarray_densification_fails(): xs = sparse.random((3, 4), density=0.5) y = np.random.rand(3, 4) - with pytest.raises(TypeError): + with pytest.raises(ValueError): xs + y @@ -553,7 +608,7 @@ def test_elemwise_noargs(): def func(): return np.float_(5.0) - assert sparse.elemwise(func) == func() + assert_eq(sparse.elemwise(func), func()) @pytest.mark.parametrize('func', [
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 5 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[all]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-flake8", "pytest-cov" ], "pre_install": null, "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 asv==0.5.1 attrs==22.2.0 Babel==2.11.0 certifi==2021.5.30 charset-normalizer==2.0.12 coverage==6.2 distlib==0.3.9 docutils==0.17.1 filelock==3.4.1 flake8==5.0.4 idna==3.10 imagesize==1.4.1 importlib-metadata==4.2.0 importlib-resources==5.4.0 iniconfig==1.1.1 Jinja2==3.0.3 llvmlite==0.36.0 MarkupSafe==2.0.1 mccabe==0.7.0 numba==0.53.1 numpy==1.19.5 packaging==21.3 platformdirs==2.4.0 pluggy==1.0.0 py==1.11.0 pycodestyle==2.9.1 pyflakes==2.5.0 Pygments==2.14.0 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 pytest-flake8==1.1.1 pytz==2025.2 releases==2.1.1 requests==2.27.1 scipy==1.5.4 semantic-version==2.6.0 six==1.17.0 snowballstemmer==2.2.0 -e git+https://github.com/pydata/sparse.git@b03b6b9a480a10a3cf59d7994292b9c5d3015cd5#egg=sparse Sphinx==4.3.2 sphinx-rtd-theme==1.3.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 toml==0.10.2 tomli==1.2.3 tox==3.28.0 typing_extensions==4.1.1 urllib3==1.26.20 virtualenv==20.16.2 zipp==3.6.0
name: sparse channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - asv==0.5.1 - attrs==22.2.0 - babel==2.11.0 - charset-normalizer==2.0.12 - coverage==6.2 - distlib==0.3.9 - docutils==0.17.1 - filelock==3.4.1 - flake8==5.0.4 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.2.0 - importlib-resources==5.4.0 - iniconfig==1.1.1 - jinja2==3.0.3 - llvmlite==0.36.0 - markupsafe==2.0.1 - mccabe==0.7.0 - numba==0.53.1 - numpy==1.19.5 - packaging==21.3 - platformdirs==2.4.0 - pluggy==1.0.0 - py==1.11.0 - pycodestyle==2.9.1 - pyflakes==2.5.0 - pygments==2.14.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - pytest-flake8==1.1.1 - pytz==2025.2 - releases==2.1.1 - requests==2.27.1 - scipy==1.5.4 - semantic-version==2.6.0 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==4.3.2 - sphinx-rtd-theme==1.3.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jquery==4.1 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - toml==0.10.2 - tomli==1.2.3 - tox==3.28.0 - typing-extensions==4.1.1 - urllib3==1.26.20 - virtualenv==20.16.2 - zipp==3.6.0 prefix: /opt/conda/envs/sparse
[ "sparse/tests/test_coo.py::test_elemwise_mixed", "sparse/tests/test_coo.py::test_elemwise_mixed_empty", "sparse/tests/test_coo.py::test_ndarray_bigger_shape", "sparse/tests/test_coo.py::test_elemwise_mixed_broadcast", "sparse/tests/test_coo.py::test_sparse_broadcasting", "sparse/tests/test_coo.py::test_dense_broadcasting", "sparse/tests/test_coo.py::test_ndarray_densification_fails", "sparse/tests/test_dok.py::test_setitem[shape0-index0-0.20334826085619861]", "sparse/tests/test_dok.py::test_setitem[shape1-index1-0.6453249644526395]", "sparse/tests/test_dok.py::test_setitem[shape3-1-0.4205356283126368]", "sparse/tests/test_dok.py::test_setitem[shape4-index4-0.9052351839659999]", "sparse/tests/test_dok.py::test_setitem[shape5-index5-0.3489304021939277]", "sparse/tests/test_dok.py::test_setitem[shape9-index9-0.7722599625576393]", "sparse/tests/test_dok.py::test_setitem[shape11-index11-0.14183046036354652]", "sparse/tests/test_dok.py::test_setitem[shape13-index13-0.014592003691852984]" ]
[ "sparse/__init__.py::flake-8::FLAKE8", "sparse/_version.py::flake-8::FLAKE8", "sparse/compatibility.py::flake-8::FLAKE8", "sparse/dok.py::flake-8::FLAKE8", "sparse/io.py::flake-8::FLAKE8", "sparse/slicing.py::flake-8::FLAKE8", "sparse/sparse_array.py::flake-8::FLAKE8", "sparse/utils.py::flake-8::FLAKE8", "sparse/coo/__init__.py::flake-8::FLAKE8", "sparse/coo/common.py::flake-8::FLAKE8", "sparse/coo/core.py::flake-8::FLAKE8", "sparse/coo/indexing.py::flake-8::FLAKE8", "sparse/coo/umath.py::flake-8::FLAKE8", "sparse/tests/test_coo.py::flake-8::FLAKE8", "sparse/tests/test_coo.py::test_op_scipy_sparse_left[func2]", "sparse/tests/test_coo.py::test_op_scipy_sparse_left[func3]", "sparse/tests/test_coo.py::test_op_scipy_sparse_left[func4]", "sparse/tests/test_coo.py::test_op_scipy_sparse_left[func5]", "sparse/tests/test_dok.py::flake-8::FLAKE8", "sparse/tests/test_io.py::flake-8::FLAKE8" ]
[ "sparse/dok.py::sparse.dok.DOK", "sparse/dok.py::sparse.dok.DOK.from_coo", "sparse/dok.py::sparse.dok.DOK.from_numpy", "sparse/dok.py::sparse.dok.DOK.nnz", "sparse/dok.py::sparse.dok.DOK.to_coo", "sparse/dok.py::sparse.dok.DOK.todense", "sparse/io.py::sparse.io.save_npz", "sparse/slicing.py::sparse.slicing.check_index", "sparse/slicing.py::sparse.slicing.clip_slice", "sparse/slicing.py::sparse.slicing.normalize_index", "sparse/slicing.py::sparse.slicing.posify_index", "sparse/slicing.py::sparse.slicing.replace_ellipsis", "sparse/slicing.py::sparse.slicing.replace_none", "sparse/slicing.py::sparse.slicing.sanitize_index", "sparse/sparse_array.py::sparse.sparse_array.SparseArray.density", "sparse/sparse_array.py::sparse.sparse_array.SparseArray.ndim", "sparse/sparse_array.py::sparse.sparse_array.SparseArray.nnz", "sparse/sparse_array.py::sparse.sparse_array.SparseArray.size", "sparse/utils.py::sparse.utils.check_consistent_fill_value", "sparse/utils.py::sparse.utils.check_zero_fill_value", "sparse/utils.py::sparse.utils.equivalent", "sparse/utils.py::sparse.utils.random", "sparse/coo/core.py::sparse.coo.core.COO", "sparse/coo/core.py::sparse.coo.core.COO.T", "sparse/coo/core.py::sparse.coo.core.COO.__len__", "sparse/coo/core.py::sparse.coo.core.COO._sort_indices", "sparse/coo/core.py::sparse.coo.core.COO._sum_duplicates", "sparse/coo/core.py::sparse.coo.core.COO.all", "sparse/coo/core.py::sparse.coo.core.COO.any", "sparse/coo/core.py::sparse.coo.core.COO.dot", "sparse/coo/core.py::sparse.coo.core.COO.dtype", "sparse/coo/core.py::sparse.coo.core.COO.from_iter", "sparse/coo/core.py::sparse.coo.core.COO.from_numpy", "sparse/coo/core.py::sparse.coo.core.COO.from_scipy_sparse", "sparse/coo/core.py::sparse.coo.core.COO.linear_loc", "sparse/coo/core.py::sparse.coo.core.COO.max", "sparse/coo/core.py::sparse.coo.core.COO.maybe_densify", "sparse/coo/core.py::sparse.coo.core.COO.min", "sparse/coo/core.py::sparse.coo.core.COO.nbytes", "sparse/coo/core.py::sparse.coo.core.COO.nnz", "sparse/coo/core.py::sparse.coo.core.COO.nonzero", "sparse/coo/core.py::sparse.coo.core.COO.prod", "sparse/coo/core.py::sparse.coo.core.COO.reduce", "sparse/coo/core.py::sparse.coo.core.COO.reshape", "sparse/coo/core.py::sparse.coo.core.COO.sum", "sparse/coo/core.py::sparse.coo.core.COO.todense", "sparse/coo/core.py::sparse.coo.core.COO.transpose", "sparse/coo/indexing.py::sparse.coo.indexing._compute_mask", "sparse/coo/indexing.py::sparse.coo.indexing._filter_pairs", "sparse/coo/indexing.py::sparse.coo.indexing._get_mask_pairs", "sparse/coo/indexing.py::sparse.coo.indexing._get_slice_len", "sparse/coo/indexing.py::sparse.coo.indexing._ind_ar_from_indices", "sparse/coo/indexing.py::sparse.coo.indexing._join_adjacent_pairs", "sparse/coo/indexing.py::sparse.coo.indexing._prune_indices", "sparse/tests/test_coo.py::test_reductions[True-None-max-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions[True-None-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions[True-None-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_reductions[True-None-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_reductions[True-None-min-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_reductions[True-0-max-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions[True-0-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions[True-0-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_reductions[True-0-prod-kwargs3-eqkwargs3]", 
"sparse/tests/test_coo.py::test_reductions[True-0-min-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_reductions[True-1-max-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions[True-1-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions[True-1-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_reductions[True-1-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_reductions[True-1-min-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_reductions[True-2-max-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions[True-2-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions[True-2-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_reductions[True-2-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_reductions[True-2-min-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_reductions[True-axis4-max-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions[True-axis4-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions[True-axis4-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_reductions[True-axis4-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_reductions[True-axis4-min-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_reductions[True--3-max-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions[True--3-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions[True--3-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_reductions[True--3-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_reductions[True--3-min-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_reductions[True-axis6-max-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions[True-axis6-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions[True-axis6-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_reductions[True-axis6-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_reductions[True-axis6-min-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_reductions[False-None-max-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions[False-None-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions[False-None-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_reductions[False-None-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_reductions[False-None-min-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_reductions[False-0-max-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions[False-0-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions[False-0-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_reductions[False-0-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_reductions[False-0-min-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_reductions[False-1-max-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions[False-1-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions[False-1-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_reductions[False-1-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_reductions[False-1-min-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_reductions[False-2-max-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions[False-2-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions[False-2-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_reductions[False-2-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_reductions[False-2-min-kwargs4-eqkwargs4]", 
"sparse/tests/test_coo.py::test_reductions[False-axis4-max-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions[False-axis4-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions[False-axis4-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_reductions[False-axis4-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_reductions[False-axis4-min-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_reductions[False--3-max-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions[False--3-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions[False--3-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_reductions[False--3-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_reductions[False--3-min-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_reductions[False-axis6-max-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions[False-axis6-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions[False-axis6-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_reductions[False-axis6-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_reductions[False-axis6-min-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_reductions_bool[True-None-any-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions_bool[True-None-all-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions_bool[True-0-any-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions_bool[True-0-all-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions_bool[True-1-any-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions_bool[True-1-all-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions_bool[True-2-any-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions_bool[True-2-all-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions_bool[True-axis4-any-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions_bool[True-axis4-all-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions_bool[True--3-any-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions_bool[True--3-all-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions_bool[True-axis6-any-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions_bool[True-axis6-all-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions_bool[False-None-any-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions_bool[False-None-all-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions_bool[False-0-any-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions_bool[False-0-all-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions_bool[False-1-any-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions_bool[False-1-all-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions_bool[False-2-any-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions_bool[False-2-all-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions_bool[False-axis4-any-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions_bool[False-axis4-all-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions_bool[False--3-any-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions_bool[False--3-all-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions_bool[False-axis6-any-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions_bool[False-axis6-all-kwargs1-eqkwargs1]", 
"sparse/tests/test_coo.py::test_ufunc_reductions[True-None-amax-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-None-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-None-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-None-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-None-amin-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-0-amax-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-0-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-0-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-0-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-0-amin-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-1-amax-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-1-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-1-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-1-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-1-amin-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-2-amax-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-2-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-2-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-2-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-2-amin-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-axis4-amax-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-axis4-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-axis4-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-axis4-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-axis4-amin-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions[True--1-amax-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_ufunc_reductions[True--1-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_ufunc_reductions[True--1-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions[True--1-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_ufunc_reductions[True--1-amin-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-axis6-amax-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-axis6-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-axis6-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-axis6-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-axis6-amin-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-None-amax-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-None-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-None-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-None-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-None-amin-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-0-amax-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-0-sum-kwargs1-eqkwargs1]", 
"sparse/tests/test_coo.py::test_ufunc_reductions[False-0-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-0-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-0-amin-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-1-amax-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-1-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-1-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-1-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-1-amin-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-2-amax-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-2-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-2-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-2-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-2-amin-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-axis4-amax-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-axis4-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-axis4-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-axis4-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-axis4-amin-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions[False--1-amax-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_ufunc_reductions[False--1-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_ufunc_reductions[False--1-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions[False--1-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_ufunc_reductions[False--1-amin-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-axis6-amax-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-axis6-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-axis6-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-axis6-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-axis6-amin-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions_kwargs[amax-kwargs0]", "sparse/tests/test_coo.py::test_ufunc_reductions_kwargs[sum-kwargs1]", "sparse/tests/test_coo.py::test_ufunc_reductions_kwargs[prod-kwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions_kwargs[reduce-kwargs3]", "sparse/tests/test_coo.py::test_ufunc_reductions_kwargs[reduce-kwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions_kwargs[reduce-kwargs5]", "sparse/tests/test_coo.py::test_nan_reductions[0.25-False-None-nansum]", "sparse/tests/test_coo.py::test_nan_reductions[0.25-False-None-nanprod]", "sparse/tests/test_coo.py::test_nan_reductions[0.25-False-None-nanmax]", "sparse/tests/test_coo.py::test_nan_reductions[0.25-False-None-nanmin]", "sparse/tests/test_coo.py::test_nan_reductions[0.25-False-0-nansum]", "sparse/tests/test_coo.py::test_nan_reductions[0.25-False-0-nanprod]", "sparse/tests/test_coo.py::test_nan_reductions[0.25-False-0-nanmax]", "sparse/tests/test_coo.py::test_nan_reductions[0.25-False-0-nanmin]", "sparse/tests/test_coo.py::test_nan_reductions[0.25-False-1-nansum]", "sparse/tests/test_coo.py::test_nan_reductions[0.25-False-1-nanprod]", 
"sparse/tests/test_coo.py::test_nan_reductions[0.25-False-1-nanmax]", "sparse/tests/test_coo.py::test_nan_reductions[0.25-False-1-nanmin]", "sparse/tests/test_coo.py::test_nan_reductions[0.5-False-None-nansum]", "sparse/tests/test_coo.py::test_nan_reductions[0.5-False-None-nanprod]", "sparse/tests/test_coo.py::test_nan_reductions[0.5-False-None-nanmax]", "sparse/tests/test_coo.py::test_nan_reductions[0.5-False-None-nanmin]", "sparse/tests/test_coo.py::test_nan_reductions[0.5-False-0-nansum]", "sparse/tests/test_coo.py::test_nan_reductions[0.5-False-0-nanprod]", "sparse/tests/test_coo.py::test_nan_reductions[0.5-False-0-nanmax]", "sparse/tests/test_coo.py::test_nan_reductions[0.5-False-0-nanmin]", "sparse/tests/test_coo.py::test_nan_reductions[0.5-False-1-nansum]", "sparse/tests/test_coo.py::test_nan_reductions[0.5-False-1-nanprod]", "sparse/tests/test_coo.py::test_nan_reductions[0.5-False-1-nanmax]", "sparse/tests/test_coo.py::test_nan_reductions[0.5-False-1-nanmin]", "sparse/tests/test_coo.py::test_nan_reductions[0.75-False-None-nansum]", "sparse/tests/test_coo.py::test_nan_reductions[0.75-False-None-nanprod]", "sparse/tests/test_coo.py::test_nan_reductions[0.75-False-None-nanmax]", "sparse/tests/test_coo.py::test_nan_reductions[0.75-False-None-nanmin]", "sparse/tests/test_coo.py::test_nan_reductions[0.75-False-0-nansum]", "sparse/tests/test_coo.py::test_nan_reductions[0.75-False-0-nanprod]", "sparse/tests/test_coo.py::test_nan_reductions[0.75-False-0-nanmax]", "sparse/tests/test_coo.py::test_nan_reductions[0.75-False-0-nanmin]", "sparse/tests/test_coo.py::test_nan_reductions[0.75-False-1-nansum]", "sparse/tests/test_coo.py::test_nan_reductions[0.75-False-1-nanprod]", "sparse/tests/test_coo.py::test_nan_reductions[0.75-False-1-nanmax]", "sparse/tests/test_coo.py::test_nan_reductions[0.75-False-1-nanmin]", "sparse/tests/test_coo.py::test_nan_reductions[1.0-False-None-nansum]", "sparse/tests/test_coo.py::test_nan_reductions[1.0-False-None-nanprod]", "sparse/tests/test_coo.py::test_nan_reductions[1.0-False-None-nanmax]", "sparse/tests/test_coo.py::test_nan_reductions[1.0-False-None-nanmin]", "sparse/tests/test_coo.py::test_nan_reductions[1.0-False-0-nansum]", "sparse/tests/test_coo.py::test_nan_reductions[1.0-False-0-nanprod]", "sparse/tests/test_coo.py::test_nan_reductions[1.0-False-0-nanmax]", "sparse/tests/test_coo.py::test_nan_reductions[1.0-False-0-nanmin]", "sparse/tests/test_coo.py::test_nan_reductions[1.0-False-1-nansum]", "sparse/tests/test_coo.py::test_nan_reductions[1.0-False-1-nanprod]", "sparse/tests/test_coo.py::test_nan_reductions[1.0-False-1-nanmax]", "sparse/tests/test_coo.py::test_nan_reductions[1.0-False-1-nanmin]", "sparse/tests/test_coo.py::test_all_nan_reduction_warning[None-nanmax]", "sparse/tests/test_coo.py::test_all_nan_reduction_warning[None-nanmin]", "sparse/tests/test_coo.py::test_all_nan_reduction_warning[0-nanmax]", "sparse/tests/test_coo.py::test_all_nan_reduction_warning[0-nanmin]", "sparse/tests/test_coo.py::test_all_nan_reduction_warning[1-nanmax]", "sparse/tests/test_coo.py::test_all_nan_reduction_warning[1-nanmin]", "sparse/tests/test_coo.py::test_transpose[None]", "sparse/tests/test_coo.py::test_transpose[axis1]", "sparse/tests/test_coo.py::test_transpose[axis2]", "sparse/tests/test_coo.py::test_transpose[axis3]", "sparse/tests/test_coo.py::test_transpose[axis4]", "sparse/tests/test_coo.py::test_transpose[axis5]", "sparse/tests/test_coo.py::test_transpose[axis6]", "sparse/tests/test_coo.py::test_transpose_error[axis0]", 
"sparse/tests/test_coo.py::test_transpose_error[axis1]", "sparse/tests/test_coo.py::test_transpose_error[axis2]", "sparse/tests/test_coo.py::test_transpose_error[axis3]", "sparse/tests/test_coo.py::test_transpose_error[axis4]", "sparse/tests/test_coo.py::test_transpose_error[axis5]", "sparse/tests/test_coo.py::test_transpose_error[0.3]", "sparse/tests/test_coo.py::test_transpose_error[axis7]", "sparse/tests/test_coo.py::test_reshape[a0-b0]", "sparse/tests/test_coo.py::test_reshape[a1-b1]", "sparse/tests/test_coo.py::test_reshape[a2-b2]", "sparse/tests/test_coo.py::test_reshape[a3-b3]", "sparse/tests/test_coo.py::test_reshape[a4-b4]", "sparse/tests/test_coo.py::test_reshape[a5-b5]", "sparse/tests/test_coo.py::test_reshape[a6-b6]", "sparse/tests/test_coo.py::test_reshape[a7-b7]", "sparse/tests/test_coo.py::test_reshape[a8-b8]", "sparse/tests/test_coo.py::test_reshape[a9-b9]", "sparse/tests/test_coo.py::test_large_reshape", "sparse/tests/test_coo.py::test_reshape_same", "sparse/tests/test_coo.py::test_to_scipy_sparse", "sparse/tests/test_coo.py::test_tensordot[a_shape0-b_shape0-axes0]", "sparse/tests/test_coo.py::test_tensordot[a_shape1-b_shape1-axes1]", "sparse/tests/test_coo.py::test_tensordot[a_shape2-b_shape2-axes2]", "sparse/tests/test_coo.py::test_tensordot[a_shape3-b_shape3-axes3]", "sparse/tests/test_coo.py::test_tensordot[a_shape4-b_shape4-axes4]", "sparse/tests/test_coo.py::test_tensordot[a_shape5-b_shape5-axes5]", "sparse/tests/test_coo.py::test_tensordot[a_shape6-b_shape6-axes6]", "sparse/tests/test_coo.py::test_tensordot[a_shape7-b_shape7-axes7]", "sparse/tests/test_coo.py::test_tensordot[a_shape8-b_shape8-axes8]", "sparse/tests/test_coo.py::test_tensordot[a_shape9-b_shape9-0]", "sparse/tests/test_coo.py::test_dot[a_shape0-b_shape0]", "sparse/tests/test_coo.py::test_dot[a_shape1-b_shape1]", "sparse/tests/test_coo.py::test_dot[a_shape2-b_shape2]", "sparse/tests/test_coo.py::test_dot[a_shape3-b_shape3]", "sparse/tests/test_coo.py::test_dot[a_shape4-b_shape4]", "sparse/tests/test_coo.py::test_elemwise[expm1]", "sparse/tests/test_coo.py::test_elemwise[log1p]", "sparse/tests/test_coo.py::test_elemwise[sin]", "sparse/tests/test_coo.py::test_elemwise[tan]", "sparse/tests/test_coo.py::test_elemwise[sinh]", "sparse/tests/test_coo.py::test_elemwise[tanh]", "sparse/tests/test_coo.py::test_elemwise[floor]", "sparse/tests/test_coo.py::test_elemwise[ceil]", "sparse/tests/test_coo.py::test_elemwise[sqrt]", "sparse/tests/test_coo.py::test_elemwise[conjugate0]", "sparse/tests/test_coo.py::test_elemwise[round_]", "sparse/tests/test_coo.py::test_elemwise[rint]", "sparse/tests/test_coo.py::test_elemwise[<lambda>0]", "sparse/tests/test_coo.py::test_elemwise[conjugate1]", "sparse/tests/test_coo.py::test_elemwise[conjugate2]", "sparse/tests/test_coo.py::test_elemwise[<lambda>1]", "sparse/tests/test_coo.py::test_elemwise[abs]", "sparse/tests/test_coo.py::test_elemwise_inplace[expm1]", "sparse/tests/test_coo.py::test_elemwise_inplace[log1p]", "sparse/tests/test_coo.py::test_elemwise_inplace[sin]", "sparse/tests/test_coo.py::test_elemwise_inplace[tan]", "sparse/tests/test_coo.py::test_elemwise_inplace[sinh]", "sparse/tests/test_coo.py::test_elemwise_inplace[tanh]", "sparse/tests/test_coo.py::test_elemwise_inplace[floor]", "sparse/tests/test_coo.py::test_elemwise_inplace[ceil]", "sparse/tests/test_coo.py::test_elemwise_inplace[sqrt]", "sparse/tests/test_coo.py::test_elemwise_inplace[conjugate0]", "sparse/tests/test_coo.py::test_elemwise_inplace[round_]", 
"sparse/tests/test_coo.py::test_elemwise_inplace[rint]", "sparse/tests/test_coo.py::test_elemwise_inplace[conjugate1]", "sparse/tests/test_coo.py::test_elemwise_inplace[conjugate2]", "sparse/tests/test_coo.py::test_elemwise_inplace[<lambda>]", "sparse/tests/test_coo.py::test_elemwise_unsupported", "sparse/tests/test_coo.py::test_elemwise_binary[shape0-mul]", "sparse/tests/test_coo.py::test_elemwise_binary[shape0-add]", "sparse/tests/test_coo.py::test_elemwise_binary[shape0-sub]", "sparse/tests/test_coo.py::test_elemwise_binary[shape0-gt]", "sparse/tests/test_coo.py::test_elemwise_binary[shape0-lt]", "sparse/tests/test_coo.py::test_elemwise_binary[shape0-ne]", "sparse/tests/test_coo.py::test_elemwise_binary[shape1-mul]", "sparse/tests/test_coo.py::test_elemwise_binary[shape1-add]", "sparse/tests/test_coo.py::test_elemwise_binary[shape1-sub]", "sparse/tests/test_coo.py::test_elemwise_binary[shape1-gt]", "sparse/tests/test_coo.py::test_elemwise_binary[shape1-lt]", "sparse/tests/test_coo.py::test_elemwise_binary[shape1-ne]", "sparse/tests/test_coo.py::test_elemwise_binary[shape2-mul]", "sparse/tests/test_coo.py::test_elemwise_binary[shape2-add]", "sparse/tests/test_coo.py::test_elemwise_binary[shape2-sub]", "sparse/tests/test_coo.py::test_elemwise_binary[shape2-gt]", "sparse/tests/test_coo.py::test_elemwise_binary[shape2-lt]", "sparse/tests/test_coo.py::test_elemwise_binary[shape2-ne]", "sparse/tests/test_coo.py::test_elemwise_binary[shape3-mul]", "sparse/tests/test_coo.py::test_elemwise_binary[shape3-add]", "sparse/tests/test_coo.py::test_elemwise_binary[shape3-sub]", "sparse/tests/test_coo.py::test_elemwise_binary[shape3-gt]", "sparse/tests/test_coo.py::test_elemwise_binary[shape3-lt]", "sparse/tests/test_coo.py::test_elemwise_binary[shape3-ne]", "sparse/tests/test_coo.py::test_elemwise_binary_inplace[shape0-imul]", "sparse/tests/test_coo.py::test_elemwise_binary_inplace[shape0-iadd]", "sparse/tests/test_coo.py::test_elemwise_binary_inplace[shape0-isub]", "sparse/tests/test_coo.py::test_elemwise_binary_inplace[shape1-imul]", "sparse/tests/test_coo.py::test_elemwise_binary_inplace[shape1-iadd]", "sparse/tests/test_coo.py::test_elemwise_binary_inplace[shape1-isub]", "sparse/tests/test_coo.py::test_elemwise_binary_inplace[shape2-imul]", "sparse/tests/test_coo.py::test_elemwise_binary_inplace[shape2-iadd]", "sparse/tests/test_coo.py::test_elemwise_binary_inplace[shape2-isub]", "sparse/tests/test_coo.py::test_elemwise_binary_inplace[shape3-imul]", "sparse/tests/test_coo.py::test_elemwise_binary_inplace[shape3-iadd]", "sparse/tests/test_coo.py::test_elemwise_binary_inplace[shape3-isub]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape0-<lambda>0]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape0-<lambda>1]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape0-<lambda>2]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape0-<lambda>3]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape1-<lambda>0]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape1-<lambda>1]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape1-<lambda>2]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape1-<lambda>3]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape2-<lambda>0]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape2-<lambda>1]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape2-<lambda>2]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape2-<lambda>3]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape3-<lambda>0]", 
"sparse/tests/test_coo.py::test_elemwise_trinary[shape3-<lambda>1]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape3-<lambda>2]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape3-<lambda>3]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape10-shape20-add]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape10-shape20-mul]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape11-shape21-add]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape11-shape21-mul]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape12-shape22-add]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape12-shape22-mul]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape13-shape23-add]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape13-shape23-mul]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape14-shape24-add]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape14-shape24-mul]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape15-shape25-add]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape15-shape25-mul]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape16-shape26-add]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape16-shape26-mul]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape17-shape27-add]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape17-shape27-mul]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape18-shape28-add]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape18-shape28-mul]", "sparse/tests/test_coo.py::test_broadcast_to[shape10-shape20]", "sparse/tests/test_coo.py::test_broadcast_to[shape11-shape21]", "sparse/tests/test_coo.py::test_broadcast_to[shape12-shape22]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>0-shapes0]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>0-shapes1]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>0-shapes2]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>0-shapes3]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>0-shapes4]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>0-shapes5]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>0-shapes6]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>0-shapes7]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>1-shapes0]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>1-shapes1]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>1-shapes2]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>1-shapes3]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>1-shapes4]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>1-shapes5]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>1-shapes6]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>1-shapes7]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>2-shapes0]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>2-shapes1]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>2-shapes2]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>2-shapes3]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>2-shapes4]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>2-shapes5]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>2-shapes6]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>2-shapes7]", 
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>3-shapes0]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>3-shapes1]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>3-shapes2]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>3-shapes3]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>3-shapes4]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>3-shapes5]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>3-shapes6]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>3-shapes7]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>4-shapes0]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>4-shapes1]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>4-shapes2]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>4-shapes3]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>4-shapes4]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>4-shapes5]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>4-shapes6]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>4-shapes7]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>5-shapes0]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>5-shapes1]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>5-shapes2]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>5-shapes3]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>5-shapes4]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>5-shapes5]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>5-shapes6]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>5-shapes7]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25-nan-shapes0-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25-nan-shapes1-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25-nan-shapes2-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25-nan-shapes3-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25-inf-shapes0-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25-inf-shapes1-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25-inf-shapes2-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25-inf-shapes3-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25--inf-shapes0-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25--inf-shapes1-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25--inf-shapes2-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25--inf-shapes3-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5-nan-shapes0-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5-nan-shapes1-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5-nan-shapes2-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5-nan-shapes3-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5-inf-shapes0-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5-inf-shapes1-<lambda>]", 
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5-inf-shapes2-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5-inf-shapes3-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5--inf-shapes0-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5--inf-shapes1-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5--inf-shapes2-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5--inf-shapes3-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75-nan-shapes0-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75-nan-shapes1-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75-nan-shapes2-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75-nan-shapes3-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75-inf-shapes0-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75-inf-shapes1-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75-inf-shapes2-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75-inf-shapes3-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75--inf-shapes0-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75--inf-shapes1-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75--inf-shapes2-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75--inf-shapes3-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0-nan-shapes0-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0-nan-shapes1-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0-nan-shapes2-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0-nan-shapes3-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0-inf-shapes0-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0-inf-shapes1-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0-inf-shapes2-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0-inf-shapes3-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0--inf-shapes0-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0--inf-shapes1-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0--inf-shapes2-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0--inf-shapes3-<lambda>]", "sparse/tests/test_coo.py::test_sparsearray_elemwise[coo]", "sparse/tests/test_coo.py::test_sparsearray_elemwise[dok]", "sparse/tests/test_coo.py::test_elemwise_noargs", "sparse/tests/test_coo.py::test_nonzero_outout_fv_ufunc[pow]", "sparse/tests/test_coo.py::test_nonzero_outout_fv_ufunc[truediv]", "sparse/tests/test_coo.py::test_nonzero_outout_fv_ufunc[floordiv]", "sparse/tests/test_coo.py::test_nonzero_outout_fv_ufunc[ge]", "sparse/tests/test_coo.py::test_nonzero_outout_fv_ufunc[le]", "sparse/tests/test_coo.py::test_nonzero_outout_fv_ufunc[eq]", "sparse/tests/test_coo.py::test_nonzero_outout_fv_ufunc[mod]", "sparse/tests/test_coo.py::test_elemwise_scalar[True-mul-5]", 
"sparse/tests/test_coo.py::test_elemwise_scalar[True-add-0]", "sparse/tests/test_coo.py::test_elemwise_scalar[True-sub-0]", "sparse/tests/test_coo.py::test_elemwise_scalar[True-pow-5]", "sparse/tests/test_coo.py::test_elemwise_scalar[True-truediv-3]", "sparse/tests/test_coo.py::test_elemwise_scalar[True-floordiv-4]", "sparse/tests/test_coo.py::test_elemwise_scalar[True-gt-5]", "sparse/tests/test_coo.py::test_elemwise_scalar[True-lt--5]", "sparse/tests/test_coo.py::test_elemwise_scalar[True-ne-0]", "sparse/tests/test_coo.py::test_elemwise_scalar[True-ge-5]", "sparse/tests/test_coo.py::test_elemwise_scalar[True-le--3]", "sparse/tests/test_coo.py::test_elemwise_scalar[True-eq-1]", "sparse/tests/test_coo.py::test_elemwise_scalar[True-mod-5]", "sparse/tests/test_coo.py::test_elemwise_scalar[False-mul-5]", "sparse/tests/test_coo.py::test_elemwise_scalar[False-add-0]", "sparse/tests/test_coo.py::test_elemwise_scalar[False-sub-0]", "sparse/tests/test_coo.py::test_elemwise_scalar[False-pow-5]", "sparse/tests/test_coo.py::test_elemwise_scalar[False-truediv-3]", "sparse/tests/test_coo.py::test_elemwise_scalar[False-floordiv-4]", "sparse/tests/test_coo.py::test_elemwise_scalar[False-gt-5]", "sparse/tests/test_coo.py::test_elemwise_scalar[False-lt--5]", "sparse/tests/test_coo.py::test_elemwise_scalar[False-ne-0]", "sparse/tests/test_coo.py::test_elemwise_scalar[False-ge-5]", "sparse/tests/test_coo.py::test_elemwise_scalar[False-le--3]", "sparse/tests/test_coo.py::test_elemwise_scalar[False-eq-1]", "sparse/tests/test_coo.py::test_elemwise_scalar[False-mod-5]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[True-mul-5]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[True-add-0]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[True-sub-0]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[True-gt--5]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[True-lt-5]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[True-ne-0]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[True-ge--5]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[True-le-3]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[True-eq-1]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[False-mul-5]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[False-add-0]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[False-sub-0]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[False-gt--5]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[False-lt-5]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[False-ne-0]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[False-ge--5]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[False-le-3]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[False-eq-1]", "sparse/tests/test_coo.py::test_scalar_output_nonzero_fv[add-5]", "sparse/tests/test_coo.py::test_scalar_output_nonzero_fv[sub--5]", "sparse/tests/test_coo.py::test_scalar_output_nonzero_fv[pow--3]", "sparse/tests/test_coo.py::test_scalar_output_nonzero_fv[truediv-0]", "sparse/tests/test_coo.py::test_scalar_output_nonzero_fv[floordiv-0]", "sparse/tests/test_coo.py::test_scalar_output_nonzero_fv[gt--5]", "sparse/tests/test_coo.py::test_scalar_output_nonzero_fv[lt-5]", "sparse/tests/test_coo.py::test_scalar_output_nonzero_fv[ne-1]", "sparse/tests/test_coo.py::test_scalar_output_nonzero_fv[ge--3]", "sparse/tests/test_coo.py::test_scalar_output_nonzero_fv[le-3]", 
"sparse/tests/test_coo.py::test_scalar_output_nonzero_fv[eq-0]", "sparse/tests/test_coo.py::test_bitwise_binary[shape0-and_]", "sparse/tests/test_coo.py::test_bitwise_binary[shape0-or_]", "sparse/tests/test_coo.py::test_bitwise_binary[shape0-xor]", "sparse/tests/test_coo.py::test_bitwise_binary[shape1-and_]", "sparse/tests/test_coo.py::test_bitwise_binary[shape1-or_]", "sparse/tests/test_coo.py::test_bitwise_binary[shape1-xor]", "sparse/tests/test_coo.py::test_bitwise_binary[shape2-and_]", "sparse/tests/test_coo.py::test_bitwise_binary[shape2-or_]", "sparse/tests/test_coo.py::test_bitwise_binary[shape2-xor]", "sparse/tests/test_coo.py::test_bitwise_binary[shape3-and_]", "sparse/tests/test_coo.py::test_bitwise_binary[shape3-or_]", "sparse/tests/test_coo.py::test_bitwise_binary[shape3-xor]", "sparse/tests/test_coo.py::test_bitwise_binary_inplace[shape0-iand]", "sparse/tests/test_coo.py::test_bitwise_binary_inplace[shape0-ior]", "sparse/tests/test_coo.py::test_bitwise_binary_inplace[shape0-ixor]", "sparse/tests/test_coo.py::test_bitwise_binary_inplace[shape1-iand]", "sparse/tests/test_coo.py::test_bitwise_binary_inplace[shape1-ior]", "sparse/tests/test_coo.py::test_bitwise_binary_inplace[shape1-ixor]", "sparse/tests/test_coo.py::test_bitwise_binary_inplace[shape2-iand]", "sparse/tests/test_coo.py::test_bitwise_binary_inplace[shape2-ior]", "sparse/tests/test_coo.py::test_bitwise_binary_inplace[shape2-ixor]", "sparse/tests/test_coo.py::test_bitwise_binary_inplace[shape3-iand]", "sparse/tests/test_coo.py::test_bitwise_binary_inplace[shape3-ior]", "sparse/tests/test_coo.py::test_bitwise_binary_inplace[shape3-ixor]", "sparse/tests/test_coo.py::test_bitshift_binary[shape0-lshift]", "sparse/tests/test_coo.py::test_bitshift_binary[shape0-rshift]", "sparse/tests/test_coo.py::test_bitshift_binary[shape1-lshift]", "sparse/tests/test_coo.py::test_bitshift_binary[shape1-rshift]", "sparse/tests/test_coo.py::test_bitshift_binary[shape2-lshift]", "sparse/tests/test_coo.py::test_bitshift_binary[shape2-rshift]", "sparse/tests/test_coo.py::test_bitshift_binary[shape3-lshift]", "sparse/tests/test_coo.py::test_bitshift_binary[shape3-rshift]", "sparse/tests/test_coo.py::test_bitshift_binary_inplace[shape0-ilshift]", "sparse/tests/test_coo.py::test_bitshift_binary_inplace[shape0-irshift]", "sparse/tests/test_coo.py::test_bitshift_binary_inplace[shape1-ilshift]", "sparse/tests/test_coo.py::test_bitshift_binary_inplace[shape1-irshift]", "sparse/tests/test_coo.py::test_bitshift_binary_inplace[shape2-ilshift]", "sparse/tests/test_coo.py::test_bitshift_binary_inplace[shape2-irshift]", "sparse/tests/test_coo.py::test_bitshift_binary_inplace[shape3-ilshift]", "sparse/tests/test_coo.py::test_bitshift_binary_inplace[shape3-irshift]", "sparse/tests/test_coo.py::test_bitwise_scalar[shape0-and_]", "sparse/tests/test_coo.py::test_bitwise_scalar[shape1-and_]", "sparse/tests/test_coo.py::test_bitwise_scalar[shape2-and_]", "sparse/tests/test_coo.py::test_bitwise_scalar[shape3-and_]", "sparse/tests/test_coo.py::test_bitshift_scalar[shape0-lshift]", "sparse/tests/test_coo.py::test_bitshift_scalar[shape0-rshift]", "sparse/tests/test_coo.py::test_bitshift_scalar[shape1-lshift]", "sparse/tests/test_coo.py::test_bitshift_scalar[shape1-rshift]", "sparse/tests/test_coo.py::test_bitshift_scalar[shape2-lshift]", "sparse/tests/test_coo.py::test_bitshift_scalar[shape2-rshift]", "sparse/tests/test_coo.py::test_bitshift_scalar[shape3-lshift]", "sparse/tests/test_coo.py::test_bitshift_scalar[shape3-rshift]", 
"sparse/tests/test_coo.py::test_unary_bitwise_nonzero_output_fv[shape0-invert]", "sparse/tests/test_coo.py::test_unary_bitwise_nonzero_output_fv[shape1-invert]", "sparse/tests/test_coo.py::test_unary_bitwise_nonzero_output_fv[shape2-invert]", "sparse/tests/test_coo.py::test_unary_bitwise_nonzero_output_fv[shape3-invert]", "sparse/tests/test_coo.py::test_binary_bitwise_nonzero_output_fv[shape0-or_]", "sparse/tests/test_coo.py::test_binary_bitwise_nonzero_output_fv[shape0-xor]", "sparse/tests/test_coo.py::test_binary_bitwise_nonzero_output_fv[shape1-or_]", "sparse/tests/test_coo.py::test_binary_bitwise_nonzero_output_fv[shape1-xor]", "sparse/tests/test_coo.py::test_binary_bitwise_nonzero_output_fv[shape2-or_]", "sparse/tests/test_coo.py::test_binary_bitwise_nonzero_output_fv[shape2-xor]", "sparse/tests/test_coo.py::test_binary_bitwise_nonzero_output_fv[shape3-or_]", "sparse/tests/test_coo.py::test_binary_bitwise_nonzero_output_fv[shape3-xor]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape0-mul]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape0-add]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape0-sub]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape0-gt]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape0-lt]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape0-ne]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape1-mul]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape1-add]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape1-sub]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape1-gt]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape1-lt]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape1-ne]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape2-mul]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape2-add]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape2-sub]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape2-gt]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape2-lt]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape2-ne]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape3-mul]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape3-add]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape3-sub]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape3-gt]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape3-lt]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape3-ne]", "sparse/tests/test_coo.py::test_binary_bitshift_densification_fails[shape0-lshift]", "sparse/tests/test_coo.py::test_binary_bitshift_densification_fails[shape0-rshift]", "sparse/tests/test_coo.py::test_binary_bitshift_densification_fails[shape1-lshift]", "sparse/tests/test_coo.py::test_binary_bitshift_densification_fails[shape1-rshift]", "sparse/tests/test_coo.py::test_binary_bitshift_densification_fails[shape2-lshift]", "sparse/tests/test_coo.py::test_binary_bitshift_densification_fails[shape2-rshift]", "sparse/tests/test_coo.py::test_binary_bitshift_densification_fails[shape3-lshift]", "sparse/tests/test_coo.py::test_binary_bitshift_densification_fails[shape3-rshift]", "sparse/tests/test_coo.py::test_bitwise_binary_bool[shape0-and_]", "sparse/tests/test_coo.py::test_bitwise_binary_bool[shape0-or_]", "sparse/tests/test_coo.py::test_bitwise_binary_bool[shape0-xor]", 
"sparse/tests/test_coo.py::test_bitwise_binary_bool[shape1-and_]", "sparse/tests/test_coo.py::test_bitwise_binary_bool[shape1-or_]", "sparse/tests/test_coo.py::test_bitwise_binary_bool[shape1-xor]", "sparse/tests/test_coo.py::test_bitwise_binary_bool[shape2-and_]", "sparse/tests/test_coo.py::test_bitwise_binary_bool[shape2-or_]", "sparse/tests/test_coo.py::test_bitwise_binary_bool[shape2-xor]", "sparse/tests/test_coo.py::test_bitwise_binary_bool[shape3-and_]", "sparse/tests/test_coo.py::test_bitwise_binary_bool[shape3-or_]", "sparse/tests/test_coo.py::test_bitwise_binary_bool[shape3-xor]", "sparse/tests/test_coo.py::test_elemwise_binary_empty", "sparse/tests/test_coo.py::test_gt", "sparse/tests/test_coo.py::test_slicing[0]", "sparse/tests/test_coo.py::test_slicing[1]", "sparse/tests/test_coo.py::test_slicing[-1]", "sparse/tests/test_coo.py::test_slicing[index3]", "sparse/tests/test_coo.py::test_slicing[index4]", "sparse/tests/test_coo.py::test_slicing[index5]", "sparse/tests/test_coo.py::test_slicing[index6]", "sparse/tests/test_coo.py::test_slicing[index7]", "sparse/tests/test_coo.py::test_slicing[index8]", "sparse/tests/test_coo.py::test_slicing[index9]", "sparse/tests/test_coo.py::test_slicing[index10]", "sparse/tests/test_coo.py::test_slicing[index11]", "sparse/tests/test_coo.py::test_slicing[index12]", "sparse/tests/test_coo.py::test_slicing[index13]", "sparse/tests/test_coo.py::test_slicing[index14]", "sparse/tests/test_coo.py::test_slicing[index15]", "sparse/tests/test_coo.py::test_slicing[index16]", "sparse/tests/test_coo.py::test_slicing[index17]", "sparse/tests/test_coo.py::test_slicing[index18]", "sparse/tests/test_coo.py::test_slicing[index19]", "sparse/tests/test_coo.py::test_slicing[index20]", "sparse/tests/test_coo.py::test_slicing[index21]", "sparse/tests/test_coo.py::test_slicing[index22]", "sparse/tests/test_coo.py::test_slicing[index23]", "sparse/tests/test_coo.py::test_slicing[index24]", "sparse/tests/test_coo.py::test_slicing[index25]", "sparse/tests/test_coo.py::test_slicing[index26]", "sparse/tests/test_coo.py::test_slicing[index27]", "sparse/tests/test_coo.py::test_slicing[index28]", "sparse/tests/test_coo.py::test_slicing[index29]", "sparse/tests/test_coo.py::test_slicing[index30]", "sparse/tests/test_coo.py::test_slicing[index31]", "sparse/tests/test_coo.py::test_slicing[index32]", "sparse/tests/test_coo.py::test_slicing[index33]", "sparse/tests/test_coo.py::test_slicing[index34]", "sparse/tests/test_coo.py::test_slicing[index35]", "sparse/tests/test_coo.py::test_slicing[index36]", "sparse/tests/test_coo.py::test_slicing[index37]", "sparse/tests/test_coo.py::test_slicing[index38]", "sparse/tests/test_coo.py::test_slicing[index39]", "sparse/tests/test_coo.py::test_slicing[index40]", "sparse/tests/test_coo.py::test_slicing[index41]", "sparse/tests/test_coo.py::test_slicing[index42]", "sparse/tests/test_coo.py::test_slicing[index43]", "sparse/tests/test_coo.py::test_slicing[index44]", "sparse/tests/test_coo.py::test_slicing[index45]", "sparse/tests/test_coo.py::test_advanced_indexing[index0]", "sparse/tests/test_coo.py::test_advanced_indexing[index1]", "sparse/tests/test_coo.py::test_advanced_indexing[index2]", "sparse/tests/test_coo.py::test_advanced_indexing[index3]", "sparse/tests/test_coo.py::test_advanced_indexing[index4]", "sparse/tests/test_coo.py::test_advanced_indexing[index5]", "sparse/tests/test_coo.py::test_advanced_indexing[index6]", "sparse/tests/test_coo.py::test_advanced_indexing[index7]", "sparse/tests/test_coo.py::test_advanced_indexing[index8]", 
"sparse/tests/test_coo.py::test_advanced_indexing[index9]", "sparse/tests/test_coo.py::test_custom_dtype_slicing", "sparse/tests/test_coo.py::test_slicing_errors[index0]", "sparse/tests/test_coo.py::test_slicing_errors[index1]", "sparse/tests/test_coo.py::test_slicing_errors[index2]", "sparse/tests/test_coo.py::test_slicing_errors[5]", "sparse/tests/test_coo.py::test_slicing_errors[-5]", "sparse/tests/test_coo.py::test_slicing_errors[foo]", "sparse/tests/test_coo.py::test_slicing_errors[index6]", "sparse/tests/test_coo.py::test_slicing_errors[0.5]", "sparse/tests/test_coo.py::test_slicing_errors[index8]", "sparse/tests/test_coo.py::test_slicing_errors[index9]", "sparse/tests/test_coo.py::test_slicing_errors[index10]", "sparse/tests/test_coo.py::test_slicing_errors[index11]", "sparse/tests/test_coo.py::test_concatenate", "sparse/tests/test_coo.py::test_concatenate_mixed[stack-0]", "sparse/tests/test_coo.py::test_concatenate_mixed[stack-1]", "sparse/tests/test_coo.py::test_concatenate_mixed[concatenate-0]", "sparse/tests/test_coo.py::test_concatenate_mixed[concatenate-1]", "sparse/tests/test_coo.py::test_concatenate_noarrays", "sparse/tests/test_coo.py::test_stack[0-shape0]", "sparse/tests/test_coo.py::test_stack[0-shape1]", "sparse/tests/test_coo.py::test_stack[0-shape2]", "sparse/tests/test_coo.py::test_stack[1-shape0]", "sparse/tests/test_coo.py::test_stack[1-shape1]", "sparse/tests/test_coo.py::test_stack[1-shape2]", "sparse/tests/test_coo.py::test_stack[-1-shape0]", "sparse/tests/test_coo.py::test_stack[-1-shape1]", "sparse/tests/test_coo.py::test_stack[-1-shape2]", "sparse/tests/test_coo.py::test_large_concat_stack", "sparse/tests/test_coo.py::test_addition", "sparse/tests/test_coo.py::test_scalar_multiplication[2]", "sparse/tests/test_coo.py::test_scalar_multiplication[2.5]", "sparse/tests/test_coo.py::test_scalar_multiplication[scalar2]", "sparse/tests/test_coo.py::test_scalar_multiplication[scalar3]", "sparse/tests/test_coo.py::test_scalar_exponentiation", "sparse/tests/test_coo.py::test_create_with_lists_of_tuples", "sparse/tests/test_coo.py::test_sizeof", "sparse/tests/test_coo.py::test_scipy_sparse_interface", "sparse/tests/test_coo.py::test_scipy_sparse_interaction[coo]", "sparse/tests/test_coo.py::test_scipy_sparse_interaction[csr]", "sparse/tests/test_coo.py::test_scipy_sparse_interaction[dok]", "sparse/tests/test_coo.py::test_scipy_sparse_interaction[csc]", "sparse/tests/test_coo.py::test_op_scipy_sparse[mul]", "sparse/tests/test_coo.py::test_op_scipy_sparse[add]", "sparse/tests/test_coo.py::test_op_scipy_sparse[sub]", "sparse/tests/test_coo.py::test_op_scipy_sparse[gt]", "sparse/tests/test_coo.py::test_op_scipy_sparse[lt]", "sparse/tests/test_coo.py::test_op_scipy_sparse[ne]", "sparse/tests/test_coo.py::test_op_scipy_sparse_left[add]", "sparse/tests/test_coo.py::test_op_scipy_sparse_left[sub]", "sparse/tests/test_coo.py::test_cache_csr", "sparse/tests/test_coo.py::test_empty_shape", "sparse/tests/test_coo.py::test_single_dimension", "sparse/tests/test_coo.py::test_large_sum", "sparse/tests/test_coo.py::test_add_many_sparse_arrays", "sparse/tests/test_coo.py::test_caching", "sparse/tests/test_coo.py::test_scalar_slicing", "sparse/tests/test_coo.py::test_triul[shape0-0]", "sparse/tests/test_coo.py::test_triul[shape1-1]", "sparse/tests/test_coo.py::test_triul[shape2--1]", "sparse/tests/test_coo.py::test_triul[shape3--2]", "sparse/tests/test_coo.py::test_triul[shape4-1000]", "sparse/tests/test_coo.py::test_empty_reduction", 
"sparse/tests/test_coo.py::test_random_shape[0.1-shape0]", "sparse/tests/test_coo.py::test_random_shape[0.1-shape1]", "sparse/tests/test_coo.py::test_random_shape[0.1-shape2]", "sparse/tests/test_coo.py::test_random_shape[0.3-shape0]", "sparse/tests/test_coo.py::test_random_shape[0.3-shape1]", "sparse/tests/test_coo.py::test_random_shape[0.3-shape2]", "sparse/tests/test_coo.py::test_random_shape[0.5-shape0]", "sparse/tests/test_coo.py::test_random_shape[0.5-shape1]", "sparse/tests/test_coo.py::test_random_shape[0.5-shape2]", "sparse/tests/test_coo.py::test_random_shape[0.7-shape0]", "sparse/tests/test_coo.py::test_random_shape[0.7-shape1]", "sparse/tests/test_coo.py::test_random_shape[0.7-shape2]", "sparse/tests/test_coo.py::test_two_random_unequal", "sparse/tests/test_coo.py::test_two_random_same_seed", "sparse/tests/test_coo.py::test_random_rvs[0.0-shape0-None-float64]", "sparse/tests/test_coo.py::test_random_rvs[0.0-shape0-rvs-int]", "sparse/tests/test_coo.py::test_random_rvs[0.0-shape0-<lambda>-bool]", "sparse/tests/test_coo.py::test_random_rvs[0.0-shape1-None-float64]", "sparse/tests/test_coo.py::test_random_rvs[0.0-shape1-rvs-int]", "sparse/tests/test_coo.py::test_random_rvs[0.0-shape1-<lambda>-bool]", "sparse/tests/test_coo.py::test_random_rvs[0.01-shape0-None-float64]", "sparse/tests/test_coo.py::test_random_rvs[0.01-shape0-rvs-int]", "sparse/tests/test_coo.py::test_random_rvs[0.01-shape0-<lambda>-bool]", "sparse/tests/test_coo.py::test_random_rvs[0.01-shape1-None-float64]", "sparse/tests/test_coo.py::test_random_rvs[0.01-shape1-rvs-int]", "sparse/tests/test_coo.py::test_random_rvs[0.01-shape1-<lambda>-bool]", "sparse/tests/test_coo.py::test_random_rvs[0.1-shape0-None-float64]", "sparse/tests/test_coo.py::test_random_rvs[0.1-shape0-rvs-int]", "sparse/tests/test_coo.py::test_random_rvs[0.1-shape0-<lambda>-bool]", "sparse/tests/test_coo.py::test_random_rvs[0.1-shape1-None-float64]", "sparse/tests/test_coo.py::test_random_rvs[0.1-shape1-rvs-int]", "sparse/tests/test_coo.py::test_random_rvs[0.1-shape1-<lambda>-bool]", "sparse/tests/test_coo.py::test_random_rvs[0.2-shape0-None-float64]", "sparse/tests/test_coo.py::test_random_rvs[0.2-shape0-rvs-int]", "sparse/tests/test_coo.py::test_random_rvs[0.2-shape0-<lambda>-bool]", "sparse/tests/test_coo.py::test_random_rvs[0.2-shape1-None-float64]", "sparse/tests/test_coo.py::test_random_rvs[0.2-shape1-rvs-int]", "sparse/tests/test_coo.py::test_random_rvs[0.2-shape1-<lambda>-bool]", "sparse/tests/test_coo.py::test_random_fv[coo]", "sparse/tests/test_coo.py::test_random_fv[dok]", "sparse/tests/test_coo.py::test_scalar_shape_construction", "sparse/tests/test_coo.py::test_len", "sparse/tests/test_coo.py::test_density", "sparse/tests/test_coo.py::test_size", "sparse/tests/test_coo.py::test_np_array", "sparse/tests/test_coo.py::test_three_arg_where[shapes0]", "sparse/tests/test_coo.py::test_three_arg_where[shapes1]", "sparse/tests/test_coo.py::test_three_arg_where[shapes2]", "sparse/tests/test_coo.py::test_three_arg_where[shapes3]", "sparse/tests/test_coo.py::test_three_arg_where[shapes4]", "sparse/tests/test_coo.py::test_three_arg_where[shapes5]", "sparse/tests/test_coo.py::test_three_arg_where[shapes6]", "sparse/tests/test_coo.py::test_three_arg_where[shapes7]", "sparse/tests/test_coo.py::test_one_arg_where", "sparse/tests/test_coo.py::test_one_arg_where_dense", "sparse/tests/test_coo.py::test_two_arg_where", "sparse/tests/test_coo.py::test_inplace_invalid_shape[imul]", "sparse/tests/test_coo.py::test_inplace_invalid_shape[iadd]", 
"sparse/tests/test_coo.py::test_inplace_invalid_shape[isub]", "sparse/tests/test_coo.py::test_nonzero", "sparse/tests/test_coo.py::test_argwhere", "sparse/tests/test_coo.py::test_asformat[coo]", "sparse/tests/test_coo.py::test_asformat[dok]", "sparse/tests/test_coo.py::test_as_coo[COO]", "sparse/tests/test_coo.py::test_as_coo[DOK]", "sparse/tests/test_coo.py::test_as_coo[csr_matrix]", "sparse/tests/test_coo.py::test_as_coo[asarray]", "sparse/tests/test_coo.py::test_invalid_attrs_error", "sparse/tests/test_coo.py::test_invalid_iterable_error", "sparse/tests/test_coo.py::TestRoll::test_1d[0]", "sparse/tests/test_coo.py::TestRoll::test_1d[2]", "sparse/tests/test_coo.py::TestRoll::test_1d[-2]", "sparse/tests/test_coo.py::TestRoll::test_1d[20]", "sparse/tests/test_coo.py::TestRoll::test_1d[-20]", "sparse/tests/test_coo.py::TestRoll::test_2d[None-0]", "sparse/tests/test_coo.py::TestRoll::test_2d[None-2]", "sparse/tests/test_coo.py::TestRoll::test_2d[None--2]", "sparse/tests/test_coo.py::TestRoll::test_2d[None-20]", "sparse/tests/test_coo.py::TestRoll::test_2d[None--20]", "sparse/tests/test_coo.py::TestRoll::test_2d[0-0]", "sparse/tests/test_coo.py::TestRoll::test_2d[0-2]", "sparse/tests/test_coo.py::TestRoll::test_2d[0--2]", "sparse/tests/test_coo.py::TestRoll::test_2d[0-20]", "sparse/tests/test_coo.py::TestRoll::test_2d[0--20]", "sparse/tests/test_coo.py::TestRoll::test_2d[1-0]", "sparse/tests/test_coo.py::TestRoll::test_2d[1-2]", "sparse/tests/test_coo.py::TestRoll::test_2d[1--2]", "sparse/tests/test_coo.py::TestRoll::test_2d[1-20]", "sparse/tests/test_coo.py::TestRoll::test_2d[1--20]", "sparse/tests/test_coo.py::TestRoll::test_2d[ax3-0]", "sparse/tests/test_coo.py::TestRoll::test_2d[ax3-2]", "sparse/tests/test_coo.py::TestRoll::test_2d[ax3--2]", "sparse/tests/test_coo.py::TestRoll::test_2d[ax3-20]", "sparse/tests/test_coo.py::TestRoll::test_2d[ax3--20]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax0-shift0]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax0-shift1]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax0-shift2]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax0-shift3]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax1-shift0]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax1-shift1]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax1-shift2]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax1-shift3]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax2-shift0]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax2-shift1]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax2-shift2]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax2-shift3]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax3-shift0]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax3-shift1]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax3-shift2]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax3-shift3]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[None-0]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[None-2]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[None--2]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[None-20]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[None--20]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[0-0]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[0-2]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[0--2]", 
"sparse/tests/test_coo.py::TestRoll::test_original_is_copied[0-20]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[0--20]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[1-0]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[1-2]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[1--2]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[1-20]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[1--20]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[ax3-0]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[ax3-2]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[ax3--2]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[ax3-20]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[ax3--20]", "sparse/tests/test_coo.py::TestRoll::test_empty", "sparse/tests/test_coo.py::TestRoll::test_valerr[args0]", "sparse/tests/test_coo.py::TestRoll::test_valerr[args1]", "sparse/tests/test_coo.py::TestRoll::test_valerr[args2]", "sparse/tests/test_coo.py::TestRoll::test_valerr[args3]", "sparse/tests/test_coo.py::TestFailFillValue::test_nonzero_fv", "sparse/tests/test_coo.py::TestFailFillValue::test_inconsistent_fv", "sparse/tests/test_coo.py::test_initialization[2]", "sparse/tests/test_coo.py::test_initialization[3]", "sparse/tests/test_coo.py::test_initialization[4]", "sparse/tests/test_coo.py::test_initialization[5]", "sparse/tests/test_dok.py::test_random_shape_nnz[0.1-shape0]", "sparse/tests/test_dok.py::test_random_shape_nnz[0.1-shape1]", "sparse/tests/test_dok.py::test_random_shape_nnz[0.1-shape2]", "sparse/tests/test_dok.py::test_random_shape_nnz[0.3-shape0]", "sparse/tests/test_dok.py::test_random_shape_nnz[0.3-shape1]", "sparse/tests/test_dok.py::test_random_shape_nnz[0.3-shape2]", "sparse/tests/test_dok.py::test_random_shape_nnz[0.5-shape0]", "sparse/tests/test_dok.py::test_random_shape_nnz[0.5-shape1]", "sparse/tests/test_dok.py::test_random_shape_nnz[0.5-shape2]", "sparse/tests/test_dok.py::test_random_shape_nnz[0.7-shape0]", "sparse/tests/test_dok.py::test_random_shape_nnz[0.7-shape1]", "sparse/tests/test_dok.py::test_random_shape_nnz[0.7-shape2]", "sparse/tests/test_dok.py::test_convert_to_coo", "sparse/tests/test_dok.py::test_convert_from_coo", "sparse/tests/test_dok.py::test_convert_from_numpy", "sparse/tests/test_dok.py::test_convert_to_numpy", "sparse/tests/test_dok.py::test_construct[2-data0]", "sparse/tests/test_dok.py::test_construct[shape1-data1]", "sparse/tests/test_dok.py::test_construct[shape2-data2]", "sparse/tests/test_dok.py::test_getitem[0.1-shape0]", "sparse/tests/test_dok.py::test_getitem[0.1-shape1]", "sparse/tests/test_dok.py::test_getitem[0.1-shape2]", "sparse/tests/test_dok.py::test_getitem[0.3-shape0]", "sparse/tests/test_dok.py::test_getitem[0.3-shape1]", "sparse/tests/test_dok.py::test_getitem[0.3-shape2]", "sparse/tests/test_dok.py::test_getitem[0.5-shape0]", "sparse/tests/test_dok.py::test_getitem[0.5-shape1]", "sparse/tests/test_dok.py::test_getitem[0.5-shape2]", "sparse/tests/test_dok.py::test_getitem[0.7-shape0]", "sparse/tests/test_dok.py::test_getitem[0.7-shape1]", "sparse/tests/test_dok.py::test_getitem[0.7-shape2]", "sparse/tests/test_dok.py::test_setitem[shape2-index2-value2]", "sparse/tests/test_dok.py::test_setitem[shape6-index6-value6]", "sparse/tests/test_dok.py::test_setitem[shape7-index7-value7]", "sparse/tests/test_dok.py::test_setitem[shape8-index8-value8]", 
"sparse/tests/test_dok.py::test_setitem[shape10-index10-value10]", "sparse/tests/test_dok.py::test_setitem[shape12-index12-value12]", "sparse/tests/test_dok.py::test_default_dtype", "sparse/tests/test_dok.py::test_int_dtype", "sparse/tests/test_dok.py::test_float_dtype", "sparse/tests/test_dok.py::test_set_zero", "sparse/tests/test_dok.py::test_asformat[coo]", "sparse/tests/test_dok.py::test_asformat[dok]", "sparse/tests/test_io.py::test_save_load_npz_file[True]", "sparse/tests/test_io.py::test_save_load_npz_file[False]", "sparse/tests/test_io.py::test_load_wrong_format_exception" ]
[]
BSD 3-Clause "New" or "Revised" License
3,057
[ "sparse/utils.py", "docs/changelog.rst", "sparse/coo/umath.py", "docs/operations.rst", "sparse/coo/core.py" ]
[ "sparse/utils.py", "docs/changelog.rst", "sparse/coo/umath.py", "docs/operations.rst", "sparse/coo/core.py" ]
horejsek__python-fastjsonschema-27
6c07d2300c5018628dab9487bd5a89b85acb3084
2018-09-12 15:25:25
6c07d2300c5018628dab9487bd5a89b85acb3084
diff --git a/fastjsonschema/draft04.py b/fastjsonschema/draft04.py index 6d79a1e..57b2a85 100644 --- a/fastjsonschema/draft04.py +++ b/fastjsonschema/draft04.py @@ -20,10 +20,7 @@ class CodeGeneratorDraft04(CodeGenerator): FORMAT_REGEXS = { 'date-time': r'^\d{4}-[01]\d-[0-3]\dT[0-2]\d:[0-5]\d:[0-5]\d(?:\.\d+)?(?:[+-][0-2]\d:[0-5]\d|Z)?$', 'email': r'^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$', - 'hostname': ( - r'^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*' - r'([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]{1,62}[A-Za-z0-9])$' - ), + 'hostname': r'^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]{0,61}[A-Za-z0-9])$', 'ipv4': r'^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$', 'ipv6': r'^(?:(?:[0-9A-Fa-f]{1,4}:){6}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|::(?:[0-9A-Fa-f]{1,4}:){5}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|(?:[0-9A-Fa-f]{1,4})?::(?:[0-9A-Fa-f]{1,4}:){4}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4})?::(?:[0-9A-Fa-f]{1,4}:){3}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|(?:(?:[0-9A-Fa-f]{1,4}:){,2}[0-9A-Fa-f]{1,4})?::(?:[0-9A-Fa-f]{1,4}:){2}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|(?:(?:[0-9A-Fa-f]{1,4}:){,3}[0-9A-Fa-f]{1,4})?::[0-9A-Fa-f]{1,4}:(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|(?:(?:[0-9A-Fa-f]{1,4}:){,4}[0-9A-Fa-f]{1,4})?::(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|(?:(?:[0-9A-Fa-f]{1,4}:){,5}[0-9A-Fa-f]{1,4})?::[0-9A-Fa-f]{1,4}|(?:(?:[0-9A-Fa-f]{1,4}:){,6}[0-9A-Fa-f]{1,4})?::)$', 'uri': r'^\w+:(\/?\/?)[^\s]+$',
'example.de' (and many other hostnames) not valid with hostname format And another one; hope you aren't getting annoyed. Running this example throws a JsonSchemaException: ``` import fastjsonschema schema = { "$schema": "http://json-schema.org/draft-07/schema#", "$id": "https://example.com/example.schema.json", "title": "Example", "description": "An example schema", "type": "object", "properties": { "host": { "type": "string", "description": "Some hostname", "format": "hostname" } } } validate = fastjsonschema.compile(schema) validate({"host": "example.de"}) ``` For 'google.com' it works fine. I'll create a PR soon, probably.
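The failure is explained by the final-label quantifier in the pre-patch regex, which only admits one-character or three-plus-character top-level labels. As a minimal standalone illustration (plain `re`, bypassing fastjsonschema's generated validators entirely; the two patterns are copied from the diff above), this shows why 'google.com' passed while 'example.de' did not:

```python
import re

# Pre-patch pattern from fastjsonschema/draft04.py: the final label is either a
# single character or "char + 1..62 chars + char", so two-letter TLDs never match.
OLD = (r'^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*'
       r'([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]{1,62}[A-Za-z0-9])$')

# Patched pattern: the {0,61} quantifier also admits two-character labels.
NEW = (r'^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])\.)*'
       r'([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]{0,61}[A-Za-z0-9])$')

for host in ('google.com', 'example.de', 'localhost'):
    print(host, bool(re.match(OLD, host)), bool(re.match(NEW, host)))
# google.com True True
# example.de False True
# localhost True True
```

This is exactly the behaviour the new `tests/test_hostname.py` cases pin down for `example.de` and `example.fr`.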
horejsek/python-fastjsonschema
diff --git a/tests/test_datetime.py b/tests/test_datetime.py index 2263e9f..b9e460b 100644 --- a/tests/test_datetime.py +++ b/tests/test_datetime.py @@ -1,4 +1,3 @@ - import pytest from fastjsonschema import JsonSchemaException diff --git a/tests/test_hostname.py b/tests/test_hostname.py new file mode 100644 index 0000000..8620ed1 --- /dev/null +++ b/tests/test_hostname.py @@ -0,0 +1,19 @@ +import pytest + +from fastjsonschema import JsonSchemaException + + +exc = JsonSchemaException('data must be hostname') [email protected]('value, expected', [ + ('', exc), + ('LDhsjf878&d', exc), + ('bla.bla-', exc), + ('example.example.com-', exc), + ('localhost', 'localhost'), + ('example.com', 'example.com'), + ('example.de', 'example.de'), + ('example.fr', 'example.fr'), + ('example.example.com', 'example.example.com'), +]) +def test_hostname(asserter, value, expected): + asserter({'type': 'string', 'format': 'hostname'}, value, expected)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 1 }
2.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[devel]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-benchmark" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
astroid==2.11.7 attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 charset-normalizer==2.0.12 colorama==0.4.5 dill==0.3.4 execnet==1.9.0 -e git+https://github.com/horejsek/python-fastjsonschema.git@6c07d2300c5018628dab9487bd5a89b85acb3084#egg=fastjsonschema idna==3.10 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work isort==5.10.1 json-spec==0.10.1 jsonschema==3.2.0 lazy-object-proxy==1.7.1 mccabe==0.7.0 more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work platformdirs==2.4.0 pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work py-cpuinfo==9.0.0 pylint==2.13.9 pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pyrsistent==0.18.0 pytest==6.2.4 pytest-benchmark==3.4.1 pytest-cache==1.0 requests==2.27.1 six==1.17.0 toml @ file:///tmp/build/80754af9/toml_1616166611790/work tomli==1.2.3 typed-ast==1.5.5 typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work urllib3==1.26.20 validictory==1.1.3 wrapt==1.16.0 zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: python-fastjsonschema channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - astroid==2.11.7 - charset-normalizer==2.0.12 - colorama==0.4.5 - dill==0.3.4 - execnet==1.9.0 - idna==3.10 - isort==5.10.1 - json-spec==0.10.1 - jsonschema==3.2.0 - lazy-object-proxy==1.7.1 - mccabe==0.7.0 - platformdirs==2.4.0 - py-cpuinfo==9.0.0 - pylint==2.13.9 - pyrsistent==0.18.0 - pytest-benchmark==3.4.1 - pytest-cache==1.0 - requests==2.27.1 - six==1.17.0 - tomli==1.2.3 - typed-ast==1.5.5 - urllib3==1.26.20 - validictory==1.1.3 - wrapt==1.16.0 prefix: /opt/conda/envs/python-fastjsonschema
[ "tests/test_hostname.py::test_hostname[example.de-example.de]", "tests/test_hostname.py::test_hostname[example.fr-example.fr]" ]
[]
[ "tests/test_datetime.py::test_datetime[-expected0]", "tests/test_datetime.py::test_datetime[bla-expected1]", "tests/test_datetime.py::test_datetime[2018-02-05T14:17:10.00Z-2018-02-05T14:17:10.00Z]", "tests/test_datetime.py::test_datetime[2018-02-05T14:17:10Z-2018-02-05T14:17:10Z]", "tests/test_hostname.py::test_hostname[-expected0]", "tests/test_hostname.py::test_hostname[LDhsjf878&d-expected1]", "tests/test_hostname.py::test_hostname[bla.bla--expected2]", "tests/test_hostname.py::test_hostname[example.example.com--expected3]", "tests/test_hostname.py::test_hostname[localhost-localhost]", "tests/test_hostname.py::test_hostname[example.com-example.com]", "tests/test_hostname.py::test_hostname[example.example.com-example.example.com]" ]
[]
BSD 3-Clause "New" or "Revised" License
3,058
[ "fastjsonschema/draft04.py" ]
[ "fastjsonschema/draft04.py" ]
TACC__agavepy-52
4f950a942d8b25ce68f6dcf9d038f5430962240f
2018-09-12 17:12:29
06a9114a77ab4ff8af368e9c3f750c6500e3af3e
alejandrox1: The branch that #49 was based on was updated before it was merged. Due to this, I am creating a new PR to resolve #48.
diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..dc85065 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,19 @@ +--- +language: python + +python: + - "2.7" + - "3.3" + - "3.6" + +install: + - pip install -r requirements.txt + +script: + - py.test tests + +before_install: + - pip install sphinx>=1.6.3 + - pip install pytest + - pip install mock + - python setup.py install diff --git a/Makefile b/Makefile index 5605c02..2735419 100644 --- a/Makefile +++ b/Makefile @@ -25,6 +25,14 @@ build: # Build development container. clean: rm -rf agavepy.egg-info build dist .cache rm -rf schema openapi + rm -rf agavepy/__pycache__/ + rm -rf agavepy/tests/__pycache__/ + rm -rf agavepy/*.pyc + rm -rf tests/__pycache__/ + rm -rf tests/*.pyc + rm -rf .pytest_cache/ + +clean-docs: make -C docs/ clean deps: @@ -32,19 +40,25 @@ deps: mkdir -p openap docs: deps - python scripts/swagger_to_rst.py && \ - cd docs && \ - make static-clean && \ - make openapi && \ - make schema && \ - make html && \ + python scripts/swagger_to_rst.py + cd docs + make static-clean + make openapi + make schema + make html cd ../ install: python setup.py install +install-py2: + python2 setup.py install + shell: build # Start a shell inside the build environment. $(DOCKER_RUN_AGAVECLI) bash tests: - py.test agavepy/tests/test_agave_basic.py -s + pytest -v --cache-clear tests/ + +tests-py2: + python2 -m pytest tests diff --git a/README.rst b/README.rst index 1441513..aed9336 100644 --- a/README.rst +++ b/README.rst @@ -114,23 +114,26 @@ documentation and your specific usage needs. Create a new Oauth client ^^^^^^^^^^^^^^^^^^^^^^^^^ +In order to interact with Agave, you'll need to first create an Oauth client so +that later on you can create access tokens to do work. + +To create a client you can do the following: .. code-block:: pycon - >>> ag = Agave(api_server='https://api.tacc.cloud', - ... username='mwvaughn', - ... password='PaZ$w0r6!') - >>> ag.clients.create(body={'clientName': 'my_client'}) - {u'consumerKey': u'kV4XLPhVBAv9RTf7a2QyBHhQAXca', u'_links': {u'subscriber': - {u'href': u'https://api.tacc.cloud/profiles/v2/mwvaughn'}, u'self': {u'href': - u'https://api.tacc.cloud/clients/v2/my_client'}, u'subscriptions': {u'href': - u'https://api.tacc.cloud/clients/v2/my_client/subscriptions/'}}, - u'description': u'', u'tier': u'Unlimited', u'callbackUrl': u'', - u'consumerSecret': u'5EbjEOcyzzIsAAE3vBS7nspVqHQa', u'name': u'my_client'} - -You use the **consumerKey** and **consumerSecret** to generate Oauth *tokens*, + >>> from agavepy.agave import Agave + >>> ag = Agave(api_server='https://api.tacc.cloud') + >>> ag.clients_create("client-name", "some description") + API username: your-username + API password: + >>> ag.api_key + 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxx' + >>> ag.api_secret + 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXX' + +You will use the api key and secret to generate Oauth *tokens*, which are temporary credentials that you can use in place of putting your real -credentials into code that is scripting against the TACC APIs. +credentials into code that is interacting with TACC APIs. Reuse an existing Oauth client ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -145,21 +148,13 @@ AgavePy up to use it works the same way: >>> from agavepy.agave import Agave >>> ag = Agave(api_server='https://api.tacc.cloud', - ... username='mwvaughn', password='PaZ$w0r6!', + ... username='mwvaughn', ... client_name='my_client', ... api_key='kV4XLPhVBAv9RTf7a2QyBHhQAXca', ... 
api_secret='5EbjEOcyzzIsAAE3vBS7nspVqHQa') The Agave object ``ag`` is now configured to talk to all TACC Cloud services. -Here's an example: Let's retrieve a the curent user's **profile**. - -.. code-block:: pycon - >>> ag.profiles.get() - {u'status': u'', u'username': u'mwvaughn', u'first_name': u'Matthew', - u'last_name': u'Vaughn', u'phone': u'867-5309', u'mobile_phone': u'', - u'create_time': u'20140515180317Z', u'full_name': u'vaughn', - u'email': u'[email protected]'} The refresh token ^^^^^^^^^^^^^^^^^ diff --git a/agavepy/agave.py b/agavepy/agave.py index 8c26266..ef1c960 100644 --- a/agavepy/agave.py +++ b/agavepy/agave.py @@ -19,6 +19,7 @@ import jinja2 import dateutil.parser import requests + import sys sys.path.insert(0, os.path.dirname(__file__)) @@ -26,6 +27,7 @@ from .swaggerpy.client import SwaggerClient from .swaggerpy.http_client import SynchronousHttpClient from .swaggerpy.processors import SwaggerProcessor +from .clients import client_create from .tenants import tenant_list HERE = os.path.dirname(os.path.abspath(__file__)) @@ -225,10 +227,15 @@ class Agave(object): value = (kwargs[param] if mandatory else kwargs.get(param, default)) except KeyError: + # Request user to set the tenant url (api_server). if param == "api_server": self.list_tenants(tenantsurl="https://api.tacc.utexas.edu/tenants") value = input( "\nPlease specify the url of a tenant to interact with: ") + + # If present, remove the last '/' from the url. + if value[-1] == "/": + value = value[:-1] else: raise AgaveError("parameter \"{}\" is mandatory".format(param)) @@ -519,6 +526,29 @@ class Agave(object): """ tenant_list(tenantsurl) + def clients_create(self, client_name, description): + """ Create an Agave Oauth client + + Save the api key and secret upon a successfull reuest to Agave. + + PARAMETERS + ---------- + client_name: string + Name of the oauth client to be created. + description: string + Description of the client to be created. + """ + # Set tenant url. + tenant_url = self.api_server + + # Set username. + if self.username == "" or self.username is None: + self.username = input("API username: ") + + self.api_key, self.api_secret = client_create( + self.username, client_name, description, tenant_url) + + class Resource(object): diff --git a/agavepy/clients.py b/agavepy/clients.py new file mode 100644 index 0000000..fb3220a --- /dev/null +++ b/agavepy/clients.py @@ -0,0 +1,82 @@ +""" + clients.py + +Functions related to Agave Oauth clients. +""" +from __future__ import print_function +from builtins import input +import getpass +import json +import requests +import sys +from os import path +from utils import handle_bad_response_status_code + + +class AgaveClientError(Exception): + """ Handle Agave-related client operations + """ + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return repr(self.msg) + + +def client_create(username, client_name, description, tenant_url): + """ Create an Agave client + + Make a request to Agave to create an oauth client. Returns the client's api + key and secret as a tuple. + + PARAMETERS + ---------- + username: string + User's username. + client_name: string + Name for agave client. + description: string + Description of the agave client. + tenant_url: string + URL of agave tenant to interact with. + + RETURNS + ------- + api_key: string + api_secret: string + """ + # Get user's password. + passwd = getpass.getpass(prompt="API password: ") + + # Set request endpoint. + endpoint = tenant_url + "/clients/v2" + + # Make sure client_name is valid. 
+ if client_name == "" or client_name is None: + raise AgaveClientError("Error creating client: invalid client_name") + + # Make request. + try: + data = { + "clientName": client_name, + "description": description, + "tier": "Unlimited", + "callbackUrl": "", + } + resp = requests.post(endpoint, data=data, auth=(username, passwd)) + del passwd + except Exception as err: + del passwd + raise AgaveClientError(err) + + # Handle bad status code. + handle_bad_response_status_code(resp) + + # Parse the request's response and return api key and secret. + response = resp.json().get("result", {}) + api_key = response.get("consumerKey", "") + api_secret = response.get("consumerSecret", "") + if api_key == "" or api_secret == "": + raise AgaveClientError("Error creating client: api key and secret are empty") + + return api_key, api_secret diff --git a/agavepy/tenants.py b/agavepy/tenants.py index a3dc70d..85a0222 100644 --- a/agavepy/tenants.py +++ b/agavepy/tenants.py @@ -6,7 +6,7 @@ Methods to interact with Agave tenants. from __future__ import print_function import requests import sys -from .response_handlers import handle_bad_response_status_code +from utils import handle_bad_response_status_code def get_tenants(url): diff --git a/agavepy/utils/__init__.py b/agavepy/utils/__init__.py new file mode 100644 index 0000000..fb93af6 --- /dev/null +++ b/agavepy/utils/__init__.py @@ -0,0 +1,1 @@ +from .response_handlers import handle_bad_response_status_code diff --git a/agavepy/utils/response_handlers.py b/agavepy/utils/response_handlers.py new file mode 100644 index 0000000..e98a8dc --- /dev/null +++ b/agavepy/utils/response_handlers.py @@ -0,0 +1,28 @@ +""" + response_hanlders.py +""" +from __future__ import print_function +import sys + + + +class AgaveAPICallError(Exception): + """ Handle bad responses from Agave + """ + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return repr(self.msg) + + +def handle_bad_response_status_code(r): + """ Handle a response with a bad status code + """ + if not r.ok: + error_msg = "Bad {0} request to {1}, status code {2}\n".format( + r.request.method, r.url, r.status_code), + error_msg += "{}\n".format(r.request.body) + error_msg += "{}\n".format(r.json()) + + raise AgaveAPICallError(error_msg) diff --git a/dev.Dockerfile b/dev.Dockerfile index 3bc256f..a982d1b 100644 --- a/dev.Dockerfile +++ b/dev.Dockerfile @@ -16,7 +16,10 @@ RUN apt-get update -y && apt-get install -yq git bash-completion \ && make altinstall \ && ln -sf /usr/local/bin/python2.7 /usr/bin/python2 \ && cd ../ \ - && rm -r Python-${PYVERSION}.tgz Python-${PYVERSION} + && rm -r Python-${PYVERSION}.tgz Python-${PYVERSION} \ + && pip install sphinx \ + && pip install pytest \ + && pip2.7 install mock RUN git clone https://github.com/TACC/agavepy
Re-implement client creation to properly manage credentials Currently, to create an Agave OAuth client one has to specify the username and password at the moment an Agave object is instantiated; see #41. Client creation will be re-implemented to ask for credentials only when the library makes the request to create a client. This way credentials will not be stored.
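The flow this patch introduces lives in `agavepy/clients.py` (`client_create`): the password is requested only when the client-creation request is made and discarded afterwards. A condensed sketch of that flow follows; the helper name is hypothetical, and the patch's `AgaveClientError` wrapping and warning handling are omitted for brevity:

```python
import getpass
import requests

def client_create_sketch(username, client_name, description, tenant_url):
    """Condensed outline of agavepy.clients.client_create from this patch:
    the password is prompted for interactively, used once for HTTP basic
    auth, and never stored on the Agave object."""
    password = getpass.getpass(prompt="API password: ")
    resp = requests.post(
        tenant_url + "/clients/v2",
        data={"clientName": client_name, "description": description,
              "tier": "Unlimited", "callbackUrl": ""},
        auth=(username, password),
    )
    resp.raise_for_status()
    result = resp.json().get("result", {})
    return result.get("consumerKey", ""), result.get("consumerSecret", "")
```

Only the resulting API key and secret are kept (as `ag.api_key` / `ag.api_secret`), which is what the accompanying mock-server test asserts.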
TACC/agavepy
diff --git a/TESTING.md b/TESTING.md new file mode 100644 index 0000000..79b2575 --- /dev/null +++ b/TESTING.md @@ -0,0 +1,58 @@ +# Testing + +## Writing new tests + +Most code changes will fall into one of the following categories. + +### Writing tests for new features + +New code should be covered by unit tests. If the code is difficult to test with +a unit tests then that is a good sign that it should be refactored to make it +easier to reuse and maintain. Consider accepting unexported interfaces instead +of structs so that fakes can be provided for dependencies. + + +### Writing tests for bug fixes + +Bugs fixes should include a unit test case which exercises the bug. + + +### Test structure + +Test serve a dual purpose: test functionality and document the expected +response from the agave API. + +The test suite uses valid sample responses, see +[sample responses](tests/sample_responses) to mock the agave API by spawning a +mock server in the local host to handle any requests from the cli. + + +## Running tests + +To run the unit test suite run the development container: +``` +$ make shell +``` + +### Testing Python 3 +Before you start testing, you'll need to install all the package dependencies. +``` +$ make install +``` + +Inside the container run: +``` +$ make tests +``` + + +### Testing Python 2 +Install all dependencies +``` +$ make install-p2 +``` + +Run tests +``` +$ make tests-py2 +``` diff --git a/tests/clients_test.py b/tests/clients_test.py new file mode 100644 index 0000000..be061b1 --- /dev/null +++ b/tests/clients_test.py @@ -0,0 +1,112 @@ +""" + client_tests.py + +Test operations related to the management of Agave oauth clients. +""" +import pytest +import cgi +import json +import os +import sys +from http.server import BaseHTTPRequestHandler +from testsuite_utils import MockServer +from response_templates import response_template_to_json + +try: + from unittest.mock import patch +except ImportError: + from mock import patch + +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) +from agavepy.agave import Agave + + +# The agave API will provide a response with the following format upon a +# successfull attempt at creating a client. +sample_client_create_response = response_template_to_json("clients-create.json") + + +class MockServerClientEndpoints(BaseHTTPRequestHandler): + """ Mock the Agave API + + Mock client managament endpoints from the agave api. + """ + + def do_POST(self): + """ Test agave client creation + """ + # Get request data. + form = cgi.FieldStorage( + fp = self.rfile, + headers = self.headers, + environ={'REQUEST_METHOD': 'POST'}) + + # Check client name is set. + client_name = form.getvalue("clientName", "") + if client_name == "": + self.send_response(400) + self.end_headers() + return + + # Check client description is set. + client_description = form.getvalue("description", "") + if client_description == "": + self.send_response(400) + self.end_headers() + return + + # CHeck client tier is set. + if form.getvalue("tier", "") == "": + self.send_response(400) + self.end_headers() + return + + # Update response fields. 
+ sample_client_create_response["result"]["name"] = client_name + sample_client_create_response["result"]["description"] = client_description + sample_client_create_response["result"]["consumerKey"] = "some api key" + sample_client_create_response["result"]["consumerSecret"] = "some secret" + + self.send_response(200) + self.end_headers() + self.wfile.write(json.dumps(sample_client_create_response).encode()) + + + +class TestMockServer(MockServer): + """ Test client-related agave api endpoints + + Tests client creation (HTTP POST request), removal (HTTP DELETE request), + and listing (HTTP GET request). + """ + + @classmethod + def setup_class(cls): + """ Set up an agave mock server + + Listen and serve mock api as a daemon. + """ + MockServer.serve.__func__(cls, MockServerClientEndpoints) + + + @patch("agavepy.agave.input") + @patch("clients.getpass.getpass") + def test_client_create(self, mock_input, mock_pass): + """ Test client create op + + Patch username and password from user to send a client create request + to mock server. + """ + # Patch username and password. + mock_input.return_value = "user" + mock_pass.return_value = "pass" + + # Instantiate Agave object making reference to local mock server. + local_uri = "http://localhost:{port}/".format(port=self.mock_server_port) + ag = Agave(api_server=local_uri) + + # Create client. + ag.clients_create("client-name", "some description") + + assert ag.api_key == "some api key" + assert ag.api_secret == "some secret" diff --git a/tests/response_templates.py b/tests/response_templates.py new file mode 100644 index 0000000..18b8356 --- /dev/null +++ b/tests/response_templates.py @@ -0,0 +1,14 @@ +import json +import pkg_resources + + +rsc_pkg = __name__ + + +def response_template_to_json(template_name): + rsc_path = "/".join(("sample_responses", template_name)) + try: # Python 3.3 will return the contents as a bytes-like object. + file_contents = pkg_resources.resource_string(rsc_pkg, rsc_path).decode("utf-8") + except AttributeError: + file_contents = pkg_resources.resource_string(rsc_pkg, rsc_path) + return json.loads(file_contents) diff --git a/tests/sample_responses/clients-create.json b/tests/sample_responses/clients-create.json new file mode 100644 index 0000000..a3e6789 --- /dev/null +++ b/tests/sample_responses/clients-create.json @@ -0,0 +1,24 @@ +{ + "status": "success", + "message": "Client created successfully.", + "version": "2.0.0-SNAPSHOT-rc3fad", + "result": { + "description": "{DESCRIPTION}", + "name": "{NAME}", + "consumerKey": "xxxxxxxxxxxxxxxxxxxxxxxxxx", + "_links": { + "subscriber": { + "href": "{URL_ROOT}profiles/v2/{USER}" + }, + "self": { + "href": "{URL_ROOT}clients/v2/{NAME}" + }, + "subscriptions": { + "href": "{URL_ROOT}clients/v2/{NAME}/subscriptions/" + } + }, + "tier": "{TIER}", + "consumerSecret": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", + "callbackUrl": "{CALLBACK_URL}" + } +} diff --git a/tests/testsuite_utils.py b/tests/testsuite_utils.py new file mode 100644 index 0000000..d77644f --- /dev/null +++ b/tests/testsuite_utils.py @@ -0,0 +1,50 @@ +""" + testsuite_utils.py + +Methods used throughout the test suite for testing. +""" +import socket +from threading import Thread +try: # python 2 + from BaseHTTPServer import HTTPServer +except ImportError: # python 3 + from http.server import HTTPServer + + + +def get_free_port(): + """ Find a free port + + Return a port available for connecting on localhost. 
+ """ + s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM) + s.bind(("localhost", 0)) + addr, port = s.getsockname() + s.close() + return port + + +class MockServer(object): + """ Mock server + + Run an HTTP server as a daemon in a thread. + """ + + @classmethod + def serve(cls, http_server): + """ Set up mock server + + INPUTS + ------- + http_server: BaseHTTPRequestHandler + HTTP server with request handlers specified + """ + # Find a port to listen to connect. + cls.mock_server_port = get_free_port() + # Instantiate server. + cls.mock_server = \ + HTTPServer(("localhost", cls.mock_server_port), http_server) + + cls.mock_server_thread = Thread(target=cls.mock_server.serve_forever) + cls.mock_server_thread.setDaemon(True) + cls.mock_server_thread.start()
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_issue_reference", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 0, "test_score": 3 }, "num_modified_files": 5 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
-e git+https://github.com/TACC/agavepy.git@4f950a942d8b25ce68f6dcf9d038f5430962240f#egg=agavepy attrs==22.2.0 backports.ssl-match-hostname==3.7.0.1 certifi==2021.5.30 charset-normalizer==2.0.12 cloudpickle==2.2.1 configparser==5.2.0 future==1.0.0 idna==3.10 importlib-metadata==4.8.3 iniconfig==1.1.1 Jinja2==3.0.3 MarkupSafe==2.0.1 packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 python-dateutil==2.9.0.post0 requests==2.27.1 requests-toolbelt==1.0.0 six==1.17.0 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 websocket-client==1.3.1 zipp==3.6.0
name: agavepy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - backports-ssl-match-hostname==3.7.0.1 - charset-normalizer==2.0.12 - cloudpickle==2.2.1 - configparser==5.2.0 - future==1.0.0 - idna==3.10 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - jinja2==3.0.3 - markupsafe==2.0.1 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - python-dateutil==2.9.0.post0 - requests==2.27.1 - requests-toolbelt==1.0.0 - six==1.17.0 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - websocket-client==1.3.1 - zipp==3.6.0 prefix: /opt/conda/envs/agavepy
[ "tests/clients_test.py::TestMockServer::test_client_create" ]
[]
[]
[]
BSD 3-Clause License
3,059
[ "README.rst", "Makefile", "dev.Dockerfile", "agavepy/clients.py", "agavepy/tenants.py", "agavepy/utils/response_handlers.py", ".travis.yml", "agavepy/utils/__init__.py", "agavepy/agave.py" ]
[ "README.rst", "Makefile", "dev.Dockerfile", "agavepy/clients.py", "agavepy/tenants.py", "agavepy/utils/response_handlers.py", ".travis.yml", "agavepy/utils/__init__.py", "agavepy/agave.py" ]
pydicom__pydicom-737
3e67bb6dcf4ed6e0b8eec10fd49f2d37d36cdf3c
2018-09-12 18:36:03
0721bdc0b5797f40984cc55b5408e273328dc528
pep8speaks: Hello @mrbean-bremen! Thanks for submitting the PR. - There are no PEP8 issues in the file [`pydicom/charset.py`](https://github.com/mrbean-bremen/pydicom/blob/511cff9e6493d3533d38a6162fc5d9fe86c13c3e/pydicom/charset.py) ! - There are no PEP8 issues in the file [`pydicom/tests/test_charset.py`](https://github.com/mrbean-bremen/pydicom/blob/511cff9e6493d3533d38a6162fc5d9fe86c13c3e/pydicom/tests/test_charset.py) !
diff --git a/pydicom/charset.py b/pydicom/charset.py index 59a0e62ca..120b6f22d 100644 --- a/pydicom/charset.py +++ b/pydicom/charset.py @@ -221,9 +221,15 @@ def convert_encodings(encodings): patched_encodings = [] patched = {} for x in encodings: - if re.match('^ISO[^_]IR', x): + # check for spelling errors, but exclude the correct spelling + # standard encodings + if re.match('^ISO[^_]IR', x) is not None: patched[x] = 'ISO_IR' + x[6:] patched_encodings.append(patched[x]) + # encodings with code extensions + elif re.match('^(?=ISO.2022.IR.)(?!ISO 2022 IR )', x) is not None: + patched[x] = 'ISO 2022 IR ' + x[12:] + patched_encodings.append(patched[x]) else: patched_encodings.append(x) if patched:
LookupError: unknown encoding: ISO_2022_IR_6 <!-- Instructions For Filing a Bug: https://github.com/pydicom/pydicom/blob/master/CONTRIBUTING.md#filing-bugs --> #### Description LookupError: unknown encoding: ISO_2022_IR_6 #### Steps/Code to Reproduce ``` import pydicom dataset = pydicom.dcmread(filename) print("Storage type.....:", dataset.SOPClassUID) # runs fine pat_name = dataset.PatientName # throws error x = dataset.PatientID # throws error arr = dataset.pixel_array # runs fine ``` I cannot provide the original data, but pydicom runs fine on most images in my dataset -- it's only a couple images which have this problem. #### Expected Results No error is thrown. #### Actual Results ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/afs/cs.stanford.edu/u/emmap1/.local/lib/python3.5/site-packages/pydicom/dataset.py", line 520, in __getattr__ return self[tag].value File "/afs/cs.stanford.edu/u/emmap1/.local/lib/python3.5/site-packages/pydicom/dataset.py", line 599, in __getitem__ self[tag] = DataElement_from_raw(data_elem, character_set) File "/afs/cs.stanford.edu/u/emmap1/.local/lib/python3.5/site-packages/pydicom/dataelem.py", line 493, in DataElement_from_raw value = convert_value(VR, raw, encoding) File "/afs/cs.stanford.edu/u/emmap1/.local/lib/python3.5/site-packages/pydicom/values.py", line 350, in convert_value encoding=encoding[1]) File "/afs/cs.stanford.edu/u/emmap1/.local/lib/python3.5/site-packages/pydicom/values.py", line 234, in convert_string byte_string = byte_string.decode(encoding) LookupError: unknown encoding: ISO_2022_IR_6 ``` #### Versions Linux-4.4.0-119-generic-x86_64-with-Ubuntu-16.04-xenial Python 3.5.2 pydicom 1.1.0
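The error comes from a misspelled Specific Character Set value: the DICOM-defined term is `ISO 2022 IR 6`, but the file stores `ISO_2022_IR_6`, which is passed straight to `str.decode` and is not a known codec name. The patch extends `convert_encodings` to repair this spelling before the lookup. A small standalone sketch of that normalization (the function name is hypothetical; pydicom's warning and codec mapping are left out):

```python
import re

def patch_encoding(value):
    """Sketch of the normalization in pydicom.charset.convert_encodings after
    this patch: misspelled code-extension charsets such as 'ISO_2022_IR_6'
    are rewritten to the standard 'ISO 2022 IR 6' form."""
    if re.match('^ISO[^_]IR', value) is not None:   # pre-existing branch for single-byte charsets
        return 'ISO_IR' + value[6:]
    if re.match('^(?=ISO.2022.IR.)(?!ISO 2022 IR )', value) is not None:
        return 'ISO 2022 IR ' + value[12:]
    return value

print(patch_encoding('ISO_2022_IR_6'))    # -> 'ISO 2022 IR 6'
print(patch_encoding('ISO 2022 IR 100'))  # already correct, returned unchanged
```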
pydicom/pydicom
diff --git a/pydicom/tests/test_charset.py b/pydicom/tests/test_charset.py index e6260ef50..1112bf3c8 100644 --- a/pydicom/tests/test_charset.py +++ b/pydicom/tests/test_charset.py @@ -150,6 +150,38 @@ class TestCharset(object): pydicom.charset.decode(elem, ['utf8']) assert u'Buc^Jérôme' == elem.value + def test_patched_code_extension_charset(self): + """Test some commonly misspelled charset values for code extensions.""" + elem = DataElement(0x00100010, 'PN', + b'Dionysios=\x1b\x2d\x46' + b'\xc4\xe9\xef\xed\xf5\xf3\xe9\xef\xf2') + # correct encoding + pydicom.charset.decode(elem, ['ISO 2022 IR 100', 'ISO 2022 IR 126']) + assert u'Dionysios=Διονυσιος' == elem.value + + # patched encoding shall behave correctly, but a warning is issued + with pytest.warns(UserWarning, + match='Incorrect value for Specific Character Set ' + "'ISO_2022-IR 100' - assuming " + "'ISO 2022 IR 100'"): + elem = DataElement(0x00100010, 'PN', + b'Dionysios=\x1b\x2d\x46' + b'\xc4\xe9\xef\xed\xf5\xf3\xe9\xef\xf2') + pydicom.charset.decode(elem, + ['ISO_2022-IR 100', 'ISO 2022 IR 126']) + assert u'Dionysios=Διονυσιος' == elem.value + + with pytest.warns(UserWarning, + match=r'Incorrect value for Specific Character Set ' + r"'ISO_2022_IR\+126' - assuming " + r"'ISO 2022 IR 126'"): + elem = DataElement(0x00100010, 'PN', + b'Dionysios=\x1b\x2d\x46' + b'\xc4\xe9\xef\xed\xf5\xf3\xe9\xef\xf2') + pydicom.charset.decode(elem, + ['ISO 2022 IR 100', 'ISO_2022_IR+126']) + assert u'Dionysios=Διονυσιος' == elem.value + def test_multi_charset_default_value(self): """Test that the first value is used if no escape code is given""" # regression test for #707
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 1 }
1.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 importlib-metadata==4.8.3 iniconfig==1.1.1 packaging==21.3 pluggy==1.0.0 py==1.11.0 -e git+https://github.com/pydicom/pydicom.git@3e67bb6dcf4ed6e0b8eec10fd49f2d37d36cdf3c#egg=pydicom pyparsing==3.1.4 pytest==7.0.1 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: pydicom channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/pydicom
[ "pydicom/tests/test_charset.py::TestCharset::test_patched_code_extension_charset" ]
[ "pydicom/tests/test_charset.py::TestCharset::test_changed_character_set" ]
[ "pydicom/tests/test_charset.py::TestCharset::test_encodings", "pydicom/tests/test_charset.py::TestCharset::test_nested_character_sets", "pydicom/tests/test_charset.py::TestCharset::test_inherited_character_set_in_sequence", "pydicom/tests/test_charset.py::TestCharset::test_standard_file", "pydicom/tests/test_charset.py::TestCharset::test_encoding_with_specific_tags", "pydicom/tests/test_charset.py::TestCharset::test_bad_charset", "pydicom/tests/test_charset.py::TestCharset::test_patched_charset", "pydicom/tests/test_charset.py::TestCharset::test_multi_charset_default_value", "pydicom/tests/test_charset.py::TestCharset::test_single_byte_multi_charset_personname", "pydicom/tests/test_charset.py::TestCharset::test_single_byte_multi_charset_text", "pydicom/tests/test_charset.py::TestCharset::test_single_byte_code_extensions[ISO", "pydicom/tests/test_charset.py::TestCharset::test_charset_patient_names[chrArab-\\u0642\\u0628\\u0627\\u0646\\u064a^\\u0644\\u0646\\u0632\\u0627\\u0631]", "pydicom/tests/test_charset.py::TestCharset::test_charset_patient_names[chrFren-Buc^J\\xe9r\\xf4me]", "pydicom/tests/test_charset.py::TestCharset::test_charset_patient_names[chrFrenMulti-Buc^J\\xe9r\\xf4me]", "pydicom/tests/test_charset.py::TestCharset::test_charset_patient_names[chrGerm-\\xc4neas^R\\xfcdiger]", "pydicom/tests/test_charset.py::TestCharset::test_charset_patient_names[chrGreek-\\u0394\\u03b9\\u03bf\\u03bd\\u03c5\\u03c3\\u03b9\\u03bf\\u03c2]", "pydicom/tests/test_charset.py::TestCharset::test_charset_patient_names[chrH31-Yamada^Tarou=\\u5c71\\u7530^\\u592a\\u90ce=\\u3084\\u307e\\u3060^\\u305f\\u308d\\u3046]", "pydicom/tests/test_charset.py::TestCharset::test_charset_patient_names[chrH32-\\uff94\\uff8f\\uff80\\uff9e^\\uff80\\uff9b\\uff73=\\u5c71\\u7530^\\u592a\\u90ce=\\u3084\\u307e\\u3060^\\u305f\\u308d\\u3046]", "pydicom/tests/test_charset.py::TestCharset::test_charset_patient_names[chrHbrw-\\u05e9\\u05e8\\u05d5\\u05df^\\u05d3\\u05d1\\u05d5\\u05e8\\u05d4]", "pydicom/tests/test_charset.py::TestCharset::test_charset_patient_names[chrI2-Hong^Gildong=\\u6d2a^\\u5409\\u6d1e=\\ud64d^\\uae38\\ub3d9]", "pydicom/tests/test_charset.py::TestCharset::test_charset_patient_names[chrJapMulti-\\u3084\\u307e\\u3060^\\u305f\\u308d\\u3046]", "pydicom/tests/test_charset.py::TestCharset::test_charset_patient_names[chrJapMultiExplicitIR6-\\u3084\\u307e\\u3060^\\u305f\\u308d\\u3046]", "pydicom/tests/test_charset.py::TestCharset::test_charset_patient_names[chrKoreanMulti-\\uae40\\ud76c\\uc911]", "pydicom/tests/test_charset.py::TestCharset::test_charset_patient_names[chrRuss-\\u041b\\u044e\\u043ace\\u043c\\u0431yp\\u0433]", "pydicom/tests/test_charset.py::TestCharset::test_charset_patient_names[chrX1-Wang^XiaoDong=\\u738b^\\u5c0f\\u6771]", "pydicom/tests/test_charset.py::TestCharset::test_charset_patient_names[chrX2-Wang^XiaoDong=\\u738b^\\u5c0f\\u4e1c]" ]
[]
MIT License
3,060
[ "pydicom/charset.py" ]
[ "pydicom/charset.py" ]
capitalone__datacompy-30
e406940f0c4b9acc6284ab27dcaecd96b857ae83
2018-09-12 19:37:53
246aad8c381f7591512f6ecef9debf6341261578
diff --git a/datacompy/core.py b/datacompy/core.py index c6553f8..e39fa5e 100644 --- a/datacompy/core.py +++ b/datacompy/core.py @@ -62,6 +62,8 @@ class Compare(object): A string name for the second dataframe ignore_spaces : bool, optional Flag to strip whitespace (including newlines) from string columns + ignore_case : bool, optional + Flag to ignore the case of string columns Attributes ---------- @@ -82,6 +84,7 @@ class Compare(object): df1_name="df1", df2_name="df2", ignore_spaces=False, + ignore_case=False, ): if on_index and join_columns is not None: @@ -105,7 +108,7 @@ class Compare(object): self.rel_tol = rel_tol self.df1_unq_rows = self.df2_unq_rows = self.intersect_rows = None self.column_stats = [] - self._compare(ignore_spaces) + self._compare(ignore_spaces, ignore_case) @property def df1(self): @@ -154,7 +157,7 @@ class Compare(object): if len(dataframe.drop_duplicates(subset=self.join_columns)) < len(dataframe): self._any_dupes = True - def _compare(self, ignore_spaces): + def _compare(self, ignore_spaces, ignore_case): """Actually run the comparison. This tries to run df1.equals(df2) first so that if they're truly equal we can tell. @@ -176,7 +179,7 @@ class Compare(object): LOG.info("Number of columns in df2 and not in df1: {}".format(len(self.df2_unq_columns()))) LOG.debug("Merging dataframes") self._dataframe_merge(ignore_spaces) - self._intersect_compare(ignore_spaces) + self._intersect_compare(ignore_spaces, ignore_case) if self.matches(): LOG.info("df1 matches df2") else: @@ -270,7 +273,7 @@ class Compare(object): ) ) - def _intersect_compare(self, ignore_spaces): + def _intersect_compare(self, ignore_spaces, ignore_case): """Run the comparison on the intersect dataframe This loops through all columns that are shared between df1 and df2, and @@ -295,6 +298,7 @@ class Compare(object): self.rel_tol, self.abs_tol, ignore_spaces, + ignore_case, ) match_cnt = self.intersect_rows[col_match].sum() max_diff = calculate_max_diff( @@ -585,7 +589,7 @@ def render(filename, *fields): return file_open.read().format(*fields) -def columns_equal(col_1, col_2, rel_tol=0, abs_tol=0, ignore_spaces=False): +def columns_equal(col_1, col_2, rel_tol=0, abs_tol=0, ignore_spaces=False, ignore_case=False): """Compares two columns from a dataframe, returning a True/False series, with the same index as column 1. @@ -609,6 +613,8 @@ def columns_equal(col_1, col_2, rel_tol=0, abs_tol=0, ignore_spaces=False): Absolute tolerance ignore_spaces : bool, optional Flag to strip whitespace (including newlines) from string columns + ignore_case : bool, optional + Flag to ignore the case of string columns Returns ------- @@ -637,6 +643,12 @@ def columns_equal(col_1, col_2, rel_tol=0, abs_tol=0, ignore_spaces=False): if col_2.dtype.kind == "O": col_2 = col_2.str.strip() + if ignore_case: + if col_1.dtype.kind == "O": + col_1 = col_1.str.upper() + if col_2.dtype.kind == "O": + col_2 = col_2.str.upper() + if set([col_1.dtype.kind, col_2.dtype.kind]) == set(["M", "O"]): compare = compare_string_and_date_columns(col_1, col_2) else:
Ignore case when comparing strings Might be nice to have an optional ignore-case feature so that `STRING` == `string`. Should be relatively simple to add. Happy to take it on. @theianrobertson, thoughts?
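With the `ignore_case` flag added in this patch (mirroring the existing `ignore_spaces` option), the requested behaviour becomes a one-argument change. A minimal usage sketch based on the `Compare` call style used in the new tests:

```python
import pandas as pd
import datacompy

df1 = pd.DataFrame([{"a": 1, "b": "STRING"}, {"a": 2, "b": "string"}])
df2 = pd.DataFrame([{"a": 1, "b": "string"}, {"a": 2, "b": "STRING"}])

# Default behaviour: case differences count as mismatches.
assert not datacompy.Compare(df1, df2, "a").matches()

# With ignore_case=True, string columns are upper-cased before comparing,
# so STRING == string and the dataframes match.
assert datacompy.Compare(df1, df2, "a", ignore_case=True).matches()
```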
capitalone/datacompy
diff --git a/tests/test_core.py b/tests/test_core.py index 3251d37..7e37ac4 100644 --- a/tests/test_core.py +++ b/tests/test_core.py @@ -108,6 +108,28 @@ something||False assert_series_equal(expect_out, actual_out, check_names=False) +def test_string_columns_equal_with_ignore_spaces_and_case(): + data = """a|b|expected +Hi|Hi|True +Yo|Yo|True +Hey|Hey |True +résumé|resume|False +résumé|résumé|True +💩|💩|True +💩|🤔|False + | |True + | |True +datacompy|DataComPy|True +something||False +|something|False +||True""" + df = pd.read_csv(six.StringIO(data), sep="|") + actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True, + ignore_case=True) + expect_out = df["expected"] + assert_series_equal(expect_out, actual_out, check_names=False) + + def test_date_columns_equal(): data = """a|b|expected 2017-01-01|2017-01-01|True @@ -158,6 +180,32 @@ def test_date_columns_equal_with_ignore_spaces(): assert_series_equal(expect_out, actual_out_rev, check_names=False) +def test_date_columns_equal_with_ignore_spaces_and_case(): + data = """a|b|expected +2017-01-01|2017-01-01 |True +2017-01-02 |2017-01-02|True +2017-10-01 |2017-10-10 |False +2017-01-01||False +|2017-01-01|False +||True""" + df = pd.read_csv(six.StringIO(data), sep="|") + # First compare just the strings + actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True, + ignore_case=True) + expect_out = df["expected"] + assert_series_equal(expect_out, actual_out, check_names=False) + + # Then compare converted to datetime objects + df["a"] = pd.to_datetime(df["a"]) + df["b"] = pd.to_datetime(df["b"]) + actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True) + expect_out = df["expected"] + assert_series_equal(expect_out, actual_out, check_names=False) + # and reverse + actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True) + assert_series_equal(expect_out, actual_out_rev, check_names=False) + + def test_date_columns_unequal(): """I want datetime fields to match with dates stored as strings """ @@ -323,6 +371,25 @@ def test_mixed_column_with_ignore_spaces(): assert_series_equal(expect_out, actual_out, check_names=False) +def test_mixed_column_with_ignore_spaces_and_case(): + df = pd.DataFrame( + [ + {"a": "hi", "b": "hi ", "expected": True}, + {"a": 1, "b": 1, "expected": True}, + {"a": np.inf, "b": np.inf, "expected": True}, + {"a": Decimal("1"), "b": Decimal("1"), "expected": True}, + {"a": 1, "b": "1 ", "expected": False}, + {"a": 1, "b": "yo ", "expected": False}, + {"a": "Hi", "b": "hI ", "expected": True}, + {"a": "HI", "b": "HI ", "expected": True}, + {"a": "hi", "b": "hi ", "expected": True}, + ] + ) + actual_out = datacompy.columns_equal(df.a, df.b, ignore_spaces=True, ignore_case=True) + expect_out = df["expected"] + assert_series_equal(expect_out, actual_out, check_names=False) + + def test_compare_df_setter_bad(): df = pd.DataFrame([{"a": 1, "A": 2}, {"a": 2, "A": 2}]) with raises(TypeError, message="df1 must be a pandas DataFrame"): @@ -668,6 +735,23 @@ def test_strings_with_joins_with_ignore_spaces(): assert compare.intersect_rows_match() + +def test_strings_with_joins_with_ignore_case(): + df1 = pd.DataFrame([{"a": "hi", "b": "a"}, {"a": "bye", "b": "A"}]) + df2 = pd.DataFrame([{"a": "hi", "b": "A"}, {"a": "bye", "b": "a"}]) + compare = datacompy.Compare(df1, df2, "a", ignore_case=False) + assert not compare.matches() + assert compare.all_columns_match() + assert compare.all_rows_overlap() + assert not compare.intersect_rows_match() + + 
compare = datacompy.Compare(df1, df2, "a", ignore_case=True) + assert compare.matches() + assert compare.all_columns_match() + assert compare.all_rows_overlap() + assert compare.intersect_rows_match() + + def test_decimal_with_joins_with_ignore_spaces(): df1 = pd.DataFrame([{"a": 1, "b": " A"}, {"a": 2, "b": "A"}]) df2 = pd.DataFrame([{"a": 1, "b": "A"}, {"a": 2, "b": "A "}]) @@ -684,6 +768,22 @@ def test_decimal_with_joins_with_ignore_spaces(): assert compare.intersect_rows_match() +def test_decimal_with_joins_with_ignore_case(): + df1 = pd.DataFrame([{"a": 1, "b": "a"}, {"a": 2, "b": "A"}]) + df2 = pd.DataFrame([{"a": 1, "b": "A"}, {"a": 2, "b": "a"}]) + compare = datacompy.Compare(df1, df2, "a", ignore_case=False) + assert not compare.matches() + assert compare.all_columns_match() + assert compare.all_rows_overlap() + assert not compare.intersect_rows_match() + + compare = datacompy.Compare(df1, df2, "a", ignore_case=True) + assert compare.matches() + assert compare.all_columns_match() + assert compare.all_rows_overlap() + assert compare.intersect_rows_match() + + def test_index_with_joins_with_ignore_spaces(): df1 = pd.DataFrame([{"a": 1, "b": " A"}, {"a": 2, "b": "A"}]) df2 = pd.DataFrame([{"a": 1, "b": "A"}, {"a": 2, "b": "A "}]) @@ -700,6 +800,22 @@ def test_index_with_joins_with_ignore_spaces(): assert compare.intersect_rows_match() +def test_index_with_joins_with_ignore_case(): + df1 = pd.DataFrame([{"a": 1, "b": "a"}, {"a": 2, "b": "A"}]) + df2 = pd.DataFrame([{"a": 1, "b": "A"}, {"a": 2, "b": "a"}]) + compare = datacompy.Compare(df1, df2, on_index=True, ignore_case=False) + assert not compare.matches() + assert compare.all_columns_match() + assert compare.all_rows_overlap() + assert not compare.intersect_rows_match() + + compare = datacompy.Compare(df1, df2, "a", ignore_case=True) + assert compare.matches() + assert compare.all_columns_match() + assert compare.all_rows_overlap() + assert compare.intersect_rows_match() + + MAX_DIFF_DF = pd.DataFrame( { "base": [1, 1, 1, 1, 1],
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 3, "test_score": 1 }, "num_modified_files": 1 }
0.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest>=3.0.6", "Sphinx>=1.6.2", "sphinx-rtd-theme>=0.2.4", "numpydoc>=0.6.0", "mock>=2.0.0", "pre-commit>=1.10.4", "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 attrs==22.2.0 Babel==2.11.0 certifi==2021.5.30 cfgv==3.3.1 charset-normalizer==2.0.12 -e git+https://github.com/capitalone/datacompy.git@e406940f0c4b9acc6284ab27dcaecd96b857ae83#egg=datacompy distlib==0.3.9 docutils==0.18.1 filelock==3.4.1 identify==2.4.4 idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 importlib-resources==5.2.3 iniconfig==1.1.1 Jinja2==3.0.3 MarkupSafe==2.0.1 mock==5.2.0 nodeenv==1.6.0 numpy==1.19.5 numpydoc==1.1.0 packaging==21.3 pandas==0.23.3 platformdirs==2.4.0 pluggy==1.0.0 pre-commit==2.17.0 py==1.11.0 Pygments==2.14.0 pyparsing==3.1.4 pytest==7.0.1 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==6.0.1 requests==2.27.1 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinx-rtd-theme==2.0.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 toml==0.10.2 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 virtualenv==20.16.2 zipp==3.6.0
name: datacompy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - attrs==22.2.0 - babel==2.11.0 - cfgv==3.3.1 - charset-normalizer==2.0.12 - distlib==0.3.9 - docutils==0.18.1 - filelock==3.4.1 - identify==2.4.4 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - importlib-resources==5.2.3 - iniconfig==1.1.1 - jinja2==3.0.3 - markupsafe==2.0.1 - mock==5.2.0 - nodeenv==1.6.0 - numpy==1.19.5 - numpydoc==1.1.0 - packaging==21.3 - pandas==0.23.3 - platformdirs==2.4.0 - pluggy==1.0.0 - pre-commit==2.17.0 - py==1.11.0 - pygments==2.14.0 - pyparsing==3.1.4 - pytest==7.0.1 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==6.0.1 - requests==2.27.1 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinx-rtd-theme==2.0.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jquery==4.1 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - toml==0.10.2 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - virtualenv==20.16.2 - zipp==3.6.0 prefix: /opt/conda/envs/datacompy
[ "tests/test_core.py::test_string_columns_equal_with_ignore_spaces_and_case", "tests/test_core.py::test_date_columns_equal_with_ignore_spaces_and_case", "tests/test_core.py::test_mixed_column_with_ignore_spaces_and_case", "tests/test_core.py::test_strings_with_joins_with_ignore_case", "tests/test_core.py::test_decimal_with_joins_with_ignore_case", "tests/test_core.py::test_index_with_joins_with_ignore_case" ]
[ "tests/test_core.py::test_compare_df_setter_bad", "tests/test_core.py::test_compare_df_setter_bad_index", "tests/test_core.py::test_compare_on_index_and_join_columns" ]
[ "tests/test_core.py::test_numeric_columns_equal_abs", "tests/test_core.py::test_numeric_columns_equal_rel", "tests/test_core.py::test_string_columns_equal", "tests/test_core.py::test_string_columns_equal_with_ignore_spaces", "tests/test_core.py::test_date_columns_equal", "tests/test_core.py::test_date_columns_equal_with_ignore_spaces", "tests/test_core.py::test_date_columns_unequal", "tests/test_core.py::test_bad_date_columns", "tests/test_core.py::test_rounded_date_columns", "tests/test_core.py::test_decimal_float_columns_equal", "tests/test_core.py::test_decimal_float_columns_equal_rel", "tests/test_core.py::test_decimal_columns_equal", "tests/test_core.py::test_decimal_columns_equal_rel", "tests/test_core.py::test_infinity_and_beyond", "tests/test_core.py::test_mixed_column", "tests/test_core.py::test_mixed_column_with_ignore_spaces", "tests/test_core.py::test_compare_df_setter_good", "tests/test_core.py::test_compare_df_setter_different_cases", "tests/test_core.py::test_compare_df_setter_good_index", "tests/test_core.py::test_columns_overlap", "tests/test_core.py::test_columns_no_overlap", "tests/test_core.py::test_10k_rows", "tests/test_core.py::test_subset", "tests/test_core.py::test_not_subset", "tests/test_core.py::test_large_subset", "tests/test_core.py::test_string_joiner", "tests/test_core.py::test_decimal_with_joins", "tests/test_core.py::test_decimal_with_nulls", "tests/test_core.py::test_strings_with_joins", "tests/test_core.py::test_index_joining", "tests/test_core.py::test_index_joining_strings_i_guess", "tests/test_core.py::test_index_joining_non_overlapping", "tests/test_core.py::test_temp_column_name", "tests/test_core.py::test_temp_column_name_one_has", "tests/test_core.py::test_temp_column_name_both_have", "tests/test_core.py::test_temp_column_name_one_already", "tests/test_core.py::test_simple_dupes_one_field", "tests/test_core.py::test_simple_dupes_two_fields", "tests/test_core.py::test_simple_dupes_index", "tests/test_core.py::test_simple_dupes_one_field_two_vals", "tests/test_core.py::test_simple_dupes_one_field_three_to_two_vals", "tests/test_core.py::test_dupes_from_real_data", "tests/test_core.py::test_strings_with_joins_with_ignore_spaces", "tests/test_core.py::test_decimal_with_joins_with_ignore_spaces", "tests/test_core.py::test_index_with_joins_with_ignore_spaces", "tests/test_core.py::test_calculate_max_diff[base-0]", "tests/test_core.py::test_calculate_max_diff[floats-0.2]", "tests/test_core.py::test_calculate_max_diff[decimals-0.1]", "tests/test_core.py::test_calculate_max_diff[null_floats-0.1]", "tests/test_core.py::test_calculate_max_diff[strings-0.1]", "tests/test_core.py::test_calculate_max_diff[mixed_strings-0]", "tests/test_core.py::test_calculate_max_diff[infinity-inf]" ]
[]
Apache License 2.0
3,061
[ "datacompy/core.py" ]
[ "datacompy/core.py" ]
google__importlab-23
77f04151272440dacea197b8c4f74aa26fdbe950
2018-09-12 22:25:03
676d17cd41ac68de6ebb48fb71780ad6110c4ae3
diff --git a/importlab/resolve.py b/importlab/resolve.py index 23314bc..d55f34d 100644 --- a/importlab/resolve.py +++ b/importlab/resolve.py @@ -102,6 +102,9 @@ def infer_module_name(filename, fspath): for f in fspath: short_name = f.relative_path(filename) if short_name: + # The module name for __init__.py files is the directory. + if short_name.endswith(os.path.sep + "__init__"): + short_name = short_name[:short_name.rfind(os.path.sep)] return short_name.replace(os.path.sep, '.') # We have not found filename relative to anywhere in pythonpath. return ''
Incorrect inferred module name for __init__.py files

See google/pytype#154 for more detail. `resolve.infer_module_name` calculates the wrong name for `__init__.py` files. For example, for `foo/bar/__init__.py`, it will return `foo.bar.__init__`. The module name should be `foo.bar`.
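The fix amounts to stripping a trailing `__init__` path component before converting separators to dots. A minimal standalone sketch of that behavior (not the importlab code itself; the helper name is invented for illustration):

```python
import os


def module_name_from_relative(short_name):
    """Map a pythonpath-relative name like 'foo/bar/__init__' to 'foo.bar'."""
    init_suffix = os.path.sep + "__init__"
    if short_name.endswith(init_suffix):
        # An __init__ module takes the name of its containing package directory.
        short_name = short_name[: -len(init_suffix)]
    return short_name.replace(os.path.sep, ".")


assert module_name_from_relative(os.path.join("foo", "bar", "__init__")) == "foo.bar"
assert module_name_from_relative(os.path.join("foo", "bar", "baz")) == "foo.bar.baz"
```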
google/importlab
diff --git a/tests/test_resolve.py b/tests/test_resolve.py index 2a217d3..9891764 100644 --- a/tests/test_resolve.py +++ b/tests/test_resolve.py @@ -293,6 +293,15 @@ class TestResolverUtils(unittest.TestCase): resolve.infer_module_name("/some/random/file", fspath), "") + def testInferInitModuleName(self): + with utils.Tempdir() as d: + os_fs = fs.OSFileSystem(d.path) + fspath = [os_fs] + py_file = d.create_file("foo/__init__.py") + self.assertEqual( + resolve.infer_module_name(py_file, fspath), + "foo") + def testGetAbsoluteName(self): test_cases = [ ("x.y", "a.b", "x.y.a.b"),
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
0.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 decorator==4.4.2 -e git+https://github.com/google/importlab.git@77f04151272440dacea197b8c4f74aa26fdbe950#egg=importlab importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work networkx==2.5.1 packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 six==1.17.0 toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: importlab channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - decorator==4.4.2 - networkx==2.5.1 - six==1.17.0 prefix: /opt/conda/envs/importlab
[ "tests/test_resolve.py::TestResolverUtils::testInferInitModuleName" ]
[]
[ "tests/test_resolve.py::TestResolver::testFallBackToSource", "tests/test_resolve.py::TestResolver::testGetPyFromPycSource", "tests/test_resolve.py::TestResolver::testOverrideSource", "tests/test_resolve.py::TestResolver::testPycSourceWithoutPy", "tests/test_resolve.py::TestResolver::testResolveBuiltin", "tests/test_resolve.py::TestResolver::testResolveInitFile", "tests/test_resolve.py::TestResolver::testResolveInitFileRelative", "tests/test_resolve.py::TestResolver::testResolveModuleFromFile", "tests/test_resolve.py::TestResolver::testResolvePackageFile", "tests/test_resolve.py::TestResolver::testResolveParentPackageFile", "tests/test_resolve.py::TestResolver::testResolveParentPackageFileWithModule", "tests/test_resolve.py::TestResolver::testResolvePyiFile", "tests/test_resolve.py::TestResolver::testResolveRelativeFromInitFileWithModule", "tests/test_resolve.py::TestResolver::testResolveRelativeInNonPackage", "tests/test_resolve.py::TestResolver::testResolveSamePackageFile", "tests/test_resolve.py::TestResolver::testResolveSiblingPackageFile", "tests/test_resolve.py::TestResolver::testResolveStarImport", "tests/test_resolve.py::TestResolver::testResolveStarImportBuiltin", "tests/test_resolve.py::TestResolver::testResolveStarImportSystem", "tests/test_resolve.py::TestResolver::testResolveSymbolFromFile", "tests/test_resolve.py::TestResolver::testResolveSystemInitFile", "tests/test_resolve.py::TestResolver::testResolveSystemPackageDir", "tests/test_resolve.py::TestResolver::testResolveSystemRelative", "tests/test_resolve.py::TestResolver::testResolveSystemSymbol", "tests/test_resolve.py::TestResolver::testResolveSystemSymbolNameClash", "tests/test_resolve.py::TestResolver::testResolveTopLevel", "tests/test_resolve.py::TestResolver::testResolveWithFilesystem", "tests/test_resolve.py::TestResolverUtils::testGetAbsoluteName", "tests/test_resolve.py::TestResolverUtils::testInferModuleName" ]
[]
Apache License 2.0
3,062
[ "importlab/resolve.py" ]
[ "importlab/resolve.py" ]
neogeny__TatSu-83
4aa9636ab1a77a24a5b60eeb06575aee5cf20dd7
2018-09-12 23:48:22
4aa9636ab1a77a24a5b60eeb06575aee5cf20dd7
diff --git a/grammar/tatsu.ebnf b/grammar/tatsu.ebnf index d087e40..3ea4896 100644 --- a/grammar/tatsu.ebnf +++ b/grammar/tatsu.ebnf @@ -378,7 +378,7 @@ token::Token literal = - string | raw_string | word | hex | float | int + string | raw_string | boolean | word | hex | float | int ; diff --git a/tatsu/bootstrap.py b/tatsu/bootstrap.py index 50ecd1f..5fcba66 100644 --- a/tatsu/bootstrap.py +++ b/tatsu/bootstrap.py @@ -152,7 +152,7 @@ class EBNFBootstrapParser(Parser): self._boolean_() self.name_last_node('value') with self._option(): - self._constant('True') + self._constant(True) self.name_last_node('value') self._error('no available options') with self._option(): @@ -728,7 +728,7 @@ class EBNFBootstrapParser(Parser): def _special_(self): # noqa self._token('?(') self._cut() - self._pattern(r'.*?(?!\)\?)') + self._pattern('.*?(?!\\)\\?)') self.name_last_node('@') self._token(')?') self._cut() @@ -803,11 +803,11 @@ class EBNFBootstrapParser(Parser): @tatsumasu('Constant') def _constant_(self): # noqa - self._pattern(r'`') + self._pattern('`') self._cut() self._literal_() self.name_last_node('@') - self._pattern(r'`') + self._pattern('`') @tatsumasu('Token') def _token_(self): # noqa @@ -825,6 +825,8 @@ class EBNFBootstrapParser(Parser): self._string_() with self._option(): self._raw_string_() + with self._option(): + self._boolean_() with self._option(): self._word_() with self._option(): @@ -851,14 +853,14 @@ class EBNFBootstrapParser(Parser): with self._option(): self._token('"') self._cut() - self._pattern(r'([^"\n]|\\"|\\\\)*') + self._pattern('([^"\\n]|\\\\"|\\\\\\\\)*') self.name_last_node('@') self._token('"') self._cut() with self._option(): self._token("'") self._cut() - self._pattern(r"([^'\n]|\\'|\\\\)*") + self._pattern("([^'\\n]|\\\\'|\\\\\\\\)*") self.name_last_node('@') self._token("'") self._cut() @@ -866,23 +868,23 @@ class EBNFBootstrapParser(Parser): @tatsumasu() def _hex_(self): # noqa - self._pattern(r'0[xX](\d|[a-fA-F])+') + self._pattern('0[xX](\\d|[a-fA-F])+') @tatsumasu() def _float_(self): # noqa - self._pattern(r'[-+]?(?:\d+\.\d*|\d*\.\d+)(?:[Ee][-+]?\d+)?') + self._pattern('[-+]?(?:\\d+\\.\\d*|\\d*\\.\\d+)(?:[Ee][-+]?\\d+)?') @tatsumasu() def _int_(self): # noqa - self._pattern(r'[-+]?\d+') + self._pattern('[-+]?\\d+') @tatsumasu() def _path_(self): # noqa - self._pattern(r'(?!\d)\w+(::(?!\d)\w+)+') + self._pattern('(?!\\d)\\w+(::(?!\\d)\\w+)+') @tatsumasu() def _word_(self): # noqa - self._pattern(r'(?!\d)\w+') + self._pattern('(?!\\d)\\w+') @tatsumasu('Any') def _any_(self): # noqa @@ -908,16 +910,16 @@ class EBNFBootstrapParser(Parser): with self._option(): self._token('/') self._cut() - self._pattern(r'([^/\\]|\\/|\\.)+') + self._pattern('([^/\\\\]|\\\\/|\\\\.)+') self.name_last_node('@') self._token('/') self._cut() with self._option(): self._token('?/') self._cut() - self._pattern(r'(.|\n)+?(?=/\?)') + self._pattern('(.|\\n)+?(?=/\\?)') self.name_last_node('@') - self._pattern(r'/\?+') + self._pattern('/\\?+') self._cut() with self._option(): self._token('?') @@ -1140,14 +1142,16 @@ class EBNFBootstrapSemantics(object): return ast -def main(filename, start='start', **kwargs): +def main(filename, start=None, **kwargs): + if start is None: + start = 'start' if not filename or filename == '-': text = sys.stdin.read() else: with open(filename) as f: text = f.read() parser = EBNFBootstrapParser() - return parser.parse(text, start=start, filename=filename, **kwargs) + return parser.parse(text, rule_name=start, filename=filename, **kwargs) if __name__ == 
'__main__': diff --git a/tatsu/objectmodel.py b/tatsu/objectmodel.py index 5be7337..67c0fd0 100644 --- a/tatsu/objectmodel.py +++ b/tatsu/objectmodel.py @@ -25,8 +25,8 @@ class Node(object): parseinfo = ast.parseinfo if not parseinfo else None self._parseinfo = parseinfo - attributes = ast or {} - # asume that kwargs contains node attributes of interest + attributes = ast if ast is not None else {} + # assume that kwargs contains node attributes of interest if isinstance(attributes, MutableMapping): attributes.update(kwargs) diff --git a/tatsu/parser_semantics.py b/tatsu/parser_semantics.py index c15b48b..3d8daf8 100644 --- a/tatsu/parser_semantics.py +++ b/tatsu/parser_semantics.py @@ -46,7 +46,7 @@ class EBNFGrammarSemantics(ModelBuilderSemantics): def string(self, ast): return eval_escapes(ast) - def hext(self, ast): + def hex(self, ast): return int(ast, 16) def float(self, ast):
No boolean literals

```
`10` -> 10
`string` -> 'string'
`'long string'` -> 'long string'
`True` -> 'True'
```

Since number literals are passed on, I would expect the same to happen for boolean literals, but instead they get converted to strings. The same happens for `None` as well. I can use 0 and 1 instead, but it's not really the same.

Workaround:

```
t_bool::bool = k_true @:`1` | k_false @:`0`;
t_bool_lit::Boolean = VALUE:t_bool;
```
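With the change, backtick constants should evaluate like other literals, so booleans come through as real `bool` values instead of strings. A hedged usage sketch of that behavior, mirroring the accompanying test and using TatSu's public `compile`/`parse` entry points (rule and attribute names are made up):

```python
import tatsu

# Backtick constants: numbers, strings, and (after the fix) booleans.
grammar = '''
    start = () flag:`True` count:`-123` label:`'some text'` $ ;
'''

model = tatsu.compile(grammar)
ast = model.parse('')

assert ast.flag == True       # previously this came back as the string 'True'
assert ast.count == -123
assert ast.label == 'some text'
```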
neogeny/TatSu
diff --git a/test/grammar/parameter_test.py b/test/grammar/parameter_test.py index 7c7b777..a8c4b94 100644 --- a/test/grammar/parameter_test.py +++ b/test/grammar/parameter_test.py @@ -192,7 +192,7 @@ class ParameterTests(unittest.TestCase): def test_numbers_and_unicode(self): grammar = ''' - rúle(1, -23, 4.56, 7.89e-11, 0xABCDEF, Añez) + rúle(1, -23, 4.56, 7.89e-11, Añez) = 'a' ; diff --git a/test/grammar/syntax_test.py b/test/grammar/syntax_test.py index b733b19..5f007dc 100644 --- a/test/grammar/syntax_test.py +++ b/test/grammar/syntax_test.py @@ -320,3 +320,26 @@ class SyntaxTests(unittest.TestCase): model = compile(grammar, "start") ast = model.parse("1xx 2 yy") self.assertEqual(['1', 'xx', ' ', '2', 'yy'], ast) + + def test_constant(self): + grammar = ''' + start = () + _0:`0` _1:`+1` _n123:`-123` + _xF:`0xF` + _string:`string` + _string_space:`'string space'` + _true:`True` _false:`False` + $; + ''' + + model = compile(grammar) + ast = model.parse("") + + self.assertEqual(ast._0, 0) + self.assertEqual(ast._1, 1) + self.assertEqual(ast._n123, -123) + self.assertEqual(ast._xF, 0xF) + self.assertEqual(ast._string, "string") + self.assertEqual(ast._string_space, "string space") + self.assertEqual(ast._true, True) + self.assertEqual(ast._false, False)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 4 }
4.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-flake8", "pytest-mypy", "pytest-pylint" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements-dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
astroid==3.3.9 dill==0.3.9 exceptiongroup==1.2.2 filelock==3.18.0 flake8==7.2.0 iniconfig==2.1.0 isort==6.0.1 mccabe==0.7.0 mypy==1.15.0 mypy-extensions==1.0.0 packaging==24.2 platformdirs==4.3.7 pluggy==1.5.0 pycodestyle==2.13.0 pyflakes==3.3.1 pylint==3.3.6 pytest==8.3.5 pytest-flake8==1.3.0 pytest-mypy==1.0.0 pytest-pylint==0.21.0 -e git+https://github.com/neogeny/TatSu.git@4aa9636ab1a77a24a5b60eeb06575aee5cf20dd7#egg=TatSu tomli==2.2.1 tomlkit==0.13.2 typing_extensions==4.13.0
name: TatSu channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - astroid==3.3.9 - dill==0.3.9 - exceptiongroup==1.2.2 - filelock==3.18.0 - flake8==7.2.0 - iniconfig==2.1.0 - isort==6.0.1 - mccabe==0.7.0 - mypy==1.15.0 - mypy-extensions==1.0.0 - packaging==24.2 - platformdirs==4.3.7 - pluggy==1.5.0 - pycodestyle==2.13.0 - pyflakes==3.3.1 - pylint==3.3.6 - pytest==8.3.5 - pytest-flake8==1.3.0 - pytest-mypy==1.0.0 - pytest-pylint==0.21.0 - tomli==2.2.1 - tomlkit==0.13.2 - typing-extensions==4.13.0 prefix: /opt/conda/envs/TatSu
[ "test/grammar/syntax_test.py::SyntaxTests::test_constant" ]
[]
[ "test/grammar/parameter_test.py::ParameterTests::test_35_only_keyword_params", "test/grammar/parameter_test.py::ParameterTests::test_36_param_combinations", "test/grammar/parameter_test.py::ParameterTests::test_36_params_and_keyword_params", "test/grammar/parameter_test.py::ParameterTests::test_36_unichars", "test/grammar/parameter_test.py::ParameterTests::test_keyword_params", "test/grammar/parameter_test.py::ParameterTests::test_numbers_and_unicode", "test/grammar/syntax_test.py::SyntaxTests::test_48_rule_override", "test/grammar/syntax_test.py::SyntaxTests::test_any", "test/grammar/syntax_test.py::SyntaxTests::test_ast_assignment", "test/grammar/syntax_test.py::SyntaxTests::test_based_rule", "test/grammar/syntax_test.py::SyntaxTests::test_empty_closure", "test/grammar/syntax_test.py::SyntaxTests::test_empty_match_token", "test/grammar/syntax_test.py::SyntaxTests::test_failed_ref", "test/grammar/syntax_test.py::SyntaxTests::test_group_ast", "test/grammar/syntax_test.py::SyntaxTests::test_include_and_override", "test/grammar/syntax_test.py::SyntaxTests::test_list_override", "test/grammar/syntax_test.py::SyntaxTests::test_new_override", "test/grammar/syntax_test.py::SyntaxTests::test_optional_closure", "test/grammar/syntax_test.py::SyntaxTests::test_optional_sequence", "test/grammar/syntax_test.py::SyntaxTests::test_parseinfo", "test/grammar/syntax_test.py::SyntaxTests::test_partial_choice", "test/grammar/syntax_test.py::SyntaxTests::test_partial_options", "test/grammar/syntax_test.py::SyntaxTests::test_raw_string", "test/grammar/syntax_test.py::SyntaxTests::test_rule_include", "test/grammar/syntax_test.py::SyntaxTests::test_update_ast" ]
[]
BSD License
3,063
[ "tatsu/objectmodel.py", "tatsu/parser_semantics.py", "tatsu/bootstrap.py", "grammar/tatsu.ebnf" ]
[ "tatsu/objectmodel.py", "tatsu/parser_semantics.py", "tatsu/bootstrap.py", "grammar/tatsu.ebnf" ]
python-cmd2__cmd2-516
982d2f2c2b91c04ecd2ba45dc2f6e1d26d77e4c1
2018-09-13 04:42:46
60a212c1c585f0c4c06ffcfeb9882520af8dbf35
diff --git a/CHANGELOG.md b/CHANGELOG.md index f5dff203..3015b793 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,15 @@ * These allow you to provide feedback to the user in an asychronous fashion, meaning alerts can display when the user is still entering text at the prompt. See [async_printing.py](https://github.com/python-cmd2/cmd2/blob/master/examples/async_printing.py) for an example. + * Cross-platform colored output support + * ``colorama`` gets initialized properly in ``Cmd.__init()`` + * The ``Cmd.colors`` setting is no longer platform dependent and now has three values: + * Terminal (default) - output methods do not strip any ANSI escape sequences when output is a terminal, but + if the output is a pipe or a file the escape sequences are stripped + * Always - output methods **never** strip ANSI escape sequences, regardless of the output destination + * Never - output methods strip all ANSI escape sequences +* Deprecations + * Deprecated the builtin ``cmd2`` suport for colors including ``Cmd.colorize()`` and ``Cmd._colorcodes`` * Deletions * The ``preparse``, ``postparsing_precmd``, and ``postparsing_postcmd`` methods *deprecated* in the previous release have been deleted diff --git a/cmd2/cmd2.py b/cmd2/cmd2.py index ed478b0d..7d8ac7dc 100644 --- a/cmd2/cmd2.py +++ b/cmd2/cmd2.py @@ -32,16 +32,16 @@ Git repository on GitHub at https://github.com/python-cmd2/cmd2 import argparse import cmd import collections +import colorama from colorama import Fore import glob import inspect import os -import platform import re import shlex import sys import threading -from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple, Type, Union +from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple, Type, Union, IO from . import constants from . import utils @@ -318,7 +318,7 @@ class Cmd(cmd.Cmd): reserved_words = [] # Attributes which ARE dynamically settable at runtime - colors = (platform.system() != 'Windows') + colors = constants.COLORS_TERMINAL continuation_prompt = '> ' debug = False echo = False @@ -338,7 +338,7 @@ class Cmd(cmd.Cmd): # To make an attribute settable with the "do_set" command, add it to this ... 
# This starts out as a dictionary but gets converted to an OrderedDict sorted alphabetically by key - settable = {'colors': 'Colorized output (*nix only)', + settable = {'colors': 'Allow colorized output (valid values: Terminal, Always, Never)', 'continuation_prompt': 'On 2nd+ line of input', 'debug': 'Show full error stack on error', 'echo': 'Echo command issued into output', @@ -370,6 +370,9 @@ class Cmd(cmd.Cmd): except AttributeError: pass + # Override whether ansi codes should be stripped from the output since cmd2 has its own logic for doing this + colorama.init(strip=False) + # initialize plugin system # needs to be done before we call __init__(0) self._initialize_plugin_system() @@ -418,13 +421,13 @@ class Cmd(cmd.Cmd): self._STOP_AND_EXIT = True # cmd convention self._colorcodes = {'bold': {True: '\x1b[1m', False: '\x1b[22m'}, - 'cyan': {True: '\x1b[36m', False: '\x1b[39m'}, - 'blue': {True: '\x1b[34m', False: '\x1b[39m'}, - 'red': {True: '\x1b[31m', False: '\x1b[39m'}, - 'magenta': {True: '\x1b[35m', False: '\x1b[39m'}, - 'green': {True: '\x1b[32m', False: '\x1b[39m'}, - 'underline': {True: '\x1b[4m', False: '\x1b[24m'}, - 'yellow': {True: '\x1b[33m', False: '\x1b[39m'}} + 'cyan': {True: Fore.CYAN, False: Fore.RESET}, + 'blue': {True: Fore.BLUE, False: Fore.RESET}, + 'red': {True: Fore.RED, False: Fore.RESET}, + 'magenta': {True: Fore.MAGENTA, False: Fore.RESET}, + 'green': {True: Fore.GREEN, False: Fore.RESET}, + 'underline': {True: '\x1b[4m', False: Fore.RESET}, + 'yellow': {True: Fore.YELLOW, False: Fore.RESET}} # Used load command to store the current script dir as a LIFO queue to support _relative_load command self._script_dir = [] @@ -554,34 +557,53 @@ class Cmd(cmd.Cmd): # Make sure settable parameters are sorted alphabetically by key self.settable = collections.OrderedDict(sorted(self.settable.items(), key=lambda t: t[0])) - def poutput(self, msg: str, end: str='\n') -> None: - """Convenient shortcut for self.stdout.write(); by default adds newline to end if not already present. + def decolorized_write(self, fileobj: IO, msg: str) -> None: + """Write a string to a fileobject, stripping ANSI escape sequences if necessary + + Honor the current colors setting, which requires us to check whether the + fileobject is a tty. + """ + if self.colors.lower() == constants.COLORS_NEVER.lower() or \ + (self.colors.lower() == constants.COLORS_TERMINAL.lower() and not fileobj.isatty()): + msg = utils.strip_ansi(msg) + fileobj.write(msg) - Also handles BrokenPipeError exceptions for when a commands's output has been piped to another process and - that process terminates before the cmd2 command is finished executing. + def poutput(self, msg: Any, end: str='\n', color: str='') -> None: + """Smarter self.stdout.write(); color aware and adds newline of not present. + + Also handles BrokenPipeError exceptions for when a commands's output has + been piped to another process and that process terminates before the + cmd2 command is finished executing. 
:param msg: message to print to current stdout (anything convertible to a str with '{}'.format() is OK) - :param end: string appended after the end of the message if not already present, default a newline + :param end: (optional) string appended after the end of the message if not already present, default a newline + :param color: (optional) color escape to output this message with """ if msg is not None and msg != '': try: msg_str = '{}'.format(msg) - self.stdout.write(msg_str) if not msg_str.endswith(end): - self.stdout.write(end) + msg_str += end + if color: + msg_str = color + msg_str + Fore.RESET + self.decolorized_write(self.stdout, msg_str) except BrokenPipeError: - # This occurs if a command's output is being piped to another process and that process closes before the - # command is finished. If you would like your application to print a warning message, then set the - # broken_pipe_warning attribute to the message you want printed. + # This occurs if a command's output is being piped to another + # process and that process closes before the command is + # finished. If you would like your application to print a + # warning message, then set the broken_pipe_warning attribute + # to the message you want printed. if self.broken_pipe_warning: sys.stderr.write(self.broken_pipe_warning) - def perror(self, err: Union[str, Exception], traceback_war: bool=True) -> None: + def perror(self, err: Union[str, Exception], traceback_war: bool=True, err_color: str=Fore.LIGHTRED_EX, + war_color: str=Fore.LIGHTYELLOW_EX) -> None: """ Print error message to sys.stderr and if debug is true, print an exception Traceback if one exists. :param err: an Exception or error message to print out :param traceback_war: (optional) if True, print a message to let user know they can enable debug - :return: + :param err_color: (optional) color escape to output error with + :param war_color: (optional) color escape to output warning with """ if self.debug: import traceback @@ -589,14 +611,15 @@ class Cmd(cmd.Cmd): if isinstance(err, Exception): err_msg = "EXCEPTION of type '{}' occurred with message: '{}'\n".format(type(err).__name__, err) - sys.stderr.write(self.colorize(err_msg, 'red')) else: - err_msg = self.colorize("ERROR: {}\n".format(err), 'red') - sys.stderr.write(err_msg) + err_msg = "ERROR: {}\n".format(err) + err_msg = err_color + err_msg + Fore.RESET + self.decolorized_write(sys.stderr, err_msg) if traceback_war: war = "To enable full traceback, run the following command: 'set debug true'\n" - sys.stderr.write(self.colorize(war, 'yellow')) + war = war_color + war + Fore.RESET + self.decolorized_write(sys.stderr, war) def pfeedback(self, msg: str) -> None: """For printing nonessential feedback. Can be silenced with `quiet`. @@ -605,7 +628,7 @@ class Cmd(cmd.Cmd): if self.feedback_to_output: self.poutput(msg) else: - sys.stderr.write("{}\n".format(msg)) + self.decolorized_write(sys.stderr, "{}\n".format(msg)) def ppaged(self, msg: str, end: str='\n', chop: bool=False) -> None: """Print output using a pager if it would go off screen and stdout isn't currently being redirected. 
@@ -641,6 +664,9 @@ class Cmd(cmd.Cmd): # Don't attempt to use a pager that can block if redirecting or running a script (either text or Python) # Also only attempt to use a pager if actually running in a real fully functional terminal if functional_terminal and not self.redirecting and not self._in_py and not self._script_dir: + if self.colors.lower() == constants.COLORS_NEVER.lower(): + msg_str = utils.strip_ansi(msg_str) + pager = self.pager if chop: pager = self.pager_chop @@ -665,7 +691,7 @@ class Cmd(cmd.Cmd): except BrokenPipeError: # This occurs if a command's output is being piped to another process and that process closes before the # command is finished. If you would like your application to print a warning message, then set the - # broken_pipe_warning attribute to the message you want printed. + # broken_pipe_warning attribute to the message you want printed.` if self.broken_pipe_warning: sys.stderr.write(self.broken_pipe_warning) @@ -676,7 +702,7 @@ class Cmd(cmd.Cmd): is running on Windows, will return ``val`` unchanged. ``color`` should be one of the supported strings (or styles): red/blue/green/cyan/magenta, bold, underline""" - if self.colors and (self.stdout == self.initial_stdout): + if self.colors.lower() != constants.COLORS_NEVER.lower() and (self.stdout == self.initial_stdout): return self._colorcodes[color][True] + val + self._colorcodes[color][False] return val diff --git a/cmd2/constants.py b/cmd2/constants.py index d3e8a125..3c133b70 100644 --- a/cmd2/constants.py +++ b/cmd2/constants.py @@ -17,3 +17,8 @@ REDIRECTION_TOKENS = [REDIRECTION_PIPE, REDIRECTION_OUTPUT, REDIRECTION_APPEND] ANSI_ESCAPE_RE = re.compile(r'\x1b[^m]*m') LINE_FEED = '\n' + +# values for colors setting +COLORS_NEVER = 'Never' +COLORS_TERMINAL = 'Terminal' +COLORS_ALWAYS = 'Always' diff --git a/docs/settingchanges.rst b/docs/settingchanges.rst index 02955273..e08b6026 100644 --- a/docs/settingchanges.rst +++ b/docs/settingchanges.rst @@ -137,7 +137,7 @@ comments, is viewable from within a running application with:: (Cmd) set --long - colors: True # Colorized output (*nix only) + colors: Terminal # Allow colorized output continuation_prompt: > # On 2nd+ line of input debug: False # Show full error stack on error echo: False # Echo command issued into output @@ -150,5 +150,5 @@ with:: Any of these user-settable parameters can be set while running your app with the ``set`` command like so:: - set colors False + set colors Never diff --git a/docs/unfreefeatures.rst b/docs/unfreefeatures.rst index b5f9415d..364addc6 100644 --- a/docs/unfreefeatures.rst +++ b/docs/unfreefeatures.rst @@ -139,23 +139,43 @@ instead. These methods have these advantages: .. automethod:: cmd2.cmd2.Cmd.ppaged -color -===== +Colored Output +============== -Text output can be colored by wrapping it in the ``colorize`` method. +The output methods in the previous section all honor the ``colors`` setting, +which has three possible values: + +Never + poutput() and pfeedback() strip all ANSI escape sequences + which instruct the terminal to colorize output + +Terminal + (the default value) poutput() and pfeedback() do not strip any ANSI escape + sequences when the output is a terminal, but if the output is a pipe or a + file the escape sequences are stripped. If you want colorized output you + must add ANSI escape sequences, preferably using some python color library + like `plumbum.colors`, `colorama`, `blessings`, or `termcolor`. 
+ +Always + poutput() and pfeedback() never strip ANSI escape sequences, regardless of + the output destination + + +The previously recommended ``colorize`` method is now deprecated. -.. automethod:: cmd2.cmd2.Cmd.colorize .. _quiet: +Suppressing non-essential output +================================ -quiet -===== +The ``quiet`` setting controls whether ``self.pfeedback()`` actually produces +any output. If ``quiet`` is ``False``, then the output will be produced. If +``quiet`` is ``True``, no output will be produced. -Controls whether ``self.pfeedback('message')`` output is suppressed; -useful for non-essential feedback that the user may not always want -to read. ``quiet`` is only relevant if -``app.pfeedback`` is sometimes used. +This makes ``self.pfeedback()`` useful for non-essential output like status +messages. Users can control whether they would like to see these messages by changing +the value of the ``quiet`` setting. select diff --git a/examples/colors.py b/examples/colors.py new file mode 100755 index 00000000..8765aee0 --- /dev/null +++ b/examples/colors.py @@ -0,0 +1,142 @@ +#!/usr/bin/env python +# coding=utf-8 +""" +A sample application for cmd2. Demonstrating colorized output. + +Experiment with the command line options on the `speak` command to see how +different output colors ca + +The colors setting has three possible values: + +Never + poutput() and pfeedback() strip all ANSI escape sequences + which instruct the terminal to colorize output + +Terminal + (the default value) poutput() and pfeedback() do not strip any ANSI escape + sequences when the output is a terminal, but if the output is a pipe or a + file the escape sequences are stripped. If you want colorized output you + must add ANSI escape sequences, preferably using some python color library + like `plumbum.colors`, `colorama`, `blessings`, or `termcolor`. 
+ +Always + poutput() and pfeedback() never strip ANSI escape sequences, regardless of + the output destination +""" + +import random +import argparse + +import cmd2 +from colorama import Fore, Back + +FG_COLORS = { + 'black': Fore.BLACK, + 'red': Fore.RED, + 'green': Fore.GREEN, + 'yellow': Fore.YELLOW, + 'blue': Fore.BLUE, + 'magenta': Fore.MAGENTA, + 'cyan': Fore.CYAN, + 'white': Fore.WHITE, +} +BG_COLORS = { + 'black': Back.BLACK, + 'red': Back.RED, + 'green': Back.GREEN, + 'yellow': Back.YELLOW, + 'blue': Back.BLUE, + 'magenta': Back.MAGENTA, + 'cyan': Back.CYAN, + 'white': Back.WHITE, +} + + +class CmdLineApp(cmd2.Cmd): + """Example cmd2 application demonstrating colorized output.""" + + # Setting this true makes it run a shell command if a cmd2/cmd command doesn't exist + # default_to_shell = True + MUMBLES = ['like', '...', 'um', 'er', 'hmmm', 'ahh'] + MUMBLE_FIRST = ['so', 'like', 'well'] + MUMBLE_LAST = ['right?'] + + def __init__(self): + self.multiline_commands = ['orate'] + self.maxrepeats = 3 + + # Add stuff to settable and shortcuts before calling base class initializer + self.settable['maxrepeats'] = 'max repetitions for speak command' + self.shortcuts.update({'&': 'speak'}) + + # Set use_ipython to True to enable the "ipy" command which embeds and interactive IPython shell + super().__init__(use_ipython=True) + + speak_parser = argparse.ArgumentParser() + speak_parser.add_argument('-p', '--piglatin', action='store_true', help='atinLay') + speak_parser.add_argument('-s', '--shout', action='store_true', help='N00B EMULATION MODE') + speak_parser.add_argument('-r', '--repeat', type=int, help='output [n] times') + speak_parser.add_argument('-f', '--fg', choices=FG_COLORS, help='foreground color to apply to output') + speak_parser.add_argument('-b', '--bg', choices=BG_COLORS, help='background color to apply to output') + speak_parser.add_argument('words', nargs='+', help='words to say') + + @cmd2.with_argparser(speak_parser) + def do_speak(self, args): + """Repeats what you tell me to.""" + words = [] + for word in args.words: + if args.piglatin: + word = '%s%say' % (word[1:], word[0]) + if args.shout: + word = word.upper() + words.append(word) + + repetitions = args.repeat or 1 + + color_on = '' + if args.fg: + color_on += FG_COLORS[args.fg] + if args.bg: + color_on += BG_COLORS[args.bg] + color_off = Fore.RESET + Back.RESET + + for i in range(min(repetitions, self.maxrepeats)): + # .poutput handles newlines, and accommodates output redirection too + self.poutput(color_on + ' '.join(words) + color_off) + + do_say = do_speak # now "say" is a synonym for "speak" + do_orate = do_speak # another synonym, but this one takes multi-line input + + mumble_parser = argparse.ArgumentParser() + mumble_parser.add_argument('-r', '--repeat', type=int, help='how many times to repeat') + mumble_parser.add_argument('-f', '--fg', help='foreground color to apply to output') + mumble_parser.add_argument('-b', '--bg', help='background color to apply to output') + mumble_parser.add_argument('words', nargs='+', help='words to say') + + @cmd2.with_argparser(mumble_parser) + def do_mumble(self, args): + """Mumbles what you tell me to.""" + color_on = '' + if args.fg and args.fg in FG_COLORS: + color_on += FG_COLORS[args.fg] + if args.bg and args.bg in BG_COLORS: + color_on += BG_COLORS[args.bg] + color_off = Fore.RESET + Back.RESET + + repetitions = args.repeat or 1 + for i in range(min(repetitions, self.maxrepeats)): + output = [] + if random.random() < .33: + 
output.append(random.choice(self.MUMBLE_FIRST)) + for word in args.words: + if random.random() < .40: + output.append(random.choice(self.MUMBLES)) + output.append(word) + if random.random() < .25: + output.append(random.choice(self.MUMBLE_LAST)) + self.poutput(color_on + ' '.join(output) + color_off) + + +if __name__ == '__main__': + c = CmdLineApp() + c.cmdloop() diff --git a/examples/pirate.py b/examples/pirate.py index 34906a9f..22274dbf 100755 --- a/examples/pirate.py +++ b/examples/pirate.py @@ -8,8 +8,21 @@ It demonstrates many features of cmd2. """ import argparse +from colorama import Fore + import cmd2 +COLORS = { + 'black': Fore.BLACK, + 'red': Fore.RED, + 'green': Fore.GREEN, + 'yellow': Fore.YELLOW, + 'blue': Fore.BLUE, + 'magenta': Fore.MAGENTA, + 'cyan': Fore.CYAN, + 'white': Fore.WHITE, +} + class Pirate(cmd2.Cmd): """A piratical example cmd2 application involving looting and drinking.""" @@ -17,10 +30,10 @@ class Pirate(cmd2.Cmd): self.default_to_shell = True self.multiline_commands = ['sing'] self.terminators = self.terminators + ['...'] - self.songcolor = 'blue' + self.songcolor = Fore.BLUE # Add stuff to settable and/or shortcuts before calling base class initializer - self.settable['songcolor'] = 'Color to ``sing`` in (red/blue/green/cyan/magenta, bold, underline)' + self.settable['songcolor'] = 'Color to ``sing`` in (black/red/green/yellow/blue/magenta/cyan/white)' self.shortcuts.update({'~': 'sing'}) """Initialize the base class as well as this one""" @@ -68,7 +81,8 @@ class Pirate(cmd2.Cmd): def do_sing(self, arg): """Sing a colorful song.""" - self.poutput(self.colorize(arg, self.songcolor)) + color_escape = COLORS.get(self.songcolor, default=Fore.RESET) + self.poutput(arg, color=color_escape) yo_parser = argparse.ArgumentParser() yo_parser.add_argument('--ho', type=int, default=2, help="How often to chant 'ho'") diff --git a/examples/plumbum_colors.py b/examples/plumbum_colors.py new file mode 100755 index 00000000..942eaf80 --- /dev/null +++ b/examples/plumbum_colors.py @@ -0,0 +1,144 @@ +#!/usr/bin/env python +# coding=utf-8 +""" +A sample application for cmd2. Demonstrating colorized output using the plumbum package. + +Experiment with the command line options on the `speak` command to see how +different output colors ca + +The colors setting has three possible values: + +Never + poutput() and pfeedback() strip all ANSI escape sequences + which instruct the terminal to colorize output + +Terminal + (the default value) poutput() and pfeedback() do not strip any ANSI escape + sequences when the output is a terminal, but if the output is a pipe or a + file the escape sequences are stripped. If you want colorized output you + must add ANSI escape sequences, preferably using some python color library + like `plumbum.colors`, `colorama`, `blessings`, or `termcolor`. + +Always + poutput() and pfeedback() never strip ANSI escape sequences, regardless of + the output destination + +WARNING: This example requires the plumbum package, which isn't normally required by cmd2. 
+""" + +import random +import argparse + +import cmd2 +from plumbum.colors import fg, bg, reset + +FG_COLORS = { + 'black': fg.Black, + 'red': fg.DarkRedA, + 'green': fg.MediumSpringGreen, + 'yellow': fg.LightYellow, + 'blue': fg.RoyalBlue1, + 'magenta': fg.Purple, + 'cyan': fg.SkyBlue1, + 'white': fg.White, +} +BG_COLORS = { + 'black': bg.BLACK, + 'red': bg.DarkRedA, + 'green': bg.MediumSpringGreen, + 'yellow': bg.LightYellow, + 'blue': bg.RoyalBlue1, + 'magenta': bg.Purple, + 'cyan': bg.SkyBlue1, + 'white': bg.White, +} + + +class CmdLineApp(cmd2.Cmd): + """Example cmd2 application demonstrating colorized output.""" + + # Setting this true makes it run a shell command if a cmd2/cmd command doesn't exist + # default_to_shell = True + MUMBLES = ['like', '...', 'um', 'er', 'hmmm', 'ahh'] + MUMBLE_FIRST = ['so', 'like', 'well'] + MUMBLE_LAST = ['right?'] + + def __init__(self): + self.multiline_commands = ['orate'] + self.maxrepeats = 3 + + # Add stuff to settable and shortcuts before calling base class initializer + self.settable['maxrepeats'] = 'max repetitions for speak command' + self.shortcuts.update({'&': 'speak'}) + + # Set use_ipython to True to enable the "ipy" command which embeds and interactive IPython shell + super().__init__(use_ipython=True) + + speak_parser = argparse.ArgumentParser() + speak_parser.add_argument('-p', '--piglatin', action='store_true', help='atinLay') + speak_parser.add_argument('-s', '--shout', action='store_true', help='N00B EMULATION MODE') + speak_parser.add_argument('-r', '--repeat', type=int, help='output [n] times') + speak_parser.add_argument('-f', '--fg', choices=FG_COLORS, help='foreground color to apply to output') + speak_parser.add_argument('-b', '--bg', choices=BG_COLORS, help='background color to apply to output') + speak_parser.add_argument('words', nargs='+', help='words to say') + + @cmd2.with_argparser(speak_parser) + def do_speak(self, args): + """Repeats what you tell me to.""" + words = [] + for word in args.words: + if args.piglatin: + word = '%s%say' % (word[1:], word[0]) + if args.shout: + word = word.upper() + words.append(word) + + repetitions = args.repeat or 1 + + color_on = '' + if args.fg: + color_on += FG_COLORS[args.fg] + if args.bg: + color_on += BG_COLORS[args.bg] + color_off = reset + + for i in range(min(repetitions, self.maxrepeats)): + # .poutput handles newlines, and accommodates output redirection too + self.poutput(color_on + ' '.join(words) + color_off) + + do_say = do_speak # now "say" is a synonym for "speak" + do_orate = do_speak # another synonym, but this one takes multi-line input + + mumble_parser = argparse.ArgumentParser() + mumble_parser.add_argument('-r', '--repeat', type=int, help='how many times to repeat') + mumble_parser.add_argument('-f', '--fg', help='foreground color to apply to output') + mumble_parser.add_argument('-b', '--bg', help='background color to apply to output') + mumble_parser.add_argument('words', nargs='+', help='words to say') + + @cmd2.with_argparser(mumble_parser) + def do_mumble(self, args): + """Mumbles what you tell me to.""" + color_on = '' + if args.fg and args.fg in FG_COLORS: + color_on += FG_COLORS[args.fg] + if args.bg and args.bg in BG_COLORS: + color_on += BG_COLORS[args.bg] + color_off = Fore.RESET + Back.RESET + + repetitions = args.repeat or 1 + for i in range(min(repetitions, self.maxrepeats)): + output = [] + if random.random() < .33: + output.append(random.choice(self.MUMBLE_FIRST)) + for word in args.words: + if random.random() < .40: + 
output.append(random.choice(self.MUMBLES)) + output.append(word) + if random.random() < .25: + output.append(random.choice(self.MUMBLE_LAST)) + self.poutput(color_on + ' '.join(output) + color_off) + + +if __name__ == '__main__': + c = CmdLineApp() + c.cmdloop() diff --git a/examples/python_scripting.py b/examples/python_scripting.py index 4c959f58..0b0030a5 100755 --- a/examples/python_scripting.py +++ b/examples/python_scripting.py @@ -17,6 +17,8 @@ This application and the "scripts/conditional.py" script serve as an example for import argparse import os +from colorama import Fore + import cmd2 @@ -33,7 +35,7 @@ class CmdLineApp(cmd2.Cmd): def _set_prompt(self): """Set prompt so it displays the current working directory.""" self.cwd = os.getcwd() - self.prompt = self.colorize('{!r} $ '.format(self.cwd), 'cyan') + self.prompt = Fore.CYAN + '{!r} $ '.format(self.cwd) + Fore.RESET def postcmd(self, stop: bool, line: str) -> bool: """Hook method executed just after a command dispatch is finished. diff --git a/examples/transcripts/exampleSession.txt b/examples/transcripts/exampleSession.txt index 6318776f..38fb0659 100644 --- a/examples/transcripts/exampleSession.txt +++ b/examples/transcripts/exampleSession.txt @@ -3,7 +3,7 @@ # The regex for editor will match whatever program you use. # regexes on prompts just make the trailing space obvious (Cmd) set -colors: /(True|False)/ +colors: /(Terminal|Always|Never)/ continuation_prompt: >/ / debug: False echo: False diff --git a/examples/transcripts/transcript_regex.txt b/examples/transcripts/transcript_regex.txt index 08588ab1..6980fac6 100644 --- a/examples/transcripts/transcript_regex.txt +++ b/examples/transcripts/transcript_regex.txt @@ -3,7 +3,7 @@ # The regex for editor will match whatever program you use. # regexes on prompts just make the trailing space obvious (Cmd) set -colors: /(True|False)/ +colors: /(Terminal|Always|Never)/ continuation_prompt: >/ / debug: False echo: False
Consider refactoring how colorize and colors work in cmd2

Currently ``cmd2`` has:

* a required dependency on [colorama](https://github.com/tartley/colorama)
  * provides cross-platform colored terminal text on Windows, macOS, and Linux
* a built-in ``colorize`` method in ``Cmd`` class that provides colored terminal text on macOS and Linux
  * uses an internal dictionary which maps string color names to ANSI color escape codes

Having both of these feels awkward and redundant. There are several possible options I could see for refactoring this including the following:

1. Make ``colorama`` an optional dependency and refactor the **colorize** method to use ``colorama`` if present and to provide no color if it isn't
2. Keep ``colorama`` as a required dependency and refactor the **colorize** method to use it
3. Remove the dependency on ``colorama`` and use **colorize** where it is currently being used

Other possible additions include things such as:

- adding an optional **color** argument to methods such as ``poutput``
- refactoring the underlying implementation of how colors are stored
- creating a separate module which provides string color codes based on a color name lookup and wraps ``colorama`` and/or ``colored``

I would be curious to hear other people's opinions regarding how to best proceed with this. What are your thoughts?
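For reference, the accompanying patch keeps ``colorama`` as a required dependency, adds an optional ``color`` argument to ``poutput()``, and introduces a three-valued ``colors`` setting (Terminal/Always/Never). A hedged sketch of what that looks like from an application, assuming the patched ``poutput()`` signature (the command and message are made up):

```python
from colorama import Fore

import cmd2


class ColorApp(cmd2.Cmd):
    def do_greet(self, args):
        # With colors = 'Terminal' (the default), escape codes are kept when
        # stdout is a tty and stripped when output is piped or redirected.
        self.poutput('Hello, World!', color=Fore.CYAN)


if __name__ == '__main__':
    ColorApp().cmdloop()
```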
python-cmd2/cmd2
diff --git a/tests/conftest.py b/tests/conftest.py index 561f281b..39aa3473 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -82,11 +82,8 @@ SHORTCUTS_TXT = """Shortcuts for other commands: @@: _relative_load """ -expect_colors = True -if sys.platform.startswith('win'): - expect_colors = False # Output from the show command with default settings -SHOW_TXT = """colors: {} +SHOW_TXT = """colors: Terminal continuation_prompt: > debug: False echo: False @@ -96,14 +93,10 @@ locals_in_py: False prompt: (Cmd) quiet: False timing: False -""".format(expect_colors) +""" -if expect_colors: - color_str = 'True ' -else: - color_str = 'False' SHOW_LONG = """ -colors: {} # Colorized output (*nix only) +colors: Terminal # Allow colorized output (valid values: Terminal, Always, Never) continuation_prompt: > # On 2nd+ line of input debug: False # Show full error stack on error echo: False # Echo command issued into output @@ -113,8 +106,7 @@ locals_in_py: False # Allow access to your application in py via self prompt: (Cmd) # The prompt issued to solicit input quiet: False # Don't print nonessential feedback timing: False # Report execution times -""".format(color_str) - +""" def normalize(block): """ Normalize a block of text to perform comparison. diff --git a/tests/scripts/postcmds.txt b/tests/scripts/postcmds.txt index 2b478b57..dea8f265 100644 --- a/tests/scripts/postcmds.txt +++ b/tests/scripts/postcmds.txt @@ -1,1 +1,1 @@ -set colors off +set colors Never diff --git a/tests/scripts/precmds.txt b/tests/scripts/precmds.txt index d0b27fb6..0ae7eae8 100644 --- a/tests/scripts/precmds.txt +++ b/tests/scripts/precmds.txt @@ -1,1 +1,1 @@ -set colors on +set colors Always diff --git a/tests/test_cmd2.py b/tests/test_cmd2.py index c6a90fdf..dece1ab4 100644 --- a/tests/test_cmd2.py +++ b/tests/test_cmd2.py @@ -13,6 +13,7 @@ import os import sys import tempfile +from colorama import Fore, Back, Style import pytest # Python 3.5 had some regressions in the unitest.mock module, so use 3rd party mock if available @@ -564,11 +565,11 @@ def test_load_nested_loads(base_app, request): expected = """ %s _relative_load precmds.txt -set colors on +set colors Always help shortcuts _relative_load postcmds.txt -set colors off""" % initial_load +set colors Never""" % initial_load assert run_cmd(base_app, 'history -s') == normalize(expected) @@ -586,11 +587,11 @@ def test_base_runcmds_plus_hooks(base_app, request): 'load ' + postfilepath]) expected = """ load %s -set colors on +set colors Always help shortcuts load %s -set colors off""" % (prefilepath, postfilepath) +set colors Never""" % (prefilepath, postfilepath) assert run_cmd(base_app, 'history -s') == normalize(expected) @@ -817,12 +818,7 @@ def test_base_colorize(base_app): # But if we create a fresh Cmd() instance, it will fresh_app = cmd2.Cmd() color_test = fresh_app.colorize('Test', 'red') - # Actually, colorization only ANSI escape codes is only applied on non-Windows systems - if sys.platform == 'win32': - assert color_test == 'Test' - else: - assert color_test == '\x1b[31mTest\x1b[39m' - + assert color_test == '\x1b[31mTest\x1b[39m' def _expected_no_editor_error(): expected_exception = 'OSError' @@ -1109,22 +1105,22 @@ def test_ansi_prompt_not_esacped(base_app): def test_ansi_prompt_escaped(): from cmd2.rl_utils import rl_make_safe_prompt app = cmd2.Cmd() - color = 'cyan' + color = Fore.CYAN prompt = 'InColor' - color_prompt = app.colorize(prompt, color) + color_prompt = color + prompt + Fore.RESET readline_hack_start = "\x01" readline_hack_end = "\x02" 
readline_safe_prompt = rl_make_safe_prompt(color_prompt) + assert prompt != color_prompt if sys.platform.startswith('win'): - # colorize() does nothing on Windows due to lack of ANSI color support - assert prompt == color_prompt - assert readline_safe_prompt == prompt + # PyReadline on Windows doesn't suffer from the GNU readline bug which requires the hack + assert readline_safe_prompt.startswith(color) + assert readline_safe_prompt.endswith(Fore.RESET) else: - assert prompt != color_prompt - assert readline_safe_prompt.startswith(readline_hack_start + app._colorcodes[color][True] + readline_hack_end) - assert readline_safe_prompt.endswith(readline_hack_start + app._colorcodes[color][False] + readline_hack_end) + assert readline_safe_prompt.startswith(readline_hack_start + color + readline_hack_end) + assert readline_safe_prompt.endswith(readline_hack_start + Fore.RESET + readline_hack_end) class HelpApp(cmd2.Cmd): @@ -1750,6 +1746,24 @@ def test_poutput_none(base_app): expected = '' assert out == expected +def test_poutput_color_always(base_app): + msg = 'Hello World' + color = Fore.CYAN + base_app.colors = 'Always' + base_app.poutput(msg, color=color) + out = base_app.stdout.getvalue() + expected = color + msg + '\n' + Fore.RESET + assert out == expected + +def test_poutput_color_never(base_app): + msg = 'Hello World' + color = Fore.CYAN + base_app.colors = 'Never' + base_app.poutput(msg, color=color) + out = base_app.stdout.getvalue() + expected = msg + '\n' + assert out == expected + def test_alias(base_app, capsys): # Create the alias @@ -1968,7 +1982,6 @@ def test_bad_history_file_path(capsys, request): assert 'readline cannot read' in err - def test_get_all_commands(base_app): # Verify that the base app has the expected commands commands = base_app.get_all_commands() @@ -2055,3 +2068,136 @@ def test_exit_code_nonzero(exit_code_repl): app.cmdloop() out = app.stdout.getvalue() assert out == expected + + +class ColorsApp(cmd2.Cmd): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def do_echo(self, args): + self.poutput(args) + self.perror(args, False) + + def do_echo_error(self, args): + color_on = Fore.RED + Back.BLACK + color_off = Style.RESET_ALL + self.poutput(color_on + args + color_off) + # perror uses colors by default + self.perror(args, False) + +def test_colors_default(): + app = ColorsApp() + assert app.colors == cmd2.constants.COLORS_TERMINAL + +def test_colors_pouterr_always_tty(mocker, capsys): + app = ColorsApp() + app.colors = cmd2.constants.COLORS_ALWAYS + mocker.patch.object(app.stdout, 'isatty', return_value=True) + mocker.patch.object(sys.stderr, 'isatty', return_value=True) + + app.onecmd_plus_hooks('echo_error oopsie') + out, err = capsys.readouterr() + # if colors are on, the output should have some escape sequences in it + assert len(out) > len('oopsie\n') + assert 'oopsie' in out + assert len(err) > len('Error: oopsie\n') + assert 'ERROR: oopsie' in err + + # but this one shouldn't + app.onecmd_plus_hooks('echo oopsie') + out, err = capsys.readouterr() + assert out == 'oopsie\n' + # errors always have colors + assert len(err) > len('Error: oopsie\n') + assert 'ERROR: oopsie' in err + +def test_colors_pouterr_always_notty(mocker, capsys): + app = ColorsApp() + app.colors = cmd2.constants.COLORS_ALWAYS + mocker.patch.object(app.stdout, 'isatty', return_value=False) + mocker.patch.object(sys.stderr, 'isatty', return_value=False) + + app.onecmd_plus_hooks('echo_error oopsie') + out, err = capsys.readouterr() + # if colors are on, the 
output should have some escape sequences in it + assert len(out) > len('oopsie\n') + assert 'oopsie' in out + assert len(err) > len('Error: oopsie\n') + assert 'ERROR: oopsie' in err + + # but this one shouldn't + app.onecmd_plus_hooks('echo oopsie') + out, err = capsys.readouterr() + assert out == 'oopsie\n' + # errors always have colors + assert len(err) > len('Error: oopsie\n') + assert 'ERROR: oopsie' in err + +def test_colors_terminal_tty(mocker, capsys): + app = ColorsApp() + app.colors = cmd2.constants.COLORS_TERMINAL + mocker.patch.object(app.stdout, 'isatty', return_value=True) + mocker.patch.object(sys.stderr, 'isatty', return_value=True) + + app.onecmd_plus_hooks('echo_error oopsie') + # if colors are on, the output should have some escape sequences in it + out, err = capsys.readouterr() + assert len(out) > len('oopsie\n') + assert 'oopsie' in out + assert len(err) > len('Error: oopsie\n') + assert 'ERROR: oopsie' in err + + # but this one shouldn't + app.onecmd_plus_hooks('echo oopsie') + out, err = capsys.readouterr() + assert out == 'oopsie\n' + assert len(err) > len('Error: oopsie\n') + assert 'ERROR: oopsie' in err + +def test_colors_terminal_notty(mocker, capsys): + app = ColorsApp() + app.colors = cmd2.constants.COLORS_TERMINAL + mocker.patch.object(app.stdout, 'isatty', return_value=False) + mocker.patch.object(sys.stderr, 'isatty', return_value=False) + + app.onecmd_plus_hooks('echo_error oopsie') + out, err = capsys.readouterr() + assert out == 'oopsie\n' + assert err == 'ERROR: oopsie\n' + + app.onecmd_plus_hooks('echo oopsie') + out, err = capsys.readouterr() + assert out == 'oopsie\n' + assert err == 'ERROR: oopsie\n' + +def test_colors_never_tty(mocker, capsys): + app = ColorsApp() + app.colors = cmd2.constants.COLORS_NEVER + mocker.patch.object(app.stdout, 'isatty', return_value=True) + mocker.patch.object(sys.stderr, 'isatty', return_value=True) + + app.onecmd_plus_hooks('echo_error oopsie') + out, err = capsys.readouterr() + assert out == 'oopsie\n' + assert err == 'ERROR: oopsie\n' + + app.onecmd_plus_hooks('echo oopsie') + out, err = capsys.readouterr() + assert out == 'oopsie\n' + assert err == 'ERROR: oopsie\n' + +def test_colors_never_notty(mocker, capsys): + app = ColorsApp() + app.colors = cmd2.constants.COLORS_NEVER + mocker.patch.object(app.stdout, 'isatty', return_value=False) + mocker.patch.object(sys.stderr, 'isatty', return_value=False) + + app.onecmd_plus_hooks('echo_error oopsie') + out, err = capsys.readouterr() + assert out == 'oopsie\n' + assert err == 'ERROR: oopsie\n' + + app.onecmd_plus_hooks('echo oopsie') + out, err = capsys.readouterr() + assert out == 'oopsie\n' + assert err == 'ERROR: oopsie\n' diff --git a/tests/transcripts/regex_set.txt b/tests/transcripts/regex_set.txt index b818c464..d45672a7 100644 --- a/tests/transcripts/regex_set.txt +++ b/tests/transcripts/regex_set.txt @@ -1,10 +1,10 @@ # Run this transcript with "python example.py -t transcript_regex.txt" -# The regex for colors is because no color on Windows. +# The regex for colors shows all possible settings for colors # The regex for editor will match whatever program you use. # Regexes on prompts just make the trailing space obvious (Cmd) set -colors: /(True|False)/ +colors: /(Terminal|Always|Never)/ continuation_prompt: >/ / debug: False echo: False
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 9 }
0.9
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-mock", "pytest-cov", "codecov" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.16 anyio==4.9.0 astroid==3.3.9 attrs==25.3.0 babel==2.17.0 backports.tarfile==1.2.0 cachetools==5.5.2 certifi==2025.1.31 cffi==1.17.1 chardet==5.2.0 charset-normalizer==3.4.1 click==8.1.8 -e git+https://github.com/python-cmd2/cmd2.git@982d2f2c2b91c04ecd2ba45dc2f6e1d26d77e4c1#egg=cmd2 codecov==2.1.13 colorama==0.4.6 coverage==7.8.0 cryptography==44.0.2 dill==0.3.9 distlib==0.3.9 docutils==0.21.2 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work filelock==3.18.0 h11==0.14.0 id==1.5.0 idna==3.10 imagesize==1.4.1 importlib_metadata==8.6.1 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work invoke==2.2.0 isort==6.0.1 jaraco.classes==3.4.0 jaraco.context==6.0.1 jaraco.functools==4.1.0 jeepney==0.9.0 Jinja2==3.1.6 keyring==25.6.0 markdown-it-py==3.0.0 MarkupSafe==3.0.2 mccabe==0.7.0 mdurl==0.1.2 more-itertools==10.6.0 nh3==0.2.21 packaging @ file:///croot/packaging_1734472117206/work platformdirs==4.3.7 pluggy @ file:///croot/pluggy_1733169602837/work pycparser==2.22 Pygments==2.19.1 pylint==3.3.6 pyperclip==1.9.0 pyproject-api==1.9.0 pytest @ file:///croot/pytest_1738938843180/work pytest-cov==6.0.0 pytest-mock==3.14.0 readme_renderer==44.0 requests==2.32.3 requests-toolbelt==1.0.0 rfc3986==2.0.0 rich==14.0.0 SecretStorage==3.3.3 sniffio==1.3.1 snowballstemmer==2.2.0 Sphinx==7.4.7 sphinx-autobuild==2024.10.3 sphinx-rtd-theme==3.0.2 sphinxcontrib-applehelp==2.0.0 sphinxcontrib-devhelp==2.0.0 sphinxcontrib-htmlhelp==2.1.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==2.0.0 sphinxcontrib-serializinghtml==2.0.0 starlette==0.46.1 tomli==2.2.1 tomlkit==0.13.2 tox==4.25.0 twine==6.1.0 typing_extensions==4.13.0 urllib3==2.3.0 uvicorn==0.34.0 virtualenv==20.29.3 watchfiles==1.0.4 wcwidth==0.2.13 websockets==15.0.1 zipp==3.21.0
name: cmd2 channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.16 - anyio==4.9.0 - astroid==3.3.9 - attrs==25.3.0 - babel==2.17.0 - backports-tarfile==1.2.0 - cachetools==5.5.2 - certifi==2025.1.31 - cffi==1.17.1 - chardet==5.2.0 - charset-normalizer==3.4.1 - click==8.1.8 - codecov==2.1.13 - colorama==0.4.6 - coverage==7.8.0 - cryptography==44.0.2 - dill==0.3.9 - distlib==0.3.9 - docutils==0.21.2 - filelock==3.18.0 - h11==0.14.0 - id==1.5.0 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==8.6.1 - invoke==2.2.0 - isort==6.0.1 - jaraco-classes==3.4.0 - jaraco-context==6.0.1 - jaraco-functools==4.1.0 - jeepney==0.9.0 - jinja2==3.1.6 - keyring==25.6.0 - markdown-it-py==3.0.0 - markupsafe==3.0.2 - mccabe==0.7.0 - mdurl==0.1.2 - more-itertools==10.6.0 - nh3==0.2.21 - platformdirs==4.3.7 - pycparser==2.22 - pygments==2.19.1 - pylint==3.3.6 - pyperclip==1.9.0 - pyproject-api==1.9.0 - pytest-cov==6.0.0 - pytest-mock==3.14.0 - readme-renderer==44.0 - requests==2.32.3 - requests-toolbelt==1.0.0 - rfc3986==2.0.0 - rich==14.0.0 - secretstorage==3.3.3 - sniffio==1.3.1 - snowballstemmer==2.2.0 - sphinx==7.4.7 - sphinx-autobuild==2024.10.3 - sphinx-rtd-theme==3.0.2 - sphinxcontrib-applehelp==2.0.0 - sphinxcontrib-devhelp==2.0.0 - sphinxcontrib-htmlhelp==2.1.0 - sphinxcontrib-jquery==4.1 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==2.0.0 - sphinxcontrib-serializinghtml==2.0.0 - starlette==0.46.1 - tomli==2.2.1 - tomlkit==0.13.2 - tox==4.25.0 - twine==6.1.0 - typing-extensions==4.13.0 - urllib3==2.3.0 - uvicorn==0.34.0 - virtualenv==20.29.3 - watchfiles==1.0.4 - wcwidth==0.2.13 - websockets==15.0.1 - zipp==3.21.0 prefix: /opt/conda/envs/cmd2
[ "tests/test_cmd2.py::test_base_show", "tests/test_cmd2.py::test_base_show_long", "tests/test_cmd2.py::test_base_show_readonly", "tests/test_cmd2.py::test_poutput_color_always", "tests/test_cmd2.py::test_poutput_color_never", "tests/test_cmd2.py::test_colors_default", "tests/test_cmd2.py::test_colors_pouterr_always_tty", "tests/test_cmd2.py::test_colors_pouterr_always_notty", "tests/test_cmd2.py::test_colors_terminal_tty", "tests/test_cmd2.py::test_colors_terminal_notty", "tests/test_cmd2.py::test_colors_never_tty", "tests/test_cmd2.py::test_colors_never_notty" ]
[ "tests/test_cmd2.py::test_which_editor_good" ]
[ "tests/test_cmd2.py::test_version", "tests/test_cmd2.py::test_empty_statement", "tests/test_cmd2.py::test_base_help", "tests/test_cmd2.py::test_base_help_verbose", "tests/test_cmd2.py::test_base_help_history", "tests/test_cmd2.py::test_base_argparse_help", "tests/test_cmd2.py::test_base_invalid_option", "tests/test_cmd2.py::test_base_shortcuts", "tests/test_cmd2.py::test_cast", "tests/test_cmd2.py::test_cast_problems", "tests/test_cmd2.py::test_base_set", "tests/test_cmd2.py::test_set_not_supported", "tests/test_cmd2.py::test_set_quiet", "tests/test_cmd2.py::test_base_shell", "tests/test_cmd2.py::test_base_py", "tests/test_cmd2.py::test_base_run_python_script", "tests/test_cmd2.py::test_base_run_pyscript", "tests/test_cmd2.py::test_recursive_pyscript_not_allowed", "tests/test_cmd2.py::test_pyscript_with_nonexist_file", "tests/test_cmd2.py::test_pyscript_with_exception", "tests/test_cmd2.py::test_pyscript_requires_an_argument", "tests/test_cmd2.py::test_base_error", "tests/test_cmd2.py::test_history_span", "tests/test_cmd2.py::test_history_get", "tests/test_cmd2.py::test_base_history", "tests/test_cmd2.py::test_history_script_format", "tests/test_cmd2.py::test_history_with_string_argument", "tests/test_cmd2.py::test_history_with_integer_argument", "tests/test_cmd2.py::test_history_with_integer_span", "tests/test_cmd2.py::test_history_with_span_start", "tests/test_cmd2.py::test_history_with_span_end", "tests/test_cmd2.py::test_history_with_span_index_error", "tests/test_cmd2.py::test_history_output_file", "tests/test_cmd2.py::test_history_edit", "tests/test_cmd2.py::test_history_run_all_commands", "tests/test_cmd2.py::test_history_run_one_command", "tests/test_cmd2.py::test_history_clear", "tests/test_cmd2.py::test_base_load", "tests/test_cmd2.py::test_load_with_empty_args", "tests/test_cmd2.py::test_load_with_nonexistent_file", "tests/test_cmd2.py::test_load_with_directory", "tests/test_cmd2.py::test_load_with_empty_file", "tests/test_cmd2.py::test_load_with_binary_file", "tests/test_cmd2.py::test_load_with_utf8_file", "tests/test_cmd2.py::test_load_nested_loads", "tests/test_cmd2.py::test_base_runcmds_plus_hooks", "tests/test_cmd2.py::test_base_relative_load", "tests/test_cmd2.py::test_relative_load_requires_an_argument", "tests/test_cmd2.py::test_output_redirection", "tests/test_cmd2.py::test_output_redirection_to_nonexistent_directory", "tests/test_cmd2.py::test_output_redirection_to_too_long_filename", "tests/test_cmd2.py::test_feedback_to_output_true", "tests/test_cmd2.py::test_feedback_to_output_false", "tests/test_cmd2.py::test_allow_redirection", "tests/test_cmd2.py::test_pipe_to_shell", "tests/test_cmd2.py::test_pipe_to_shell_error", "tests/test_cmd2.py::test_base_timing", "tests/test_cmd2.py::test_base_debug", "tests/test_cmd2.py::test_base_colorize", "tests/test_cmd2.py::test_edit_no_editor", "tests/test_cmd2.py::test_edit_file", "tests/test_cmd2.py::test_edit_file_with_spaces", "tests/test_cmd2.py::test_edit_blank", "tests/test_cmd2.py::test_base_py_interactive", "tests/test_cmd2.py::test_exclude_from_history", "tests/test_cmd2.py::test_base_cmdloop_with_queue", "tests/test_cmd2.py::test_base_cmdloop_without_queue", "tests/test_cmd2.py::test_cmdloop_without_rawinput", "tests/test_cmd2.py::test_precmd_hook_success", "tests/test_cmd2.py::test_precmd_hook_failure", "tests/test_cmd2.py::test_interrupt_quit", "tests/test_cmd2.py::test_interrupt_noquit", "tests/test_cmd2.py::test_default_to_shell_unknown", "tests/test_cmd2.py::test_default_to_shell_good", 
"tests/test_cmd2.py::test_default_to_shell_failure", "tests/test_cmd2.py::test_ansi_prompt_not_esacped", "tests/test_cmd2.py::test_ansi_prompt_escaped", "tests/test_cmd2.py::test_custom_command_help", "tests/test_cmd2.py::test_custom_help_menu", "tests/test_cmd2.py::test_help_undocumented", "tests/test_cmd2.py::test_help_overridden_method", "tests/test_cmd2.py::test_help_cat_base", "tests/test_cmd2.py::test_help_cat_verbose", "tests/test_cmd2.py::test_select_options", "tests/test_cmd2.py::test_select_invalid_option", "tests/test_cmd2.py::test_select_list_of_strings", "tests/test_cmd2.py::test_select_list_of_tuples", "tests/test_cmd2.py::test_select_uneven_list_of_tuples", "tests/test_cmd2.py::test_help_with_no_docstring", "tests/test_cmd2.py::test_which_editor_bad", "tests/test_cmd2.py::test_multiline_complete_empty_statement_raises_exception", "tests/test_cmd2.py::test_multiline_complete_statement_without_terminator", "tests/test_cmd2.py::test_multiline_complete_statement_with_unclosed_quotes", "tests/test_cmd2.py::test_clipboard_failure", "tests/test_cmd2.py::test_commandresult_truthy", "tests/test_cmd2.py::test_commandresult_falsy", "tests/test_cmd2.py::test_is_text_file_bad_input", "tests/test_cmd2.py::test_eof", "tests/test_cmd2.py::test_eos", "tests/test_cmd2.py::test_echo", "tests/test_cmd2.py::test_pseudo_raw_input_tty_rawinput_true", "tests/test_cmd2.py::test_pseudo_raw_input_tty_rawinput_false", "tests/test_cmd2.py::test_pseudo_raw_input_piped_rawinput_true_echo_true", "tests/test_cmd2.py::test_pseudo_raw_input_piped_rawinput_true_echo_false", "tests/test_cmd2.py::test_pseudo_raw_input_piped_rawinput_false_echo_true", "tests/test_cmd2.py::test_pseudo_raw_input_piped_rawinput_false_echo_false", "tests/test_cmd2.py::test_raw_input", "tests/test_cmd2.py::test_stdin_input", "tests/test_cmd2.py::test_empty_stdin_input", "tests/test_cmd2.py::test_poutput_string", "tests/test_cmd2.py::test_poutput_zero", "tests/test_cmd2.py::test_poutput_empty_string", "tests/test_cmd2.py::test_poutput_none", "tests/test_cmd2.py::test_alias", "tests/test_cmd2.py::test_alias_with_quotes", "tests/test_cmd2.py::test_alias_lookup_invalid_alias", "tests/test_cmd2.py::test_unalias", "tests/test_cmd2.py::test_unalias_all", "tests/test_cmd2.py::test_unalias_non_existing", "tests/test_cmd2.py::test_create_invalid_alias[\">\"]", "tests/test_cmd2.py::test_create_invalid_alias[\"no>pe\"]", "tests/test_cmd2.py::test_create_invalid_alias[\"no", "tests/test_cmd2.py::test_create_invalid_alias[\"nopipe|\"]", "tests/test_cmd2.py::test_create_invalid_alias[\"noterm;\"]", "tests/test_cmd2.py::test_create_invalid_alias[noembedded\"quotes]", "tests/test_cmd2.py::test_complete_unalias", "tests/test_cmd2.py::test_multiple_aliases", "tests/test_cmd2.py::test_ppaged", "tests/test_cmd2.py::test_parseline_empty", "tests/test_cmd2.py::test_parseline", "tests/test_cmd2.py::test_readline_remove_history_item", "tests/test_cmd2.py::test_onecmd_raw_str_continue", "tests/test_cmd2.py::test_onecmd_raw_str_quit", "tests/test_cmd2.py::test_existing_history_file", "tests/test_cmd2.py::test_new_history_file", "tests/test_cmd2.py::test_bad_history_file_path", "tests/test_cmd2.py::test_get_all_commands", "tests/test_cmd2.py::test_get_help_topics", "tests/test_cmd2.py::test_exit_code_default", "tests/test_cmd2.py::test_exit_code_nonzero" ]
[]
MIT License
3,064
[ "cmd2/cmd2.py", "examples/plumbum_colors.py", "CHANGELOG.md", "examples/python_scripting.py", "examples/transcripts/transcript_regex.txt", "cmd2/constants.py", "examples/transcripts/exampleSession.txt", "examples/pirate.py", "examples/colors.py", "docs/settingchanges.rst", "docs/unfreefeatures.rst" ]
[ "cmd2/cmd2.py", "examples/plumbum_colors.py", "CHANGELOG.md", "examples/python_scripting.py", "examples/transcripts/transcript_regex.txt", "cmd2/constants.py", "examples/transcripts/exampleSession.txt", "examples/pirate.py", "examples/colors.py", "docs/settingchanges.rst", "docs/unfreefeatures.rst" ]
BrandwatchLtd__api_sdk-62
0570363bdee1621b85f8290599e0c86c75bcf802
2018-09-13 12:45:58
0570363bdee1621b85f8290599e0c86c75bcf802
diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index b33d109..0000000 --- a/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -language: python - -matrix: - include: - - python: 3.3 - - python: 3.4 - - python: 3.5 - - python: 3.6 - - python: 3.7 - dist: xenial - sudo: true - - python: 3.7-dev - dist: xenial - sudo: true - - python: 3.8-dev - dist: xenial - sudo: true - -install: - - python setup.py install - -script: - - python setup.py test - diff --git a/README.md b/README.md index 7084960..15c6827 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,3 @@ -[![Build Status](https://travis-ci.com/BrandwatchLtd/api_sdk.svg?branch=master)](https://travis-ci.com/BrandwatchLtd/api_sdk) - # Brandwatch API SDK ## Introduction diff --git a/bwproject.py b/bwproject.py index c879cae..3a41d39 100644 --- a/bwproject.py +++ b/bwproject.py @@ -59,7 +59,7 @@ class BWUser: if "username" in user: if username is None: return user["username"], token - elif user["username"] == username: + elif user["username"].lower() == username.lower(): return username, token else: raise KeyError("Username " + username + " does not match provided token", user) @@ -84,8 +84,8 @@ class BWUser: def _read_auth(self, username, token_path): user_tokens = self._read_auth_file(token_path) - if username in user_tokens: - return self._test_auth(username, user_tokens[username]) + if username.lower() in user_tokens: + return self._test_auth(username, user_tokens[username.lower()]) else: raise KeyError("Token not found in file: " + token_path) diff --git a/setup.py b/setup.py index 9c1d705..56c9ab2 100644 --- a/setup.py +++ b/setup.py @@ -23,15 +23,16 @@ setup( 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7' + 'Programming Language :: Python :: 3.5' ], py_modules=['bwproject', 'bwresources', 'bwdata', 'filters'], - install_requires=['requests'] + install_requires=['requests'], + + tests_require=['responses'] )
username with capital letters I came across an issue when I use the token_path instead of the password to request my project: BWProject(username=YOUR_ACCOUNT, project=YOUR_PROJECT) When at the beginning I used my username and password, the tokens.txt was generated with the username in lowercase (_read_auth_file). On the other hand, my username contains capital letters, meaning my user´s json will be returned with these capital letters in the username field, so the _test_auth() method will fail in bwproject.py. I can think of ways to get around it by making it case sensitive, but I would appreciate some suggestions. Many thanks in advance!
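For reference, a minimal standalone sketch of the case-insensitive lookup idea that the patch applies, rather than the SDK's actual token-file code; the example username and token are placeholders:

```python
def lookup_token(username, user_tokens):
    """Return the stored token for username, ignoring case.

    user_tokens maps lowercased usernames to tokens, mirroring how the
    tokens file is written, so the requested username is lowercased too.
    """
    key = username.lower()
    if key in user_tokens:
        return user_tokens[key]
    raise KeyError("Token not found for " + username)


tokens = {"[email protected]": "00000000-0000-0000-0000-000000000000"}
# Works regardless of how the caller capitalizes the address.
print(lookup_token("[email protected]", tokens))
```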
BrandwatchLtd/api_sdk
diff --git a/test/__init__.py b/test/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/test/test_bwproject.py b/test/test_bwproject.py new file mode 100644 index 0000000..4820262 --- /dev/null +++ b/test/test_bwproject.py @@ -0,0 +1,69 @@ +import unittest +import responses +import os +import tempfile + +from bwproject import BWProject + + +class TestBWProjectUsernameCaseSensitivity(unittest.TestCase): + + USERNAME = "[email protected]" + ACCESS_TOKEN = "00000000-0000-0000-0000-000000000000" + PROJECT_NAME = "Example project" + PROJECTS = [ + { + "id": 0, + "name": PROJECT_NAME, + "description": "", + "billableClientId": 0, + "billableClientName": "My company", + "timezone": "Africa/Abidjan", + "billableClientIsPitch": False + } + ] + + def setUp(self): + self.token_path = tempfile.NamedTemporaryFile(suffix='-tokens.txt').name + + responses.add(responses.GET, 'https://api.brandwatch.com/projects', + json={ + "resultsTotal": len(self.PROJECTS), + "resultsPage": -1, + "resultsPageSize": -1, + "results": self.PROJECTS + }, status=200) + + responses.add(responses.POST, 'https://api.brandwatch.com/oauth/token', + json={'access_token': self.ACCESS_TOKEN}, status=200) + + def tearDown(self): + os.unlink(self.token_path) + responses.reset() + + + @responses.activate + def test_lowercase_username(self): + self._test_username("[email protected]") + + @responses.activate + def test_uppercase_username(self): + self._test_username("[email protected]") + + @responses.activate + def test_mixedcase_username(self): + self._test_username("[email protected]") + + def _test_username(self, username): + + responses.add(responses.GET, 'https://api.brandwatch.com/me', json={"username": username}, status=200) + + BWProject(username=username, project=self.PROJECT_NAME, password="", token_path=self.token_path) + try: + BWProject(username=username, project=self.PROJECT_NAME, token_path=self.token_path) + except KeyError as e: + self.fail(e) + + +if __name__ == '__main__': + unittest.main()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_removed_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 3 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "responses" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
-e git+https://github.com/BrandwatchLtd/api_sdk.git@0570363bdee1621b85f8290599e0c86c75bcf802#egg=bwapi certifi==2025.1.31 charset-normalizer==3.4.1 exceptiongroup==1.2.2 idna==3.10 iniconfig==2.1.0 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 PyYAML==6.0.2 requests==2.32.3 responses==0.25.7 tomli==2.2.1 urllib3==2.3.0
name: api_sdk channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - certifi==2025.1.31 - charset-normalizer==3.4.1 - exceptiongroup==1.2.2 - idna==3.10 - iniconfig==2.1.0 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - pyyaml==6.0.2 - requests==2.32.3 - responses==0.25.7 - tomli==2.2.1 - urllib3==2.3.0 prefix: /opt/conda/envs/api_sdk
[ "test/test_bwproject.py::TestBWProjectUsernameCaseSensitivity::test_mixedcase_username", "test/test_bwproject.py::TestBWProjectUsernameCaseSensitivity::test_uppercase_username" ]
[]
[ "test/test_bwproject.py::TestBWProjectUsernameCaseSensitivity::test_lowercase_username" ]
[]
MIT License
3,065
[ "setup.py", ".travis.yml", "README.md", "bwproject.py" ]
[ "setup.py", ".travis.yml", "README.md", "bwproject.py" ]
marshmallow-code__marshmallow-950
b0ebaf6f13f9833ccc6b19900208b211597480e9
2018-09-13 16:03:59
54e1605604aaf647ee4b03340284b348341eff62
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index aff06634..d30adf1c 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,6 +1,14 @@ Changelog --------- +2.15.5 ++++++++++++++++++++ + +Bug fixes: + +- Handle empty SQAlchemy lazy lists gracefully when dumping (:issue:`948`). + Thanks :user:`vke-code` for the catch and :user:`YuriHeupa` for the patch. + 2.15.4 (2018-08-04) +++++++++++++++++++ diff --git a/marshmallow/schema.py b/marshmallow/schema.py index 90e84bff..1d2a71db 100644 --- a/marshmallow/schema.py +++ b/marshmallow/schema.py @@ -819,7 +819,7 @@ class BaseSchema(base.SchemaABC): else: obj_prototype = next(iter(obj)) except (StopIteration, IndexError): # Nothing to serialize - return self.declared_fields + return dict((k, v) for k, v in self.declared_fields.items() if k in field_names) obj = obj_prototype ret = self.dict_class() for key in field_names:
No attribute '_add_to_schema' when dumping Schema and Nested Field is None Hi Marshmallow Code! I have recently discovered and began implementing your code for my own projects and I must say you have changed how I work with databases forever! I have been running up against a wall with this issue however, and decided to reach out to the pros! It could be that I am missing something simple. For context, I am storing hosts in a database. 'Hosts' have a one-to-many relationship with 'Ports'. I have developed a HostSchema and PortSchema using ModelSchema from marshmallow-sqlalchemy. This works BEAUTIFULLY, except when a host does not have any ports associated with it. Attached below is example code which demonstrates the expected behavior as well as the error. Please excuse me if this isn't an actual bug and just user error, although some insight would be very much appreciated. ### Truncated Pip Freeze - Flask==1.0.2 - flask-marshmallow==0.9.0 - Flask-SQLAlchemy==2.3.2 - marshmallow==2.15.4 - marshmallow-sqlalchemy==0.14.1 - SQLAlchemy==1.2.11 ### Minimal Example ```py from flask import Flask from flask_sqlalchemy import SQLAlchemy from flask_marshmallow import Marshmallow from marshmallow import fields, post_dump, pre_dump import json app = Flask(__name__) db = SQLAlchemy(app) ma = Marshmallow(app) class Host(db.Model): id = db.Column(db.Integer, primary_key=True) ip = db.Column(db.String(16), index=True, unique=True, nullable=False) ports = db.relationship('Port', backref='host', lazy='dynamic') class Port(db.Model): id = db.Column(db.Integer, primary_key=True) host_id = db.Column(db.Integer, db.ForeignKey('host.id'), index=True) port = db.Column(db.Integer) transport = db.Column(db.String(8)) service = db.Column(db.String(16)) class PortSchema(ma.ModelSchema): class Meta: model = Port exclude = ('id' , 'host') sqla_session = db.Session class HostSchema(ma.ModelSchema): ports = fields.Nested(PortSchema, many=True) class Meta: model = Host sqla_session = db.Session ``` ### Behavior when 'ports' is defined in 'host' object. This works as expected. ```py host = Host(ip='127.0.0.1') port = Port(port=80, transport='tcp', service='http') host.ports.append(port) result = HostSchema().dump(host) print json.dumps(result.data, indent=2) # { # "ip": "127.0.0.1", # "ports": [ # { # "transport": "tcp", # "service": "http", # "port": 80 # } # ], # "id": null # } ``` ### Unexpected behavior when 'ports' is undefined in 'host' object. From what I've gathered from the documentation the default behavior is to skip any fields which are missing during serialization. I have also tried using `missing` and `default`, but if I understand correctly these only apply for deserialization. 
```py host = Host(ip='127.0.0.1') result = HostSchema().dump(host) ``` ### Expected Behavior ```py { "ip": "127.0.0.1", "ports": {}, "id": null } ``` ### Actual result: ```py Traceback (most recent call last): File "scratch/missing-field-nested-bug.py", line 72, in <module> result = HostSchema().dump(host) File "/Users/kai/.virtualenvs/SPEZZI/lib/python2.7/site-packages/marshmallow/schema.py", line 509, in dump **kwargs File "/Users/kai/.virtualenvs/SPEZZI/lib/python2.7/site-packages/marshmallow/marshalling.py", line 138, in serialize index=(index if index_errors else None) File "/Users/kai/.virtualenvs/SPEZZI/lib/python2.7/site-packages/marshmallow/marshalling.py", line 62, in call_and_store value = getter_func(data) File "/Users/kai/.virtualenvs/SPEZZI/lib/python2.7/site-packages/marshmallow/marshalling.py", line 132, in <lambda> getter = lambda d: field_obj.serialize(attr_name, d, accessor=accessor) File "/Users/kai/.virtualenvs/SPEZZI/lib/python2.7/site-packages/marshmallow/fields.py", line 252, in serialize return self._serialize(value, attr, obj) File "/Users/kai/.virtualenvs/SPEZZI/lib/python2.7/site-packages/marshmallow/fields.py", line 447, in _serialize schema._update_fields(obj=nested_obj, many=self.many) File "/Users/kai/.virtualenvs/SPEZZI/lib/python2.7/site-packages/marshmallow/schema.py", line 767, in _update_fields self.__set_field_attrs(ret) File "/Users/kai/.virtualenvs/SPEZZI/lib/python2.7/site-packages/marshmallow/schema.py", line 788, in __set_field_attrs field_obj._add_to_schema(field_name, self) AttributeError: 'NoneType' object has no attribute '_add_to_schema' ``` Thank you all for any help or insights you can offer! Please let me know if further clarification is required.
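A self-contained reproduction adapted from the regression test added with this fix, assuming marshmallow 2.15.x (where `dumps` returns a `MarshalResult`); `CustomSet` stands in for an empty `lazy='dynamic'` SQLAlchemy relationship, which raises `IndexError` when indexed:

```python
from marshmallow import Schema, fields

class CustomSet(object):
    # A valid object whose indexing behaves like an empty SQLAlchemy Query:
    # accessing the first element raises IndexError.
    def __getitem__(self, item):
        return [][item]

class ChildSchema(Schema):
    foo = fields.Field(required=True)
    bar = fields.Field()

    class Meta:
        only = ('bar',)

class ParentSchema(Schema):
    child = fields.Nested(ChildSchema, many=True, exclude=('foo',))

sch = ParentSchema(strict=True)
# Before the fix this raised AttributeError: 'NoneType' object has no
# attribute '_add_to_schema'; with the fix the empty collection dumps cleanly.
print(sch.dumps(dict(child=CustomSet())))
```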
marshmallow-code/marshmallow
diff --git a/tests/test_schema.py b/tests/test_schema.py index 5a6fd959..7f0e8cb8 100755 --- a/tests/test_schema.py +++ b/tests/test_schema.py @@ -694,6 +694,36 @@ def test_error_raised_if_additional_option_is_not_list(): additional = 'email' +def test_nested_custom_set_in_exclude_reusing_schema(): + + class CustomSet(object): + # This custom set is to allow the obj check in BaseSchema.__filter_fields + # to pass, since it'll be a valid instance, and this class overrides + # getitem method to allow the hasattr check to pass too, which will try + # to access the first obj index and will simulate a IndexError throwing. + # e.g. SqlAlchemy.Query is a valid use case for this "obj". + + def __getitem__(self, item): + return [][item] + + class ChildSchema(Schema): + foo = fields.Field(required=True) + bar = fields.Field() + + class Meta: + only = ('bar', ) + + class ParentSchema(Schema): + child = fields.Nested(ChildSchema, many=True, exclude=('foo',)) + + sch = ParentSchema(strict=True) + obj = dict(child=CustomSet()) + sch.dumps(obj) + data = dict(child=[{'bar': 1}]) + result = sch.load(data, partial=True) + assert not result.errors + + def test_nested_only(): class ChildSchema(Schema): foo = fields.Field()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 2 }
2.15
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "dev-requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
cachetools==5.5.2 chardet==5.2.0 colorama==0.4.6 coverage==7.8.0 distlib==0.3.9 exceptiongroup==1.2.2 execnet==2.1.1 filelock==3.18.0 flake8==2.4.1 iniconfig==2.1.0 invoke==0.21.0 -e git+https://github.com/marshmallow-code/marshmallow.git@b0ebaf6f13f9833ccc6b19900208b211597480e9#egg=marshmallow mccabe==0.3.1 packaging==24.2 pep8==1.7.1 platformdirs==4.3.7 pluggy==1.5.0 py==1.11.0 pyflakes==0.8.1 pyproject-api==1.9.0 pytest==8.3.5 pytest-asyncio==0.26.0 pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-xdist==3.6.1 python-dateutil==2.9.0.post0 pytz==2025.2 simplejson==3.20.1 six==1.17.0 tomli==2.2.1 tox==4.25.0 typing_extensions==4.13.0 virtualenv==20.29.3
name: marshmallow channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - cachetools==5.5.2 - chardet==5.2.0 - colorama==0.4.6 - coverage==7.8.0 - distlib==0.3.9 - exceptiongroup==1.2.2 - execnet==2.1.1 - filelock==3.18.0 - flake8==2.4.1 - iniconfig==2.1.0 - invoke==0.21.0 - mccabe==0.3.1 - packaging==24.2 - pep8==1.7.1 - platformdirs==4.3.7 - pluggy==1.5.0 - py==1.11.0 - pyflakes==0.8.1 - pyproject-api==1.9.0 - pytest==8.3.5 - pytest-asyncio==0.26.0 - pytest-cov==6.0.0 - pytest-mock==3.14.0 - pytest-xdist==3.6.1 - python-dateutil==2.9.0.post0 - pytz==2025.2 - simplejson==3.20.1 - six==1.17.0 - tomli==2.2.1 - tox==4.25.0 - typing-extensions==4.13.0 - virtualenv==20.29.3 prefix: /opt/conda/envs/marshmallow
[ "tests/test_schema.py::test_nested_custom_set_in_exclude_reusing_schema" ]
[]
[ "tests/test_schema.py::test_serializing_basic_object[UserSchema]", "tests/test_schema.py::test_serializing_basic_object[UserMetaSchema]", "tests/test_schema.py::test_serializer_dump", "tests/test_schema.py::test_dump_returns_dict_of_errors", "tests/test_schema.py::test_dump_with_strict_mode_raises_error[UserSchema]", "tests/test_schema.py::test_dump_with_strict_mode_raises_error[UserMetaSchema]", "tests/test_schema.py::test_dump_resets_errors", "tests/test_schema.py::test_load_resets_errors", "tests/test_schema.py::test_dump_resets_error_fields", "tests/test_schema.py::test_load_resets_error_fields", "tests/test_schema.py::test_load_resets_error_kwargs", "tests/test_schema.py::test_errored_fields_do_not_appear_in_output", "tests/test_schema.py::test_load_many_stores_error_indices", "tests/test_schema.py::test_dump_many", "tests/test_schema.py::test_multiple_errors_can_be_stored_for_a_given_index", "tests/test_schema.py::test_dump_many_stores_error_indices", "tests/test_schema.py::test_dump_many_doesnt_stores_error_indices_when_index_errors_is_false", "tests/test_schema.py::test_dump_returns_a_marshalresult", "tests/test_schema.py::test_dumps_returns_a_marshalresult", "tests/test_schema.py::test_dumping_single_object_with_collection_schema", "tests/test_schema.py::test_loading_single_object_with_collection_schema", "tests/test_schema.py::test_dumps_many", "tests/test_schema.py::test_load_returns_an_unmarshalresult", "tests/test_schema.py::test_load_many", "tests/test_schema.py::test_loads_returns_an_unmarshalresult", "tests/test_schema.py::test_loads_many", "tests/test_schema.py::test_loads_deserializes_from_json", "tests/test_schema.py::test_serializing_none", "tests/test_schema.py::test_default_many_symmetry", "tests/test_schema.py::test_on_bind_field_hook", "tests/test_schema.py::test_nested_on_bind_field_hook", "tests/test_schema.py::TestValidate::test_validate_returns_errors_dict", "tests/test_schema.py::TestValidate::test_validate_many", "tests/test_schema.py::TestValidate::test_validate_many_doesnt_store_index_if_index_errors_option_is_false", "tests/test_schema.py::TestValidate::test_validate_strict", "tests/test_schema.py::TestValidate::test_validate_required", "tests/test_schema.py::test_fields_are_not_copies[UserSchema]", "tests/test_schema.py::test_fields_are_not_copies[UserMetaSchema]", "tests/test_schema.py::test_dumps_returns_json", "tests/test_schema.py::test_naive_datetime_field", "tests/test_schema.py::test_datetime_formatted_field", "tests/test_schema.py::test_datetime_iso_field", "tests/test_schema.py::test_tz_datetime_field", "tests/test_schema.py::test_local_datetime_field", "tests/test_schema.py::test_class_variable", "tests/test_schema.py::test_serialize_many[UserSchema]", "tests/test_schema.py::test_serialize_many[UserMetaSchema]", "tests/test_schema.py::test_inheriting_schema", "tests/test_schema.py::test_custom_field", "tests/test_schema.py::test_url_field", "tests/test_schema.py::test_relative_url_field", "tests/test_schema.py::test_stores_invalid_url_error[UserSchema]", "tests/test_schema.py::test_stores_invalid_url_error[UserMetaSchema]", "tests/test_schema.py::test_email_field[UserSchema]", "tests/test_schema.py::test_email_field[UserMetaSchema]", "tests/test_schema.py::test_stored_invalid_email", "tests/test_schema.py::test_integer_field", "tests/test_schema.py::test_as_string", "tests/test_schema.py::test_extra", "tests/test_schema.py::test_extra_many", "tests/test_schema.py::test_method_field[UserSchema]", 
"tests/test_schema.py::test_method_field[UserMetaSchema]", "tests/test_schema.py::test_function_field", "tests/test_schema.py::test_prefix[UserSchema]", "tests/test_schema.py::test_prefix[UserMetaSchema]", "tests/test_schema.py::test_fields_must_be_declared_as_instances", "tests/test_schema.py::test_serializing_generator[UserSchema]", "tests/test_schema.py::test_serializing_generator[UserMetaSchema]", "tests/test_schema.py::test_serializing_empty_list_returns_empty_list", "tests/test_schema.py::test_serializing_dict", "tests/test_schema.py::test_serializing_dict_with_meta_fields", "tests/test_schema.py::test_exclude_in_init[UserSchema]", "tests/test_schema.py::test_exclude_in_init[UserMetaSchema]", "tests/test_schema.py::test_only_in_init[UserSchema]", "tests/test_schema.py::test_only_in_init[UserMetaSchema]", "tests/test_schema.py::test_invalid_only_param", "tests/test_schema.py::test_can_serialize_uuid", "tests/test_schema.py::test_can_serialize_time", "tests/test_schema.py::test_invalid_time", "tests/test_schema.py::test_invalid_date", "tests/test_schema.py::test_invalid_email", "tests/test_schema.py::test_invalid_url", "tests/test_schema.py::test_invalid_dict_but_okay", "tests/test_schema.py::test_custom_json", "tests/test_schema.py::test_custom_error_message", "tests/test_schema.py::test_load_errors_with_many", "tests/test_schema.py::test_error_raised_if_fields_option_is_not_list", "tests/test_schema.py::test_error_raised_if_additional_option_is_not_list", "tests/test_schema.py::test_nested_only", "tests/test_schema.py::test_nested_only_inheritance", "tests/test_schema.py::test_nested_only_empty_inheritance", "tests/test_schema.py::test_nested_exclude", "tests/test_schema.py::test_nested_exclude_inheritance", "tests/test_schema.py::test_nested_only_and_exclude", "tests/test_schema.py::test_nested_only_then_exclude_inheritance", "tests/test_schema.py::test_nested_exclude_then_only_inheritance", "tests/test_schema.py::test_nested_exclude_and_only_inheritance", "tests/test_schema.py::test_meta_nested_exclude", "tests/test_schema.py::test_nested_custom_set_not_implementing_getitem", "tests/test_schema.py::test_deeply_nested_only_and_exclude", "tests/test_schema.py::TestDeeplyNestedLoadOnly::test_load_only", "tests/test_schema.py::TestDeeplyNestedLoadOnly::test_dump_only", "tests/test_schema.py::TestDeeplyNestedListLoadOnly::test_load_only", "tests/test_schema.py::TestDeeplyNestedListLoadOnly::test_dump_only", "tests/test_schema.py::test_nested_constructor_only_and_exclude", "tests/test_schema.py::test_only_and_exclude", "tests/test_schema.py::test_exclude_invalid_attribute", "tests/test_schema.py::test_only_with_invalid_attribute", "tests/test_schema.py::test_only_bounded_by_fields", "tests/test_schema.py::test_only_empty", "tests/test_schema.py::test_nested_with_sets", "tests/test_schema.py::test_meta_serializer_fields", "tests/test_schema.py::test_meta_fields_mapping", "tests/test_schema.py::test_meta_field_not_on_obj_raises_attribute_error", "tests/test_schema.py::test_exclude_fields", "tests/test_schema.py::test_fields_option_must_be_list_or_tuple", "tests/test_schema.py::test_exclude_option_must_be_list_or_tuple", "tests/test_schema.py::test_dateformat_option", "tests/test_schema.py::test_default_dateformat", "tests/test_schema.py::test_inherit_meta", "tests/test_schema.py::test_inherit_meta_override", "tests/test_schema.py::test_additional", "tests/test_schema.py::test_cant_set_both_additional_and_fields", "tests/test_schema.py::test_serializing_none_meta", 
"tests/test_schema.py::TestErrorHandler::test_error_handler_decorator_is_deprecated", "tests/test_schema.py::TestErrorHandler::test_dump_with_custom_error_handler", "tests/test_schema.py::TestErrorHandler::test_dump_with_custom_error_handler_and_strict", "tests/test_schema.py::TestErrorHandler::test_load_with_custom_error_handler", "tests/test_schema.py::TestErrorHandler::test_load_with_custom_error_handler_and_partially_valid_data", "tests/test_schema.py::TestErrorHandler::test_custom_error_handler_with_validates_decorator", "tests/test_schema.py::TestErrorHandler::test_custom_error_handler_with_validates_schema_decorator", "tests/test_schema.py::TestErrorHandler::test_validate_with_custom_error_handler", "tests/test_schema.py::TestFieldValidation::test_errors_are_cleared_after_loading_collection", "tests/test_schema.py::TestFieldValidation::test_raises_error_with_list", "tests/test_schema.py::TestFieldValidation::test_raises_error_with_dict", "tests/test_schema.py::TestFieldValidation::test_ignored_if_not_in_only", "tests/test_schema.py::test_schema_repr", "tests/test_schema.py::TestNestedSchema::test_flat_nested", "tests/test_schema.py::TestNestedSchema::test_flat_nested_with_dump_to", "tests/test_schema.py::TestNestedSchema::test_nested_many_with_missing_attribute", "tests/test_schema.py::TestNestedSchema::test_nested_with_attribute_none", "tests/test_schema.py::TestNestedSchema::test_flat_nested2", "tests/test_schema.py::TestNestedSchema::test_nested_field_does_not_validate_required", "tests/test_schema.py::TestNestedSchema::test_nested_required_errors_with_load_from", "tests/test_schema.py::TestNestedSchema::test_nested_none", "tests/test_schema.py::TestNestedSchema::test_nested", "tests/test_schema.py::TestNestedSchema::test_nested_many_fields", "tests/test_schema.py::TestNestedSchema::test_nested_meta_many", "tests/test_schema.py::TestNestedSchema::test_nested_only", "tests/test_schema.py::TestNestedSchema::test_exclude", "tests/test_schema.py::TestNestedSchema::test_list_field", "tests/test_schema.py::TestNestedSchema::test_nested_load_many", "tests/test_schema.py::TestNestedSchema::test_nested_errors", "tests/test_schema.py::TestNestedSchema::test_nested_strict", "tests/test_schema.py::TestNestedSchema::test_nested_dump_errors", "tests/test_schema.py::TestNestedSchema::test_nested_dump_strict", "tests/test_schema.py::TestNestedSchema::test_nested_method_field", "tests/test_schema.py::TestNestedSchema::test_nested_function_field", "tests/test_schema.py::TestNestedSchema::test_nested_prefixed_field", "tests/test_schema.py::TestNestedSchema::test_nested_prefixed_many_field", "tests/test_schema.py::TestNestedSchema::test_invalid_float_field", "tests/test_schema.py::TestNestedSchema::test_serializer_meta_with_nested_fields", "tests/test_schema.py::TestNestedSchema::test_serializer_with_nested_meta_fields", "tests/test_schema.py::TestNestedSchema::test_nested_fields_must_be_passed_a_serializer", "tests/test_schema.py::TestNestedSchema::test_invalid_type_passed_to_nested_field", "tests/test_schema.py::TestNestedSchema::test_all_errors_on_many_nested_field_with_validates_decorator", "tests/test_schema.py::TestNestedSchema::test_missing_required_nested_field", "tests/test_schema.py::TestSelfReference::test_nesting_schema_within_itself", "tests/test_schema.py::TestSelfReference::test_nesting_schema_by_passing_class_name", "tests/test_schema.py::TestSelfReference::test_nesting_within_itself_meta", "tests/test_schema.py::TestSelfReference::test_recursive_missing_required_field", 
"tests/test_schema.py::TestSelfReference::test_recursive_missing_required_field_one_level_in", "tests/test_schema.py::TestSelfReference::test_nested_self_with_only_param", "tests/test_schema.py::TestSelfReference::test_multiple_nested_self_fields", "tests/test_schema.py::TestSelfReference::test_nested_many", "tests/test_schema.py::test_serialization_with_required_field", "tests/test_schema.py::test_deserialization_with_required_field", "tests/test_schema.py::test_deserialization_with_required_field_and_custom_validator", "tests/test_schema.py::TestContext::test_context_method", "tests/test_schema.py::TestContext::test_context_method_function", "tests/test_schema.py::TestContext::test_function_field_raises_error_when_context_not_available", "tests/test_schema.py::TestContext::test_fields_context", "tests/test_schema.py::TestContext::test_nested_fields_inherit_context", "tests/test_schema.py::TestContext::test_nested_list_fields_inherit_context", "tests/test_schema.py::test_serializer_can_specify_nested_object_as_attribute", "tests/test_schema.py::TestFieldInheritance::test_inherit_fields_from_schema_subclass", "tests/test_schema.py::TestFieldInheritance::test_inherit_fields_from_non_schema_subclass", "tests/test_schema.py::TestFieldInheritance::test_inheritance_follows_mro", "tests/test_schema.py::TestAccessor::test_accessor_decorator_is_deprecated", "tests/test_schema.py::TestAccessor::test_accessor_is_used", "tests/test_schema.py::TestAccessor::test_accessor_with_many", "tests/test_schema.py::TestRequiredFields::test_required_string_field_missing", "tests/test_schema.py::TestRequiredFields::test_required_string_field_failure", "tests/test_schema.py::TestRequiredFields::test_allow_none_param", "tests/test_schema.py::TestRequiredFields::test_allow_none_custom_message", "tests/test_schema.py::TestDefaults::test_missing_inputs_are_excluded_from_dump_output", "tests/test_schema.py::TestDefaults::test_none_is_serialized_to_none", "tests/test_schema.py::TestDefaults::test_default_and_value_missing", "tests/test_schema.py::TestDefaults::test_loading_none", "tests/test_schema.py::TestDefaults::test_missing_inputs_are_excluded_from_load_output", "tests/test_schema.py::TestLoadOnly::test_load_only", "tests/test_schema.py::TestLoadOnly::test_dump_only", "tests/test_schema.py::TestStrictDefault::test_default", "tests/test_schema.py::TestStrictDefault::test_meta_true", "tests/test_schema.py::TestStrictDefault::test_meta_false", "tests/test_schema.py::TestStrictDefault::test_default_init_true", "tests/test_schema.py::TestStrictDefault::test_default_init_false", "tests/test_schema.py::TestStrictDefault::test_meta_true_init_true", "tests/test_schema.py::TestStrictDefault::test_meta_true_init_false", "tests/test_schema.py::TestStrictDefault::test_meta_false_init_true", "tests/test_schema.py::TestStrictDefault::test_meta_false_init_false" ]
[]
MIT License
3,066
[ "CHANGELOG.rst", "marshmallow/schema.py" ]
[ "CHANGELOG.rst", "marshmallow/schema.py" ]
EmilStenstrom__conllu-19
1604bada91968725102b626da9537f361fc3178d
2018-09-13 19:40:05
e9c8f2c6ab390c706680c6077d344cecffb71397
EmilStenstrom: This looks great! The coverage and flake8 tests seem to fail on Travis. Do you get the same errors locally? Summary: * There's a test missing for "models.py line 30->exit" * There's a coverage error for "conllu/models.py:24:16: E126 continuation line over-indented for hanging indent" Mind fixing those two? Thanks again! bryant1410: That's weird, tests were passing locally on Python 2.7 and 3.6 and coverage was 100%. I will check it out later. On Thu, Sep 13, 2018, 3:46 PM Emil Stenström <[email protected]> wrote: > This looks great! The coverage and flake8 tests seem to fail on Travis. Do > you get the same errors locally? > > Summary: > > - There's a test missing for "models.py line 30->exit" > - There's a coverage error for "conllu/models.py:24:16: E126 > continuation line over-indented for hanging indent" > > Mind fixing those two? Thanks again! > > — > You are receiving this because you authored the thread. > Reply to this email directly, view it on GitHub > <https://github.com/EmilStenstrom/conllu/pull/19#issuecomment-421129685>, > or mute the thread > <https://github.com/notifications/unsubscribe-auth/ADuX3cIVLa4OnW6z7LYsyAqk75RvA4sPks5uarXxgaJpZM4WoFFX> > . > EmilStenstrom: This looks fantastic. I'll merge and release a new version to PyPI!
diff --git a/conllu/models.py b/conllu/models.py index d57a49a..cd98ae4 100644 --- a/conllu/models.py +++ b/conllu/models.py @@ -5,25 +5,53 @@ from conllu.parser import ParseException, head_to_token, serialize DEFAULT_EXCLUDE_FIELDS = ('id', 'deprel', 'xpostag', 'feats', 'head', 'deps', 'misc') -class TokenList(object): - tokens = None + +class TokenList(list): metadata = None def __init__(self, tokens, metadata=None): + super(TokenList, self).__init__(tokens) if not isinstance(tokens, list): raise ParseException("Can't create TokenList, tokens is not a list.") - self.tokens = tokens self.metadata = metadata - def __getitem__(self, key): - return self.tokens[key] - def __repr__(self): - return 'TokenList<' + ', '.join([token['form'] for token in self.tokens]) + '>' + return 'TokenList<' + ', '.join(token['form'] for token in self) + '>' def __eq__(self, other): - return self.tokens == other.tokens and self.metadata == other.metadata + return super(TokenList, self).__eq__(other) \ + and (not hasattr(other, 'metadata') or self.metadata == other.metadata) + + def __ne__(self, other): + return not self == other + + def clear(self): + self[:] = [] # Supported in Python 2 and 3, unlike clear() + self.metadata = None + + def copy(self): + tokens_copy = self[:] # Supported in Python 2 and 3, unlike copy() + return TokenList(tokens_copy, self.metadata) + + def extend(self, iterable): + super(TokenList, self).extend(iterable) + if hasattr(iterable, 'metadata'): + if hasattr(self.metadata, '__add__') and hasattr(iterable.metadata, '__add__'): + self.metadata += iterable.metadata + elif type(self.metadata) is dict and type(iterable.metadata) is dict: + # noinspection PyUnresolvedReferences + self.metadata.update(iterable.metadata) + else: + self.metadata = [self.metadata, iterable.metadata] + + @property + def tokens(self): + return self + + @tokens.setter + def tokens(self, value): + self[:] = value # Supported in Python 2 and 3, unlike clear() def serialize(self): return serialize(self) @@ -39,6 +67,7 @@ class TokenList(object): root.set_metadata(self.metadata) return root + class TokenTree(object): token = None children = None
TokenList could inherit "list" Hey. I have found this library quite useful. There is a suggestion that I ran into: `TokenList` could maybe inherit `list`, and so be treated like that. I tried manipulating the sentences (TokenLists) as if they were lists, such as using `len` but then I saw they weren't. I thought about adding `__len__`, but I think it's more appropriate to make them lists, so they natively support other things such as adding them, comparing them, and so on. What do you think guys?
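A short usage sketch of the list-like behaviour the merged patch provides, assuming `TokenList` is imported from `conllu.models`, where the patch defines it:

```python
from conllu.models import TokenList

a = TokenList([{"id": 1}, {"id": 2}], {"meta1": "data1"})
b = TokenList([{"id": 3}], {"meta2": "data2"})

print(len(a))       # 2 -- TokenList now supports the built-in list protocol
a.extend(b)         # dict metadata from both lists is merged
print(a.metadata)   # {'meta1': 'data1', 'meta2': 'data2'}

c = a.copy()        # copy keeps the tokens and the metadata
print(c == a)       # True
a.clear()           # empties the tokens and resets metadata to None
```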
EmilStenstrom/conllu
diff --git a/tests/test_models.py b/tests/test_models.py index 02d377a..0e5a182 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -26,6 +26,55 @@ class TestTokenList(unittest.TestCase): tokenlist2.metadata = metadata self.assertEqual(tokenlist1, tokenlist2) + def test_len(self): + tokenlist = TokenList([{"id": 1}, {"id": 2}, {"id": 3}]) + self.assertEqual(3, len(tokenlist)) + + def test_clear(self): + tokenlist = TokenList([{"id": 1}, {"id": 2}, {"id": 3}], {"meta": "data"}) + tokenlist.clear() + self.assertEqual(len(tokenlist.tokens), 0) + self.assertEqual(tokenlist.metadata, None) + + def test_copy(self): + tokenlist1 = TokenList([{"id": 1}, {"id": 2}, {"id": 3}], {"meta": "data"}) + tokenlist2 = tokenlist1.copy() + self.assertIsNot(tokenlist1, tokenlist2) + self.assertEqual(tokenlist1, tokenlist2) + + def test_extend(self): + tokenlist1 = TokenList([{"id": 1}, {"id": 2}, {"id": 3}]) + tokenlist2 = [{"id": 4}, {"id": 5}, {"id": 6}] + tokenlist1.extend(tokenlist2) + tokenlist3 = TokenList([{"id": 1}, {"id": 2}, {"id": 3}, {"id": 4}, {"id": 5}, {"id": 6}]) + self.assertEqual(tokenlist1, tokenlist3) + + tokenlist4 = TokenList([{"id": 1}, {"id": 2}, {"id": 3}], {"meta1": "data1"}) + tokenlist5 = TokenList([{"id": 4}, {"id": 5}, {"id": 6}], {"meta2": "data2"}) + tokenlist4.extend(tokenlist5) + tokenlist6 = TokenList([{"id": 1}, {"id": 2}, {"id": 3}, {"id": 4}, {"id": 5}, {"id": 6}], + {"meta1": "data1", "meta2": "data2"}) + self.assertEqual(tokenlist4, tokenlist6) + + tokenlist7 = TokenList([{"id": 1}, {"id": 2}, {"id": 3}], "abc") + tokenlist8 = TokenList([{"id": 4}, {"id": 5}, {"id": 6}], "de") + tokenlist7.extend(tokenlist8) + tokenlist9 = TokenList([{"id": 1}, {"id": 2}, {"id": 3}, {"id": 4}, {"id": 5}, {"id": 6}], "abcde") + self.assertEqual(tokenlist7, tokenlist9) + + tokenlist7 = TokenList([{"id": 1}, {"id": 2}, {"id": 3}], "abc") + tokenlist8 = TokenList([{"id": 4}, {"id": 5}, {"id": 6}], {"meta2": "data2"}) + tokenlist7.extend(tokenlist8) + tokenlist9 = TokenList([{"id": 1}, {"id": 2}, {"id": 3}, {"id": 4}, {"id": 5}, {"id": 6}], + ["abc", {"meta2": "data2"}]) + self.assertEqual(tokenlist7, tokenlist9) + + def test_tokens(self): + tokenlist = TokenList([{"id": 1}, {"id": 2}, {"id": 3}]) + self.assertEqual(tokenlist.tokens, [{"id": 1}, {"id": 2}, {"id": 3}]) + tokenlist.tokens = [{"id": 4}, {"id": 5}] + self.assertEqual(tokenlist.tokens, [{"id": 4}, {"id": 5}]) + def test_to_tree(self): tokenlist = TokenList([ OrderedDict([("id", 2), ("form", "dog"), ("head", 0)]), @@ -40,11 +89,13 @@ class TestTokenList(unittest.TestCase): ) self.assertEqual(tokenlist.to_tree(), tree) + class TestSerialize(unittest.TestCase): def test_serialize_on_tokenlist(self): tokenlist = TokenList([{"id": 1}]) self.assertEqual(tokenlist.serialize(), serialize(tokenlist)) + class TestTokenTree(unittest.TestCase): def test_eq(self): metadata = {"meta": "data"} @@ -67,6 +118,7 @@ class TestTokenTree(unittest.TestCase): tree.set_metadata(metadata) self.assertEqual(tree.metadata, metadata) + class TestSerializeTree(unittest.TestCase): def test_missing_id(self): tree = TokenTree(token={"form": "hej"}, children=[]) @@ -105,6 +157,7 @@ class TestSerializeTree(unittest.TestCase): """) ) + class TestPrintTree(unittest.TestCase): def test_print_empty_list(self): tree = TokenTree(None, [])
{ "commit_name": "merge_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 1 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "coverage", "flake8", "isort", "pytest" ], "pre_install": [], "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 -e git+https://github.com/EmilStenstrom/conllu.git@1604bada91968725102b626da9537f361fc3178d#egg=conllu coverage==6.2 flake8==5.0.4 importlib-metadata==4.2.0 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work isort==5.10.1 mccabe==0.7.0 more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pycodestyle==2.9.1 pyflakes==2.5.0 pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: conllu channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==6.2 - flake8==5.0.4 - importlib-metadata==4.2.0 - isort==5.10.1 - mccabe==0.7.0 - pycodestyle==2.9.1 - pyflakes==2.5.0 prefix: /opt/conda/envs/conllu
[ "tests/test_models.py::TestTokenList::test_clear", "tests/test_models.py::TestTokenList::test_copy", "tests/test_models.py::TestTokenList::test_extend", "tests/test_models.py::TestTokenList::test_len" ]
[]
[ "tests/test_models.py::TestTokenList::test_constructor", "tests/test_models.py::TestTokenList::test_eq", "tests/test_models.py::TestTokenList::test_to_tree", "tests/test_models.py::TestTokenList::test_tokens", "tests/test_models.py::TestSerialize::test_serialize_on_tokenlist", "tests/test_models.py::TestTokenTree::test_eq", "tests/test_models.py::TestTokenTree::test_metadata", "tests/test_models.py::TestSerializeTree::test_flatten", "tests/test_models.py::TestSerializeTree::test_missing_id", "tests/test_models.py::TestPrintTree::test_print_empty_list", "tests/test_models.py::TestPrintTree::test_print_simple", "tests/test_models.py::TestPrintTree::test_print_with_children", "tests/test_models.py::TestPrintTree::test_tree_without_deprel", "tests/test_models.py::TestPrintTree::test_tree_without_id" ]
[]
MIT License
3,067
[ "conllu/models.py" ]
[ "conllu/models.py" ]
squaresLab__Houston-81
50127bd1c4e3bbdc577c7e2aaf300980e8241b0c
2018-09-13 20:59:09
50127bd1c4e3bbdc577c7e2aaf300980e8241b0c
coveralls: [![Coverage Status](https://coveralls.io/builds/18988873/badge)](https://coveralls.io/builds/18988873) Coverage increased (+0.04%) to 44.354% when pulling **7fda6f4500aa23fad0d89a64ddbdb36510c23bee on state-equiv** into **50127bd1c4e3bbdc577c7e2aaf300980e8241b0c on master**.
diff --git a/houston/state.py b/houston/state.py index 5fdefb2..18b1fce 100644 --- a/houston/state.py +++ b/houston/state.py @@ -211,12 +211,20 @@ class State(object, metaclass=StateMeta): def time_offset(self) -> float: return self.__time_offset - def __eq__(self, other: 'State') -> bool: + def equiv(self, other: 'State') -> bool: if type(self) != type(other): msg = "illegal comparison of states: [{}] vs. [{}]" msg = msg.format(self.__class__.__name__, state.__class__.__name__) raise Exception(msg) # FIXME use HoustonException - return self.__dict__ == other.__dict__ + for v in self.__class__.variables: + if self.__dict__[v._field] != other.__dict__[v._field]: + return False + return True + + def exact(self, other: 'State') -> bool: + return self.equiv(other) and self.time_offset == other.time_offset + + __eq__ = exact def __getitem__(self, name: str) -> Any: # FIXME use frozendict
Implement State.equivalent or alter semantics of equality Implement `State.equiv` and `State.exact`.
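The patch and test below pin down the intended semantics: `equiv` compares only the declared state variables, `exact` additionally requires an equal `time_offset`, and `__eq__` is bound to `exact`. A usage sketch, assuming `State` and the `var` helper are importable from `houston.state` as in the project's test suite:

```python
from houston.state import State, var

class S(State):
    foo = var(int, lambda c: 0)
    bar = var(int, lambda c: 0)

a = S(foo=1, bar=1, time_offset=0.0)
b = S(foo=1, bar=1, time_offset=1.0)

print(a.equiv(b))  # True  -- declared variables match, time_offset ignored
print(a.exact(b))  # False -- exact also compares time_offset
print(a == b)      # False -- equality delegates to exact
```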
squaresLab/Houston
diff --git a/test/test_state.py b/test/test_state.py index 0711712..007f014 100644 --- a/test/test_state.py +++ b/test/test_state.py @@ -66,6 +66,16 @@ def test_eq(): assert S(foo=1, bar=2, time_offset=0.0) == Y(foo=1, bar=2, time_offset=0.0) +def test_equiv(): + class S(State): + foo = var(int, lambda c: 0) + bar = var(int, lambda c: 0) + + assert S(foo=1, bar=1, time_offset=0.0).equiv(S(foo=1, bar=1, time_offset=0.0)) + assert S(foo=1, bar=1, time_offset=0.0).equiv(S(foo=1, bar=1, time_offset=1.0)) + assert not S(foo=0, bar=1, time_offset=0.0).equiv(S(foo=1, bar=1, time_offset=0.0)) + + def test_to_and_from_json(): class S(State): foo = var(int, lambda c: 0)
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 3, "test_score": 2 }, "num_modified_files": 1 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==19.3.0 bugzoo==2.1.33 cement==3.0.8 certifi==2021.5.30 chardet==5.0.0 charset-normalizer==2.0.12 click==8.0.4 dataclasses==0.8 Deprecated==1.2.18 docker==5.0.3 dockerpty==0.4.1 dronekit==2.9.2 Flask==2.0.3 future==1.0.0 geographiclib==1.52 geopy==2.2.0 gitdb==4.0.9 GitPython==3.1.18 -e git+https://github.com/squaresLab/Houston.git@50127bd1c4e3bbdc577c7e2aaf300980e8241b0c#egg=houston idna==3.10 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work itsdangerous==2.0.1 Jinja2==3.0.3 lxml==5.3.1 MarkupSafe==2.0.1 monotonic==1.6 more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work mypy-extensions==1.0.0 packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pexpect==4.9.0 pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work psutil==7.0.0 ptyprocess==0.7.0 py @ file:///opt/conda/conda-bld/py_1644396412707/work pymavlink==2.4.43 pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 PyYAML==6.0.1 requests==2.27.1 six==1.17.0 smmap==5.0.0 tabulate==0.8.10 toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work urllib3==1.26.20 websocket-client==1.3.1 Werkzeug==2.0.3 wrapt==1.16.0 zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: Houston channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==19.3.0 - bugzoo==2.1.33 - cement==3.0.8 - chardet==5.0.0 - charset-normalizer==2.0.12 - click==8.0.4 - dataclasses==0.8 - deprecated==1.2.18 - docker==5.0.3 - dockerpty==0.4.1 - dronekit==2.9.2 - flask==2.0.3 - future==1.0.0 - geographiclib==1.52 - geopy==2.2.0 - gitdb==4.0.9 - gitpython==3.1.18 - idna==3.10 - itsdangerous==2.0.1 - jinja2==3.0.3 - lxml==5.3.1 - markupsafe==2.0.1 - monotonic==1.6 - mypy-extensions==1.0.0 - pexpect==4.9.0 - psutil==7.0.0 - ptyprocess==0.7.0 - pymavlink==2.4.43 - pyyaml==6.0.1 - requests==2.27.1 - six==1.17.0 - smmap==5.0.0 - tabulate==0.8.10 - urllib3==1.26.20 - websocket-client==1.3.1 - werkzeug==2.0.3 - wrapt==1.16.0 prefix: /opt/conda/envs/Houston
[ "test/test_state.py::test_equiv" ]
[ "test/test_state.py::test_constructor", "test/test_state.py::test_is_frozen", "test/test_state.py::test_eq" ]
[ "test/test_state.py::test_variable_construction", "test/test_state.py::test_to_and_from_json" ]
[]
MIT License
3,068
[ "houston/state.py" ]
[ "houston/state.py" ]
conan-io__conan-3554
9a9a0c4c84b89ce29fa9a5695ba544584d27dd69
2018-09-14 09:18:22
b02cce4e78d5982e00b66f80a683465b3c679033
diff --git a/conans/client/conan_api.py b/conans/client/conan_api.py index d96725350..96a7b5744 100644 --- a/conans/client/conan_api.py +++ b/conans/client/conan_api.py @@ -111,25 +111,32 @@ def _get_conanfile_path(path, cwd, py): """ param py= True: Must be .py, False: Must be .txt, None: Try .py, then .txt """ + candidate_paths = list() path = _make_abs_path(path, cwd) if os.path.isdir(path): # Can be a folder if py: path = os.path.join(path, "conanfile.py") + candidate_paths.append(path) elif py is False: path = os.path.join(path, "conanfile.txt") + candidate_paths.append(path) else: path_py = os.path.join(path, "conanfile.py") + candidate_paths.append(path_py) if os.path.exists(path_py): path = path_py else: path = os.path.join(path, "conanfile.txt") + candidate_paths.append(path) + else: + candidate_paths.append(path) if not os.path.isfile(path): # Must exist - raise ConanException("Conanfile not found: %s" % path) + raise ConanException("Conanfile not found at %s" % " or ".join(candidate_paths)) if py and not path.endswith(".py"): - raise ConanException("A conanfile.py is needed (not valid conanfile.txt)") + raise ConanException("A conanfile.py is needed, " + path + " is not acceptable") return path
Error message when no conanfile was found is confusing When no conanfile is found, a message such as ``` ERROR: Conanfile not found: D:\Foo\conanfile.txt ``` is displayed. There is no reference to a conanfile.py that was considered (and is also missing). To help us debug your issue please explain: - [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md). - [x] I've specified the Conan version, operating system version and any tool that can be relevant. - [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.
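The gist of the fix in the patch above can be shown in isolation: collect every candidate path that was considered and name all of them in the error. The `find_conanfile` helper below is a simplified, hypothetical stand-in for `conans.client.conan_api._get_conanfile_path`, covering only the "either .py or .txt" case.

```python
import os


class ConanfileNotFound(Exception):
    pass


def find_conanfile(path):
    # Track every path that was considered so the error can name them all.
    candidates = []
    if os.path.isdir(path):
        py_path = os.path.join(path, "conanfile.py")
        candidates.append(py_path)
        if not os.path.exists(py_path):
            candidates.append(os.path.join(path, "conanfile.txt"))
    else:
        candidates.append(path)

    for candidate in candidates:
        if os.path.isfile(candidate):
            return candidate
    # For a directory containing neither file, both candidates appear here,
    # instead of only the last one tried.
    raise ConanfileNotFound(
        "Conanfile not found at %s" % " or ".join(candidates))
```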
conan-io/conan
diff --git a/conans/test/command/package_test.py b/conans/test/command/package_test.py index 2106dc342..7f00d26bc 100644 --- a/conans/test/command/package_test.py +++ b/conans/test/command/package_test.py @@ -1,11 +1,11 @@ +import os import unittest from conans import tools -from conans.test.utils.tools import TestClient -import os from conans.paths import CONANFILE -from conans.util.files import load, mkdir from conans.test.utils.test_files import temp_folder +from conans.test.utils.tools import TestClient +from conans.util.files import load, mkdir from parameterized import parameterized @@ -70,14 +70,16 @@ class PackageLocalCommandTest(unittest.TestCase): error = client.run("package conanfile.txt --build-folder build2 --install-folder build", ignore_error=True) self.assertTrue(error) - self.assertIn("A conanfile.py is needed (not valid conanfile.txt)", client.out) + self.assertIn( + "A conanfile.py is needed, %s is not acceptable" % os.path.join(client.current_folder, "conanfile.txt"), + client.out) # Path with wrong conanfile path error = client.run("package not_real_dir/conanfile.py --build-folder build2 --install-folder build", ignore_error=True) self.assertTrue(error) - self.assertIn("Conanfile not found: %s" % os.path.join(client.current_folder, "not_real_dir", - "conanfile.py"), client.out) + self.assertIn("Conanfile not found at %s" % os.path.join(client.current_folder, "not_real_dir", + "conanfile.py"), client.out) def package_with_reference_errors_test(self): client = TestClient() diff --git a/conans/test/command/source_test.py b/conans/test/command/source_test.py index 9fdba512b..a0e98dbac 100644 --- a/conans/test/command/source_test.py +++ b/conans/test/command/source_test.py @@ -93,14 +93,16 @@ class ConanLib(ConanFile): # Path with conanfile.txt error = client.run("source conanfile.txt --install-folder subdir", ignore_error=True) self.assertTrue(error) - self.assertIn("A conanfile.py is needed (not valid conanfile.txt)", client.out) + self.assertIn( + "A conanfile.py is needed, %s is not acceptable" % os.path.join(client.current_folder, "conanfile.txt"), + client.out) # Path with wrong conanfile path error = client.run("package not_real_dir/conanfile.py --build-folder build2 --install-folder build", ignore_error=True) self.assertTrue(error) - self.assertIn("Conanfile not found: %s" % os.path.join(client.current_folder, "not_real_dir", - "conanfile.py"), client.out) + self.assertIn("Conanfile not found at %s" % os.path.join(client.current_folder, "not_real_dir", + "conanfile.py"), client.out) def source_local_cwd_test(self): conanfile = ''' diff --git a/conans/test/command/test_package_test.py b/conans/test/command/test_package_test.py index ce09f0238..050e78d4a 100644 --- a/conans/test/command/test_package_test.py +++ b/conans/test/command/test_package_test.py @@ -109,14 +109,16 @@ class ConanLib(ConanFile): # Path with conanfile.txt error = client.run("test conanfile.txt other/0.2@user2/channel2", ignore_error=True) self.assertTrue(error) - self.assertIn("A conanfile.py is needed (not valid conanfile.txt)", client.out) + self.assertIn( + "A conanfile.py is needed, %s is not acceptable" % os.path.join(client.current_folder, "conanfile.txt"), + client.out) # Path with wrong conanfile path error = client.run("test not_real_dir/conanfile.py other/0.2@user2/channel2", ignore_error=True) self.assertTrue(error) - self.assertIn("Conanfile not found: %s" % os.path.join(client.current_folder, "not_real_dir", - "conanfile.py"), client.out) + self.assertIn("Conanfile not found at %s" % 
os.path.join(client.current_folder, "not_real_dir", + "conanfile.py"), client.out) def build_folder_handling_test(self): test_conanfile = '''
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 1 }
1.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "conans/requirements.txt", "conans/requirements_server.txt", "conans/requirements_dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
astroid==2.11.7 attrs==22.2.0 beautifulsoup4==4.12.3 bottle==0.12.25 certifi==2021.5.30 charset-normalizer==2.0.12 codecov==2.1.13 colorama==0.3.9 -e git+https://github.com/conan-io/conan.git@9a9a0c4c84b89ce29fa9a5695ba544584d27dd69#egg=conan coverage==4.2 deprecation==2.0.7 dill==0.3.4 distro==1.1.0 fasteners==0.19 future==0.16.0 idna==3.10 importlib-metadata==4.8.3 iniconfig==1.1.1 isort==5.10.1 lazy-object-proxy==1.7.1 mccabe==0.7.0 mock==1.3.0 node-semver==0.2.0 nose==1.3.7 packaging==21.3 parameterized==0.8.1 patch==1.16 pbr==6.1.1 platformdirs==2.4.0 pluggy==1.0.0 pluginbase==0.7 py==1.11.0 Pygments==2.14.0 PyJWT==1.7.1 pylint==2.13.9 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 PyYAML==3.13 requests==2.27.1 six==1.17.0 soupsieve==2.3.2.post1 tomli==1.2.3 typed-ast==1.5.5 typing_extensions==4.1.1 urllib3==1.26.20 waitress==2.0.0 WebOb==1.8.9 WebTest==2.0.35 wrapt==1.16.0 zipp==3.6.0
name: conan channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - astroid==2.11.7 - attrs==22.2.0 - beautifulsoup4==4.12.3 - bottle==0.12.25 - charset-normalizer==2.0.12 - codecov==2.1.13 - colorama==0.3.9 - coverage==4.2 - deprecation==2.0.7 - dill==0.3.4 - distro==1.1.0 - fasteners==0.19 - future==0.16.0 - idna==3.10 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - isort==5.10.1 - lazy-object-proxy==1.7.1 - mccabe==0.7.0 - mock==1.3.0 - node-semver==0.2.0 - nose==1.3.7 - packaging==21.3 - parameterized==0.8.1 - patch==1.16 - pbr==6.1.1 - platformdirs==2.4.0 - pluggy==1.0.0 - pluginbase==0.7 - py==1.11.0 - pygments==2.14.0 - pyjwt==1.7.1 - pylint==2.13.9 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - pyyaml==3.13 - requests==2.27.1 - six==1.17.0 - soupsieve==2.3.2.post1 - tomli==1.2.3 - typed-ast==1.5.5 - typing-extensions==4.1.1 - urllib3==1.26.20 - waitress==2.0.0 - webob==1.8.9 - webtest==2.0.35 - wrapt==1.16.0 - zipp==3.6.0 prefix: /opt/conda/envs/conan
[ "conans/test/command/test_package_test.py::TestPackageTest::test_with_path_errors_test" ]
[ "conans/test/command/test_package_test.py::TestPackageTest::test_only_test" ]
[]
[]
MIT License
3,069
[ "conans/client/conan_api.py" ]
[ "conans/client/conan_api.py" ]
pypa__twine-396
4504db8b12cff0a289d939b9e369b390b9085fde
2018-09-14 16:09:18
c977b44cf87e066125e9de496429f8b3f5c90bf4
theacodes: @jaraco this LGTM, but it's failing on `python setup.py check -r`.
diff --git a/README.rst b/README.rst index 2d3bec5..4893582 100644 --- a/README.rst +++ b/README.rst @@ -134,10 +134,24 @@ The next time you run ``twine`` it will prompt you for a username and will grab .. _`Using Keyring on headless systems`: https://keyring.readthedocs.io/en/latest/#using-keyring-on-headless-linux-systems +Disabling Keyring +^^^^^^^^^^^^^^^^^ + +In some cases, the presence of keyring may be problemmatic. To disable +keyring and defer to a prompt for passwords, uninstall ``keyring`` +or if that's not an option, you can also configure keyring to be disabled. + +See `twine 338 <https://github.com/pypa/twine/issues/338>`_ for a +discussion on ways to do that. Options ------- +``twine upload`` +^^^^^^^^^^^^^^^^ + +Uploads one or more distributions to a repository. + .. code-block:: console $ twine upload -h @@ -191,16 +205,29 @@ Options containing the private key and the certificate in PEM format. -Twine also includes a ``register`` command. +``twine check`` +^^^^^^^^^^^^^^^ + +Checks whether your distributions long description will render correctly on PyPI. + +.. code-block:: console + + $ twine check -h + usage: twine check [-h] dist [dist ...] + + positional arguments: + dist The distribution files to check, usually dist/* + + optional arguments: + -h, --help show this help message and exit + +``twine register`` +^^^^^^^^^^^^^^^^^^ -.. WARNING:: - ``register`` is `no longer necessary if you are - uploading to pypi.org - <https://packaging.python.org/guides/migrating-to-pypi-org/#registering-package-names-metadata>`_. As - such, it is `no longer supported - <https://github.com/pypa/warehouse/issues/1627>`_ in `Warehouse`_ - (the new PyPI software running on pypi.org). However, you may need - this if you are using a different package index. +**WARNING**: The ``register`` command is `no longer necessary if you are uploading to +pypi.org`_. As such, it is `no longer supported`_ in `Warehouse`_ (the new +PyPI software running on pypi.org). However, you may need this if you are using +a different package index. For completeness, its usage: @@ -300,3 +327,5 @@ trackers, chat rooms, and mailing lists is expected to follow the .. _`PyPA Code of Conduct`: https://www.pypa.io/en/latest/code-of-conduct/ .. _`Warehouse`: https://github.com/pypa/warehouse .. _`wheels`: https://packaging.python.org/glossary/#term-wheel +.. _`no longer necessary if you are uploading to pypi.org`: https://packaging.python.org/guides/migrating-to-pypi-org/#registering-package-names-metadata +.. 
_`no longer supported`: https://github.com/pypa/warehouse/issues/1627 diff --git a/docs/changelog.rst b/docs/changelog.rst index 8100cfd..6e9fc0a 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -4,6 +4,7 @@ Changelog ========= +* :feature:`395 major` Add ``twine check`` command to check long description * :feature:`392 major` Drop support for Python 3.3 * :release:`1.11.0 <2018-03-19>` * :bug:`269 major` Avoid uploading to PyPI when given alternate diff --git a/setup.cfg b/setup.cfg index 2102387..925c448 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,4 +1,4 @@ -[wheel] +[bdist_wheel] universal = 1 [check-manifest] @@ -9,12 +9,12 @@ ignore = .github/* [metadata] +license_file = LICENSE requires-dist = tqdm >= 4.14 requests >= 2.5.0, != 2.15, != 2.16 requests-toolbelt >= 0.8.0 pkginfo >= 1.4.2 setuptools >= 0.7.0 - argparse; python_version == '2.6' pyblake2; extra == 'with-blake2' and python_version < '3.6' keyring; extra == 'keyring' diff --git a/setup.py b/setup.py index bda1c1c..b96202a 100644 --- a/setup.py +++ b/setup.py @@ -18,20 +18,6 @@ import sys import twine -install_requires = [ - "tqdm >= 4.14", - "pkginfo >= 1.4.2", - "requests >= 2.5.0, != 2.15, != 2.16", - "requests-toolbelt >= 0.8.0", - "setuptools >= 0.7.0", -] - -if sys.version_info[:2] < (2, 7): - install_requires += [ - "argparse", - ] - - blake2_requires = [] if sys.version_info[:2] < (3, 6): @@ -81,6 +67,7 @@ setup( entry_points={ "twine.registered_commands": [ + "check = twine.commands.check:main", "upload = twine.commands.upload:main", "register = twine.commands.register:main", ], @@ -89,7 +76,14 @@ setup( ], }, - install_requires=install_requires, + install_requires=[ + "pkginfo >= 1.4.2", + "readme_renderer >= 21.0", + "requests >= 2.5.0, != 2.15, != 2.16", + "requests-toolbelt >= 0.8.0", + "setuptools >= 0.7.0", + "tqdm >= 4.14", + ], extras_require={ 'with-blake2': blake2_requires, 'keyring': [ diff --git a/tox.ini b/tox.ini index 26b3d49..af3174d 100644 --- a/tox.ini +++ b/tox.ini @@ -7,19 +7,23 @@ deps = pretend pytest py27,py34,py35: pyblake2 + readme_renderer commands = coverage run --source twine -m pytest {posargs:tests} coverage report -m [testenv:docs] basepython = python3.6 -deps = -rdocs/requirements.txt +deps = + . + -rdocs/requirements.txt commands = sphinx-build -W -b html -d {envtmpdir}/doctrees docs docs/_build/html sphinx-build -W -b doctest -d {envtmpdir}/doctrees docs docs/_build/html doc8 docs sphinx-build -W -b linkcheck -d {envtmpdir}/doctrees docs docs/_build/linkcheck - python setup.py check -r -s + python setup.py sdist + twine check dist/* [testenv:release] deps = @@ -32,12 +36,13 @@ commands = [testenv:lint] basepython = python3.6 deps = + . 
flake8 check-manifest - readme_renderer mypy commands = flake8 twine/ tests/ check-manifest -v - python setup.py check -r -s + python setup.py sdist + twine check dist/* -mypy -s twine/ tests/ diff --git a/twine/__main__.py b/twine/__main__.py index 9eed3aa..fbf276d 100644 --- a/twine/__main__.py +++ b/twine/__main__.py @@ -17,13 +17,16 @@ from __future__ import unicode_literals import sys +import requests + +from twine import exceptions from twine.cli import dispatch def main(): try: return dispatch(sys.argv[1:]) - except Exception as exc: + except (exceptions.TwineException, requests.exceptions.HTTPError) as exc: return '{0}: {1}'.format( exc.__class__.__name__, exc.args[0], diff --git a/twine/cli.py b/twine/cli.py index 68f568c..173ad51 100644 --- a/twine/cli.py +++ b/twine/cli.py @@ -72,4 +72,4 @@ def dispatch(argv): main = registered_commands[args.command].load() - main(args.args) + return main(args.args) diff --git a/twine/commands/__init__.py b/twine/commands/__init__.py index 3048666..1bb61ae 100644 --- a/twine/commands/__init__.py +++ b/twine/commands/__init__.py @@ -13,3 +13,38 @@ # limitations under the License. from __future__ import absolute_import, division, print_function from __future__ import unicode_literals + +import glob +import os.path + +from twine import exceptions + +__all__ = [] + + +def _group_wheel_files_first(files): + if not any(fname for fname in files if fname.endswith(".whl")): + # Return early if there's no wheel files + return files + + files.sort(key=lambda x: -1 if x.endswith(".whl") else 0) + + return files + + +def _find_dists(dists): + uploads = [] + for filename in dists: + if os.path.exists(filename): + uploads.append(filename) + continue + # The filename didn't exist so it may be a glob + files = glob.glob(filename) + # If nothing matches, files is [] + if not files: + raise exceptions.InvalidDistribution( + "Cannot find file (or expand pattern): '%s'" % filename + ) + # Otherwise, files will be filenames that exist + uploads.extend(files) + return _group_wheel_files_first(uploads) diff --git a/twine/commands/check.py b/twine/commands/check.py new file mode 100644 index 0000000..2037761 --- /dev/null +++ b/twine/commands/check.py @@ -0,0 +1,126 @@ +# Copyright 2018 Dustin Ingram +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import absolute_import, division, print_function +from __future__ import unicode_literals + +import argparse +import cgi +import re +import sys + +try: + from StringIO import StringIO +except ImportError: + from _io import StringIO + +import readme_renderer.markdown +import readme_renderer.rst +import readme_renderer.txt + +from twine.commands import _find_dists +from twine.package import PackageFile + +_RENDERERS = { + None: readme_renderer.rst, # Default if description_content_type is None + "": readme_renderer.rst, # Default if description_content_type is None + "text/plain": readme_renderer.txt, + "text/x-rst": readme_renderer.rst, + "text/markdown": readme_renderer.markdown, +} + + +# Regular expression used to capture and reformat doctuils warnings into +# something that a human can understand. This is loosely borrowed from +# Sphinx: https://github.com/sphinx-doc/sphinx/blob +# /c35eb6fade7a3b4a6de4183d1dd4196f04a5edaf/sphinx/util/docutils.py#L199 +_REPORT_RE = re.compile( + r"^<string>:(?P<line>(?:\d+)?): " + r"\((?P<level>DEBUG|INFO|WARNING|ERROR|SEVERE)/(\d+)?\) " + r"(?P<message>.*)", + re.DOTALL | re.MULTILINE, +) + + +class _WarningStream(object): + def __init__(self): + self.output = StringIO() + + def write(self, text): + matched = _REPORT_RE.search(text) + + if not matched: + self.output.write(text) + return + + self.output.write( + "line {line}: {level_text}: {message}\n".format( + level_text=matched.group("level").capitalize(), + line=matched.group("line"), + message=matched.group("message").rstrip("\r\n"), + ) + ) + + def __str__(self): + return self.output.getvalue() + + +def check(dists, output_stream=sys.stdout): + uploads = [i for i in _find_dists(dists) if not i.endswith(".asc")] + stream = _WarningStream() + failure = False + + for filename in uploads: + output_stream.write("Checking distribution %s: " % filename) + package = PackageFile.from_filename(filename, comment=None) + + metadata = package.metadata_dictionary() + content_type, parameters = cgi.parse_header( + metadata.get("description_content_type") or "" + ) + + # Get the appropriate renderer + renderer = _RENDERERS.get(content_type, readme_renderer.txt) + + # Actually render the given value + rendered = renderer.render( + metadata.get("description"), stream=stream, **parameters + ) + + if rendered is None: + failure = True + output_stream.write("Failed\n") + output_stream.write( + "The project's long_description has invalid markup which will " + "not be rendered on PyPI. 
The following syntax errors were " + "detected:\n%s" % stream + ) + else: + output_stream.write("Passed\n") + + return failure + + +def main(args): + parser = argparse.ArgumentParser(prog="twine check") + parser.add_argument( + "dists", + nargs="+", + metavar="dist", + help="The distribution files to check, usually dist/*", + ) + + args = parser.parse_args(args) + + # Call the check function with the arguments from the command line + return check(args.dists) diff --git a/twine/commands/register.py b/twine/commands/register.py index 30400f8..11e551a 100644 --- a/twine/commands/register.py +++ b/twine/commands/register.py @@ -16,8 +16,8 @@ from __future__ import absolute_import, unicode_literals, print_function import argparse import os.path -from twine import exceptions as exc from twine.package import PackageFile +from twine import exceptions from twine import settings @@ -28,7 +28,7 @@ def register(register_settings, package): repository = register_settings.create_repository() if not os.path.exists(package): - raise exc.PackageNotFound( + raise exceptions.PackageNotFound( '"{0}" does not exist on the file system.'.format(package) ) @@ -38,7 +38,7 @@ def register(register_settings, package): repository.close() if resp.is_redirect: - raise exc.RedirectDetected( + raise exceptions.RedirectDetected( ('"{0}" attempted to redirect to "{1}" during registration.' ' Aborting...').format(repository_url, resp.headers["location"])) diff --git a/twine/commands/upload.py b/twine/commands/upload.py index 96efc81..01eea0f 100644 --- a/twine/commands/upload.py +++ b/twine/commands/upload.py @@ -15,64 +15,40 @@ from __future__ import absolute_import, division, print_function from __future__ import unicode_literals import argparse -import glob import os.path -import sys -import twine.exceptions as exc +from twine.commands import _find_dists from twine.package import PackageFile +from twine import exceptions from twine import settings from twine import utils -def group_wheel_files_first(files): - if not any(fname for fname in files if fname.endswith(".whl")): - # Return early if there's no wheel files - return files - - files.sort(key=lambda x: -1 if x.endswith(".whl") else 0) - - return files - - -def find_dists(dists): - uploads = [] - for filename in dists: - if os.path.exists(filename): - uploads.append(filename) - continue - # The filename didn't exist so it may be a glob - files = glob.glob(filename) - # If nothing matches, files is [] - if not files: - raise ValueError( - "Cannot find file (or expand pattern): '%s'" % filename - ) - # Otherwise, files will be filenames that exist - uploads.extend(files) - return group_wheel_files_first(uploads) - - def skip_upload(response, skip_existing, package): filename = package.basefilename # NOTE(sigmavirus24): Old PyPI returns the first message while Warehouse # returns the latter. This papers over the differences. - msg = ('A file named "{0}" already exists for'.format(filename), - 'File already exists') + msg_400 = ('A file named "{0}" already exists for'.format(filename), + 'File already exists') + msg_403 = 'Not enough permissions to overwrite artifact' # NOTE(sigmavirus24): PyPI presently returns a 400 status code with the # error message in the reason attribute. Other implementations return a - # 409 status code. We only want to skip an upload if: + # 409 or 403 status code. We only want to skip an upload if: # 1. The user has told us to skip existing packages (skip_existing is # True) AND # 2. a) The response status code is 409 OR # 2. 
b) The response status code is 400 AND it has a reason that matches - # what we expect PyPI to return to us. + # what we expect PyPI to return to us. OR + # 2. c) The response status code is 403 AND the text matches what we + # expect Artifactory to return to us. return (skip_existing and (response.status_code == 409 or - (response.status_code == 400 and response.reason.startswith(msg)))) + (response.status_code == 400 and + response.reason.startswith(msg_400)) or + (response.status_code == 403 and msg_403 in response.text))) def upload(upload_settings, dists): - dists = find_dists(dists) + dists = _find_dists(dists) # Determine if the user has passed in pre-signed distributions signatures = dict( @@ -114,7 +90,7 @@ def upload(upload_settings, dists): # by PyPI should never happen in reality. This should catch malicious # redirects as well. if resp.is_redirect: - raise exc.RedirectDetected( + raise exceptions.RedirectDetected( ('"{0}" attempted to redirect to "{1}" during upload.' ' Aborting...').format(repository_url, resp.headers["location"])) @@ -128,6 +104,8 @@ def upload(upload_settings, dists): # pool. repository.close() + return True + def main(args): parser = argparse.ArgumentParser(prog="twine upload") @@ -146,8 +124,4 @@ def main(args): upload_settings = settings.Settings.from_argparse(args) # Call the upload function with the arguments from the command line - upload(upload_settings, args.dists) - - -if __name__ == "__main__": - sys.exit(main()) + return upload(upload_settings, args.dists) diff --git a/twine/exceptions.py b/twine/exceptions.py index d227db6..9e35ea0 100644 --- a/twine/exceptions.py +++ b/twine/exceptions.py @@ -73,3 +73,15 @@ class InvalidSigningConfiguration(TwineException): """Both the sign and identity parameters must be present.""" pass + + +class InvalidConfiguration(TwineException): + """Raised when configuration is invalid.""" + + pass + + +class InvalidDistribution(TwineException): + """Raised when a distribution is invalid.""" + + pass diff --git a/twine/package.py b/twine/package.py index 4f07361..c9e5197 100644 --- a/twine/package.py +++ b/twine/package.py @@ -31,6 +31,7 @@ except ImportError: from twine.wheel import Wheel from twine.wininst import WinInst +from twine import exceptions DIST_TYPES = { "bdist_wheel": Wheel, @@ -78,7 +79,7 @@ class PackageFile(object): meta = DIST_TYPES[dtype](filename) break else: - raise ValueError( + raise exceptions.InvalidDistribution( "Unknown distribution format: '%s'" % os.path.basename(filename) ) @@ -151,7 +152,9 @@ class PackageFile(object): def add_gpg_signature(self, signature_filepath, signature_filename): if self.gpg_signature is not None: - raise ValueError('GPG Signature can only be added once') + raise exceptions.InvalidDistribution( + 'GPG Signature can only be added once' + ) with open(signature_filepath, "rb") as gpg: self.gpg_signature = (signature_filename, gpg.read()) diff --git a/twine/utils.py b/twine/utils.py index e00d145..83a1867 100644 --- a/twine/utils.py +++ b/twine/utils.py @@ -35,7 +35,7 @@ try: except ImportError: from urllib.parse import urlparse, urlunparse -import twine.exceptions +from twine import exceptions # Shim for raw_input in python3 if sys.version_info > (3,): @@ -109,7 +109,7 @@ def get_repository_from_config(config_file, repository, repository_url=None): "password": None, } if repository_url and "://" not in repository_url: - raise twine.exceptions.UnreachableRepositoryURLDetected( + raise exceptions.UnreachableRepositoryURLDetected( "Repository URL {0} has no protocol. 
Please add " "'https://'. \n".format(repository_url)) try: @@ -125,7 +125,7 @@ def get_repository_from_config(config_file, repository, repository_url=None): repo=repository, cfg=config_file ) - raise KeyError(msg) + raise exceptions.InvalidConfiguration(msg) _HOSTNAMES = set(["pypi.python.org", "testpypi.python.org", "upload.pypi.org", @@ -207,12 +207,11 @@ def password_prompt(prompt_text): # Always expects unicode for our own sanity def get_password_from_keyring(system, username): - try: - import keyring - except ImportError: + if 'keyring' not in sys.modules: return try: + import keyring return keyring.get_password(system, username) except Exception as exc: warnings.warn(str(exc)) diff --git a/twine/wheel.py b/twine/wheel.py index cf6155d..8a52d6e 100644 --- a/twine/wheel.py +++ b/twine/wheel.py @@ -26,6 +26,8 @@ except ImportError: from pkginfo import distribution from pkginfo.distribution import Distribution +from twine import exceptions + # Monkeypatch Metadata 2.0 support distribution.HEADER_ATTRS_2_0 = distribution.HEADER_ATTRS_1_2 distribution.HEADER_ATTRS.update({"2.0": distribution.HEADER_ATTRS_2_0}) @@ -69,7 +71,9 @@ class Wheel(Distribution): def read(self): fqn = os.path.abspath(os.path.normpath(self.filename)) if not os.path.exists(fqn): - raise ValueError('No such file: %s' % fqn) + raise exceptions.InvalidDistribution( + 'No such file: %s' % fqn + ) if fqn.endswith('.whl'): archive = zipfile.ZipFile(fqn) @@ -78,7 +82,9 @@ class Wheel(Distribution): def read_file(name): return archive.read(name) else: - raise ValueError('Not a known archive format: %s' % fqn) + raise exceptions.InvalidDistribution( + 'Not a known archive format: %s' % fqn + ) try: for path in self.find_candidate_metadata_files(names): @@ -89,7 +95,9 @@ class Wheel(Distribution): finally: archive.close() - raise ValueError('No METADATA in archive: %s' % fqn) + raise exceptions.InvalidDistribution( + 'No METADATA in archive: %s' % fqn + ) def parse(self, data): super(Wheel, self).parse(data) diff --git a/twine/wininst.py b/twine/wininst.py index 6951070..5e8932a 100644 --- a/twine/wininst.py +++ b/twine/wininst.py @@ -7,6 +7,8 @@ import zipfile from pkginfo.distribution import Distribution +from twine import exceptions + wininst_file_re = re.compile(r".*py(?P<pyver>\d+\.\d+)\.exe$") @@ -28,7 +30,9 @@ class WinInst(Distribution): def read(self): fqn = os.path.abspath(os.path.normpath(self.filename)) if not os.path.exists(fqn): - raise ValueError('No such file: %s' % fqn) + raise exceptions.InvalidDistribution( + 'No such file: %s' % fqn + ) if fqn.endswith('.exe'): archive = zipfile.ZipFile(fqn) @@ -37,7 +41,9 @@ class WinInst(Distribution): def read_file(name): return archive.read(name) else: - raise ValueError('Not a known archive format: %s' % fqn) + raise exceptions.InvalidDistribution( + 'Not a known archive format: %s' % fqn + ) try: tuples = [x.split('/') for x in names @@ -51,4 +57,6 @@ class WinInst(Distribution): finally: archive.close() - raise ValueError('No PKG-INFO/.egg-info in archive: %s' % fqn) + raise exceptions.InvalidDistribution( + 'No PKG-INFO/.egg-info in archive: %s' % fqn + )
How to disable keyring. Twine [supports keyring](https://github.com/pypa/twine/issues/277), but it's not clear to me how to use twine without this support. Is this implemented, or is the documentation just missing?
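The deferral pattern used by the patch above, sketched standalone: keyring is only consulted if something has already imported it, and the caller falls back to a prompt otherwise. `keyring.get_password` and `getpass.getpass` are real APIs; the `get_password` wrapper is invented here for illustration and is not twine's code.

```python
import getpass
import sys
import warnings


def get_password_from_keyring(system, username):
    if "keyring" not in sys.modules:
        return None  # keyring not loaded -> behave as if it were unavailable
    try:
        import keyring
        return keyring.get_password(system, username)
    except Exception as exc:
        warnings.warn(str(exc))
        return None


def get_password(system, username):
    # Fall back to an interactive prompt when keyring yields nothing.
    return (get_password_from_keyring(system, username)
            or getpass.getpass("Enter your password: "))
```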
pypa/twine
diff --git a/tests/test_check.py b/tests/test_check.py new file mode 100644 index 0000000..58bfbdf --- /dev/null +++ b/tests/test_check.py @@ -0,0 +1,119 @@ +# Copyright 2018 Dustin Ingram +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import unicode_literals + +import pretend + +from twine.commands import check + + +def test_warningstream_write_match(): + stream = check._WarningStream() + stream.output = pretend.stub(write=pretend.call_recorder(lambda a: None)) + + stream.write("<string>:2: (WARNING/2) Title underline too short.") + + assert stream.output.write.calls == [ + pretend.call("line 2: Warning: Title underline too short.\n") + ] + + +def test_warningstream_write_nomatch(): + stream = check._WarningStream() + stream.output = pretend.stub(write=pretend.call_recorder(lambda a: None)) + + stream.write("this does not match") + + assert stream.output.write.calls == [pretend.call("this does not match")] + + +def test_warningstream_str(): + stream = check._WarningStream() + stream.output = pretend.stub(getvalue=lambda: "result") + + assert str(stream) == "result" + + +def test_check_no_distributions(monkeypatch): + stream = check.StringIO() + + monkeypatch.setattr(check, "_find_dists", lambda a: []) + + assert not check.check("dist/*", output_stream=stream) + assert stream.getvalue() == "" + + +def test_check_passing_distribution(monkeypatch): + renderer = pretend.stub( + render=pretend.call_recorder(lambda *a, **kw: "valid") + ) + package = pretend.stub(metadata_dictionary=lambda: {"description": "blah"}) + output_stream = check.StringIO() + warning_stream = "" + + monkeypatch.setattr(check, "_RENDERERS", {"": renderer}) + monkeypatch.setattr(check, "_find_dists", lambda a: ["dist/dist.tar.gz"]) + monkeypatch.setattr( + check, + "PackageFile", + pretend.stub(from_filename=lambda *a, **kw: package), + ) + monkeypatch.setattr(check, "_WarningStream", lambda: warning_stream) + + assert not check.check("dist/*", output_stream=output_stream) + assert ( + output_stream.getvalue() + == "Checking distribution dist/dist.tar.gz: Passed\n" + ) + assert renderer.render.calls == [ + pretend.call("blah", stream=warning_stream) + ] + + +def test_check_failing_distribution(monkeypatch): + renderer = pretend.stub( + render=pretend.call_recorder(lambda *a, **kw: None) + ) + package = pretend.stub(metadata_dictionary=lambda: {"description": "blah"}) + output_stream = check.StringIO() + warning_stream = "WARNING" + + monkeypatch.setattr(check, "_RENDERERS", {"": renderer}) + monkeypatch.setattr(check, "_find_dists", lambda a: ["dist/dist.tar.gz"]) + monkeypatch.setattr( + check, + "PackageFile", + pretend.stub(from_filename=lambda *a, **kw: package), + ) + monkeypatch.setattr(check, "_WarningStream", lambda: warning_stream) + + assert check.check("dist/*", output_stream=output_stream) + assert output_stream.getvalue() == ( + "Checking distribution dist/dist.tar.gz: Failed\n" + "The project's long_description has invalid markup which will not be " + "rendered on PyPI. 
The following syntax errors were detected:\n" + "WARNING" + ) + assert renderer.render.calls == [ + pretend.call("blah", stream=warning_stream) + ] + + +def test_main(monkeypatch): + check_result = pretend.stub() + check_stub = pretend.call_recorder(lambda a: check_result) + monkeypatch.setattr(check, "check", check_stub) + + assert check.main(["dist/*"]) == check_result + assert check_stub.calls == [pretend.call(["dist/*"])] diff --git a/tests/test_commands.py b/tests/test_commands.py new file mode 100644 index 0000000..af556af --- /dev/null +++ b/tests/test_commands.py @@ -0,0 +1,51 @@ +import os + +import pytest + +from twine.commands import _find_dists, _group_wheel_files_first +from twine import exceptions + + +def test_ensure_wheel_files_uploaded_first(): + files = _group_wheel_files_first( + ["twine/foo.py", "twine/first.whl", "twine/bar.py", "twine/second.whl"] + ) + expected = [ + "twine/first.whl", + "twine/second.whl", + "twine/foo.py", + "twine/bar.py", + ] + assert expected == files + + +def test_ensure_if_no_wheel_files(): + files = _group_wheel_files_first(["twine/foo.py", "twine/bar.py"]) + expected = ["twine/foo.py", "twine/bar.py"] + assert expected == files + + +def test_find_dists_expands_globs(): + files = sorted(_find_dists(["twine/__*.py"])) + expected = [ + os.path.join("twine", "__init__.py"), + os.path.join("twine", "__main__.py"), + ] + assert expected == files + + +def test_find_dists_errors_on_invalid_globs(): + with pytest.raises(exceptions.InvalidDistribution): + _find_dists(["twine/*.rb"]) + + +def test_find_dists_handles_real_files(): + expected = [ + "twine/__init__.py", + "twine/__main__.py", + "twine/cli.py", + "twine/utils.py", + "twine/wheel.py", + ] + files = _find_dists(expected) + assert expected == files diff --git a/tests/test_main.py b/tests/test_main.py index e7c6037..9c8d20c 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -11,11 +11,14 @@ # limitations under the License. 
from twine import __main__ as dunder_main +from twine import exceptions import pretend def test_exception_handling(monkeypatch): - replaced_dispatch = pretend.raiser(KeyError('foo')) + replaced_dispatch = pretend.raiser( + exceptions.InvalidConfiguration('foo') + ) monkeypatch.setattr(dunder_main, 'dispatch', replaced_dispatch) - assert dunder_main.main() == 'KeyError: foo' + assert dunder_main.main() == 'InvalidConfiguration: foo' diff --git a/tests/test_upload.py b/tests/test_upload.py index 3d32aa0..181f613 100644 --- a/tests/test_upload.py +++ b/tests/test_upload.py @@ -28,45 +28,6 @@ import helpers WHEEL_FIXTURE = 'tests/fixtures/twine-1.5.0-py2.py3-none-any.whl' -def test_ensure_wheel_files_uploaded_first(): - files = upload.group_wheel_files_first(["twine/foo.py", - "twine/first.whl", - "twine/bar.py", - "twine/second.whl"]) - expected = ["twine/first.whl", - "twine/second.whl", - "twine/foo.py", - "twine/bar.py"] - assert expected == files - - -def test_ensure_if_no_wheel_files(): - files = upload.group_wheel_files_first(["twine/foo.py", - "twine/bar.py"]) - expected = ["twine/foo.py", - "twine/bar.py"] - assert expected == files - - -def test_find_dists_expands_globs(): - files = sorted(upload.find_dists(['twine/__*.py'])) - expected = [os.path.join('twine', '__init__.py'), - os.path.join('twine', '__main__.py')] - assert expected == files - - -def test_find_dists_errors_on_invalid_globs(): - with pytest.raises(ValueError): - upload.find_dists(['twine/*.rb']) - - -def test_find_dists_handles_real_files(): - expected = ['twine/__init__.py', 'twine/__main__.py', 'twine/cli.py', - 'twine/utils.py', 'twine/wheel.py'] - files = upload.find_dists(expected) - assert expected == files - - def test_get_config_old_format(tmpdir): pypirc = os.path.join(str(tmpdir), ".pypirc") @@ -156,6 +117,21 @@ def test_skip_existing_skips_files_already_on_pypiserver(monkeypatch): package=pkg) is True +def test_skip_existing_skips_files_already_on_artifactory(monkeypatch): + # Artifactory (https://jfrog.com/artifactory/) responds with 403 + # when the file already exists. + response = pretend.stub( + status_code=403, + text="Not enough permissions to overwrite artifact " + "'pypi-local:twine/1.5.0/twine-1.5.0-py2.py3-none-any.whl'" + "(user 'twine-deployer' needs DELETE permission).") + + pkg = package.PackageFile.from_filename(WHEEL_FIXTURE, None) + assert upload.skip_upload(response=response, + skip_existing=True, + package=pkg) is True + + def test_skip_upload_respects_skip_existing(monkeypatch): response = pretend.stub( status_code=400, diff --git a/tests/test_utils.py b/tests/test_utils.py index 4d60e04..520b78f 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -18,11 +18,6 @@ import sys import os.path import textwrap -try: - import builtins -except ImportError: - import __builtin__ as builtins - import pytest from twine import utils @@ -239,13 +234,7 @@ def keyring_missing(monkeypatch): """ Simulate that 'import keyring' raises an ImportError """ - real_import = builtins.__import__ - - def my_import(name, *args, **kwargs): - if name == 'keyring': - raise ImportError - return real_import(name, *args, **kwargs) - monkeypatch.setattr(builtins, '__import__', my_import) + monkeypatch.delitem(sys.modules, 'keyring', raising=False) @pytest.fixture
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 15 }
1.11
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "coverage", "pretend", "pyblake2" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "docs/requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 attrs==22.2.0 Babel==2.11.0 bleach==4.1.0 certifi==2021.5.30 cffi==1.15.1 charset-normalizer==2.0.12 colorama==0.4.5 coverage==6.2 cryptography==40.0.2 distlib==0.3.9 doc8==0.11.2 docutils==0.18.1 filelock==3.4.1 idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 jeepney==0.7.1 Jinja2==3.0.3 keyring==23.4.1 MarkupSafe==2.0.1 packaging==21.3 pbr==6.1.1 pkginfo==1.10.0 platformdirs==2.4.0 pluggy==1.0.0 pretend==1.0.9 py==1.11.0 pyblake2==1.1.2 pycparser==2.21 Pygments==2.14.0 pyparsing==3.1.4 pytest==7.0.1 pytz==2025.2 readme-renderer==34.0 releases==2.1.1 requests==2.27.1 requests-toolbelt==1.0.0 restructuredtext-lint==1.4.0 rfc3986==1.5.0 SecretStorage==3.3.3 semantic-version==2.6.0 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinx-rtd-theme==2.0.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 stevedore==3.5.2 toml==0.10.2 tomli==1.2.3 tox==3.28.0 tqdm==4.64.1 -e git+https://github.com/pypa/twine.git@4504db8b12cff0a289d939b9e369b390b9085fde#egg=twine typing_extensions==4.1.1 urllib3==1.26.20 virtualenv==20.17.1 webencodings==0.5.1 zipp==3.6.0
name: twine channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - attrs==22.2.0 - babel==2.11.0 - bleach==4.1.0 - cffi==1.15.1 - charset-normalizer==2.0.12 - colorama==0.4.5 - coverage==6.2 - cryptography==40.0.2 - distlib==0.3.9 - doc8==0.11.2 - docutils==0.18.1 - filelock==3.4.1 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - jeepney==0.7.1 - jinja2==3.0.3 - keyring==23.4.1 - markupsafe==2.0.1 - packaging==21.3 - pbr==6.1.1 - pkginfo==1.10.0 - platformdirs==2.4.0 - pluggy==1.0.0 - pretend==1.0.9 - py==1.11.0 - pyblake2==1.1.2 - pycparser==2.21 - pygments==2.14.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytz==2025.2 - readme-renderer==34.0 - releases==2.1.1 - requests==2.27.1 - requests-toolbelt==1.0.0 - restructuredtext-lint==1.4.0 - rfc3986==1.5.0 - secretstorage==3.3.3 - semantic-version==2.6.0 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinx-rtd-theme==2.0.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jquery==4.1 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - stevedore==3.5.2 - toml==0.10.2 - tomli==1.2.3 - tox==3.28.0 - tqdm==4.64.1 - typing-extensions==4.1.1 - urllib3==1.26.20 - virtualenv==20.17.1 - webencodings==0.5.1 - zipp==3.6.0 prefix: /opt/conda/envs/twine
[ "tests/test_check.py::test_warningstream_write_match", "tests/test_check.py::test_warningstream_write_nomatch", "tests/test_check.py::test_warningstream_str", "tests/test_check.py::test_check_no_distributions", "tests/test_check.py::test_check_passing_distribution", "tests/test_check.py::test_check_failing_distribution", "tests/test_check.py::test_main", "tests/test_commands.py::test_ensure_wheel_files_uploaded_first", "tests/test_commands.py::test_ensure_if_no_wheel_files", "tests/test_commands.py::test_find_dists_expands_globs", "tests/test_commands.py::test_find_dists_errors_on_invalid_globs", "tests/test_commands.py::test_find_dists_handles_real_files", "tests/test_main.py::test_exception_handling", "tests/test_upload.py::test_get_config_old_format", "tests/test_upload.py::test_deprecated_repo", "tests/test_upload.py::test_skip_existing_skips_files_already_on_PyPI", "tests/test_upload.py::test_skip_existing_skips_files_already_on_pypiserver", "tests/test_upload.py::test_skip_existing_skips_files_already_on_artifactory", "tests/test_upload.py::test_skip_upload_respects_skip_existing", "tests/test_upload.py::test_values_from_env", "tests/test_utils.py::test_get_config", "tests/test_utils.py::test_get_config_no_distutils", "tests/test_utils.py::test_get_config_no_section", "tests/test_utils.py::test_get_config_override_pypi_url", "tests/test_utils.py::test_get_config_missing", "tests/test_utils.py::test_get_repository_config_missing", "tests/test_utils.py::test_get_config_deprecated_pypirc", "tests/test_utils.py::test_get_userpass_value[cli-config0-key-<lambda>-cli]", "tests/test_utils.py::test_get_userpass_value[None-config1-key-<lambda>-value]", "tests/test_utils.py::test_get_userpass_value[None-config2-key-<lambda>-fallback]", "tests/test_utils.py::test_default_to_environment_action[MY_PASSWORD-None-environ0-None]", "tests/test_utils.py::test_default_to_environment_action[MY_PASSWORD-None-environ1-foo]", "tests/test_utils.py::test_default_to_environment_action[URL-https://example.org-environ2-https://example.org]", "tests/test_utils.py::test_default_to_environment_action[URL-https://example.org-environ3-https://pypi.org]", "tests/test_utils.py::test_get_password_keyring_overrides_prompt", "tests/test_utils.py::test_get_password_keyring_defers_to_prompt", "tests/test_utils.py::test_get_password_keyring_missing_prompts", "tests/test_utils.py::test_get_password_runtime_error_suppressed", "tests/test_utils.py::test_no_positional_on_method", "tests/test_utils.py::test_no_positional_on_function" ]
[]
[]
[]
Apache License 2.0
3,070
[ "README.rst", "twine/wininst.py", "twine/commands/upload.py", "setup.py", "twine/commands/register.py", "docs/changelog.rst", "twine/utils.py", "twine/wheel.py", "twine/cli.py", "tox.ini", "setup.cfg", "twine/exceptions.py", "twine/commands/__init__.py", "twine/__main__.py", "twine/package.py", "twine/commands/check.py" ]
[ "README.rst", "twine/wininst.py", "twine/commands/upload.py", "setup.py", "twine/commands/register.py", "docs/changelog.rst", "twine/utils.py", "twine/wheel.py", "twine/cli.py", "tox.ini", "setup.cfg", "twine/exceptions.py", "twine/commands/__init__.py", "twine/__main__.py", "twine/package.py", "twine/commands/check.py" ]
tox-dev__tox-980
7dd5448145ab5b291cee45184b0498355fd92808
2018-09-14 17:39:38
cf6afcecaca22df7b509facaea43c09a15570f75
diff --git a/changelog/921.feature.rst b/changelog/921.feature.rst new file mode 100644 index 00000000..7a061b95 --- /dev/null +++ b/changelog/921.feature.rst @@ -0,0 +1,1 @@ +keep additional environments config order when listing them - by :user:`gaborbernat` diff --git a/doc/config.rst b/doc/config.rst index dc89ca09..e56fe8de 100644 --- a/doc/config.rst +++ b/doc/config.rst @@ -189,7 +189,7 @@ Complete list of settings that you can put into ``testenv*`` sections: **default**:: - pip install {opts} {packages} + python -m pip install {opts} {packages} .. confval:: list_dependencies_command diff --git a/src/tox/config.py b/src/tox/config.py index e9f55c99..a7e62d52 100755 --- a/src/tox/config.py +++ b/src/tox/config.py @@ -10,6 +10,7 @@ import string import sys import uuid import warnings +from collections import OrderedDict from fnmatch import fnmatchcase from subprocess import list2cmdline @@ -783,7 +784,7 @@ class Config(object): """Global Tox config object.""" def __init__(self, pluginmanager, option, interpreters, parser): - self.envconfigs = {} + self.envconfigs = OrderedDict() """Mapping envname -> envconfig""" self.invocationcwd = py.path.local() self.interpreters = interpreters @@ -1110,25 +1111,28 @@ class ParseIni(object): env_list = _split_env(env_str) # collect section envs - all_envs = set(env_list) - {"ALL"} + all_envs = OrderedDict((i, None) for i in env_list) + if "ALL" in all_envs: + all_envs.pop("ALL") for section in self._cfg: if section.name.startswith(testenvprefix): - all_envs.add(section.name[len(testenvprefix) :]) + all_envs[section.name[len(testenvprefix) :]] = None if not all_envs: - all_envs.add("python") + all_envs["python"] = None package_env = config.isolated_build_env if config.isolated_build is True and package_env in all_envs: - all_envs.remove(package_env) + all_envs.pop(package_env) if not env_list or "ALL" in env_list: - env_list = sorted(all_envs) + env_list = list(all_envs.keys()) if config.isolated_build is True and package_env in env_list: msg = "isolated_build_env {} cannot be part of envlist".format(package_env) raise tox.exception.ConfigError(msg) - return env_list, all_envs + all_env_list = list(all_envs.keys()) + return env_list, all_env_list def _split_env(env): diff --git a/src/tox/session.py b/src/tox/session.py index 5ac0c7d3..97e5da19 100644 --- a/src/tox/session.py +++ b/src/tox/session.py @@ -621,7 +621,7 @@ class Session: env_conf = self.config.envconfigs # this contains all environments default = self.config.envlist # this only the defaults ignore = {self.config.isolated_build_env}.union(default) - extra = sorted(e for e in env_conf if e not in ignore) if all_envs else [] + extra = [e for e in env_conf if e not in ignore] if all_envs else [] if description: self.report.line("default environments:")
additional environments config order is not kept Our own config shows this: ``` using tox.ini: /home/bernat/git/tox/tox.ini using tox-3.1.2 from /home/bernat/.local/lib/python2.7/site-packages/tox/__init__.pyc default environments: py27 -> run the tests with pytest under python2.7 py34 -> run the tests with pytest under python3.4 py35 -> run the tests with pytest under python3.5 py36 -> run the tests with pytest under python3.6 py37 -> run the tests with pytest under python3.7 pypy -> run the tests with pytest under pypy coverage -> [run locally after tests]: combine coverage data and create report; generates a diff coverage against origin/master (can be changed by setting DIFF_AGAINST env var) fix-lint -> format the code base to adhere to our styles, and complain about what we cannot do automatically docs -> invoke sphinx-build to build the HTML docs and check that all links are valid package-description -> check that the long description is valid additional environments: X -> print the positional arguments passed in with echo codecov -> [only run on CI]: upload coverage data to codecov (depends on coverage running first) dev -> generate a DEV environment exit_code -> commands with several exit codes notify -> notify people about the release of the library release -> do a release, required posarg of the version number ``` Even though dev is last it's not displayed last.
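The ordering bug described above comes down to collecting environment names in a set and then sorting them; the patch switches to an `OrderedDict` used as an insertion-ordered set. A minimal standalone sketch (the `tox.ini` section names are invented) shows the difference:

```python
# Keys of an OrderedDict act as an insertion-ordered set, so additional
# environments keep the order in which their [testenv:...] sections appear.
from collections import OrderedDict

envlist = ["py36"]
sections = ["testenv:b", "testenv:a"]  # order as written in tox.ini

all_envs = OrderedDict((name, None) for name in envlist)
for section in sections:
    all_envs[section[len("testenv:"):]] = None

print(list(all_envs))         # ['py36', 'b', 'a']  -> definition order kept
print(sorted(set(all_envs)))  # ['a', 'b', 'py36']  -> old sorted-set behaviour
```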
tox-dev/tox
diff --git a/tests/unit/session/test_list_env.py b/tests/unit/session/test_list_env.py index 8ccee897..e48e532a 100644 --- a/tests/unit/session/test_list_env.py +++ b/tests/unit/session/test_list_env.py @@ -150,3 +150,45 @@ def test_listenvs_packaging_excluded(cmd, initproj): result = cmd("-a") expected = ["py36", "py27", "py34", "pypi", "docs", "notincluded"] assert result.outlines == expected, result.outlines + + +def test_listenvs_all_extra_definition_order_decreasing(cmd, initproj): + initproj( + "listenvs_all", + filedefs={ + "tox.ini": """ + [tox] + envlist=py36 + + [testenv:b] + changedir = whatever + + [testenv:a] + changedir = docs + """ + }, + ) + result = cmd("-a") + expected = ["py36", "b", "a"] + assert result.outlines == expected + + +def test_listenvs_all_extra_definition_order_increasing(cmd, initproj): + initproj( + "listenvs_all", + filedefs={ + "tox.ini": """ + [tox] + envlist=py36 + + [testenv:a] + changedir = whatever + + [testenv:b] + changedir = docs + """ + }, + ) + result = cmd("-a") + expected = ["py36", "a", "b"] + assert result.outlines == expected diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py index d4d940f7..8b5d2285 100644 --- a/tests/unit/test_config.py +++ b/tests/unit/test_config.py @@ -1833,9 +1833,9 @@ class TestGlobalOptions: assert config.envlist == ["py35", "py36"] monkeypatch.setenv("TOXENV", "ALL") config = newconfig([], inisource) - assert config.envlist == ["py27", "py35", "py36"] + assert config.envlist == ["py36", "py35", "py27"] config = newconfig(["-eALL"], inisource) - assert config.envlist == ["py27", "py35", "py36"] + assert config.envlist == ["py36", "py35", "py27"] config = newconfig(["-espam"], inisource) assert config.envlist == ["spam"]
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 3 }
3.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-mock", "pytest-timeout", "pytest-xdist", "pytest-randomly" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 coverage==6.2 distlib==0.3.9 execnet==1.9.0 filelock==3.4.1 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 packaging==21.3 platformdirs==2.4.0 pluggy==0.13.1 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 pytest-mock==3.6.1 pytest-randomly==3.10.3 pytest-timeout==2.1.0 pytest-xdist==3.0.2 six==1.17.0 toml==0.10.2 tomli==1.2.3 -e git+https://github.com/tox-dev/tox.git@7dd5448145ab5b291cee45184b0498355fd92808#egg=tox typing_extensions==4.1.1 virtualenv==20.17.1 zipp==3.6.0
name: tox channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - coverage==6.2 - distlib==0.3.9 - execnet==1.9.0 - filelock==3.4.1 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - packaging==21.3 - platformdirs==2.4.0 - pluggy==0.13.1 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - pytest-mock==3.6.1 - pytest-randomly==3.10.3 - pytest-timeout==2.1.0 - pytest-xdist==3.0.2 - six==1.17.0 - toml==0.10.2 - tomli==1.2.3 - typing-extensions==4.1.1 - virtualenv==20.17.1 - zipp==3.6.0 prefix: /opt/conda/envs/tox
[ "tests/unit/session/test_list_env.py::test_listenvs_all_extra_definition_order_decreasing", "tests/unit/test_config.py::TestGlobalOptions::test_env_selection" ]
[ "tests/unit/test_config.py::TestVenvConfig::test_force_dep_with_url" ]
[ "tests/unit/session/test_list_env.py::test_listenvs_all_verbose_description", "tests/unit/session/test_list_env.py::test_listenvs_packaging_excluded", "tests/unit/session/test_list_env.py::test_listenvs_all_extra_definition_order_increasing", "tests/unit/session/test_list_env.py::test_listenvs_all_verbose_description_no_additional_environments", "tests/unit/session/test_list_env.py::test_listenvs", "tests/unit/session/test_list_env.py::test_listenvs_all", "tests/unit/session/test_list_env.py::test_listenvs_verbose_description", "tests/unit/test_config.py::TestParseconfig::test_explicit_config_path", "tests/unit/test_config.py::TestParseconfig::test_search_parents", "tests/unit/test_config.py::TestVenvConfig::test_process_deps", "tests/unit/test_config.py::TestVenvConfig::test_config_parsing_minimal", "tests/unit/test_config.py::TestVenvConfig::test_config_parsing_multienv", "tests/unit/test_config.py::TestVenvConfig::test_force_dep_version", "tests/unit/test_config.py::TestVenvConfig::test_envdir_set_manually", "tests/unit/test_config.py::TestVenvConfig::test_envdir_set_manually_with_substitutions", "tests/unit/test_config.py::TestVenvConfig::test_is_same_dep", "tests/unit/test_config.py::TestIniParserPrefix::test_value_doesn_match_prefixed_section_substitution", "tests/unit/test_config.py::TestIniParserPrefix::test_fallback_sections", "tests/unit/test_config.py::TestIniParserPrefix::test_value_matches_prefixed_section_substitution", "tests/unit/test_config.py::TestIniParserPrefix::test_basic_section_access", "tests/unit/test_config.py::TestIniParserPrefix::test_other_section_substitution", "tests/unit/test_config.py::TestIndexServer::test_multiple_homedir_relative_local_indexservers", "tests/unit/test_config.py::TestIndexServer::test_parse_indexserver", "tests/unit/test_config.py::TestIndexServer::test_indexserver", "tests/unit/test_config.py::TestHashseedOption::test_passing_string", "tests/unit/test_config.py::TestHashseedOption::test_one_random_hashseed", "tests/unit/test_config.py::TestHashseedOption::test_noset", "tests/unit/test_config.py::TestHashseedOption::test_setenv", "tests/unit/test_config.py::TestHashseedOption::test_passing_integer", "tests/unit/test_config.py::TestHashseedOption::test_passing_no_argument", "tests/unit/test_config.py::TestHashseedOption::test_default", "tests/unit/test_config.py::TestHashseedOption::test_noset_with_setenv", "tests/unit/test_config.py::TestHashseedOption::test_passing_empty_string", "tests/unit/test_config.py::TestHashseedOption::test_setenv_in_one_testenv", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_noargs_issue240", "tests/unit/test_config.py::TestConfigTestEnv::test_passenv_as_space_separated_list[win32]", "tests/unit/test_config.py::TestConfigTestEnv::test_simple", "tests/unit/test_config.py::TestConfigTestEnv::test_envbindir", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_defaults", "tests/unit/test_config.py::TestConfigTestEnv::test_installpkg_tops_develop", "tests/unit/test_config.py::TestConfigTestEnv::test_envbindir_jython[jython]", "tests/unit/test_config.py::TestConfigTestEnv::test_passenv_from_global_env", "tests/unit/test_config.py::TestConfigTestEnv::test_factors_groups_touch", "tests/unit/test_config.py::TestConfigTestEnv::test_ignore_errors", "tests/unit/test_config.py::TestConfigTestEnv::test_install_command_substitutions", "tests/unit/test_config.py::TestConfigTestEnv::test_specific_command_overrides", 
"tests/unit/test_config.py::TestConfigTestEnv::test_passenv_glob_from_global_env", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_notfound_issue246", "tests/unit/test_config.py::TestConfigTestEnv::test_passenv_as_multiline_list[win32]", "tests/unit/test_config.py::TestConfigTestEnv::test_envbindir_jython[pypy3]", "tests/unit/test_config.py::TestConfigTestEnv::test_passenv_with_factor", "tests/unit/test_config.py::TestConfigTestEnv::test_rewrite_posargs", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_error", "tests/unit/test_config.py::TestConfigTestEnv::test_defaults", "tests/unit/test_config.py::TestConfigTestEnv::test_rewrite_simple_posargs", "tests/unit/test_config.py::TestConfigTestEnv::test_default_factors", "tests/unit/test_config.py::TestConfigTestEnv::test_install_command_must_contain_packages", "tests/unit/test_config.py::TestConfigTestEnv::test_factors_in_boolean", "tests/unit/test_config.py::TestConfigTestEnv::test_take_dependencies_from_other_section", "tests/unit/test_config.py::TestConfigTestEnv::test_multilevel_substitution", "tests/unit/test_config.py::TestConfigTestEnv::test_factor_ops", "tests/unit/test_config.py::TestConfigTestEnv::test_sitepackages_switch", "tests/unit/test_config.py::TestConfigTestEnv::test_whitelist_externals", "tests/unit/test_config.py::TestConfigTestEnv::test_changedir_override", "tests/unit/test_config.py::TestConfigTestEnv::test_factor_use_not_checked", "tests/unit/test_config.py::TestConfigTestEnv::test_pip_pre", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_positional", "tests/unit/test_config.py::TestConfigTestEnv::test_envbindir_jython[pypy]", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_nested_env_defaults", "tests/unit/test_config.py::TestConfigTestEnv::test_passenv_as_multiline_list[linux2]", "tests/unit/test_config.py::TestConfigTestEnv::test_period_in_factor", "tests/unit/test_config.py::TestConfigTestEnv::test_take_dependencies_from_other_testenv[envlist0-deps0]", "tests/unit/test_config.py::TestConfigTestEnv::test_commentchars_issue33", "tests/unit/test_config.py::TestConfigTestEnv::test_install_command_setting", "tests/unit/test_config.py::TestConfigTestEnv::test_passenv_as_space_separated_list[linux2]", "tests/unit/test_config.py::TestConfigTestEnv::test_posargs_backslashed_or_quoted", "tests/unit/test_config.py::TestConfigTestEnv::test_pip_pre_cmdline_override", "tests/unit/test_config.py::TestConfigTestEnv::test_envconfigs_based_on_factors", "tests/unit/test_config.py::TestConfigTestEnv::test_default_factors_conflict_ignore", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_double", "tests/unit/test_config.py::TestConfigTestEnv::test_recursive_substitution_cycle_fails", "tests/unit/test_config.py::TestConfigTestEnv::test_factors", "tests/unit/test_config.py::TestConfigTestEnv::test_take_dependencies_from_other_testenv[envlist1-deps1]", "tests/unit/test_config.py::TestConfigTestEnv::test_single_value_from_other_secton", "tests/unit/test_config.py::TestConfigTestEnv::test_default_factors_conflict", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_notfound_issue515", "tests/unit/test_config.py::TestConfigTestEnv::test_factors_in_setenv", "tests/unit/test_config.py::TestConfigTestEnv::test_ignore_outcome", "tests/unit/test_config.py::TestConfigTestEnv::test_changedir", "tests/unit/test_config.py::TestSetenv::test_setenv_with_envdir_and_basepython", "tests/unit/test_config.py::TestSetenv::test_setenv_cross_section_mixed", 
"tests/unit/test_config.py::TestSetenv::test_setenv_cross_section_subst_issue294", "tests/unit/test_config.py::TestSetenv::test_setenv_default_os_environ", "tests/unit/test_config.py::TestSetenv::test_setenv_overrides", "tests/unit/test_config.py::TestSetenv::test_setenv_cross_section_subst_twice", "tests/unit/test_config.py::TestSetenv::test_getdict_lazy", "tests/unit/test_config.py::TestSetenv::test_setenv_uses_other_setenv", "tests/unit/test_config.py::TestSetenv::test_setenv_recursive_direct", "tests/unit/test_config.py::TestSetenv::test_setenv_uses_os_environ", "tests/unit/test_config.py::TestSetenv::test_getdict_lazy_update", "tests/unit/test_config.py::TestSetenv::test_setenv_ordering_1", "tests/unit/test_config.py::test_get_homedir", "tests/unit/test_config.py::test_env_spec[-e", "tests/unit/test_config.py::test_config_bad_config_type_specified", "tests/unit/test_config.py::test_config_via_pyproject_legacy", "tests/unit/test_config.py::test_plugin_require", "tests/unit/test_config.py::test_config_bad_pyproject_specified", "tests/unit/test_config.py::test_isolated_build_env_cannot_be_in_envlist", "tests/unit/test_config.py::TestConfigConstSubstitutions::test_replace_pathsep_unix[:]", "tests/unit/test_config.py::TestConfigConstSubstitutions::test_replace_pathsep_unix[;]", "tests/unit/test_config.py::TestConfigConstSubstitutions::test_pathsep_regex", "tests/unit/test_config.py::TestCommandParser::test_command_with_split_line_in_subst_arguments", "tests/unit/test_config.py::TestCommandParser::test_command_parser_for_multiple_words", "tests/unit/test_config.py::TestCommandParser::test_command_with_runs_of_whitespace", "tests/unit/test_config.py::TestCommandParser::test_command_parser_for_substitution_with_spaces", "tests/unit/test_config.py::TestCommandParser::test_commands_with_backslash", "tests/unit/test_config.py::TestCommandParser::test_command_parser_with_complex_word_set", "tests/unit/test_config.py::TestCommandParser::test_command_parser_for_posargs", "tests/unit/test_config.py::TestCommandParser::test_command_parsing_for_issue_10", "tests/unit/test_config.py::TestCommandParser::test_command_parser_for_word", "tests/unit/test_config.py::TestConfigPlatform::test_config_parse_platform_with_factors[osx]", "tests/unit/test_config.py::TestConfigPlatform::test_config_parse_platform_with_factors[lin]", "tests/unit/test_config.py::TestConfigPlatform::test_config_parse_platform", "tests/unit/test_config.py::TestConfigPlatform::test_config_parse_platform_with_factors[win]", "tests/unit/test_config.py::TestConfigPlatform::test_config_parse_platform_rex", "tests/unit/test_config.py::TestGlobalOptions::test_substitution_jenkins_context", "tests/unit/test_config.py::TestGlobalOptions::test_py_venv", "tests/unit/test_config.py::TestGlobalOptions::test_substitution_jenkins_default", "tests/unit/test_config.py::TestGlobalOptions::test_envlist_expansion", "tests/unit/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_false", "tests/unit/test_config.py::TestGlobalOptions::test_verbosity", "tests/unit/test_config.py::TestGlobalOptions::test_minversion", "tests/unit/test_config.py::TestGlobalOptions::test_sdist_specification", "tests/unit/test_config.py::TestGlobalOptions::test_defaultenv_partial_override", "tests/unit/test_config.py::TestGlobalOptions::test_defaultenv_commandline", "tests/unit/test_config.py::TestGlobalOptions::test_notest", "tests/unit/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_true", 
"tests/unit/test_config.py::TestGlobalOptions::test_quiet[args2-2]", "tests/unit/test_config.py::TestGlobalOptions::test_quiet[args3-3]", "tests/unit/test_config.py::TestGlobalOptions::test_envlist_multiline", "tests/unit/test_config.py::TestGlobalOptions::test_envlist_cross_product", "tests/unit/test_config.py::TestGlobalOptions::test_quiet[args0-0]", "tests/unit/test_config.py::TestGlobalOptions::test_correct_basepython_chosen_from_default_factors", "tests/unit/test_config.py::TestGlobalOptions::test_quiet[args1-1]", "tests/unit/test_config.py::TestGetcontextname::test_blank", "tests/unit/test_config.py::TestGetcontextname::test_jenkins", "tests/unit/test_config.py::TestGetcontextname::test_hudson_legacy", "tests/unit/test_config.py::TestIniParser::test_getlist", "tests/unit/test_config.py::TestIniParser::test_value_doesn_match_section_substitution", "tests/unit/test_config.py::TestIniParser::test_substitution_with_multiple_words", "tests/unit/test_config.py::TestIniParser::test_argvlist_comment_after_command", "tests/unit/test_config.py::TestIniParser::test_getstring_fallback_sections", "tests/unit/test_config.py::TestIniParser::test_getpath", "tests/unit/test_config.py::TestIniParser::test_argvlist_posargs_with_quotes", "tests/unit/test_config.py::TestIniParser::test_argvlist_quoting_in_command", "tests/unit/test_config.py::TestIniParser::test_getargv", "tests/unit/test_config.py::TestIniParser::test_getdict", "tests/unit/test_config.py::TestIniParser::test_posargs_are_added_escaped_issue310", "tests/unit/test_config.py::TestIniParser::test_argvlist_command_contains_hash", "tests/unit/test_config.py::TestIniParser::test_getstring_substitution", "tests/unit/test_config.py::TestIniParser::test_positional_arguments_are_only_replaced_when_standing_alone", "tests/unit/test_config.py::TestIniParser::test_getstring_other_section_substitution", "tests/unit/test_config.py::TestIniParser::test_value_matches_section_substitution", "tests/unit/test_config.py::TestIniParser::test_missing_substitution", "tests/unit/test_config.py::TestIniParser::test_normal_env_sub_works", "tests/unit/test_config.py::TestIniParser::test_argvlist_multiline", "tests/unit/test_config.py::TestIniParser::test_missing_env_sub_raises_config_error_in_non_testenv", "tests/unit/test_config.py::TestIniParser::test_getbool", "tests/unit/test_config.py::TestIniParser::test_argvlist", "tests/unit/test_config.py::TestIniParser::test_getstring_environment_substitution_with_default", "tests/unit/test_config.py::TestIniParser::test_argvlist_positional_substitution", "tests/unit/test_config.py::TestIniParser::test_argvlist_quoted_posargs", "tests/unit/test_config.py::TestIniParser::test_getstring_single", "tests/unit/test_config.py::TestIniParser::test_missing_env_sub_populates_missing_subs", "tests/unit/test_config.py::TestIniParser::test_argvlist_windows_escaping", "tests/unit/test_config.py::TestCmdInvocation::test_version_with_normal_plugin", "tests/unit/test_config.py::TestCmdInvocation::test_config_specific_ini", "tests/unit/test_config.py::TestCmdInvocation::test_help", "tests/unit/test_config.py::TestCmdInvocation::test_version_no_plugins", "tests/unit/test_config.py::TestCmdInvocation::test_version_simple", "tests/unit/test_config.py::TestCmdInvocation::test_override_workdir", "tests/unit/test_config.py::TestCmdInvocation::test_no_tox_ini", "tests/unit/test_config.py::TestCmdInvocation::test_showconfig_with_force_dep_version", "tests/unit/test_config.py::TestCmdInvocation::test_version_with_fileless_module", 
"tests/unit/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_from_other_section_posargs", "tests/unit/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_from_other_section", "tests/unit/test_config.py::TestIniParserAgainstCommandsKey::test_regression_issue595", "tests/unit/test_config.py::TestIniParserAgainstCommandsKey::test_command_section_and_posargs_substitution", "tests/unit/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_from_other_section_multiline", "tests/unit/test_config.py::TestIniParserAgainstCommandsKey::test_command_env_substitution", "tests/unit/test_config.py::TestIniParserAgainstCommandsKey::test_command_env_substitution_global", "tests/unit/test_config.py::TestParseEnv::test_parse_recreate", "tests/unit/test_config.py::TestConfigPackage::test_defaults_changed_dir", "tests/unit/test_config.py::TestConfigPackage::test_project_paths", "tests/unit/test_config.py::TestConfigPackage::test_defaults_distshare", "tests/unit/test_config.py::TestConfigPackage::test_defaults" ]
[]
MIT License
3,071
[ "src/tox/session.py", "changelog/921.feature.rst", "doc/config.rst", "src/tox/config.py" ]
[ "src/tox/session.py", "changelog/921.feature.rst", "doc/config.rst", "src/tox/config.py" ]
tox-dev__tox-981
e4645ecea2a1ffcff769d9891a3080f2c90db8f7
2018-09-14 19:54:38
cf6afcecaca22df7b509facaea43c09a15570f75
diff --git a/changelog/903.feature.rst b/changelog/903.feature.rst new file mode 100644 index 00000000..bb700ffb --- /dev/null +++ b/changelog/903.feature.rst @@ -0,0 +1,1 @@ +skip missing interpreters value from the config file can now be overridden via the ``--skip-missing-interpreters`` cli flag - by :user:`gaborbernat` diff --git a/doc/config.rst b/doc/config.rst index e56fe8de..cb47a68f 100644 --- a/doc/config.rst +++ b/doc/config.rst @@ -54,18 +54,18 @@ and will first lookup global tox settings in this section: commands = ... # override [tox] settings for the jenkins context # note: for jenkins distshare defaults to ``{toxworkdir}/distshare`` (DEPRECATED) -.. confval:: skip_missing_interpreters=BOOL +.. confval:: skip_missing_interpreters=config|true|false .. versionadded:: 1.7.2 - Setting this to ``True`` is equivalent of passing the - ``--skip-missing-interpreters`` command line option, and will force ``tox`` to - return success even if some of the specified environments were missing. This is - useful for some CI systems or running on a developer box, where you might only - have a subset of all your supported interpreters installed but don't want to - mark the build as failed because of it. As expected, the command line switch - always overrides this setting if passed on the invokation. - **Default:** ``False`` + When skip missing interpreters is ``true`` will force ``tox`` to return success even + if some of the specified environments were missing. This is useful for some CI + systems or running on a developer box, where you might only have a subset of + all your supported interpreters installed but don't want to mark the build as + failed because of it. As expected, the command line switch always overrides + this setting if passed on the invocation. Setting it to ``config`` + means that the value is read from the config file (default is ``false``). + **Default:** ``config`` .. 
confval:: envlist=CSV diff --git a/src/tox/config.py b/src/tox/config.py index a7e62d52..12ac98c5 100755 --- a/src/tox/config.py +++ b/src/tox/config.py @@ -493,11 +493,8 @@ def tox_addoption(parser): parser.add_argument( "--alwayscopy", action="store_true", help="override alwayscopy setting to True in all envs" ) - parser.add_argument( - "--skip-missing-interpreters", - action="store_true", - help="don't fail tests for missing interpreters", - ) + + cli_skip_missing_interpreter(parser) parser.add_argument( "--workdir", action="store", @@ -780,6 +777,24 @@ def tox_addoption(parser): ) +def cli_skip_missing_interpreter(parser): + class SkipMissingInterpreterAction(argparse.Action): + def __call__(self, parser, namespace, values, option_string=None): + value = "true" if values is None else values + if value not in ("config", "true", "false"): + raise argparse.ArgumentTypeError("value must be config, true or false") + setattr(namespace, self.dest, value) + + parser.add_argument( + "--skip-missing-interpreters", + default="config", + metavar="val", + nargs="?", + action=SkipMissingInterpreterAction, + help="don't fail tests for missing interpreters: {config,true,false} choice", + ) + + class Config(object): """Global Tox config object.""" @@ -947,10 +962,9 @@ class ParseIni(object): else: config.toxworkdir = config.toxinidir.join(config.option.workdir, abs=True) - if not config.option.skip_missing_interpreters: - config.option.skip_missing_interpreters = reader.getbool( - "skip_missing_interpreters", False - ) + if config.option.skip_missing_interpreters == "config": + val = reader.getbool("skip_missing_interpreters", False) + config.option.skip_missing_interpreters = "true" if val else "false" config.ignore_basepython_conflict = reader.getbool("ignore_basepython_conflict", False) diff --git a/src/tox/session.py b/src/tox/session.py index 97e5da19..07e99708 100644 --- a/src/tox/session.py +++ b/src/tox/session.py @@ -470,7 +470,7 @@ class Session: ) except tox.exception.InterpreterNotFound as e: status = e - if self.config.option.skip_missing_interpreters: + if self.config.option.skip_missing_interpreters == "true": default_ret_code = 0 if status: str_status = str(status) @@ -573,7 +573,7 @@ class Session: status = venv.status if isinstance(status, tox.exception.InterpreterNotFound): msg = " {}: {}".format(venv.envconfig.envname, str(status)) - if self.config.option.skip_missing_interpreters: + if self.config.option.skip_missing_interpreters == "true": self.report.skip(msg) else: retcode = 1
skip missing interpreters should be a toggle-able flag It makes sense to set skip missing interpreters inside the tox file to ensure that users locally don't get failures for interpreters they don't have. In such cases, it should still be possible to override that and force not skipping missing interpreters in a CI environment, for example.
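For context, the following self-contained Python sketch mirrors the three-state CLI override added in the patch above (`config` falls back to the ini value, while `true`/`false` force a behaviour). It is illustrative only and not the exact tox implementation:

```python
# Minimal sketch of a CLI flag that can override an ini setting with three
# states (config / true / false), mirroring the approach in the diff above.
import argparse

class SkipMissingInterpreterAction(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
        # A bare "--skip-missing-interpreters" means "true"; otherwise the
        # explicit value must be one of config/true/false.
        value = "true" if values is None else values
        if value not in ("config", "true", "false"):
            raise argparse.ArgumentTypeError("value must be config, true or false")
        setattr(namespace, self.dest, value)

parser = argparse.ArgumentParser()
parser.add_argument(
    "--skip-missing-interpreters",
    default="config",  # absent flag: defer to whatever the ini file says
    nargs="?",
    action=SkipMissingInterpreterAction,
)

print(parser.parse_args([]).skip_missing_interpreters)                    # config
print(parser.parse_args(["--skip-missing-interpreters"])
      .skip_missing_interpreters)                                          # true
print(parser.parse_args(["--skip-missing-interpreters", "false"])
      .skip_missing_interpreters)                                          # false
```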
tox-dev/tox
diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py index 8b5d2285..3b53bad2 100644 --- a/tests/unit/test_config.py +++ b/tests/unit/test_config.py @@ -1902,20 +1902,48 @@ class TestGlobalOptions: newconfig([], inisource) def test_skip_missing_interpreters_true(self, newconfig): - inisource = """ + ini_source = """ [tox] skip_missing_interpreters = True """ - config = newconfig([], inisource) - assert config.option.skip_missing_interpreters + config = newconfig([], ini_source) + assert config.option.skip_missing_interpreters == "true" def test_skip_missing_interpreters_false(self, newconfig): - inisource = """ + ini_source = """ [tox] skip_missing_interpreters = False """ - config = newconfig([], inisource) - assert not config.option.skip_missing_interpreters + config = newconfig([], ini_source) + assert config.option.skip_missing_interpreters == "false" + + def test_skip_missing_interpreters_cli_no_arg(self, newconfig): + ini_source = """ + [tox] + skip_missing_interpreters = False + """ + config = newconfig(["--skip-missing-interpreters"], ini_source) + assert config.option.skip_missing_interpreters == "true" + + def test_skip_missing_interpreters_cli_not_specified(self, newconfig): + config = newconfig([], "") + assert config.option.skip_missing_interpreters == "false" + + def test_skip_missing_interpreters_cli_overrides_true(self, newconfig): + ini_source = """ + [tox] + skip_missing_interpreters = False + """ + config = newconfig(["--skip-missing-interpreters", "true"], ini_source) + assert config.option.skip_missing_interpreters == "true" + + def test_skip_missing_interpreters_cli_overrides_false(self, newconfig): + ini_source = """ + [tox] + skip_missing_interpreters = True + """ + config = newconfig(["--skip-missing-interpreters", "false"], ini_source) + assert config.option.skip_missing_interpreters == "false" def test_defaultenv_commandline(self, newconfig): config = newconfig(["-epy27"], "")
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 3 }
3.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-mock", "pytest-timeout", "pytest-xdist", "pytest-randomly" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 coverage==6.2 distlib==0.3.9 execnet==1.9.0 filelock==3.4.1 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 packaging==21.3 platformdirs==2.4.0 pluggy==0.13.1 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 pytest-mock==3.6.1 pytest-randomly==3.10.3 pytest-timeout==2.1.0 pytest-xdist==3.0.2 six==1.17.0 toml==0.10.2 tomli==1.2.3 -e git+https://github.com/tox-dev/tox.git@e4645ecea2a1ffcff769d9891a3080f2c90db8f7#egg=tox typing_extensions==4.1.1 virtualenv==20.17.1 zipp==3.6.0
name: tox channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - coverage==6.2 - distlib==0.3.9 - execnet==1.9.0 - filelock==3.4.1 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - packaging==21.3 - platformdirs==2.4.0 - pluggy==0.13.1 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - pytest-mock==3.6.1 - pytest-randomly==3.10.3 - pytest-timeout==2.1.0 - pytest-xdist==3.0.2 - six==1.17.0 - toml==0.10.2 - tomli==1.2.3 - typing-extensions==4.1.1 - virtualenv==20.17.1 - zipp==3.6.0 prefix: /opt/conda/envs/tox
[ "tests/unit/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_false", "tests/unit/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_cli_overrides_true", "tests/unit/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_cli_overrides_false", "tests/unit/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_cli_not_specified", "tests/unit/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_cli_no_arg", "tests/unit/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_true" ]
[ "tests/unit/test_config.py::TestVenvConfig::test_force_dep_with_url" ]
[ "tests/unit/test_config.py::TestCommandParser::test_command_parser_for_word", "tests/unit/test_config.py::TestCommandParser::test_command_parser_with_complex_word_set", "tests/unit/test_config.py::TestCommandParser::test_command_with_split_line_in_subst_arguments", "tests/unit/test_config.py::TestCommandParser::test_commands_with_backslash", "tests/unit/test_config.py::TestCommandParser::test_command_parser_for_substitution_with_spaces", "tests/unit/test_config.py::TestCommandParser::test_command_with_runs_of_whitespace", "tests/unit/test_config.py::TestCommandParser::test_command_parsing_for_issue_10", "tests/unit/test_config.py::TestCommandParser::test_command_parser_for_multiple_words", "tests/unit/test_config.py::TestCommandParser::test_command_parser_for_posargs", "tests/unit/test_config.py::TestGlobalOptions::test_envlist_multiline", "tests/unit/test_config.py::TestGlobalOptions::test_substitution_jenkins_context", "tests/unit/test_config.py::TestGlobalOptions::test_env_selection", "tests/unit/test_config.py::TestGlobalOptions::test_envlist_expansion", "tests/unit/test_config.py::TestGlobalOptions::test_defaultenv_commandline", "tests/unit/test_config.py::TestGlobalOptions::test_notest", "tests/unit/test_config.py::TestGlobalOptions::test_quiet[args2-2]", "tests/unit/test_config.py::TestGlobalOptions::test_quiet[args1-1]", "tests/unit/test_config.py::TestGlobalOptions::test_minversion", "tests/unit/test_config.py::TestGlobalOptions::test_verbosity", "tests/unit/test_config.py::TestGlobalOptions::test_quiet[args3-3]", "tests/unit/test_config.py::TestGlobalOptions::test_py_venv", "tests/unit/test_config.py::TestGlobalOptions::test_correct_basepython_chosen_from_default_factors", "tests/unit/test_config.py::TestGlobalOptions::test_quiet[args0-0]", "tests/unit/test_config.py::TestGlobalOptions::test_substitution_jenkins_default", "tests/unit/test_config.py::TestGlobalOptions::test_envlist_cross_product", "tests/unit/test_config.py::TestGlobalOptions::test_sdist_specification", "tests/unit/test_config.py::TestGlobalOptions::test_defaultenv_partial_override", "tests/unit/test_config.py::TestCmdInvocation::test_no_tox_ini", "tests/unit/test_config.py::TestCmdInvocation::test_version_with_fileless_module", "tests/unit/test_config.py::TestCmdInvocation::test_version_no_plugins", "tests/unit/test_config.py::TestCmdInvocation::test_showconfig_with_force_dep_version", "tests/unit/test_config.py::TestCmdInvocation::test_version_simple", "tests/unit/test_config.py::TestCmdInvocation::test_override_workdir", "tests/unit/test_config.py::TestCmdInvocation::test_version_with_normal_plugin", "tests/unit/test_config.py::TestCmdInvocation::test_help", "tests/unit/test_config.py::TestCmdInvocation::test_config_specific_ini", "tests/unit/test_config.py::TestIniParserPrefix::test_fallback_sections", "tests/unit/test_config.py::TestIniParserPrefix::test_value_doesn_match_prefixed_section_substitution", "tests/unit/test_config.py::TestIniParserPrefix::test_other_section_substitution", "tests/unit/test_config.py::TestIniParserPrefix::test_value_matches_prefixed_section_substitution", "tests/unit/test_config.py::TestIniParserPrefix::test_basic_section_access", "tests/unit/test_config.py::TestVenvConfig::test_force_dep_version", "tests/unit/test_config.py::TestVenvConfig::test_process_deps", "tests/unit/test_config.py::TestVenvConfig::test_envdir_set_manually_with_substitutions", "tests/unit/test_config.py::TestVenvConfig::test_envdir_set_manually", 
"tests/unit/test_config.py::TestVenvConfig::test_config_parsing_minimal", "tests/unit/test_config.py::TestVenvConfig::test_is_same_dep", "tests/unit/test_config.py::TestVenvConfig::test_config_parsing_multienv", "tests/unit/test_config.py::TestGetcontextname::test_hudson_legacy", "tests/unit/test_config.py::TestGetcontextname::test_jenkins", "tests/unit/test_config.py::TestGetcontextname::test_blank", "tests/unit/test_config.py::TestSetenv::test_getdict_lazy_update", "tests/unit/test_config.py::TestSetenv::test_setenv_ordering_1", "tests/unit/test_config.py::TestSetenv::test_setenv_recursive_direct", "tests/unit/test_config.py::TestSetenv::test_setenv_cross_section_subst_twice", "tests/unit/test_config.py::TestSetenv::test_setenv_default_os_environ", "tests/unit/test_config.py::TestSetenv::test_setenv_uses_os_environ", "tests/unit/test_config.py::TestSetenv::test_setenv_cross_section_mixed", "tests/unit/test_config.py::TestSetenv::test_setenv_overrides", "tests/unit/test_config.py::TestSetenv::test_setenv_with_envdir_and_basepython", "tests/unit/test_config.py::TestSetenv::test_setenv_uses_other_setenv", "tests/unit/test_config.py::TestSetenv::test_getdict_lazy", "tests/unit/test_config.py::TestSetenv::test_setenv_cross_section_subst_issue294", "tests/unit/test_config.py::TestConfigTestEnv::test_default_factors", "tests/unit/test_config.py::TestConfigTestEnv::test_passenv_as_multiline_list[win32]", "tests/unit/test_config.py::TestConfigTestEnv::test_ignore_errors", "tests/unit/test_config.py::TestConfigTestEnv::test_install_command_must_contain_packages", "tests/unit/test_config.py::TestConfigTestEnv::test_pip_pre", "tests/unit/test_config.py::TestConfigTestEnv::test_sitepackages_switch", "tests/unit/test_config.py::TestConfigTestEnv::test_factors_in_boolean", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_defaults", "tests/unit/test_config.py::TestConfigTestEnv::test_single_value_from_other_secton", "tests/unit/test_config.py::TestConfigTestEnv::test_posargs_backslashed_or_quoted", "tests/unit/test_config.py::TestConfigTestEnv::test_passenv_as_multiline_list[linux2]", "tests/unit/test_config.py::TestConfigTestEnv::test_envbindir_jython[pypy3]", "tests/unit/test_config.py::TestConfigTestEnv::test_envbindir", "tests/unit/test_config.py::TestConfigTestEnv::test_installpkg_tops_develop", "tests/unit/test_config.py::TestConfigTestEnv::test_factor_ops", "tests/unit/test_config.py::TestConfigTestEnv::test_default_factors_conflict", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_error", "tests/unit/test_config.py::TestConfigTestEnv::test_envbindir_jython[jython]", "tests/unit/test_config.py::TestConfigTestEnv::test_envconfigs_based_on_factors", "tests/unit/test_config.py::TestConfigTestEnv::test_changedir_override", "tests/unit/test_config.py::TestConfigTestEnv::test_take_dependencies_from_other_section", "tests/unit/test_config.py::TestConfigTestEnv::test_factors_in_setenv", "tests/unit/test_config.py::TestConfigTestEnv::test_default_factors_conflict_ignore", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_notfound_issue515", "tests/unit/test_config.py::TestConfigTestEnv::test_ignore_outcome", "tests/unit/test_config.py::TestConfigTestEnv::test_factors", "tests/unit/test_config.py::TestConfigTestEnv::test_passenv_with_factor", "tests/unit/test_config.py::TestConfigTestEnv::test_changedir", "tests/unit/test_config.py::TestConfigTestEnv::test_take_dependencies_from_other_testenv[envlist1-deps1]", 
"tests/unit/test_config.py::TestConfigTestEnv::test_substitution_double", "tests/unit/test_config.py::TestConfigTestEnv::test_passenv_as_space_separated_list[linux2]", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_noargs_issue240", "tests/unit/test_config.py::TestConfigTestEnv::test_install_command_substitutions", "tests/unit/test_config.py::TestConfigTestEnv::test_simple", "tests/unit/test_config.py::TestConfigTestEnv::test_multilevel_substitution", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_nested_env_defaults", "tests/unit/test_config.py::TestConfigTestEnv::test_specific_command_overrides", "tests/unit/test_config.py::TestConfigTestEnv::test_passenv_glob_from_global_env", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_positional", "tests/unit/test_config.py::TestConfigTestEnv::test_install_command_setting", "tests/unit/test_config.py::TestConfigTestEnv::test_whitelist_externals", "tests/unit/test_config.py::TestConfigTestEnv::test_take_dependencies_from_other_testenv[envlist0-deps0]", "tests/unit/test_config.py::TestConfigTestEnv::test_passenv_from_global_env", "tests/unit/test_config.py::TestConfigTestEnv::test_recursive_substitution_cycle_fails", "tests/unit/test_config.py::TestConfigTestEnv::test_commentchars_issue33", "tests/unit/test_config.py::TestConfigTestEnv::test_passenv_as_space_separated_list[win32]", "tests/unit/test_config.py::TestConfigTestEnv::test_rewrite_posargs", "tests/unit/test_config.py::TestConfigTestEnv::test_rewrite_simple_posargs", "tests/unit/test_config.py::TestConfigTestEnv::test_factor_use_not_checked", "tests/unit/test_config.py::TestConfigTestEnv::test_defaults", "tests/unit/test_config.py::TestConfigTestEnv::test_period_in_factor", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_notfound_issue246", "tests/unit/test_config.py::TestConfigTestEnv::test_pip_pre_cmdline_override", "tests/unit/test_config.py::TestConfigTestEnv::test_factors_groups_touch", "tests/unit/test_config.py::TestConfigTestEnv::test_envbindir_jython[pypy]", "tests/unit/test_config.py::TestConfigPackage::test_defaults_distshare", "tests/unit/test_config.py::TestConfigPackage::test_project_paths", "tests/unit/test_config.py::TestConfigPackage::test_defaults_changed_dir", "tests/unit/test_config.py::TestConfigPackage::test_defaults", "tests/unit/test_config.py::TestConfigConstSubstitutions::test_replace_pathsep_unix[:]", "tests/unit/test_config.py::TestConfigConstSubstitutions::test_pathsep_regex", "tests/unit/test_config.py::TestConfigConstSubstitutions::test_replace_pathsep_unix[;]", "tests/unit/test_config.py::TestHashseedOption::test_passing_empty_string", "tests/unit/test_config.py::TestHashseedOption::test_setenv", "tests/unit/test_config.py::TestHashseedOption::test_passing_string", "tests/unit/test_config.py::TestHashseedOption::test_noset_with_setenv", "tests/unit/test_config.py::TestHashseedOption::test_passing_no_argument", "tests/unit/test_config.py::TestHashseedOption::test_setenv_in_one_testenv", "tests/unit/test_config.py::TestHashseedOption::test_default", "tests/unit/test_config.py::TestHashseedOption::test_one_random_hashseed", "tests/unit/test_config.py::TestHashseedOption::test_passing_integer", "tests/unit/test_config.py::TestHashseedOption::test_noset", "tests/unit/test_config.py::TestConfigPlatform::test_config_parse_platform_with_factors[lin]", "tests/unit/test_config.py::TestConfigPlatform::test_config_parse_platform_with_factors[win]", 
"tests/unit/test_config.py::TestConfigPlatform::test_config_parse_platform", "tests/unit/test_config.py::TestConfigPlatform::test_config_parse_platform_rex", "tests/unit/test_config.py::TestConfigPlatform::test_config_parse_platform_with_factors[osx]", "tests/unit/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_from_other_section_posargs", "tests/unit/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_from_other_section", "tests/unit/test_config.py::TestIniParserAgainstCommandsKey::test_command_env_substitution", "tests/unit/test_config.py::TestIniParserAgainstCommandsKey::test_command_section_and_posargs_substitution", "tests/unit/test_config.py::TestIniParserAgainstCommandsKey::test_regression_issue595", "tests/unit/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_from_other_section_multiline", "tests/unit/test_config.py::TestIniParserAgainstCommandsKey::test_command_env_substitution_global", "tests/unit/test_config.py::test_get_homedir", "tests/unit/test_config.py::test_env_spec[-e", "tests/unit/test_config.py::test_isolated_build_env_cannot_be_in_envlist", "tests/unit/test_config.py::test_config_bad_config_type_specified", "tests/unit/test_config.py::test_plugin_require", "tests/unit/test_config.py::test_config_bad_pyproject_specified", "tests/unit/test_config.py::test_config_via_pyproject_legacy", "tests/unit/test_config.py::TestIniParser::test_missing_env_sub_raises_config_error_in_non_testenv", "tests/unit/test_config.py::TestIniParser::test_getstring_fallback_sections", "tests/unit/test_config.py::TestIniParser::test_argvlist_multiline", "tests/unit/test_config.py::TestIniParser::test_getdict", "tests/unit/test_config.py::TestIniParser::test_substitution_with_multiple_words", "tests/unit/test_config.py::TestIniParser::test_argvlist_posargs_with_quotes", "tests/unit/test_config.py::TestIniParser::test_value_matches_section_substitution", "tests/unit/test_config.py::TestIniParser::test_normal_env_sub_works", "tests/unit/test_config.py::TestIniParser::test_argvlist_comment_after_command", "tests/unit/test_config.py::TestIniParser::test_getstring_substitution", "tests/unit/test_config.py::TestIniParser::test_argvlist_command_contains_hash", "tests/unit/test_config.py::TestIniParser::test_getstring_single", "tests/unit/test_config.py::TestIniParser::test_getbool", "tests/unit/test_config.py::TestIniParser::test_argvlist_quoted_posargs", "tests/unit/test_config.py::TestIniParser::test_getstring_environment_substitution_with_default", "tests/unit/test_config.py::TestIniParser::test_argvlist_positional_substitution", "tests/unit/test_config.py::TestIniParser::test_missing_env_sub_populates_missing_subs", "tests/unit/test_config.py::TestIniParser::test_getlist", "tests/unit/test_config.py::TestIniParser::test_positional_arguments_are_only_replaced_when_standing_alone", "tests/unit/test_config.py::TestIniParser::test_argvlist", "tests/unit/test_config.py::TestIniParser::test_getstring_other_section_substitution", "tests/unit/test_config.py::TestIniParser::test_missing_substitution", "tests/unit/test_config.py::TestIniParser::test_value_doesn_match_section_substitution", "tests/unit/test_config.py::TestIniParser::test_argvlist_quoting_in_command", "tests/unit/test_config.py::TestIniParser::test_posargs_are_added_escaped_issue310", "tests/unit/test_config.py::TestIniParser::test_argvlist_windows_escaping", "tests/unit/test_config.py::TestIniParser::test_getpath", 
"tests/unit/test_config.py::TestIniParser::test_getargv", "tests/unit/test_config.py::TestParseconfig::test_search_parents", "tests/unit/test_config.py::TestParseconfig::test_explicit_config_path", "tests/unit/test_config.py::TestIndexServer::test_multiple_homedir_relative_local_indexservers", "tests/unit/test_config.py::TestIndexServer::test_indexserver", "tests/unit/test_config.py::TestIndexServer::test_parse_indexserver", "tests/unit/test_config.py::TestParseEnv::test_parse_recreate" ]
[]
MIT License
3,072
[ "src/tox/session.py", "doc/config.rst", "changelog/903.feature.rst", "src/tox/config.py" ]
[ "src/tox/session.py", "doc/config.rst", "changelog/903.feature.rst", "src/tox/config.py" ]
certbot__certbot-6377
3de3188dd6fe4c8c9952848c155a56944b55ec2d
2018-09-14 21:52:38
1d783fd4b9c83c669b33adb2afaff8ec21e00cb6
diff --git a/CHANGELOG.md b/CHANGELOG.md index 5dd51ef16..20e82b5d7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,7 +10,8 @@ Certbot adheres to [Semantic Versioning](http://semver.org/). ### Changed -* `--manual` will explicitly warn users that earlier challenges should remain in place when setting up subsequent challenges. +* Write README to the base of (config-dir)/live directory +* `--manual` will explicitly warn users that earlier challenges should remain in place when setting up subsequent challenges. ### Fixed diff --git a/acme/acme/messages.py b/acme/acme/messages.py index 5be458580..7e86b0c3b 100644 --- a/acme/acme/messages.py +++ b/acme/acme/messages.py @@ -523,7 +523,7 @@ class Order(ResourceBody): """ identifiers = jose.Field('identifiers', omitempty=True) status = jose.Field('status', decoder=Status.from_json, - omitempty=True, default=STATUS_PENDING) + omitempty=True) authorizations = jose.Field('authorizations', omitempty=True) certificate = jose.Field('certificate', omitempty=True) finalize = jose.Field('finalize', omitempty=True) @@ -553,4 +553,3 @@ class OrderResource(ResourceWithURI): class NewOrder(Order): """New order.""" resource_type = 'new-order' - resource = fields.Resource(resource_type) diff --git a/certbot/storage.py b/certbot/storage.py index 32d6771c2..c16ea35b8 100644 --- a/certbot/storage.py +++ b/certbot/storage.py @@ -214,6 +214,26 @@ def get_link_target(link): target = os.path.join(os.path.dirname(link), target) return os.path.abspath(target) +def _write_live_readme_to(readme_path, is_base_dir=False): + prefix = "" + if is_base_dir: + prefix = "[cert name]/" + with open(readme_path, "w") as f: + logger.debug("Writing README to %s.", readme_path) + f.write("This directory contains your keys and certificates.\n\n" + "`{prefix}privkey.pem` : the private key for your certificate.\n" + "`{prefix}fullchain.pem`: the certificate file used in most server software.\n" + "`{prefix}chain.pem` : used for OCSP stapling in Nginx >=1.3.7.\n" + "`{prefix}cert.pem` : will break many server configurations, and " + "should not be used\n" + " without reading further documentation (see link below).\n\n" + "WARNING: DO NOT MOVE OR RENAME THESE FILES!\n" + " Certbot expects these files to remain in this location in order\n" + " to function properly!\n\n" + "We recommend not moving these files. 
For more information, see the Certbot\n" + "User Guide at https://certbot.eff.org/docs/using.html#where-are-my-" + "certificates.\n".format(prefix=prefix)) + def _relevant(option): """ @@ -1003,6 +1023,9 @@ class RenewableCert(object): logger.debug("Creating directory %s.", i) config_file, config_filename = util.unique_lineage_name( cli_config.renewal_configs_dir, lineagename) + base_readme_path = os.path.join(cli_config.live_dir, README) + if not os.path.exists(base_readme_path): + _write_live_readme_to(base_readme_path, is_base_dir=True) # Determine where on disk everything will go # lineagename will now potentially be modified based on which @@ -1045,21 +1068,7 @@ class RenewableCert(object): # Write a README file to the live directory readme_path = os.path.join(live_dir, README) - with open(readme_path, "w") as f: - logger.debug("Writing README to %s.", readme_path) - f.write("This directory contains your keys and certificates.\n\n" - "`privkey.pem` : the private key for your certificate.\n" - "`fullchain.pem`: the certificate file used in most server software.\n" - "`chain.pem` : used for OCSP stapling in Nginx >=1.3.7.\n" - "`cert.pem` : will break many server configurations, and " - "should not be used\n" - " without reading further documentation (see link below).\n\n" - "WARNING: DO NOT MOVE THESE FILES!\n" - " Certbot expects these files to remain in this location in order\n" - " to function properly!\n\n" - "We recommend not moving these files. For more information, see the Certbot\n" - "User Guide at https://certbot.eff.org/docs/using.html#where-are-my-" - "certificates.\n") + _write_live_readme_to(readme_path) # Document what we've done in a new renewal config file config_file.close()
Put a README in /etc/letsencrypt/live In addition to putting a README in /etc/letsencrypt/live/example.com, a lot of people work directly in /etc/letsencrypt/live, so we should probably put a README in there telling people not to move certs.
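A minimal sketch of the idea in the patch above: write a README once into the base `live` directory in addition to the per-lineage README. The helper name, paths, and truncated README body are illustrative, not the exact certbot implementation:

```python
# Illustrative sketch only; not the actual certbot storage module.
import os

README = "README"

def write_live_readme_to(readme_path, is_base_dir=False):
    # In the base live dir the files live one level down, so prefix the
    # file names with "[cert name]/" to keep the instructions accurate.
    prefix = "[cert name]/" if is_base_dir else ""
    with open(readme_path, "w") as f:
        f.write(
            "This directory contains your keys and certificates.\n\n"
            "`{prefix}privkey.pem` : the private key for your certificate.\n"
            "`{prefix}fullchain.pem`: the certificate file used in most server software.\n\n"
            "WARNING: DO NOT MOVE OR RENAME THESE FILES!\n".format(prefix=prefix)
        )

def ensure_readmes(live_dir, lineage_dir):
    # The base README is written only once, the first time it is missing.
    base_readme = os.path.join(live_dir, README)
    if not os.path.exists(base_readme):
        write_live_readme_to(base_readme, is_base_dir=True)
    # Each lineage directory always gets its own README.
    write_live_readme_to(os.path.join(lineage_dir, README))
```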
certbot/certbot
diff --git a/acme/acme/messages_test.py b/acme/acme/messages_test.py index 0e2d8c62d..876fbe825 100644 --- a/acme/acme/messages_test.py +++ b/acme/acme/messages_test.py @@ -424,6 +424,19 @@ class OrderResourceTest(unittest.TestCase): 'authorizations': None, }) +class NewOrderTest(unittest.TestCase): + """Tests for acme.messages.NewOrder.""" + + def setUp(self): + from acme.messages import NewOrder + self.reg = NewOrder( + identifiers=mock.sentinel.identifiers) + + def test_to_partial_json(self): + self.assertEqual(self.reg.to_json(), { + 'identifiers': mock.sentinel.identifiers, + }) + if __name__ == '__main__': unittest.main() # pragma: no cover diff --git a/certbot/tests/storage_test.py b/certbot/tests/storage_test.py index 53a976f8d..078a2858f 100644 --- a/certbot/tests/storage_test.py +++ b/certbot/tests/storage_test.py @@ -625,6 +625,8 @@ class RenewableCertTests(BaseRenewableCertTest): self.assertTrue(result._consistent()) self.assertTrue(os.path.exists(os.path.join( self.config.renewal_configs_dir, "the-lineage.com.conf"))) + self.assertTrue(os.path.exists(os.path.join( + self.config.live_dir, "README"))) self.assertTrue(os.path.exists(os.path.join( self.config.live_dir, "the-lineage.com", "README"))) with open(result.fullchain, "rb") as f:
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 3 }
0.27
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
acme==3.3.0 astroid==1.3.5 asttokens==3.0.0 backports.tarfile==1.2.0 cachetools==5.5.2 -e git+https://github.com/certbot/certbot.git@3de3188dd6fe4c8c9952848c155a56944b55ec2d#egg=certbot certifi==2025.1.31 cffi==1.17.1 chardet==5.2.0 charset-normalizer==3.4.1 colorama==0.4.6 ConfigArgParse==1.7 configobj==5.0.9 coverage==7.8.0 cryptography==44.0.2 decorator==5.2.1 distlib==0.3.9 docutils==0.21.2 exceptiongroup==1.2.2 execnet==2.1.1 executing==2.2.0 filelock==3.18.0 id==1.5.0 idna==3.10 importlib-metadata==6.11.0 iniconfig==2.1.0 ipdb==0.13.13 ipython==8.18.1 jaraco.classes==3.4.0 jaraco.context==6.0.1 jaraco.functools==4.1.0 jedi==0.19.2 jeepney==0.9.0 josepy==1.15.0 keyring==25.6.0 logilab-common==2.1.0 markdown-it-py==3.0.0 matplotlib-inline==0.1.7 mdurl==0.1.2 mock==5.2.0 more-itertools==10.6.0 mypy-extensions==1.0.0 nh3==0.2.21 packaging==24.2 parsedatetime==2.6 parso==0.8.4 pexpect==4.9.0 platformdirs==4.3.7 pluggy==1.5.0 prompt_toolkit==3.0.50 ptyprocess==0.7.0 pure_eval==0.2.3 pycparser==2.22 Pygments==2.19.1 pylint==1.4.2 pyOpenSSL==25.0.0 pyproject-api==1.9.0 pyRFC3339==2.0.1 pytest==8.3.5 pytest-asyncio==0.26.0 pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-xdist==3.6.1 pytz==2025.2 readme_renderer==44.0 requests==2.32.3 requests-toolbelt==1.0.0 rfc3986==2.0.0 rich==14.0.0 SecretStorage==3.3.3 six==1.17.0 stack-data==0.6.3 tomli==2.2.1 tox==4.25.0 traitlets==5.14.3 twine==6.1.0 typing_extensions==4.13.0 urllib3==2.3.0 virtualenv==20.29.3 wcwidth==0.2.13 zipp==3.21.0 zope.component==6.0 zope.event==5.0 zope.hookable==7.0 zope.interface==7.2
name: certbot channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - acme==3.3.0 - astroid==1.3.5 - asttokens==3.0.0 - backports-tarfile==1.2.0 - cachetools==5.5.2 - certifi==2025.1.31 - cffi==1.17.1 - chardet==5.2.0 - charset-normalizer==3.4.1 - colorama==0.4.6 - configargparse==1.7 - configobj==5.0.9 - coverage==7.8.0 - cryptography==44.0.2 - decorator==5.2.1 - distlib==0.3.9 - docutils==0.21.2 - exceptiongroup==1.2.2 - execnet==2.1.1 - executing==2.2.0 - filelock==3.18.0 - id==1.5.0 - idna==3.10 - importlib-metadata==6.11.0 - iniconfig==2.1.0 - ipdb==0.13.13 - ipython==8.18.1 - jaraco-classes==3.4.0 - jaraco-context==6.0.1 - jaraco-functools==4.1.0 - jedi==0.19.2 - jeepney==0.9.0 - josepy==1.15.0 - keyring==25.6.0 - logilab-common==2.1.0 - markdown-it-py==3.0.0 - matplotlib-inline==0.1.7 - mdurl==0.1.2 - mock==5.2.0 - more-itertools==10.6.0 - mypy-extensions==1.0.0 - nh3==0.2.21 - packaging==24.2 - parsedatetime==2.6 - parso==0.8.4 - pexpect==4.9.0 - platformdirs==4.3.7 - pluggy==1.5.0 - prompt-toolkit==3.0.50 - ptyprocess==0.7.0 - pure-eval==0.2.3 - pycparser==2.22 - pygments==2.19.1 - pylint==1.4.2 - pyopenssl==25.0.0 - pyproject-api==1.9.0 - pyrfc3339==2.0.1 - pytest==8.3.5 - pytest-asyncio==0.26.0 - pytest-cov==6.0.0 - pytest-mock==3.14.0 - pytest-xdist==3.6.1 - pytz==2025.2 - readme-renderer==44.0 - requests==2.32.3 - requests-toolbelt==1.0.0 - rfc3986==2.0.0 - rich==14.0.0 - secretstorage==3.3.3 - six==1.17.0 - stack-data==0.6.3 - tomli==2.2.1 - tox==4.25.0 - traitlets==5.14.3 - twine==6.1.0 - typing-extensions==4.13.0 - urllib3==2.3.0 - virtualenv==20.29.3 - wcwidth==0.2.13 - zipp==3.21.0 - zope-component==6.0 - zope-event==5.0 - zope-hookable==7.0 - zope-interface==7.2 prefix: /opt/conda/envs/certbot
[ "acme/acme/messages_test.py::NewOrderTest::test_to_partial_json", "certbot/tests/storage_test.py::RenewableCertTests::test_new_lineage" ]
[]
[ "acme/acme/messages_test.py::ErrorTest::test_code", "acme/acme/messages_test.py::ErrorTest::test_default_typ", "acme/acme/messages_test.py::ErrorTest::test_description", "acme/acme/messages_test.py::ErrorTest::test_from_json_empty", "acme/acme/messages_test.py::ErrorTest::test_from_json_hashable", "acme/acme/messages_test.py::ErrorTest::test_is_acme_error", "acme/acme/messages_test.py::ErrorTest::test_str", "acme/acme/messages_test.py::ErrorTest::test_unicode_error", "acme/acme/messages_test.py::ErrorTest::test_with_code", "acme/acme/messages_test.py::ConstantTest::test_equality", "acme/acme/messages_test.py::ConstantTest::test_from_json", "acme/acme/messages_test.py::ConstantTest::test_from_json_hashable", "acme/acme/messages_test.py::ConstantTest::test_repr", "acme/acme/messages_test.py::ConstantTest::test_to_partial_json", "acme/acme/messages_test.py::DirectoryTest::test_from_json_deserialization_unknown_key_success", "acme/acme/messages_test.py::DirectoryTest::test_getattr", "acme/acme/messages_test.py::DirectoryTest::test_getattr_fails_with_attribute_error", "acme/acme/messages_test.py::DirectoryTest::test_getitem", "acme/acme/messages_test.py::DirectoryTest::test_getitem_fails_with_key_error", "acme/acme/messages_test.py::DirectoryTest::test_init_wrong_key_value_success", "acme/acme/messages_test.py::DirectoryTest::test_iter_meta", "acme/acme/messages_test.py::DirectoryTest::test_to_json", "acme/acme/messages_test.py::RegistrationTest::test_emails", "acme/acme/messages_test.py::RegistrationTest::test_from_data", "acme/acme/messages_test.py::RegistrationTest::test_from_json", "acme/acme/messages_test.py::RegistrationTest::test_from_json_hashable", "acme/acme/messages_test.py::RegistrationTest::test_phones", "acme/acme/messages_test.py::RegistrationTest::test_to_partial_json", "acme/acme/messages_test.py::UpdateRegistrationTest::test_empty", "acme/acme/messages_test.py::RegistrationResourceTest::test_to_partial_json", "acme/acme/messages_test.py::ChallengeResourceTest::test_uri", "acme/acme/messages_test.py::ChallengeBodyTest::test_encode", "acme/acme/messages_test.py::ChallengeBodyTest::test_from_json", "acme/acme/messages_test.py::ChallengeBodyTest::test_from_json_hashable", "acme/acme/messages_test.py::ChallengeBodyTest::test_proxy", "acme/acme/messages_test.py::ChallengeBodyTest::test_to_partial_json", "acme/acme/messages_test.py::AuthorizationTest::test_from_json", "acme/acme/messages_test.py::AuthorizationTest::test_from_json_hashable", "acme/acme/messages_test.py::AuthorizationTest::test_resolved_combinations", "acme/acme/messages_test.py::AuthorizationResourceTest::test_json_de_serializable", "acme/acme/messages_test.py::CertificateRequestTest::test_json_de_serializable", "acme/acme/messages_test.py::CertificateResourceTest::test_json_de_serializable", "acme/acme/messages_test.py::RevocationTest::test_from_json_hashable", "acme/acme/messages_test.py::OrderResourceTest::test_to_partial_json", "certbot/tests/storage_test.py::RenewableCertTests::test_add_time_interval", "certbot/tests/storage_test.py::RenewableCertTests::test_autodeployment_is_enabled", "certbot/tests/storage_test.py::RenewableCertTests::test_autorenewal_is_enabled", "certbot/tests/storage_test.py::RenewableCertTests::test_bad_kind", "certbot/tests/storage_test.py::RenewableCertTests::test_consistent", "certbot/tests/storage_test.py::RenewableCertTests::test_current_target", "certbot/tests/storage_test.py::RenewableCertTests::test_current_version", 
"certbot/tests/storage_test.py::RenewableCertTests::test_ensure_deployed", "certbot/tests/storage_test.py::RenewableCertTests::test_has_pending_deployment", "certbot/tests/storage_test.py::RenewableCertTests::test_initialization", "certbot/tests/storage_test.py::RenewableCertTests::test_invalid_config_filename", "certbot/tests/storage_test.py::RenewableCertTests::test_is_test_cert", "certbot/tests/storage_test.py::RenewableCertTests::test_latest_and_next_versions", "certbot/tests/storage_test.py::RenewableCertTests::test_missing_cert", "certbot/tests/storage_test.py::RenewableCertTests::test_names", "certbot/tests/storage_test.py::RenewableCertTests::test_new_lineage_nonexistent_dirs", "certbot/tests/storage_test.py::RenewableCertTests::test_no_current_version", "certbot/tests/storage_test.py::RenewableCertTests::test_no_renewal_version", "certbot/tests/storage_test.py::RenewableCertTests::test_ocsp_revoked", "certbot/tests/storage_test.py::RenewableCertTests::test_relevant_values", "certbot/tests/storage_test.py::RenewableCertTests::test_relevant_values_bool", "certbot/tests/storage_test.py::RenewableCertTests::test_relevant_values_default", "certbot/tests/storage_test.py::RenewableCertTests::test_relevant_values_namespace", "certbot/tests/storage_test.py::RenewableCertTests::test_relevant_values_nondefault", "certbot/tests/storage_test.py::RenewableCertTests::test_relevant_values_plugins_none", "certbot/tests/storage_test.py::RenewableCertTests::test_relevant_values_server", "certbot/tests/storage_test.py::RenewableCertTests::test_relevant_values_str", "certbot/tests/storage_test.py::RenewableCertTests::test_renewal_bad_config", "certbot/tests/storage_test.py::RenewableCertTests::test_renewal_incomplete_config", "certbot/tests/storage_test.py::RenewableCertTests::test_renewal_newer_version", "certbot/tests/storage_test.py::RenewableCertTests::test_save_successor", "certbot/tests/storage_test.py::RenewableCertTests::test_should_autodeploy", "certbot/tests/storage_test.py::RenewableCertTests::test_should_autorenew", "certbot/tests/storage_test.py::RenewableCertTests::test_time_interval_judgments", "certbot/tests/storage_test.py::RenewableCertTests::test_update_all_links_to_full_failure", "certbot/tests/storage_test.py::RenewableCertTests::test_update_all_links_to_partial_failure", "certbot/tests/storage_test.py::RenewableCertTests::test_update_all_links_to_success", "certbot/tests/storage_test.py::RenewableCertTests::test_update_link_to", "certbot/tests/storage_test.py::RenewableCertTests::test_update_symlinks", "certbot/tests/storage_test.py::RenewableCertTests::test_version", "certbot/tests/storage_test.py::RenewableCertTests::test_write_renewal_config", "certbot/tests/storage_test.py::DeleteFilesTest::test_bad_renewal_config", "certbot/tests/storage_test.py::DeleteFilesTest::test_delete_all_files", "certbot/tests/storage_test.py::DeleteFilesTest::test_livedir_not_empty", "certbot/tests/storage_test.py::DeleteFilesTest::test_no_archive", "certbot/tests/storage_test.py::DeleteFilesTest::test_no_cert_file", "certbot/tests/storage_test.py::DeleteFilesTest::test_no_readme_file", "certbot/tests/storage_test.py::DeleteFilesTest::test_no_renewal_config", "certbot/tests/storage_test.py::CertPathForCertNameTest::test_no_such_cert_name", "certbot/tests/storage_test.py::CertPathForCertNameTest::test_simple_cert_name" ]
[]
Apache License 2.0
3,073
[ "certbot/storage.py", "acme/acme/messages.py", "CHANGELOG.md" ]
[ "certbot/storage.py", "acme/acme/messages.py", "CHANGELOG.md" ]
pydata__pydata-google-auth-4
3564ac45831ca576cdfeeac28687268f782e0b15
2018-09-14 22:28:33
3564ac45831ca576cdfeeac28687268f782e0b15
diff --git a/docs/source/api.rst b/docs/source/api.rst index f5bcf95..8d90617 100644 --- a/docs/source/api.rst +++ b/docs/source/api.rst @@ -1,19 +1,18 @@ -.. currentmodule:: pandas_gbq +.. currentmodule:: pydata_google_auth .. _api: ************* API Reference ************* -.. note:: - - Only functions and classes which are members of the ``pandas_gbq`` module - are considered public. Submodules and their members are considered private. - .. autosummary:: - read_gbq - to_gbq + default + get_user_credentials + cache -.. autofunction:: read_gbq -.. autofunction:: to_gbq +.. autofunction:: default +.. autofunction:: get_user_credentials +.. automodule:: pydata_google_auth.cache + :members: + :show-inheritance: diff --git a/pydata_google_auth/auth.py b/pydata_google_auth/auth.py index 47090e6..ae5197f 100644 --- a/pydata_google_auth/auth.py +++ b/pydata_google_auth/auth.py @@ -13,6 +13,7 @@ import oauthlib.oauth2.rfc6749.errors import google.auth.transport.requests from pydata_google_auth import exceptions +from pydata_google_auth import cache logger = logging.getLogger(__name__) @@ -21,17 +22,13 @@ CLIENT_ID = ( "262006177488-3425ks60hkk80fssi9vpohv88g6q1iqd" ".apps.googleusercontent.com" ) CLIENT_SECRET = "JSF-iczmzEgbTR-XK-2xaWAc" -CREDENTIALS_DIRNAME = "pydata" -CREDENTIALS_FILENAME = "pydata_google_credentials.json" def default( scopes, - client_id=CLIENT_ID, - client_secret=CLIENT_SECRET, - credentials_dirname=CREDENTIALS_DIRNAME, - credentials_filename=CREDENTIALS_FILENAME, - reauth=False, + client_id=None, + client_secret=None, + credentials_cache=cache.READ_WRITE, auth_local_webserver=False, ): # Try to retrieve Application Default Credentials @@ -44,9 +41,7 @@ def default( scopes, client_id=client_id, client_secret=client_secret, - credentials_dirname=credentials_dirname, - credentials_filename=credentials_filename, - reauth=reauth, + credentials_cache=credentials_cache, auth_local_webserver=auth_local_webserver, ) @@ -92,14 +87,13 @@ def get_application_default_credentials(scopes): def get_user_credentials( scopes, - client_id=CLIENT_ID, - client_secret=CLIENT_SECRET, - credentials_dirname=CREDENTIALS_DIRNAME, - credentials_filename=CREDENTIALS_FILENAME, - reauth=False, + client_id=None, + client_secret=None, + credentials_cache=cache.READ_WRITE, auth_local_webserver=False, ): - """Gets user account credentials. + """ + Gets user account credentials. This method authenticates using user credentials, either loading saved credentials from a file or by going through the OAuth flow. @@ -113,16 +107,16 @@ def get_user_credentials( GoogleCredentials : credentials Credentials for the user with BigQuery access. """ - # Use the default credentials location under ~/.config and the - # equivalent directory on windows if the user has not specified a - # credentials path. - credentials_path = get_default_credentials_path( - credentials_dirname, credentials_filename - ) + # Use None as default for client_id and client_secret so that the values + # aren't included in the docs. A string of bytes isn't useful for the + # documentation and might encourage the values to be used outside of this + # library. 
+ if client_id is None: + client_id = CLIENT_ID + if client_secret is None: + client_secret = CLIENT_SECRET - credentials = None - if not reauth: - credentials = load_user_credentials_from_file(credentials_path) + credentials = credentials_cache.load() client_config = { "installed": { @@ -149,100 +143,10 @@ def get_user_credentials( "Unable to get valid credentials: {0}".format(exc) ) - save_user_account_credentials(credentials, credentials_path) + credentials_cache.save(credentials) if credentials and not credentials.valid: request = google.auth.transport.requests.Request() credentials.refresh(request) return credentials - - -def load_user_credentials_from_info(credentials_json): - return google.oauth2.credentials.Credentials( - token=credentials_json.get("access_token"), - refresh_token=credentials_json.get("refresh_token"), - id_token=credentials_json.get("id_token"), - token_uri=credentials_json.get("token_uri"), - client_id=credentials_json.get("client_id"), - client_secret=credentials_json.get("client_secret"), - scopes=credentials_json.get("scopes"), - ) - - -def load_user_credentials_from_file(credentials_path): - """ - Loads user account credentials from a local file. - - .. versionadded 0.2.0 - - Parameters - ---------- - None - - Returns - ------- - - GoogleCredentials, - If the credentials can loaded. The retrieved credentials should - also have access to the project (project_id) on BigQuery. - - OR None, - If credentials can not be loaded from a file. Or, the retrieved - credentials do not have access to the project (project_id) - on BigQuery. - """ - try: - with open(credentials_path) as credentials_file: - credentials_json = json.load(credentials_file) - except (IOError, ValueError) as exc: - logger.debug( - "Error loading credentials from {}: {}".format(credentials_path, str(exc)) - ) - return None - - return load_user_credentials_from_info(credentials_json) - - -def get_default_credentials_path(credentials_dirname, credentials_filename): - """ - Gets the default path to the Google user credentials - - .. versionadded 0.3.0 - - Returns - ------- - Path to the Google user credentials - """ - if os.name == "nt": - config_path = os.environ["APPDATA"] - else: - config_path = os.path.join(os.path.expanduser("~"), ".config") - - config_path = os.path.join(config_path, credentials_dirname) - - # Create a pydata directory in an application-specific hidden - # user folder on the operating system. - if not os.path.exists(config_path): - os.makedirs(config_path) - - return os.path.join(config_path, credentials_filename) - - -def save_user_account_credentials(credentials, credentials_path): - """ - Saves user account credentials to a local file. - - .. 
versionadded 0.2.0 - """ - try: - with open(credentials_path, "w") as credentials_file: - credentials_json = { - "refresh_token": credentials.refresh_token, - "id_token": credentials.id_token, - "token_uri": credentials.token_uri, - "client_id": credentials.client_id, - "client_secret": credentials.client_secret, - "scopes": credentials.scopes, - } - json.dump(credentials_json, credentials_file) - except IOError: - logger.warning("Unable to save credentials.") diff --git a/pydata_google_auth/cache.py b/pydata_google_auth/cache.py new file mode 100644 index 0000000..c755dd0 --- /dev/null +++ b/pydata_google_auth/cache.py @@ -0,0 +1,233 @@ +"""Caching implementations for reading and writing user credentials.""" + +import json +import logging +import os +import os.path + +import google.oauth2.credentials + + +logger = logging.getLogger(__name__) + + +_DIRNAME = "pydata" +_FILENAME = "pydata_google_credentials.json" + + +def _get_default_credentials_path(credentials_dirname, credentials_filename): + """ + Gets the default path to the Google user credentials + + Returns + ------- + str + Path to the Google user credentials + """ + if os.name == "nt": + config_path = os.environ["APPDATA"] + else: + config_path = os.path.join(os.path.expanduser("~"), ".config") + + config_path = os.path.join(config_path, credentials_dirname) + + # Create a pydata directory in an application-specific hidden + # user folder on the operating system. + if not os.path.exists(config_path): + os.makedirs(config_path) + + return os.path.join(config_path, credentials_filename) + + +def _load_user_credentials_from_info(credentials_json): + return google.oauth2.credentials.Credentials( + token=credentials_json.get("access_token"), + refresh_token=credentials_json.get("refresh_token"), + id_token=credentials_json.get("id_token"), + token_uri=credentials_json.get("token_uri"), + client_id=credentials_json.get("client_id"), + client_secret=credentials_json.get("client_secret"), + scopes=credentials_json.get("scopes"), + ) + + +def _load_user_credentials_from_file(credentials_path): + """ + Loads user account credentials from a local file. + + Parameters + ---------- + None + + Returns + ------- + - GoogleCredentials, + If the credentials can loaded. The retrieved credentials should + also have access to the project (project_id) on BigQuery. + - OR None, + If credentials can not be loaded from a file. Or, the retrieved + credentials do not have access to the project (project_id) + on BigQuery. + """ + try: + with open(credentials_path) as credentials_file: + credentials_json = json.load(credentials_file) + except (IOError, ValueError) as exc: + logger.debug( + "Error loading credentials from {}: {}".format(credentials_path, str(exc)) + ) + return None + + return _load_user_credentials_from_info(credentials_json) + + +def _save_user_account_credentials(credentials, credentials_path): + """ + Saves user account credentials to a local file. + """ + try: + with open(credentials_path, "w") as credentials_file: + credentials_json = { + "refresh_token": credentials.refresh_token, + "id_token": credentials.id_token, + "token_uri": credentials.token_uri, + "client_id": credentials.client_id, + "client_secret": credentials.client_secret, + "scopes": credentials.scopes, + } + json.dump(credentials_json, credentials_file) + except IOError: + logger.warning("Unable to save credentials.") + + +class CredentialsCache(object): + """ + Shared base class for crentials classes. 
+ + This class also functions as a noop implementation of a credentials class. + """ + + def load(self): + """ + Load credentials from disk. + + Does nothing in this base class. + + Returns + ------- + google.oauth2.credentials.Credentials, optional + Returns user account credentials loaded from disk or ``None`` if no + credentials could be found. + """ + pass + + def save(self, credentials): + """ + Write credentials to disk. + + Does nothing in this base class. + + Parameters + ---------- + credentials : google.oauth2.credentials.Credentials + User credentials to save to disk. + """ + pass + + +class ReadWriteCredentialsCache(CredentialsCache): + """ + A :class:`~pydata_google_auth.cache.CredentialsCache` which writes to + disk and reads cached credentials from disk. + + Parameters + ---------- + dirname : str, optional + Name of directory to write credentials to. This directory is created + within the ``.config`` subdirectory of the ``HOME`` (``APPDATA`` on + Windows) directory. + filename : str, optional + Name of the credentials file within the credentials directory. + """ + + def __init__(self, dirname=_DIRNAME, filename=_FILENAME): + super(ReadWriteCredentialsCache, self).__init__() + self._path = _get_default_credentials_path(_DIRNAME, _FILENAME) + + def load(self): + """ + Load credentials from disk. + + Returns + ------- + google.oauth2.credentials.Credentials, optional + Returns user account credentials loaded from disk or ``None`` if no + credentials could be found. + """ + return _load_user_credentials_from_file(self._path) + + def save(self, credentials): + """ + Write credentials to disk. + + Parameters + ---------- + credentials : google.oauth2.credentials.Credentials + User credentials to save to disk. + """ + _save_user_account_credentials(credentials, self._path) + + +class WriteOnlyCredentialsCache(CredentialsCache): + """ + A :class:`~pydata_google_auth.cache.CredentialsCache` which writes to + disk, but doesn't read from disk. + + Use this class to reauthorize against Google APIs and cache your + credentials for later. + + Parameters + ---------- + dirname : str, optional + Name of directory to write credentials to. This directory is created + within the ``.config`` subdirectory of the ``HOME`` (``APPDATA`` on + Windows) directory. + filename : str, optional + Name of the credentials file within the credentials directory. + """ + + def __init__(self, dirname=_DIRNAME, filename=_FILENAME): + super(WriteOnlyCredentialsCache, self).__init__() + self._path = _get_default_credentials_path(_DIRNAME, _FILENAME) + + def save(self, credentials): + """ + Write credentials to disk. + + Parameters + ---------- + credentials : google.oauth2.credentials.Credentials + User credentials to save to disk. + """ + _save_user_account_credentials(credentials, self._path) + + +NOOP = CredentialsCache() +""" +Noop impmentation of credentials cache. + +This cache always reauthorizes and never save credentials to disk. +Recommended for shared machines. +""" + +READ_WRITE = ReadWriteCredentialsCache() +""" +Write credentials to disk and read cached credentials from disk. +""" + +REAUTH = WriteOnlyCredentialsCache() +""" +Write credentials to disk. Never read cached credentials from disk. + +Use this to reauthenticate and refresh the cached credentials. +"""
Implement CredentialsCache base classes From: https://github.com/pydata/pandas-gbq/issues/161 The credentials caching behavior should be configurable via `CredentialsCache`, `WriteOnlyCredentialsCache`, and `NoopCredentialsCache`. This replaces the `reauth` argument and `PANDAS_GBQ_CREDENTIALS_FILE` environment variables.
pydata/pydata-google-auth
diff --git a/tests/system/test_auth.py b/tests/system/test_auth.py index 52409cb..9c182bc 100644 --- a/tests/system/test_auth.py +++ b/tests/system/test_auth.py @@ -47,17 +47,29 @@ def test_get_user_credentials_gets_valid_credentials(): assert credentials.has_scopes(TEST_SCOPES) -def test_get_user_credentials_from_file_gets_valid_credentials(): +def test_get_user_credentials_noop_gets_valid_credentials(): import pydata_google_auth - import pydata_google_auth.auth + import pydata_google_auth.cache - # Mock load_user_credentials_from_file to fail, forcing fresh credentials. - with mock.patch( - "pydata_google_auth.auth.load_user_credentials_from_file", return_value=None - ): - credentials = pydata_google_auth.get_user_credentials( - TEST_SCOPES, auth_local_webserver=True - ) + credentials = pydata_google_auth.get_user_credentials( + TEST_SCOPES, + credentials_cache=pydata_google_auth.cache.NOOP, + auth_local_webserver=True, + ) + + assert credentials.valid + assert credentials.has_scopes(TEST_SCOPES) + + +def test_get_user_credentials_reauth_gets_valid_credentials(): + import pydata_google_auth + import pydata_google_auth.cache + + credentials = pydata_google_auth.get_user_credentials( + TEST_SCOPES, + credentials_cache=pydata_google_auth.cache.REAUTH, + auth_local_webserver=True, + ) assert credentials.valid assert credentials.has_scopes(TEST_SCOPES) diff --git a/tests/unit/test_auth.py b/tests/unit/test_auth.py index 2c9ed11..89b882a 100644 --- a/tests/unit/test_auth.py +++ b/tests/unit/test_auth.py @@ -10,6 +10,7 @@ except ImportError: # pragma: NO COVER import google.auth import google.auth.credentials +import google.oauth2.credentials TEST_SCOPES = ["https://www.googleapis.com/auth/cloud-platform"] @@ -33,18 +34,17 @@ def test_default_returns_google_auth_credentials(monkeypatch): def test_default_loads_user_credentials(monkeypatch): from pydata_google_auth import auth + from pydata_google_auth import cache def mock_default_credentials(scopes=None, request=None): return (None, None) monkeypatch.setattr(google.auth, "default", mock_default_credentials) - mock_user_credentials = mock.create_autospec(google.auth.credentials.Credentials) - def mock_load_credentials(project_id=None, credentials_path=None): - return mock_user_credentials + mock_cache = mock.create_autospec(cache.CredentialsCache) + mock_user_credentials = mock.create_autospec(google.oauth2.credentials.Credentials) + mock_cache.load.return_value = mock_user_credentials - monkeypatch.setattr(auth, "load_user_credentials_from_file", mock_load_credentials) - - credentials, project = auth.default(TEST_SCOPES) + credentials, project = auth.default(TEST_SCOPES, credentials_cache=mock_cache) assert project is None assert credentials is mock_user_credentials
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 2 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
cachetools==5.5.2 certifi==2025.1.31 charset-normalizer==3.4.1 coverage==7.8.0 exceptiongroup==1.2.2 execnet==2.1.1 google-api-core==2.24.2 google-auth==2.38.0 google-auth-oauthlib==1.2.1 google-cloud-bigquery==3.31.0 google-cloud-core==2.4.3 google-crc32c==1.7.1 google-resumable-media==2.7.2 googleapis-common-protos==1.69.2 grpcio==1.71.0 grpcio-status==1.71.0 idna==3.10 iniconfig==2.1.0 numpy==2.0.2 oauthlib==3.2.2 packaging==24.2 pandas==2.2.3 pluggy==1.5.0 proto-plus==1.26.1 protobuf==5.29.4 pyasn1==0.6.1 pyasn1_modules==0.4.2 -e git+https://github.com/pydata/pydata-google-auth.git@3564ac45831ca576cdfeeac28687268f782e0b15#egg=pydata_google_auth pytest==8.3.5 pytest-asyncio==0.26.0 pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-xdist==3.6.1 python-dateutil==2.9.0.post0 pytz==2025.2 requests==2.32.3 requests-oauthlib==2.0.0 rsa==4.9 six==1.17.0 tomli==2.2.1 tqdm==4.67.1 typing_extensions==4.13.0 tzdata==2025.2 urllib3==2.3.0
name: pydata-google-auth channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - cachetools==5.5.2 - certifi==2025.1.31 - charset-normalizer==3.4.1 - coverage==7.8.0 - exceptiongroup==1.2.2 - execnet==2.1.1 - google-api-core==2.24.2 - google-auth==2.38.0 - google-auth-oauthlib==1.2.1 - google-cloud-bigquery==3.31.0 - google-cloud-core==2.4.3 - google-crc32c==1.7.1 - google-resumable-media==2.7.2 - googleapis-common-protos==1.69.2 - grpcio==1.71.0 - grpcio-status==1.71.0 - idna==3.10 - iniconfig==2.1.0 - numpy==2.0.2 - oauthlib==3.2.2 - packaging==24.2 - pandas==2.2.3 - pluggy==1.5.0 - proto-plus==1.26.1 - protobuf==5.29.4 - pyasn1==0.6.1 - pyasn1-modules==0.4.2 - pytest==8.3.5 - pytest-asyncio==0.26.0 - pytest-cov==6.0.0 - pytest-mock==3.14.0 - pytest-xdist==3.6.1 - python-dateutil==2.9.0.post0 - pytz==2025.2 - requests==2.32.3 - requests-oauthlib==2.0.0 - rsa==4.9 - six==1.17.0 - tomli==2.2.1 - tqdm==4.67.1 - typing-extensions==4.13.0 - tzdata==2025.2 - urllib3==2.3.0 prefix: /opt/conda/envs/pydata-google-auth
[ "tests/unit/test_auth.py::test_default_loads_user_credentials" ]
[ "tests/system/test_auth.py::test_default_gets_valid_credentials", "tests/system/test_auth.py::test_default_gets_user_credentials", "tests/system/test_auth.py::test_get_user_credentials_gets_valid_credentials", "tests/system/test_auth.py::test_get_user_credentials_noop_gets_valid_credentials", "tests/system/test_auth.py::test_get_user_credentials_reauth_gets_valid_credentials" ]
[ "tests/unit/test_auth.py::test_default_returns_google_auth_credentials" ]
[]
BSD 3-Clause "New" or "Revised" License
3,074
[ "pydata_google_auth/auth.py", "docs/source/api.rst", "pydata_google_auth/cache.py" ]
[ "pydata_google_auth/auth.py", "docs/source/api.rst", "pydata_google_auth/cache.py" ]
tox-dev__tox-983
c8e07a4cc86814d72c425633a3fa73c05f902fc9
2018-09-15 06:04:39
cf6afcecaca22df7b509facaea43c09a15570f75
diff --git a/changelog/908.bugfix.rst b/changelog/908.bugfix.rst new file mode 100644 index 00000000..d559d120 --- /dev/null +++ b/changelog/908.bugfix.rst @@ -0,0 +1,1 @@ +instead of assuming the Python version from the base python name ask the interpreter to reveal the version for the ``ignore_basepython_conflict`` flag - by :user:`gaborbernat` diff --git a/doc/config.rst b/doc/config.rst index cb47a68f..d1f99e4b 100644 --- a/doc/config.rst +++ b/doc/config.rst @@ -80,13 +80,19 @@ and will first lookup global tox settings in this section: .. versionadded:: 3.1.0 - If ``True``, :confval:`basepython` settings that conflict with the Python - variant for environments using default factors, such as ``py27`` or - ``py35``, will be ignored. This allows you to configure - :confval:`basepython` in the global testenv without affecting these - factors. If ``False``, the default, a warning will be emitted if a conflict - is identified. In a future version of tox, this warning will become an - error. + tox allows setting the python version for an environment via the :confval:`basepython` + setting. If that's not set tox can set a default value from the environment name ( + e.g. ``py37`` implies Python 3.7). Matching up the python version with the environment + name has became expected at this point, leading to surprises when some configs don't + do so. To help with sanity of users a warning will be emitted whenever the environment + name version does not matches up with this expectation. In a future version of tox, + this warning will become an error. + + Furthermore, we allow hard enforcing this rule (and bypassing the warning) by setting + this flag to ``True``. In such cases we ignore the :confval:`basepython` and instead + always use the base python implied from the Python name. This allows you to + configure :confval:`basepython` in the global testenv without affecting environments + that have implied base python versions. .. confval:: requires=LIST diff --git a/src/tox/config.py b/src/tox/config.py index 12ac98c5..79862a93 100755 --- a/src/tox/config.py +++ b/src/tox/config.py @@ -21,7 +21,7 @@ import toml import tox from tox.constants import INFO -from tox.interpreters import Interpreters +from tox.interpreters import Interpreters, NoInterpreterInfo hookimpl = tox.hookimpl """DEPRECATED - REMOVE - this is left for compatibility with plugins importing this from here. @@ -532,35 +532,40 @@ def tox_addoption(parser): ) def basepython_default(testenv_config, value): - """Configure a sane interpreter for the environment. + """either user set or proposed from the factor name - If the environment contains a default factor, this will always be the - interpreter associated with that factor overriding anything manually - set. 
+ in both cases we check that the factor name implied python version and the resolved + python interpreter version match up; if they don't we warn, unless ignore base + python conflict is set in which case the factor name implied version if forced """ for factor in testenv_config.factors: - match = tox.PYTHON.PY_FACTORS_RE.match(factor) - if match: - base_exe = tox.PYTHON.PY_FACTORS_MAP[match.group(1)] - version = ".".join(match.group(2) or "") - default = "{}{}".format(base_exe, version) - - if value is None or testenv_config.config.ignore_basepython_conflict: - return default - - if str(value) != default: + if factor in tox.PYTHON.DEFAULT_FACTORS: + implied_python = tox.PYTHON.DEFAULT_FACTORS[factor] + break + else: + implied_python, factor = None, None + + if testenv_config.config.ignore_basepython_conflict and implied_python is not None: + return implied_python + + proposed_python = (implied_python or sys.executable) if value is None else str(value) + if implied_python is not None and implied_python != proposed_python: + testenv_config.basepython = proposed_python + implied_version = tox.PYTHON.PY_FACTORS_RE.match(factor).group(2) + python_info_for_proposed = testenv_config.python_info + if not isinstance(python_info_for_proposed, NoInterpreterInfo): + proposed_version = "".join( + str(i) for i in python_info_for_proposed.version_info[0:2] + ) + if implied_version != proposed_version: # TODO(stephenfin): Raise an exception here in tox 4.0 warnings.warn( - "Conflicting basepython for environment '{}'; resolve conflict " - "or configure ignore_basepython_conflict".format( - testenv_config.envname, str(value), default + "conflicting basepython version (set {}, should be {}) for env '{}';" + "resolve conflict or set ignore_basepython_conflict".format( + proposed_version, implied_version, testenv_config.envname ) ) - - if value is None: - return sys.executable - - return str(value) + return proposed_python parser.add_testenv_attribute( name="basepython", diff --git a/tox.ini b/tox.ini index 98eeaa7b..74786338 100644 --- a/tox.ini +++ b/tox.ini @@ -99,7 +99,7 @@ commands = echo {posargs} [flake8] max-complexity = 22 max-line-length = 99 -ignore = E203, W503 +ignore = E203, W503, C901 [coverage:run] branch = true
avoid spurious warning about basepython setting for default factors Specifying a ``basepython`` setting for any environment including a ``pyXY`` factor (including the ``pyXY`` environments themselves) can trigger a spurious warning about the configured interpreter, even if the interpreter does provide the expected version. The warning should only be generated if the specified interpreter does not conform to the expected version. Checking the return code from a command like this should do the trick: ~~~~~~~~ {envdir}/bin/python -c 'import sys; sys.exit(sys.version_info[:2] != (X, Y))' ~~~~~~~~ A return code of 0 indicates the interpreter specified conforms to the version indicated by the ``pyXY`` factor; 1 indicates a different version is provided, and anything else would be a real shock. If the return code is 0, the indicated interpreter should be used regardless of the ``ignore_basepython_conflict`` setting, and no warning should be generated. Previous discussion from the mailing list: https://mail.python.org/mm3/archives/list/[email protected]/thread/CY2PB35PDX4IZAT73FRDCLSGQ4L2CPR4/
tox-dev/tox
diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py index 3b53bad2..78afe57e 100644 --- a/tests/unit/test_config.py +++ b/tests/unit/test_config.py @@ -1645,7 +1645,7 @@ class TestConfigTestEnv: assert config.basepython == "python{}.{}".format(name[2], name[3]) def test_default_factors_conflict(self, newconfig, capsys): - with pytest.warns(UserWarning, match=r"Conflicting basepython .*"): + with pytest.warns(UserWarning, match=r"conflicting basepython .*"): config = newconfig( """ [testenv] @@ -1658,6 +1658,31 @@ class TestConfigTestEnv: envconfig = config.envconfigs["py27"] assert envconfig.basepython == "python3" + def test_default_factors_conflict_lying_name( + self, newconfig, capsys, tmpdir, recwarn, monkeypatch + ): + # we first need to create a lying Python here, let's mock out here + from tox.interpreters import Interpreters + + def get_executable(self, envconfig): + return sys.executable + + monkeypatch.setattr(Interpreters, "get_executable", get_executable) + + major, minor = sys.version_info[0:2] + config = newconfig( + """ + [testenv:py{0}{1}] + basepython=python{0}.{2} + commands = python --version + """.format( + major, minor, minor - 1 + ) + ) + env_config = config.envconfigs["py{}{}".format(major, minor)] + assert env_config.basepython == "python{}.{}".format(major, minor - 1) + assert not recwarn.list, "\n".join(repr(i.message) for i in recwarn.list) + def test_default_factors_conflict_ignore(self, newconfig, capsys): with pytest.warns(None) as record: config = newconfig(
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 1 }, "num_modified_files": 3 }
3.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock" ], "pre_install": null, "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///croot/attrs_1668696182826/work certifi @ file:///croot/certifi_1671487769961/work/certifi coverage==7.2.7 distlib==0.3.9 execnet==2.0.2 filelock==3.12.2 flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core importlib-metadata==6.7.0 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work packaging @ file:///croot/packaging_1671697413597/work platformdirs==4.0.0 pluggy==0.13.1 py @ file:///opt/conda/conda-bld/py_1644396412707/work pytest==7.1.2 pytest-cov==4.1.0 pytest-mock==3.11.1 pytest-xdist==3.5.0 six==1.17.0 toml==0.10.2 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work -e git+https://github.com/tox-dev/tox.git@c8e07a4cc86814d72c425633a3fa73c05f902fc9#egg=tox typing_extensions==4.7.1 virtualenv==20.26.6 zipp @ file:///croot/zipp_1672387121353/work
name: tox channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=22.1.0=py37h06a4308_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - flit-core=3.6.0=pyhd3eb1b0_0 - importlib_metadata=4.11.3=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=22.0=py37h06a4308_0 - pip=22.3.1=py37h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pytest=7.1.2=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py37h06a4308_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zipp=3.11.0=py37h06a4308_0 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.2.7 - distlib==0.3.9 - execnet==2.0.2 - filelock==3.12.2 - importlib-metadata==6.7.0 - platformdirs==4.0.0 - pluggy==0.13.1 - pytest-cov==4.1.0 - pytest-mock==3.11.1 - pytest-xdist==3.5.0 - six==1.17.0 - toml==0.10.2 - tox==3.3.1.dev16+gc8e07a4c - typing-extensions==4.7.1 - virtualenv==20.26.6 prefix: /opt/conda/envs/tox
[ "tests/unit/test_config.py::TestConfigTestEnv::test_default_factors_conflict", "tests/unit/test_config.py::TestConfigTestEnv::test_default_factors_conflict_lying_name" ]
[ "tests/unit/test_config.py::TestVenvConfig::test_force_dep_with_url" ]
[ "tests/unit/test_config.py::TestVenvConfig::test_config_parsing_minimal", "tests/unit/test_config.py::TestVenvConfig::test_config_parsing_multienv", "tests/unit/test_config.py::TestVenvConfig::test_envdir_set_manually", "tests/unit/test_config.py::TestVenvConfig::test_envdir_set_manually_with_substitutions", "tests/unit/test_config.py::TestVenvConfig::test_force_dep_version", "tests/unit/test_config.py::TestVenvConfig::test_process_deps", "tests/unit/test_config.py::TestVenvConfig::test_is_same_dep", "tests/unit/test_config.py::TestConfigPlatform::test_config_parse_platform", "tests/unit/test_config.py::TestConfigPlatform::test_config_parse_platform_rex", "tests/unit/test_config.py::TestConfigPlatform::test_config_parse_platform_with_factors[win]", "tests/unit/test_config.py::TestConfigPlatform::test_config_parse_platform_with_factors[lin]", "tests/unit/test_config.py::TestConfigPlatform::test_config_parse_platform_with_factors[osx]", "tests/unit/test_config.py::TestConfigPackage::test_defaults", "tests/unit/test_config.py::TestConfigPackage::test_defaults_distshare", "tests/unit/test_config.py::TestConfigPackage::test_defaults_changed_dir", "tests/unit/test_config.py::TestConfigPackage::test_project_paths", "tests/unit/test_config.py::TestParseconfig::test_search_parents", "tests/unit/test_config.py::TestParseconfig::test_explicit_config_path", "tests/unit/test_config.py::test_get_homedir", "tests/unit/test_config.py::TestGetcontextname::test_blank", "tests/unit/test_config.py::TestGetcontextname::test_jenkins", "tests/unit/test_config.py::TestGetcontextname::test_hudson_legacy", "tests/unit/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_from_other_section", "tests/unit/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_from_other_section_multiline", "tests/unit/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_from_other_section_posargs", "tests/unit/test_config.py::TestIniParserAgainstCommandsKey::test_command_section_and_posargs_substitution", "tests/unit/test_config.py::TestIniParserAgainstCommandsKey::test_command_env_substitution", "tests/unit/test_config.py::TestIniParserAgainstCommandsKey::test_command_env_substitution_global", "tests/unit/test_config.py::TestIniParserAgainstCommandsKey::test_regression_issue595", "tests/unit/test_config.py::TestIniParser::test_getstring_single", "tests/unit/test_config.py::TestIniParser::test_missing_substitution", "tests/unit/test_config.py::TestIniParser::test_getstring_fallback_sections", "tests/unit/test_config.py::TestIniParser::test_getstring_substitution", "tests/unit/test_config.py::TestIniParser::test_getlist", "tests/unit/test_config.py::TestIniParser::test_getdict", "tests/unit/test_config.py::TestIniParser::test_normal_env_sub_works", "tests/unit/test_config.py::TestIniParser::test_missing_env_sub_raises_config_error_in_non_testenv", "tests/unit/test_config.py::TestIniParser::test_missing_env_sub_populates_missing_subs", "tests/unit/test_config.py::TestIniParser::test_getstring_environment_substitution_with_default", "tests/unit/test_config.py::TestIniParser::test_value_matches_section_substitution", "tests/unit/test_config.py::TestIniParser::test_value_doesn_match_section_substitution", "tests/unit/test_config.py::TestIniParser::test_getstring_other_section_substitution", "tests/unit/test_config.py::TestIniParser::test_argvlist", "tests/unit/test_config.py::TestIniParser::test_argvlist_windows_escaping", 
"tests/unit/test_config.py::TestIniParser::test_argvlist_multiline", "tests/unit/test_config.py::TestIniParser::test_argvlist_quoting_in_command", "tests/unit/test_config.py::TestIniParser::test_argvlist_comment_after_command", "tests/unit/test_config.py::TestIniParser::test_argvlist_command_contains_hash", "tests/unit/test_config.py::TestIniParser::test_argvlist_positional_substitution", "tests/unit/test_config.py::TestIniParser::test_argvlist_quoted_posargs", "tests/unit/test_config.py::TestIniParser::test_argvlist_posargs_with_quotes", "tests/unit/test_config.py::TestIniParser::test_positional_arguments_are_only_replaced_when_standing_alone", "tests/unit/test_config.py::TestIniParser::test_posargs_are_added_escaped_issue310", "tests/unit/test_config.py::TestIniParser::test_substitution_with_multiple_words", "tests/unit/test_config.py::TestIniParser::test_getargv", "tests/unit/test_config.py::TestIniParser::test_getpath", "tests/unit/test_config.py::TestIniParser::test_getbool", "tests/unit/test_config.py::TestIniParserPrefix::test_basic_section_access", "tests/unit/test_config.py::TestIniParserPrefix::test_fallback_sections", "tests/unit/test_config.py::TestIniParserPrefix::test_value_matches_prefixed_section_substitution", "tests/unit/test_config.py::TestIniParserPrefix::test_value_doesn_match_prefixed_section_substitution", "tests/unit/test_config.py::TestIniParserPrefix::test_other_section_substitution", "tests/unit/test_config.py::TestConfigTestEnv::test_commentchars_issue33", "tests/unit/test_config.py::TestConfigTestEnv::test_defaults", "tests/unit/test_config.py::TestConfigTestEnv::test_sitepackages_switch", "tests/unit/test_config.py::TestConfigTestEnv::test_installpkg_tops_develop", "tests/unit/test_config.py::TestConfigTestEnv::test_specific_command_overrides", "tests/unit/test_config.py::TestConfigTestEnv::test_whitelist_externals", "tests/unit/test_config.py::TestConfigTestEnv::test_changedir", "tests/unit/test_config.py::TestConfigTestEnv::test_ignore_errors", "tests/unit/test_config.py::TestConfigTestEnv::test_envbindir", "tests/unit/test_config.py::TestConfigTestEnv::test_envbindir_jython[jython]", "tests/unit/test_config.py::TestConfigTestEnv::test_envbindir_jython[pypy]", "tests/unit/test_config.py::TestConfigTestEnv::test_envbindir_jython[pypy3]", "tests/unit/test_config.py::TestConfigTestEnv::test_passenv_as_multiline_list[win32]", "tests/unit/test_config.py::TestConfigTestEnv::test_passenv_as_multiline_list[linux2]", "tests/unit/test_config.py::TestConfigTestEnv::test_passenv_as_space_separated_list[win32]", "tests/unit/test_config.py::TestConfigTestEnv::test_passenv_as_space_separated_list[linux2]", "tests/unit/test_config.py::TestConfigTestEnv::test_passenv_with_factor", "tests/unit/test_config.py::TestConfigTestEnv::test_passenv_from_global_env", "tests/unit/test_config.py::TestConfigTestEnv::test_passenv_glob_from_global_env", "tests/unit/test_config.py::TestConfigTestEnv::test_changedir_override", "tests/unit/test_config.py::TestConfigTestEnv::test_install_command_setting", "tests/unit/test_config.py::TestConfigTestEnv::test_install_command_must_contain_packages", "tests/unit/test_config.py::TestConfigTestEnv::test_install_command_substitutions", "tests/unit/test_config.py::TestConfigTestEnv::test_pip_pre", "tests/unit/test_config.py::TestConfigTestEnv::test_pip_pre_cmdline_override", "tests/unit/test_config.py::TestConfigTestEnv::test_simple", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_error", 
"tests/unit/test_config.py::TestConfigTestEnv::test_substitution_defaults", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_notfound_issue246", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_notfound_issue515", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_nested_env_defaults", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_positional", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_noargs_issue240", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_double", "tests/unit/test_config.py::TestConfigTestEnv::test_posargs_backslashed_or_quoted", "tests/unit/test_config.py::TestConfigTestEnv::test_rewrite_posargs", "tests/unit/test_config.py::TestConfigTestEnv::test_rewrite_simple_posargs", "tests/unit/test_config.py::TestConfigTestEnv::test_take_dependencies_from_other_testenv[envlist0-deps0]", "tests/unit/test_config.py::TestConfigTestEnv::test_take_dependencies_from_other_testenv[envlist1-deps1]", "tests/unit/test_config.py::TestConfigTestEnv::test_take_dependencies_from_other_section", "tests/unit/test_config.py::TestConfigTestEnv::test_multilevel_substitution", "tests/unit/test_config.py::TestConfigTestEnv::test_recursive_substitution_cycle_fails", "tests/unit/test_config.py::TestConfigTestEnv::test_single_value_from_other_secton", "tests/unit/test_config.py::TestConfigTestEnv::test_factors", "tests/unit/test_config.py::TestConfigTestEnv::test_factor_ops", "tests/unit/test_config.py::TestConfigTestEnv::test_envconfigs_based_on_factors", "tests/unit/test_config.py::TestConfigTestEnv::test_default_factors", "tests/unit/test_config.py::TestConfigTestEnv::test_default_factors_conflict_ignore", "tests/unit/test_config.py::TestConfigTestEnv::test_factors_in_boolean", "tests/unit/test_config.py::TestConfigTestEnv::test_factors_in_setenv", "tests/unit/test_config.py::TestConfigTestEnv::test_factor_use_not_checked", "tests/unit/test_config.py::TestConfigTestEnv::test_factors_groups_touch", "tests/unit/test_config.py::TestConfigTestEnv::test_period_in_factor", "tests/unit/test_config.py::TestConfigTestEnv::test_ignore_outcome", "tests/unit/test_config.py::TestGlobalOptions::test_notest", "tests/unit/test_config.py::TestGlobalOptions::test_verbosity", "tests/unit/test_config.py::TestGlobalOptions::test_quiet[args0-0]", "tests/unit/test_config.py::TestGlobalOptions::test_quiet[args1-1]", "tests/unit/test_config.py::TestGlobalOptions::test_quiet[args2-2]", "tests/unit/test_config.py::TestGlobalOptions::test_quiet[args3-3]", "tests/unit/test_config.py::TestGlobalOptions::test_substitution_jenkins_default", "tests/unit/test_config.py::TestGlobalOptions::test_substitution_jenkins_context", "tests/unit/test_config.py::TestGlobalOptions::test_sdist_specification", "tests/unit/test_config.py::TestGlobalOptions::test_env_selection", "tests/unit/test_config.py::TestGlobalOptions::test_py_venv", "tests/unit/test_config.py::TestGlobalOptions::test_correct_basepython_chosen_from_default_factors", "tests/unit/test_config.py::TestGlobalOptions::test_envlist_expansion", "tests/unit/test_config.py::TestGlobalOptions::test_envlist_cross_product", "tests/unit/test_config.py::TestGlobalOptions::test_envlist_multiline", "tests/unit/test_config.py::TestGlobalOptions::test_minversion", "tests/unit/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_true", "tests/unit/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_false", 
"tests/unit/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_cli_no_arg", "tests/unit/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_cli_not_specified", "tests/unit/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_cli_overrides_true", "tests/unit/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_cli_overrides_false", "tests/unit/test_config.py::TestGlobalOptions::test_defaultenv_commandline", "tests/unit/test_config.py::TestGlobalOptions::test_defaultenv_partial_override", "tests/unit/test_config.py::TestHashseedOption::test_default", "tests/unit/test_config.py::TestHashseedOption::test_passing_integer", "tests/unit/test_config.py::TestHashseedOption::test_passing_string", "tests/unit/test_config.py::TestHashseedOption::test_passing_empty_string", "tests/unit/test_config.py::TestHashseedOption::test_passing_no_argument", "tests/unit/test_config.py::TestHashseedOption::test_setenv", "tests/unit/test_config.py::TestHashseedOption::test_noset", "tests/unit/test_config.py::TestHashseedOption::test_noset_with_setenv", "tests/unit/test_config.py::TestHashseedOption::test_one_random_hashseed", "tests/unit/test_config.py::TestHashseedOption::test_setenv_in_one_testenv", "tests/unit/test_config.py::TestSetenv::test_getdict_lazy", "tests/unit/test_config.py::TestSetenv::test_getdict_lazy_update", "tests/unit/test_config.py::TestSetenv::test_setenv_uses_os_environ", "tests/unit/test_config.py::TestSetenv::test_setenv_default_os_environ", "tests/unit/test_config.py::TestSetenv::test_setenv_uses_other_setenv", "tests/unit/test_config.py::TestSetenv::test_setenv_recursive_direct", "tests/unit/test_config.py::TestSetenv::test_setenv_overrides", "tests/unit/test_config.py::TestSetenv::test_setenv_with_envdir_and_basepython", "tests/unit/test_config.py::TestSetenv::test_setenv_ordering_1", "tests/unit/test_config.py::TestSetenv::test_setenv_cross_section_subst_issue294", "tests/unit/test_config.py::TestSetenv::test_setenv_cross_section_subst_twice", "tests/unit/test_config.py::TestSetenv::test_setenv_cross_section_mixed", "tests/unit/test_config.py::TestIndexServer::test_indexserver", "tests/unit/test_config.py::TestIndexServer::test_parse_indexserver", "tests/unit/test_config.py::TestIndexServer::test_multiple_homedir_relative_local_indexservers", "tests/unit/test_config.py::TestConfigConstSubstitutions::test_replace_pathsep_unix[:]", "tests/unit/test_config.py::TestConfigConstSubstitutions::test_replace_pathsep_unix[;]", "tests/unit/test_config.py::TestConfigConstSubstitutions::test_pathsep_regex", "tests/unit/test_config.py::TestParseEnv::test_parse_recreate", "tests/unit/test_config.py::TestCmdInvocation::test_help", "tests/unit/test_config.py::TestCmdInvocation::test_version_simple", "tests/unit/test_config.py::TestCmdInvocation::test_version_no_plugins", "tests/unit/test_config.py::TestCmdInvocation::test_version_with_normal_plugin", "tests/unit/test_config.py::TestCmdInvocation::test_version_with_fileless_module", "tests/unit/test_config.py::TestCmdInvocation::test_config_specific_ini", "tests/unit/test_config.py::TestCmdInvocation::test_no_tox_ini", "tests/unit/test_config.py::TestCmdInvocation::test_override_workdir", "tests/unit/test_config.py::TestCmdInvocation::test_showconfig_with_force_dep_version", "tests/unit/test_config.py::test_env_spec[-e", "tests/unit/test_config.py::TestCommandParser::test_command_parser_for_word", "tests/unit/test_config.py::TestCommandParser::test_command_parser_for_posargs", 
"tests/unit/test_config.py::TestCommandParser::test_command_parser_for_multiple_words", "tests/unit/test_config.py::TestCommandParser::test_command_parser_for_substitution_with_spaces", "tests/unit/test_config.py::TestCommandParser::test_command_parser_with_complex_word_set", "tests/unit/test_config.py::TestCommandParser::test_command_with_runs_of_whitespace", "tests/unit/test_config.py::TestCommandParser::test_command_with_split_line_in_subst_arguments", "tests/unit/test_config.py::TestCommandParser::test_command_parsing_for_issue_10", "tests/unit/test_config.py::TestCommandParser::test_commands_with_backslash", "tests/unit/test_config.py::test_plugin_require", "tests/unit/test_config.py::test_isolated_build_env_cannot_be_in_envlist", "tests/unit/test_config.py::test_config_via_pyproject_legacy", "tests/unit/test_config.py::test_config_bad_pyproject_specified", "tests/unit/test_config.py::test_config_bad_config_type_specified" ]
[]
MIT License
3,075
[ "doc/config.rst", "tox.ini", "changelog/908.bugfix.rst", "src/tox/config.py" ]
[ "doc/config.rst", "tox.ini", "changelog/908.bugfix.rst", "src/tox/config.py" ]
CORE-GATECH-GROUP__serpent-tools-243
67846baca60f959ac92bdf208e6f1c6744688890
2018-09-15 13:51:41
13961f3712a08e069c1ac96aaecf07ea7e7e5524
diff --git a/serpentTools/parsers/results.py b/serpentTools/parsers/results.py index d46a7fd..095cb6c 100644 --- a/serpentTools/parsers/results.py +++ b/serpentTools/parsers/results.py @@ -183,9 +183,10 @@ class ResultsReader(XSReader): """Process universes' data""" brState = self._getBUstate() # obtain the branching tuple values = str2vec(varVals) # convert the string to float numbers - if not self.universes or brState not in self.universes.keys(): + if brState not in self.universes: self.universes[brState] = \ HomogUniv(brState[0], brState[1], brState[2], brState[3]) + if varNameSer == self._keysVersion['univ']: return if varNameSer not in self._keysVersion['varsUnc']: vals, uncs = splitValsUncs(values)
[BUG] ResultsReader fails when only group constants are requested ## Summary of issue When restricting the scope of the variables extracted from the results file to only group constants, the reader fails. It is also worth noting that, when non-group constant data are requested as well, 'ABS_KEFF' and 'INF_FLX', for example, the reader will not fail, but the universes produced will not have group constant data ## Code for reproducing the issue ``` import serpentTools serpentTools.settings.rc['xs.variableExtras'] = ['INF_FLX', ] serpentTools.readDataFile('pwr_res.m') ``` Alternative case ``` import serpentTools serpentTools.settings.rc['xs.variableExtras'] = ['INF_FLX', 'ABS_KEFF'] r = serpentTools.readDataFile('pwr_res.m') u = r.getUniv('0', index=1) u.infExp # returns {} ``` ## Actual outcome including console output and error traceback if applicable ``` Traceback (most recent call last): File "<string>", line 1, in <module> File "/home/ajohnson400/.local/lib/python3.6/site-packages/serpentTools-0.5.4-py3.6.egg/serpentTools/data/__init__.py", line 70, in readDataFile return read(filePath, **kwargs) File "/home/ajohnson400/.local/lib/python3.6/site-packages/serpentTools-0.5.4-py3.6.egg/serpentTools/parsers/__init__.py", line 149, in read returnedFromLoader.read() File "/home/ajohnson400/.local/lib/python3.6/site-packages/serpentTools-0.5.4-py3.6.egg/serpentTools/parsers/base.py", line 49, in read self._postcheck() File "/home/ajohnson400/.local/lib/python3.6/site-packages/serpentTools-0.5.4-py3.6.egg/serpentTools/parsers/results.py", line 345, in _postcheck self._inspectData() File "/home/ajohnson400/.local/lib/python3.6/site-packages/serpentTools-0.5.4-py3.6.egg/serpentTools/parsers/results.py", line 342, in _inspectData .format(self.filePath)) serpentTools.messages.SerpentToolsException: metadata, resdata and universes are all empty from /home/ajohnson400/.local/lib/python3.6/site-packages/serpentTools-0.5.4-py3.6.egg/serpentTools/data/pwr_res.m and <results.expectGcu> is True ``` ## Expected outcome The parser should respect the settings passed, and properly store all the requested data on the homogenized universes ## Versions * Version from ``serpentTools.__version__`` `0.5.4` * Python version - ``python --version`` `3.6`
CORE-GATECH-GROUP/serpent-tools
diff --git a/serpentTools/tests/test_ResultsReader.py b/serpentTools/tests/test_ResultsReader.py index 03e9d28..1384c4d 100644 --- a/serpentTools/tests/test_ResultsReader.py +++ b/serpentTools/tests/test_ResultsReader.py @@ -8,7 +8,7 @@ from numpy.testing import assert_equal from six import iteritems from serpentTools.settings import rc -from serpentTools.data import getFile +from serpentTools.data import getFile, readDataFile from serpentTools.parsers import ResultsReader from serpentTools.messages import SerpentToolsException @@ -597,6 +597,40 @@ class TestResultsNoBurnNoGcu(TestFilterResultsNoBurnup): self.reader.read() +class RestrictedResultsReader(unittest.TestCase): + """Class that restricts the variables read from the results file""" + + expectedInfFlux_bu0 = TestReadAllResults.expectedInfVals + expectedAbsKeff = TestReadAllResults.expectedKeff + dataFile = "pwr_res.m" + + def _testUnivFlux(self, reader): + univ = reader.getUniv('0', index=1) + assert_equal(self.expectedInfFlux_bu0, univ.get("infFlx")) + + def test_justFlux(self): + """Restrict the variables to gcu inf flux and verify their values""" + with rc: + rc['xs.variableExtras'] = ["INF_FLX", ] + r = readDataFile(self.dataFile) + self._testUnivFlux(r) + + def test_xsGroups(self): + """Restrict the variables groups to gc-meta to obtain flux and test.""" + with rc: + rc['xs.variableGroups'] = ['gc-meta', ] + r = readDataFile(self.dataFile) + self._testUnivFlux(r) + + def test_fluxAndKeff(self): + """Restrict to two unique parameters and verify their contents.""" + with rc: + rc['xs.variableExtras'] = ['ABS_KEFF', 'INF_FLX'] + r = readDataFile(self.dataFile) + self._testUnivFlux(r) + assert_equal(self.expectedAbsKeff, r.resdata['absKeff']) + + del TesterCommonResultsReader if __name__ == '__main__':
{ "commit_name": "merge_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 0, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 1 }
0.6
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "flake8>=3.1.0", "pandas>=0.21.0", "jupyter>=1.0", "coverage==4.5.1" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
anyio==3.6.2 argon2-cffi==21.3.0 argon2-cffi-bindings==21.2.0 async-generator==1.10 attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work Babel==2.11.0 backcall==0.2.0 bleach==4.1.0 certifi==2021.5.30 cffi==1.15.1 charset-normalizer==2.0.12 comm==0.1.4 contextvars==2.4 coverage==4.5.1 cycler==0.11.0 dataclasses==0.8 decorator==5.1.1 defusedxml==0.7.1 entrypoints==0.4 flake8==5.0.4 idna==3.10 immutables==0.19 importlib-metadata==4.2.0 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work ipykernel==5.5.6 ipython==7.16.3 ipython-genutils==0.2.0 ipywidgets==7.8.5 jedi==0.17.2 Jinja2==3.0.3 json5==0.9.16 jsonschema==3.2.0 jupyter==1.1.1 jupyter-client==7.1.2 jupyter-console==6.4.3 jupyter-core==4.9.2 jupyter-server==1.13.1 jupyterlab==3.2.9 jupyterlab-pygments==0.1.2 jupyterlab-server==2.10.3 jupyterlab_widgets==1.1.11 kiwisolver==1.3.1 MarkupSafe==2.0.1 matplotlib==2.2.3 mccabe==0.7.0 mistune==0.8.4 more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work nbclassic==0.3.5 nbclient==0.5.9 nbconvert==6.0.7 nbformat==5.1.3 nest-asyncio==1.6.0 notebook==6.4.10 numpy==1.19.5 packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pandas==1.1.5 pandocfilters==1.5.1 parso==0.7.1 pexpect==4.9.0 pickleshare==0.7.5 pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work prometheus-client==0.17.1 prompt-toolkit==3.0.36 ptyprocess==0.7.0 py @ file:///opt/conda/conda-bld/py_1644396412707/work pycodestyle==2.9.1 pycparser==2.21 pyflakes==2.5.0 Pygments==2.14.0 pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pyrsistent==0.18.0 pytest==6.2.4 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==6.0.1 pyzmq==25.1.2 requests==2.27.1 scipy==1.5.4 Send2Trash==1.8.3 -e git+https://github.com/CORE-GATECH-GROUP/serpent-tools.git@67846baca60f959ac92bdf208e6f1c6744688890#egg=serpentTools six==1.17.0 sniffio==1.2.0 terminado==0.12.1 testpath==0.6.0 toml @ file:///tmp/build/80754af9/toml_1616166611790/work tornado==6.1 traitlets==4.3.3 typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work urllib3==1.26.20 wcwidth==0.2.13 webencodings==0.5.1 websocket-client==1.3.1 widgetsnbextension==3.6.10 zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: serpent-tools channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - anyio==3.6.2 - argon2-cffi==21.3.0 - argon2-cffi-bindings==21.2.0 - async-generator==1.10 - babel==2.11.0 - backcall==0.2.0 - bleach==4.1.0 - cffi==1.15.1 - charset-normalizer==2.0.12 - comm==0.1.4 - contextvars==2.4 - coverage==4.5.1 - cycler==0.11.0 - dataclasses==0.8 - decorator==5.1.1 - defusedxml==0.7.1 - entrypoints==0.4 - flake8==5.0.4 - idna==3.10 - immutables==0.19 - importlib-metadata==4.2.0 - ipykernel==5.5.6 - ipython==7.16.3 - ipython-genutils==0.2.0 - ipywidgets==7.8.5 - jedi==0.17.2 - jinja2==3.0.3 - json5==0.9.16 - jsonschema==3.2.0 - jupyter==1.1.1 - jupyter-client==7.1.2 - jupyter-console==6.4.3 - jupyter-core==4.9.2 - jupyter-server==1.13.1 - jupyterlab==3.2.9 - jupyterlab-pygments==0.1.2 - jupyterlab-server==2.10.3 - jupyterlab-widgets==1.1.11 - kiwisolver==1.3.1 - markupsafe==2.0.1 - matplotlib==2.2.3 - mccabe==0.7.0 - mistune==0.8.4 - nbclassic==0.3.5 - nbclient==0.5.9 - nbconvert==6.0.7 - nbformat==5.1.3 - nest-asyncio==1.6.0 - notebook==6.4.10 - numpy==1.19.5 - pandas==1.1.5 - pandocfilters==1.5.1 - parso==0.7.1 - pexpect==4.9.0 - pickleshare==0.7.5 - prometheus-client==0.17.1 - prompt-toolkit==3.0.36 - ptyprocess==0.7.0 - pycodestyle==2.9.1 - pycparser==2.21 - pyflakes==2.5.0 - pygments==2.14.0 - pyrsistent==0.18.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==6.0.1 - pyzmq==25.1.2 - requests==2.27.1 - scipy==1.5.4 - send2trash==1.8.3 - six==1.17.0 - sniffio==1.2.0 - terminado==0.12.1 - testpath==0.6.0 - tornado==6.1 - traitlets==4.3.3 - urllib3==1.26.20 - wcwidth==0.2.13 - webencodings==0.5.1 - websocket-client==1.3.1 - widgetsnbextension==3.6.10 prefix: /opt/conda/envs/serpent-tools
[ "serpentTools/tests/test_ResultsReader.py::RestrictedResultsReader::test_fluxAndKeff", "serpentTools/tests/test_ResultsReader.py::RestrictedResultsReader::test_justFlux" ]
[ "serpentTools/tests/test_ResultsReader.py::TestGetUniv::test_allVarsNone", "serpentTools/tests/test_ResultsReader.py::TestGetUniv::test_noUnivState", "serpentTools/tests/test_ResultsReader.py::TestGetUniv::test_nonPostiveIndex", "serpentTools/tests/test_ResultsReader.py::TestGetUniv::test_validUniv", "serpentTools/tests/test_ResultsReader.py::TestFilterResults::test_burnup", "serpentTools/tests/test_ResultsReader.py::TestFilterResults::test_metadata", "serpentTools/tests/test_ResultsReader.py::TestFilterResults::test_resdata", "serpentTools/tests/test_ResultsReader.py::TestFilterResults::test_universes", "serpentTools/tests/test_ResultsReader.py::TestFilterResults::test_varsMatchSettings", "serpentTools/tests/test_ResultsReader.py::TestFilterResultsNoBurnup::test_burnup", "serpentTools/tests/test_ResultsReader.py::TestFilterResultsNoBurnup::test_metadata", "serpentTools/tests/test_ResultsReader.py::TestFilterResultsNoBurnup::test_resdata", "serpentTools/tests/test_ResultsReader.py::TestFilterResultsNoBurnup::test_universes", "serpentTools/tests/test_ResultsReader.py::TestFilterResultsNoBurnup::test_varsMatchSettings", "serpentTools/tests/test_ResultsReader.py::TestResultsNoBurnNoGcu::test_burnup", "serpentTools/tests/test_ResultsReader.py::TestResultsNoBurnNoGcu::test_metadata", "serpentTools/tests/test_ResultsReader.py::TestResultsNoBurnNoGcu::test_resdata", "serpentTools/tests/test_ResultsReader.py::TestResultsNoBurnNoGcu::test_universes", "serpentTools/tests/test_ResultsReader.py::TestResultsNoBurnNoGcu::test_varsMatchSettings", "serpentTools/tests/test_ResultsReader.py::RestrictedResultsReader::test_xsGroups" ]
[ "serpentTools/tests/test_ResultsReader.py::TestBadFiles::test_emptyFile_noGcu", "serpentTools/tests/test_ResultsReader.py::TestBadFiles::test_noResults", "serpentTools/tests/test_ResultsReader.py::TestEmptyAttributes::test_emptyAttributes", "serpentTools/tests/test_ResultsReader.py::TestReadAllResults::test_burnup", "serpentTools/tests/test_ResultsReader.py::TestReadAllResults::test_metadata", "serpentTools/tests/test_ResultsReader.py::TestReadAllResults::test_resdata", "serpentTools/tests/test_ResultsReader.py::TestReadAllResults::test_universes", "serpentTools/tests/test_ResultsReader.py::TestReadAllResults::test_varsMatchSettings" ]
[]
MIT License
3,076
[ "serpentTools/parsers/results.py" ]
[ "serpentTools/parsers/results.py" ]
robotframework__SeleniumLibrary-1201
c69f2cc9d080f49046af2b2b8c888a0c150eaffc
2018-09-15 20:45:59
ad3a914c3798e3916e96378b3623a3ec92f635be
diff --git a/src/SeleniumLibrary/keywords/webdrivertools.py b/src/SeleniumLibrary/keywords/webdrivertools.py index f5969526..3aa14dc8 100644 --- a/src/SeleniumLibrary/keywords/webdrivertools.py +++ b/src/SeleniumLibrary/keywords/webdrivertools.py @@ -77,7 +77,8 @@ class WebDriverCreator(object): def create_chrome(self, desired_capabilities, remote_url, options=None): default = webdriver.DesiredCapabilities.CHROME if is_truthy(remote_url): - return self._remote(default, desired_capabilities, remote_url) + return self._remote(default, desired_capabilities, remote_url, + options=options) capabilities = self._combine_capabilites(default, desired_capabilities) if SELENIUM_VERSION.major >= 3 and SELENIUM_VERSION.minor >= 8: return webdriver.Chrome(desired_capabilities=capabilities, @@ -97,7 +98,8 @@ class WebDriverCreator(object): default = webdriver.DesiredCapabilities.FIREFOX profile = self._get_ff_profile(ff_profile_dir) if is_truthy(remote_url): - return self._remote(default, desired_capabilities, remote_url, profile) + return self._remote(default, desired_capabilities, remote_url, + profile, options) capabilities = self._combine_capabilites(default, desired_capabilities) if SELENIUM_VERSION.major >= 3 and SELENIUM_VERSION.minor >= 8: return webdriver.Firefox(capabilities=capabilities, options=options, @@ -180,10 +182,14 @@ class WebDriverCreator(object): return self._remote(default, desired_capabilities, remote_url) def _remote(self, default_capabilities, user_capabilities, remote_url, - profile_dir=None): + profile_dir=None, options=None): remote_url = str(remote_url) capabilities = self._combine_capabilites(default_capabilities, user_capabilities) + if SELENIUM_VERSION.major >= 3 and SELENIUM_VERSION.minor >= 8: + return webdriver.Remote(command_executor=remote_url, + desired_capabilities=capabilities, + browser_profile=profile_dir, options=options) return webdriver.Remote(command_executor=remote_url, desired_capabilities=capabilities, browser_profile=profile_dir)
Headless Firefox and Chrome not working over selenium grid I was trying to run headless Firefox with a custom Firefox profile over a Selenium grid, but the browser does not open in headless mode; it opens in GUI mode. Launch Firefox Grid Set Download Directory ${profile path}= set_firefox_profile ${DOWNLOAD_DIR} log ${profile path} ${dict} Create Dictionary seleniumProtocol=WebDriver maxInstances=5 environment=firefox Open Browser about:blank headlessfirefox None http://192.168.99.100:4444/wd/hub desired_capabilities=${dict} ff_profile_dir=${profile path} Maximize Browser Window The Firefox profile was created like this: `def set_firefox_profile(download_dir): fp = FirefoxProfile() fp.set_preference("browser.download.folderList", 2) fp.set_preference("browser.download.manager.showWhenStarting", False) fp.set_preference("browser.download.dir", download_dir) fp.set_preference("browser.manager.closeWhenDone", True) fp.set_preference("browser.download.manager.showAlertOnComplete", False) fp.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plan") fp.update_preferences() return fp.path` I have selenium 3.11.0 and SeleniumLibrary 3.1.1 in my environment.
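The patch above forwards the browser options object to `webdriver.Remote` when a grid URL is given. The sketch below is illustrative only, not SeleniumLibrary code: it shows the plain-Selenium call the fix ultimately makes, assuming Selenium 3.8+ (where `webdriver.Remote` accepts an `options` keyword) and reusing the grid URL from the report.

```python
# Hedged sketch: how a headless options object reaches the grid node.
# Plain Selenium 3.x calls for illustration; not SeleniumLibrary internals.
from selenium import webdriver

options = webdriver.FirefoxOptions()
options.set_headless()  # Selenium 3 API; Selenium 4 uses options.add_argument("-headless")

driver = webdriver.Remote(
    command_executor="http://192.168.99.100:4444/wd/hub",        # grid URL from the report
    desired_capabilities=webdriver.DesiredCapabilities.FIREFOX,
    browser_profile=None,   # or a FirefoxProfile like the one built above
    options=options,        # without this keyword the headless flag never reaches the node
)
```

If the `options` keyword is not passed through, as was the case before the patch, the node only receives the capabilities and starts a regular, non-headless browser.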
robotframework/SeleniumLibrary
diff --git a/utest/test/keywords/test_webdrivercreator.py b/utest/test/keywords/test_webdrivercreator.py index c59ad757..e37b2274 100644 --- a/utest/test/keywords/test_webdrivercreator.py +++ b/utest/test/keywords/test_webdrivercreator.py @@ -69,7 +69,8 @@ class WebDriverCreatorTests(unittest.TestCase): caps = webdriver.DesiredCapabilities.CHROME when(webdriver).Remote(command_executor=url, desired_capabilities=caps, - browser_profile=None).thenReturn(expected_webdriver) + browser_profile=None, + options=None).thenReturn(expected_webdriver) driver = self.creator.create_chrome({}, url) self.assertEqual(driver, expected_webdriver) @@ -90,6 +91,19 @@ class WebDriverCreatorTests(unittest.TestCase): verify(options, times=0).set_headless() self.assertEqual(driver, expected_webdriver) + def test_chrome_healdless_with_grid(self): + caps = webdriver.DesiredCapabilities.CHROME + expected_webdriver = mock() + options = mock() + when(webdriver).ChromeOptions().thenReturn(options) + remote_url = 'localhost:4444' + when(webdriver).Remote(command_executor=remote_url, + desired_capabilities=caps, options=options, + browser_profile=None,).thenReturn(expected_webdriver) + driver = self.creator.create_headless_chrome({}, remote_url) + verify(options).set_headless() + self.assertEqual(driver, expected_webdriver) + def test_firefox(self): expected_webdriver = mock() profile = mock() @@ -116,7 +130,8 @@ class WebDriverCreatorTests(unittest.TestCase): caps = webdriver.DesiredCapabilities.FIREFOX when(webdriver).Remote(command_executor=url, desired_capabilities=caps, - browser_profile=profile).thenReturn(expected_webdriver) + browser_profile=profile, + options=None).thenReturn(expected_webdriver) driver = self.creator.create_firefox({}, url, None) self.assertEqual(driver, expected_webdriver) @@ -157,6 +172,21 @@ class WebDriverCreatorTests(unittest.TestCase): driver = self.creator.create_headless_firefox({}, None, None) self.assertEqual(driver, expected_webdriver) + def test_firefox_healdless_with_grid(self): + caps = webdriver.DesiredCapabilities.FIREFOX + expected_webdriver = mock() + options = mock() + when(webdriver).FirefoxOptions().thenReturn(options) + profile = mock() + when(webdriver).FirefoxProfile().thenReturn(profile) + remote_url = 'localhost:4444' + when(webdriver).Remote(command_executor=remote_url, + desired_capabilities=caps, options=options, + browser_profile=profile,).thenReturn(expected_webdriver) + driver = self.creator.create_headless_firefox({}, remote_url, None) + verify(options).set_headless() + self.assertEqual(driver, expected_webdriver) + def test_ie(self): expected_webdriver = mock() caps = webdriver.DesiredCapabilities.INTERNETEXPLORER @@ -170,7 +200,8 @@ class WebDriverCreatorTests(unittest.TestCase): caps = webdriver.DesiredCapabilities.INTERNETEXPLORER when(webdriver).Remote(command_executor=url, desired_capabilities=caps, - browser_profile=None).thenReturn(expected_webdriver) + browser_profile=None, + options=None).thenReturn(expected_webdriver) driver = self.creator.create_ie({}, url) self.assertEqual(driver, expected_webdriver) @@ -187,7 +218,8 @@ class WebDriverCreatorTests(unittest.TestCase): caps = webdriver.DesiredCapabilities.EDGE when(webdriver).Remote(command_executor=url, desired_capabilities=caps, - browser_profile=None).thenReturn(expected_webdriver) + browser_profile=None, + options=None).thenReturn(expected_webdriver) driver = self.creator.create_edge({}, url) self.assertEqual(driver, expected_webdriver) @@ -204,7 +236,8 @@ class 
WebDriverCreatorTests(unittest.TestCase): caps = webdriver.DesiredCapabilities.OPERA when(webdriver).Remote(command_executor=url, desired_capabilities=caps, - browser_profile=None).thenReturn(expected_webdriver) + browser_profile=None, + options=None).thenReturn(expected_webdriver) driver = self.creator.create_opera({}, url) self.assertEqual(driver, expected_webdriver) @@ -221,7 +254,8 @@ class WebDriverCreatorTests(unittest.TestCase): caps = webdriver.DesiredCapabilities.SAFARI when(webdriver).Remote(command_executor=url, desired_capabilities=caps, - browser_profile=None).thenReturn(expected_webdriver) + browser_profile=None, + options=None).thenReturn(expected_webdriver) driver = self.creator.create_safari({}, url) self.assertEqual(driver, expected_webdriver) @@ -238,7 +272,8 @@ class WebDriverCreatorTests(unittest.TestCase): caps = webdriver.DesiredCapabilities.PHANTOMJS when(webdriver).Remote(command_executor=url, desired_capabilities=caps, - browser_profile=None).thenReturn(expected_webdriver) + browser_profile=None, + options=None).thenReturn(expected_webdriver) driver = self.creator.create_phantomjs({}, url) self.assertEqual(driver, expected_webdriver) @@ -247,7 +282,8 @@ class WebDriverCreatorTests(unittest.TestCase): caps = webdriver.DesiredCapabilities.HTMLUNIT when(webdriver).Remote(command_executor='None', desired_capabilities=caps, - browser_profile=None).thenReturn(expected_webdriver) + browser_profile=None, + options=None).thenReturn(expected_webdriver) driver = self.creator.create_htmlunit({}, None) self.assertEqual(driver, expected_webdriver) @@ -256,7 +292,8 @@ class WebDriverCreatorTests(unittest.TestCase): caps = webdriver.DesiredCapabilities.HTMLUNITWITHJS when(webdriver).Remote(command_executor='None', desired_capabilities=caps, - browser_profile=None).thenReturn(expected_webdriver) + browser_profile=None, + options=None).thenReturn(expected_webdriver) driver = self.creator.create_htmlunit_with_js({}, None) self.assertEqual(driver, expected_webdriver) @@ -265,7 +302,8 @@ class WebDriverCreatorTests(unittest.TestCase): caps = webdriver.DesiredCapabilities.ANDROID when(webdriver).Remote(command_executor='None', desired_capabilities=caps, - browser_profile=None).thenReturn(expected_webdriver) + browser_profile=None, + options=None).thenReturn(expected_webdriver) driver = self.creator.create_android({}, None) self.assertEqual(driver, expected_webdriver) @@ -274,7 +312,8 @@ class WebDriverCreatorTests(unittest.TestCase): caps = webdriver.DesiredCapabilities.IPHONE when(webdriver).Remote(command_executor='None', desired_capabilities=caps, - browser_profile=None).thenReturn(expected_webdriver) + browser_profile=None, + options=None).thenReturn(expected_webdriver) driver = self.creator.create_iphone({}, None) self.assertEqual(driver, expected_webdriver)
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 1 }
3.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements.txt", "requirements-dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
allpairspy==2.5.0 approval-utilities==8.0.0 approvaltests==8.0.0 attrs==22.2.0 beautifulsoup4==4.12.3 certifi==2021.5.30 charset-normalizer==2.0.12 coverage==6.2 empty-files==0.0.3 execnet==1.9.0 idna==3.10 importlib-metadata==4.8.3 iniconfig==1.1.1 mockito==1.4.0 mrjob==0.7.4 packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pyperclip==1.9.0 pytest==7.0.1 pytest-asyncio==0.16.0 pytest-cov==4.0.0 pytest-mock==3.6.1 pytest-xdist==3.0.2 PyYAML==6.0.1 requests==2.27.1 robotframework==6.1.1 -e git+https://github.com/robotframework/SeleniumLibrary.git@c69f2cc9d080f49046af2b2b8c888a0c150eaffc#egg=robotframework_seleniumlibrary robotstatuschecker==4.1.0 selenium==3.141.0 six==1.17.0 soupsieve==2.3.2.post1 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 zipp==3.6.0
name: SeleniumLibrary channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - allpairspy==2.5.0 - approval-utilities==8.0.0 - approvaltests==8.0.0 - attrs==22.2.0 - beautifulsoup4==4.12.3 - charset-normalizer==2.0.12 - coverage==6.2 - empty-files==0.0.3 - execnet==1.9.0 - idna==3.10 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - mockito==1.4.0 - mrjob==0.7.4 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pyperclip==1.9.0 - pytest==7.0.1 - pytest-asyncio==0.16.0 - pytest-cov==4.0.0 - pytest-mock==3.6.1 - pytest-xdist==3.0.2 - pyyaml==6.0.1 - requests==2.27.1 - robotframework==6.1.1 - robotstatuschecker==4.1.0 - selenium==3.141.0 - six==1.17.0 - soupsieve==2.3.2.post1 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - zipp==3.6.0 prefix: /opt/conda/envs/SeleniumLibrary
[ "utest/test/keywords/test_webdrivercreator.py::WebDriverCreatorTests::test_android", "utest/test/keywords/test_webdrivercreator.py::WebDriverCreatorTests::test_chrome_healdless_with_grid", "utest/test/keywords/test_webdrivercreator.py::WebDriverCreatorTests::test_chrome_remote", "utest/test/keywords/test_webdrivercreator.py::WebDriverCreatorTests::test_edge_remote", "utest/test/keywords/test_webdrivercreator.py::WebDriverCreatorTests::test_firefox_healdless_with_grid", "utest/test/keywords/test_webdrivercreator.py::WebDriverCreatorTests::test_firefox_remote", "utest/test/keywords/test_webdrivercreator.py::WebDriverCreatorTests::test_htmlunit", "utest/test/keywords/test_webdrivercreator.py::WebDriverCreatorTests::test_htmlunit_with_js", "utest/test/keywords/test_webdrivercreator.py::WebDriverCreatorTests::test_ie_remote", "utest/test/keywords/test_webdrivercreator.py::WebDriverCreatorTests::test_iphone", "utest/test/keywords/test_webdrivercreator.py::WebDriverCreatorTests::test_opera_remote", "utest/test/keywords/test_webdrivercreator.py::WebDriverCreatorTests::test_phantomjs_remote", "utest/test/keywords/test_webdrivercreator.py::WebDriverCreatorTests::test_safari_remote" ]
[]
[ "utest/test/keywords/test_webdrivercreator.py::WebDriverCreatorTests::test_chrome", "utest/test/keywords/test_webdrivercreator.py::WebDriverCreatorTests::test_chrome_healdless", "utest/test/keywords/test_webdrivercreator.py::WebDriverCreatorTests::test_combine_capabilites", "utest/test/keywords/test_webdrivercreator.py::WebDriverCreatorTests::test_create_driver_chrome", "utest/test/keywords/test_webdrivercreator.py::WebDriverCreatorTests::test_create_driver_firefox", "utest/test/keywords/test_webdrivercreator.py::WebDriverCreatorTests::test_create_driver_ie", "utest/test/keywords/test_webdrivercreator.py::WebDriverCreatorTests::test_edge", "utest/test/keywords/test_webdrivercreator.py::WebDriverCreatorTests::test_firefox", "utest/test/keywords/test_webdrivercreator.py::WebDriverCreatorTests::test_firefox_headless", "utest/test/keywords/test_webdrivercreator.py::WebDriverCreatorTests::test_firefox_profile", "utest/test/keywords/test_webdrivercreator.py::WebDriverCreatorTests::test_get_creator_method", "utest/test/keywords/test_webdrivercreator.py::WebDriverCreatorTests::test_ie", "utest/test/keywords/test_webdrivercreator.py::WebDriverCreatorTests::test_opera", "utest/test/keywords/test_webdrivercreator.py::WebDriverCreatorTests::test_parse_capabilities", "utest/test/keywords/test_webdrivercreator.py::WebDriverCreatorTests::test_phantomjs", "utest/test/keywords/test_webdrivercreator.py::WebDriverCreatorTests::test_safari" ]
[]
Apache License 2.0
3,077
[ "src/SeleniumLibrary/keywords/webdrivertools.py" ]
[ "src/SeleniumLibrary/keywords/webdrivertools.py" ]
kutaslab__fitgrid-23
1cba86280c45b7a5f1422621167aeee1952a2254
2018-09-15 23:56:13
fa73573d86934c0d22d09eb0b0af1d0849218ff6
diff --git a/fitgrid/epochs.py b/fitgrid/epochs.py index 1b09227..b14ad8a 100644 --- a/fitgrid/epochs.py +++ b/fitgrid/epochs.py @@ -41,12 +41,13 @@ class Epochs: levels_to_remove = set(epochs_table.index.names) levels_to_remove.discard(EPOCH_ID) - # so we remove all levels from index except EPOCH_ID - epochs_table.reset_index(list(levels_to_remove), inplace=True) - assert epochs_table.index.names == [EPOCH_ID] + # copy since we are about to modify + self.table = epochs_table.copy() + # remove all levels from index except EPOCH_ID + self.table.reset_index(list(levels_to_remove), inplace=True) + assert self.table.index.names == [EPOCH_ID] - self.table = epochs_table - snapshots = epochs_table.groupby(TIME) + snapshots = self.table.groupby(TIME) # check that snapshots across epochs have equal index by transitivity prev_group = None @@ -66,10 +67,13 @@ class Epochs: if not prev_group.index.is_unique: raise FitGridError( f'Duplicate values in {EPOCH_ID} index not allowed:', - tools.get_index_duplicates_table(epochs_table, EPOCH_ID), + tools.get_index_duplicates_table(self.table, EPOCH_ID), ) - # we're good, set instance variable + self.table.reset_index(inplace=True) + self.table.set_index([EPOCH_ID, TIME], inplace=True) + assert self.table.index.names == [EPOCH_ID, TIME] + self.snapshots = snapshots def lm(self, LHS='default', RHS=None):
Set epochs table index to EPOCH_ID and TIME during Epochs creation. In `__init__` we reset the index to keep only `EPOCH_ID`. After `snapshots` are created, set the index to `EPOCH_ID`, `TIME`. This is needed for plotting individual epochs.
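The patch above implements this by copying the caller's table, flattening its index, and rebuilding a two-level `EPOCH_ID`/`TIME` index. A minimal pandas sketch of the same idea follows; the frame, its columns, and the level-name strings are hypothetical and only mirror fitgrid's `EPOCH_ID`/`TIME` constants.

```python
# Illustrative pandas sketch of the index handling described above;
# the data and column names are made up.
import pandas as pd

EPOCH_ID, TIME = "epoch_id", "time"

raw = pd.DataFrame({
    EPOCH_ID: [0, 0, 1, 1],
    TIME: [0, 1, 0, 1],
    "categorical": ["a", "a", "b", "b"],
    "channel": [1.0, 2.0, 3.0, 4.0],
}).set_index([EPOCH_ID, TIME, "categorical"])   # caller may pass extra index levels

table = raw.copy()                    # work on a copy so the input is not altered
table.reset_index(inplace=True)       # move every index level back into columns
table.set_index([EPOCH_ID, TIME], inplace=True)

assert table.index.names == [EPOCH_ID, TIME]
assert raw.index.names == [EPOCH_ID, TIME, "categorical"]   # input untouched
```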
kutaslab/fitgrid
diff --git a/tests/test_epochs.py b/tests/test_epochs.py index 736db4b..a92bf85 100644 --- a/tests/test_epochs.py +++ b/tests/test_epochs.py @@ -2,7 +2,8 @@ import pytest import numpy as np from .context import fitgrid -from fitgrid import fake_data, epochs, errors +from fitgrid import fake_data, errors +from fitgrid.epochs import Epochs def test_epochs_unequal_snapshots(): @@ -13,7 +14,7 @@ def test_epochs_unequal_snapshots(): epochs_table.drop(epochs_table.index[42], inplace=True) with pytest.raises(errors.FitGridError) as error: - epochs.Epochs(epochs_table) + Epochs(epochs_table) assert 'differs from previous snapshot' in str(error.value) @@ -34,6 +35,23 @@ def test__raises_error_on_epoch_index_mismatch(): # now time index is equal to row number in the table overall with pytest.raises(errors.FitGridError) as error: - epochs.Epochs(epochs_table) + Epochs(epochs_table) assert 'differs from previous snapshot' in str(error.value) + + +def test_multiple_indices_end_up_EPOCH_ID_and_TIME(): + + from fitgrid import EPOCH_ID, TIME + + epochs_table = fake_data._generate( + n_epochs=10, n_samples=100, n_categories=2, n_channels=32 + ) + epochs_table.reset_index(inplace=True) + epochs_table.set_index([EPOCH_ID, TIME, 'categorical'], inplace=True) + + epochs = Epochs(epochs_table) + # internal table has EPOCH_ID and TIME in index + assert epochs.table.index.names == [EPOCH_ID, TIME] + # input table is not altered + assert epochs_table.index.names == [EPOCH_ID, TIME, 'categorical']
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 1 }
0.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
blosc2==2.5.1 contourpy==1.3.0 cycler==0.12.1 exceptiongroup==1.2.2 -e git+https://github.com/kutaslab/fitgrid.git@1cba86280c45b7a5f1422621167aeee1952a2254#egg=fitgrid fonttools==4.56.0 importlib_resources==6.5.2 iniconfig==2.1.0 kiwisolver==1.4.7 matplotlib==3.9.4 msgpack==1.1.0 ndindex==1.9.2 numexpr==2.10.2 numpy==2.0.2 packaging==24.2 pandas==2.2.3 patsy==1.0.1 pillow==11.1.0 pluggy==1.5.0 py-cpuinfo==9.0.0 pyparsing==3.2.3 pytest==8.3.5 python-dateutil==2.9.0.post0 pytz==2025.2 scipy==1.13.1 six==1.17.0 statsmodels==0.14.4 tables==3.9.2 tomli==2.2.1 tqdm==4.67.1 tzdata==2025.2 zipp==3.21.0
name: fitgrid channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - blosc2==2.5.1 - contourpy==1.3.0 - cycler==0.12.1 - exceptiongroup==1.2.2 - fonttools==4.56.0 - importlib-resources==6.5.2 - iniconfig==2.1.0 - kiwisolver==1.4.7 - matplotlib==3.9.4 - msgpack==1.1.0 - ndindex==1.9.2 - numexpr==2.10.2 - numpy==2.0.2 - packaging==24.2 - pandas==2.2.3 - patsy==1.0.1 - pillow==11.1.0 - pluggy==1.5.0 - py-cpuinfo==9.0.0 - pyparsing==3.2.3 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - pytz==2025.2 - scipy==1.13.1 - six==1.17.0 - statsmodels==0.14.4 - tables==3.9.2 - tomli==2.2.1 - tqdm==4.67.1 - tzdata==2025.2 - zipp==3.21.0 prefix: /opt/conda/envs/fitgrid
[ "tests/test_epochs.py::test_multiple_indices_end_up_EPOCH_ID_and_TIME" ]
[ "tests/test_epochs.py::test__raises_error_on_epoch_index_mismatch" ]
[ "tests/test_epochs.py::test_epochs_unequal_snapshots" ]
[]
BSD 3-Clause "New" or "Revised" License
3,078
[ "fitgrid/epochs.py" ]
[ "fitgrid/epochs.py" ]
pydicom__pydicom-742
38d9eaa4416971b306f86a9e80381503efb79f7b
2018-09-16 09:10:09
0721bdc0b5797f40984cc55b5408e273328dc528
pep8speaks: Hello @scaramallion! Thanks for submitting the PR. - In the file [`pydicom/tests/test_gdcm_pixel_data.py`](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_gdcm_pixel_data.py), following are the PEP8 issues : > [Line 275:34](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_gdcm_pixel_data.py#L275): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to True should be 'if cond is True:' or 'if cond:' > [Line 287:34](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_gdcm_pixel_data.py#L287): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to True should be 'if cond is True:' or 'if cond:' > [Line 291:1](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_gdcm_pixel_data.py#L291): [E303](https://duckduckgo.com/?q=pep8%20E303) too many blank lines (3) > [Line 337:34](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_gdcm_pixel_data.py#L337): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to True should be 'if cond is True:' or 'if cond:' > [Line 349:34](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_gdcm_pixel_data.py#L349): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to True should be 'if cond is True:' or 'if cond:' > [Line 364:34](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_gdcm_pixel_data.py#L364): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to True should be 'if cond is True:' or 'if cond:' > [Line 404:34](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_gdcm_pixel_data.py#L404): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to True should be 'if cond is True:' or 'if cond:' > [Line 421:34](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_gdcm_pixel_data.py#L421): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to True should be 'if cond is True:' or 'if cond:' > [Line 594:30](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_gdcm_pixel_data.py#L594): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to True should be 'if cond is True:' or 'if cond:' > [Line 645:34](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_gdcm_pixel_data.py#L645): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to True should be 'if cond is True:' or 'if cond:' - In the file [`pydicom/tests/test_jpeg_ls_pixel_data.py`](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_jpeg_ls_pixel_data.py), following are the PEP8 issues : > [Line 202:34](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_jpeg_ls_pixel_data.py#L202): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to True should be 'if cond is True:' or 'if cond:' > [Line 214:34](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_jpeg_ls_pixel_data.py#L214): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to True should be 'if cond is True:' or 'if cond:' - In the file 
[`pydicom/tests/test_numpy_pixel_data.py`](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_numpy_pixel_data.py), following are the PEP8 issues : > [Line 513:40](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_numpy_pixel_data.py#L513): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to False should be 'if cond is False:' or 'if not cond:' > [Line 529:40](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_numpy_pixel_data.py#L529): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to False should be 'if cond is False:' or 'if not cond:' > [Line 566:40](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_numpy_pixel_data.py#L566): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to False should be 'if cond is False:' or 'if not cond:' > [Line 588:40](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_numpy_pixel_data.py#L588): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to False should be 'if cond is False:' or 'if not cond:' > [Line 640:40](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_numpy_pixel_data.py#L640): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to True should be 'if cond is True:' or 'if cond:' > [Line 659:40](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_numpy_pixel_data.py#L659): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to True should be 'if cond is True:' or 'if cond:' > [Line 721:40](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_numpy_pixel_data.py#L721): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to False should be 'if cond is False:' or 'if not cond:' > [Line 736:40](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_numpy_pixel_data.py#L736): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to False should be 'if cond is False:' or 'if not cond:' > [Line 760:40](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_numpy_pixel_data.py#L760): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to False should be 'if cond is False:' or 'if not cond:' > [Line 782:40](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_numpy_pixel_data.py#L782): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to False should be 'if cond is False:' or 'if not cond:' > [Line 807:40](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_numpy_pixel_data.py#L807): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to False should be 'if cond is False:' or 'if not cond:' > [Line 822:40](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_numpy_pixel_data.py#L822): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to False should be 'if cond is False:' or 'if not cond:' > [Line 846:39](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_numpy_pixel_data.py#L846): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to False should 
be 'if cond is False:' or 'if not cond:' > [Line 868:40](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_numpy_pixel_data.py#L868): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to False should be 'if cond is False:' or 'if not cond:' - In the file [`pydicom/tests/test_pillow_pixel_data.py`](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_pillow_pixel_data.py), following are the PEP8 issues : > [Line 311:34](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_pillow_pixel_data.py#L311): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to True should be 'if cond is True:' or 'if cond:' > [Line 320:34](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_pillow_pixel_data.py#L320): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to True should be 'if cond is True:' or 'if cond:' > [Line 369:34](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_pillow_pixel_data.py#L369): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to True should be 'if cond is True:' or 'if cond:' > [Line 567:30](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_pillow_pixel_data.py#L567): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to True should be 'if cond is True:' or 'if cond:' - In the file [`pydicom/tests/test_rle_pixel_data.py`](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_rle_pixel_data.py), following are the PEP8 issues : > [Line 401:36](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_rle_pixel_data.py#L401): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to True should be 'if cond is True:' or 'if cond:' > [Line 420:36](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_rle_pixel_data.py#L420): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to True should be 'if cond is True:' or 'if cond:' > [Line 442:36](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_rle_pixel_data.py#L442): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to True should be 'if cond is True:' or 'if cond:' > [Line 467:36](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_rle_pixel_data.py#L467): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to True should be 'if cond is True:' or 'if cond:' > [Line 499:36](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_rle_pixel_data.py#L499): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to True should be 'if cond is True:' or 'if cond:' > [Line 519:36](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_rle_pixel_data.py#L519): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to True should be 'if cond is True:' or 'if cond:' > [Line 550:36](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_rle_pixel_data.py#L550): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to True should be 'if cond is True:' or 'if cond:' > [Line 
576:36](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_rle_pixel_data.py#L576): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to True should be 'if cond is True:' or 'if cond:' > [Line 607:36](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_rle_pixel_data.py#L607): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to True should be 'if cond is True:' or 'if cond:' > [Line 626:36](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_rle_pixel_data.py#L626): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to True should be 'if cond is True:' or 'if cond:' > [Line 657:36](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_rle_pixel_data.py#L657): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to True should be 'if cond is True:' or 'if cond:' > [Line 683:36](https://github.com/scaramallion/pydicom/blob/effde76c580ecb3cf2b449f7a4593c4d8a198149/pydicom/tests/test_rle_pixel_data.py#L683): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to True should be 'if cond is True:' or 'if cond:' mrbean-bremen: If you are at it, you could also remove that unneeded `copy` at the end of the pillow handler (I introduced that unnessarily). The array can just be made writable before masking out the high bit.
diff --git a/doc/whatsnew/v1.2.0.rst b/doc/whatsnew/v1.2.0.rst index c130343b9..436b53c46 100644 --- a/doc/whatsnew/v1.2.0.rst +++ b/doc/whatsnew/v1.2.0.rst @@ -25,6 +25,7 @@ Changes handler's ``is_available`` method. * ``DeferredDataElement`` class deprecated and will be removed in v1.3 (:issue:`291`) +* The use of numpypy with PyPy is no longer supported, use numpy instead. Enhancements @@ -54,9 +55,12 @@ Fixes (:pull_request:`660`) * Improve performance for Python 3 when dealing with compressed multi-frame Pixel Data with pillow and jpeg-ls (:issue:`682`). -* Improve performance of bit unpacking for non-PyPy2 interpreters - (:pull_request:`715`) +* Improve performance of bit unpacking (:pull_request:`715`) * First character set no longer removed (:issue:`707`) * Fixed RLE decoded data having the wrong byte order (:pull_request:`729`) * Fixed RLE decoded data having the wrong planar configuration (:pull_request:`729`) +* Fixed numpy arrays returned by the pixel data handlers sometimes being + read-only. Read-only arrays are still available for uncompressed transfer + syntaxes via a keyword argument for the numpy pixel data handler and should + help reduce memory consumption if required. (:issue:`717`) diff --git a/pydicom/multival.py b/pydicom/multival.py index d992fa6e2..178a1565b 100644 --- a/pydicom/multival.py +++ b/pydicom/multival.py @@ -3,7 +3,10 @@ or any list of items that must all be the same type. """ -from collections import MutableSequence +try: + from collections.abc import MutableSequence +except ImportError: + from collections import MutableSequence class MultiValue(MutableSequence): diff --git a/pydicom/pixel_data_handlers/gdcm_handler.py b/pydicom/pixel_data_handlers/gdcm_handler.py index fc747716c..153c64a94 100644 --- a/pydicom/pixel_data_handlers/gdcm_handler.py +++ b/pydicom/pixel_data_handlers/gdcm_handler.py @@ -46,7 +46,6 @@ def is_available(): return HAVE_NP and HAVE_GDCM - def needs_to_convert_to_RGB(dicom_dataset): should_convert = (dicom_dataset.file_meta.TransferSyntaxUID in should_convert_these_syntaxes_to_RGB) @@ -174,6 +173,7 @@ def get_pixeldata(dicom_dataset): n_bytes *= dicom_dataset.SamplesPerPixel except Exception: pass + if len(pixel_bytearray) > n_bytes: # We make sure that all the bytes after are in fact zeros padding = pixel_bytearray[n_bytes:] @@ -183,7 +183,9 @@ def get_pixeldata(dicom_dataset): # We revert to the old behavior which should then result # in a Numpy error later on. 
pass + pixel_array = numpy.frombuffer(pixel_bytearray, dtype=numpy_dtype) + length_of_pixel_array = pixel_array.nbytes expected_length = dicom_dataset.Rows * dicom_dataset.Columns @@ -192,10 +194,13 @@ def get_pixeldata(dicom_dataset): if dicom_dataset.BitsAllocated > 8: expected_length *= (dicom_dataset.BitsAllocated // 8) + if length_of_pixel_array != expected_length: raise AttributeError("Amount of pixel data %d does " "not match the expected data %d" % (length_of_pixel_array, expected_length)) + if should_change_PhotometricInterpretation_to_RGB(dicom_dataset): dicom_dataset.PhotometricInterpretation = "RGB" - return pixel_array + + return pixel_array.copy() diff --git a/pydicom/pixel_data_handlers/jpeg_ls_handler.py b/pydicom/pixel_data_handlers/jpeg_ls_handler.py index 29c919dcc..e7285eddf 100644 --- a/pydicom/pixel_data_handlers/jpeg_ls_handler.py +++ b/pydicom/pixel_data_handlers/jpeg_ls_handler.py @@ -137,7 +137,7 @@ def get_pixeldata(dicom_dataset): dicom_dataset.PixelData) decompressed_image = jpeg_ls.decode( numpy.frombuffer(CompressedPixelData, dtype=numpy.uint8)) - UncompressedPixelData = decompressed_image.tobytes() + UncompressedPixelData.extend(decompressed_image.tobytes()) pixel_array = numpy.frombuffer(UncompressedPixelData, numpy_format) if should_change_PhotometricInterpretation_to_RGB(dicom_dataset): diff --git a/pydicom/pixel_data_handlers/numpy_handler.py b/pydicom/pixel_data_handlers/numpy_handler.py index d60b5a9ce..8822f1df2 100644 --- a/pydicom/pixel_data_handlers/numpy_handler.py +++ b/pydicom/pixel_data_handlers/numpy_handler.py @@ -38,6 +38,7 @@ elements have values given in the table below. from platform import python_implementation from sys import byteorder +import warnings try: import numpy as np @@ -243,7 +244,7 @@ def unpack_bits(bytestream): return arr -def get_pixeldata(ds): +def get_pixeldata(ds, read_only=False): """Return an ndarray of the Pixel Data. Parameters @@ -251,6 +252,12 @@ def get_pixeldata(ds): ds : dataset.Dataset The DICOM dataset containing an Image Pixel module and the Pixel Data to be converted. + read_only : bool, optional + If False (default) then returns a writeable array that no longer uses + the original memory. If True and the value of (0028,0100) *Bits + Allocated* > 1 then returns a read-only array that uses the original + memory buffer of the pixel data. If *Bits Allocated* = 1 then always + returns a writeable array. 
Returns ------- @@ -313,4 +320,7 @@ def get_pixeldata(ds): if should_change_PhotometricInterpretation_to_RGB(ds): ds.PhotometricInterpretation = "RGB" + if not read_only and ds.BitsAllocated > 1: + return arr.copy() + return arr diff --git a/pydicom/pixel_data_handlers/pillow_handler.py b/pydicom/pixel_data_handlers/pillow_handler.py index ba78c1b71..b5e03963b 100644 --- a/pydicom/pixel_data_handlers/pillow_handler.py +++ b/pydicom/pixel_data_handlers/pillow_handler.py @@ -188,28 +188,32 @@ def get_pixeldata(dicom_dataset): UncompressedPixelData.extend(decompressed_image.tobytes()) else: # single compressed frame - UncompressedPixelData = pydicom.encaps.defragment_data( + pixel_data = pydicom.encaps.defragment_data( dicom_dataset.PixelData) - UncompressedPixelData = generic_jpeg_file_header + \ - UncompressedPixelData[frame_start_from:] + pixel_data = generic_jpeg_file_header + \ + pixel_data[frame_start_from:] try: - fio = io.BytesIO(UncompressedPixelData) + fio = io.BytesIO(pixel_data) decompressed_image = Image.open(fio) except IOError as e: raise NotImplementedError(e.strerror) - UncompressedPixelData = decompressed_image.tobytes() + UncompressedPixelData.extend(decompressed_image.tobytes()) except Exception: raise + logger.debug( - "Successfully read %s pixel bytes", - len(UncompressedPixelData)) - pixel_array = numpy.copy( - numpy.frombuffer(UncompressedPixelData, numpy_format)) + "Successfully read %s pixel bytes", len(UncompressedPixelData) + ) + + pixel_array = numpy.frombuffer(UncompressedPixelData, numpy_format) + if (transfer_syntax in PillowJPEG2000TransferSyntaxes and dicom_dataset.BitsStored == 16): # WHY IS THIS EVEN NECESSARY?? pixel_array &= 0x7FFF + if should_change_PhotometricInterpretation_to_RGB(dicom_dataset): dicom_dataset.PhotometricInterpretation = "RGB" + return pixel_array
"underlying array is read-only" while modifying pixel value in pydicom version 1.1.0 <!-- Instructions For Filing a Bug: https://github.com/pydicom/pydicom/blob/master/CONTRIBUTING.md#filing-bugs --> I tried to run a demo about reading dicom file, modify the pixel value and write new pixel data to another dicom file, however it failed in pydicom version 1.1.0 while works well for version 0.9.9-1. In order to modify the pixel data in version 1.1.0 or later, what modification should I do? Looking forward to your reply, thanks. p.s. in order to run version 0.9.9-1, I just changed the module name from pydicom to dicom as recommended. #### Description <!-- Example: Attribute Error thrown when printing (0x0010, 0x0020) patient Id> 0--> #### Steps/Code to Reproduce <!-- Example: ```py from io import BytesIO from pydicom import dcmread bytestream = b'\x02\x00\x02\x00\x55\x49\x16\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31' \ b'\x30\x30\x30\x38\x2e\x35\x2e\x31\x2e\x31\x2e\x39\x00\x02\x00\x10\x00' \ b'\x55\x49\x12\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38' \ b'\x2e\x31\x2e\x32\x00\x20\x20\x10\x00\x02\x00\x00\x00\x01\x00\x20\x20' \ b'\x20\x00\x06\x00\x00\x00\x4e\x4f\x52\x4d\x41\x4c' fp = BytesIO(bytestream) ds = dcmread(fp, force=True) print(ds.PatientID) ``` If the code is too long, feel free to put it in a public gist and link it in the issue: https://gist.github.com When possible use pydicom testing examples to reproduce the errors. Otherwise, provide an anonymous version of the data in order to replicate the errors. --> ``` import pydicom import pylab ds = pydicom.dcmread("IMG.DCM") ##查看有哪些属性 print(ds.dir("pat")) ##查看对应属性的具体值 print(ds.PatientName) ##将属性值给某个元素 data_element = ds.data_element("PatientName") # or data_element = ds[0x10,0x10] print(data_element.VR, data_element.value) ##删除属性 #del ds.SoftwareVersions ##原始二进制文件 pixel_bytes = ds.PixelData ##CT值组成了一个矩阵 pix = ds.pixel_array ##读取显示图片 pylab.imshow(ds.pixel_array, cmap=pylab.cm.bone) #pylab.show() ##修改图片中的元素,不能直接使用data_array,需要转换成PixelData print('ds.pixel_array.shape =', ds.pixel_array.shape) for n,val in enumerate(ds.pixel_array.flat): #example: zero anything < 300 if val < 300: ds.pixel_array.flat[n] = 0 ds.PixelData = ds.pixel_array.tostring() ds.save_as("IMG_NEW.DCM") ``` #### Expected Results <!-- Please paste or describe the expected results. Example: No error is thrown and the name of the patient is printed.--> IMG_NEW.DCM is written successfully #### Actual Results <!-- Please paste or specifically describe the actual output or traceback. (Use %xmode to deactivate ipython's trace beautifier) Example: ```AttributeError: 'FileDataset' object has no attribute 'PatientID'``` --> Traceback (most recent call last): File "readDicom.py", line 34, in <module> ds.pixel_array.flat[n] = 0 ValueError: underlying array is read-only #### Versions <!-- Please run the following snippet and paste the output below. import platform; print(platform.platform()) import sys; print("Python", sys.version) import pydicom; print("pydicom", pydicom.__version__) --> python 3.6.3 pydicom 1.1.0 <!-- Thanks for contributing! -->
pydicom/pydicom
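For completeness, a user-side workaround for the read-only array reported in the issue above is sketched here. It is not the library fix (the patch makes the handlers return writeable arrays); it simply edits a writeable copy of the pixel data and writes the bytes back, reusing the file names from the report.

```python
# Hedged workaround sketch, not pydicom internals: copy the pixel array
# into writeable memory, edit the copy, then store it back on the dataset.
import pydicom

ds = pydicom.dcmread("IMG.DCM")
arr = ds.pixel_array.copy()   # a copy is always writeable, whatever the handler returned
arr[arr < 300] = 0            # vectorised version of the loop in the report
ds.PixelData = arr.tobytes()  # tobytes() rather than the deprecated tostring()
ds.save_as("IMG_NEW.DCM")
```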
diff --git a/pydicom/tests/test_gdcm_pixel_data.py b/pydicom/tests/test_gdcm_pixel_data.py index 5c87f7e52..4d4c41fda 100644 --- a/pydicom/tests/test_gdcm_pixel_data.py +++ b/pydicom/tests/test_gdcm_pixel_data.py @@ -271,6 +271,8 @@ class GDCM_JPEG_LS_Tests_with_gdcm(unittest.TestCase): "using GDCM Decoded pixel data is not " "all {0} (mean == {1})".format(b.mean(), a.mean())) + assert a.flags.writeable + def test_emri_JPEG_LS_PixelArray_with_gdcm(self): a = self.emri_jpeg_ls_lossless.pixel_array b = self.emri_small.pixel_array @@ -280,6 +282,8 @@ class GDCM_JPEG_LS_Tests_with_gdcm(unittest.TestCase): "Decoded pixel data is not all {0} " "(mean == {1})".format(b.mean(), a.mean())) + assert a.flags.writeable + @pytest.mark.skipif(not HAVE_GDCM, reason=gdcm_missing_message) class GDCM_JPEG2000Tests_with_gdcm(unittest.TestCase): @@ -326,6 +330,8 @@ class GDCM_JPEG2000Tests_with_gdcm(unittest.TestCase): "Decoded pixel data is not all {0} " "(mean == {1})".format(b.mean(), a.mean())) + assert a.flags.writeable + def test_emri_JPEG2000PixelArray(self): a = self.emri_jpeg_2k_lossless.pixel_array b = self.emri_small.pixel_array @@ -335,6 +341,8 @@ class GDCM_JPEG2000Tests_with_gdcm(unittest.TestCase): "Decoded pixel data is not all {0} " "(mean == {1})".format(b.mean(), a.mean())) + assert a.flags.writeable + def test_jpeg2000_lossy(self): a = self.sc_rgb_jpeg2k_gdcm_KY.pixel_array b = self.ground_truth_sc_rgb_jpeg2k_gdcm_KY_gdcm.pixel_array @@ -347,6 +355,8 @@ class GDCM_JPEG2000Tests_with_gdcm(unittest.TestCase): "Decoded pixel data is not all {0} " "(mean == {1})".format(b.mean(), a.mean())) + assert a.flags.writeable + @pytest.mark.skipif(not HAVE_GDCM, reason=gdcm_missing_message) class GDCM_JPEGlossyTests_with_gdcm(unittest.TestCase): @@ -384,11 +394,16 @@ class GDCM_JPEGlossyTests_with_gdcm(unittest.TestCase): self.assertEqual(a[420, 140], 244) self.assertEqual(a[230, 120], 95) + assert a.flags.writeable + def test_JPEGBaselineColor3DPixelArray(self): self.assertEqual( self.color_3d_jpeg.PhotometricInterpretation, "YBR_FULL_422") a = self.color_3d_jpeg.pixel_array + + assert a.flags.writeable + self.assertEqual(a.shape, (120, 480, 640, 3)) a = _convert_YBR_FULL_to_RGB(a) # this test points were manually identified in Osirix viewer @@ -567,6 +582,9 @@ def test_PI_RGB(test_with_gdcm, t = dcmread(image) assert t.PhotometricInterpretation == PhotometricInterpretation a = t.pixel_array + + assert a.flags.writeable + assert a.shape == (100, 100, 3) if convert_yuv_to_rgb: a = _convert_YBR_FULL_to_RGB(a) @@ -614,3 +632,5 @@ class GDCM_JPEGlosslessTests_with_gdcm(unittest.TestCase): # this test points were manually identified in Osirix viewer self.assertEqual(a[420, 140], 227) self.assertEqual(a[230, 120], 105) + + assert a.flags.writeable diff --git a/pydicom/tests/test_jpeg_ls_pixel_data.py b/pydicom/tests/test_jpeg_ls_pixel_data.py index 512acf1a5..e7f0a4eda 100644 --- a/pydicom/tests/test_jpeg_ls_pixel_data.py +++ b/pydicom/tests/test_jpeg_ls_pixel_data.py @@ -198,6 +198,8 @@ class jpeg_ls_JPEG_LS_Tests_with_jpeg_ls(unittest.TestCase): "Decoded pixel data is not all {0} " "(mean == {1})".format(b.mean(), a.mean())) + assert a.flags.writeable + def test_emri_JPEG_LS_PixelArray(self): a = self.emri_jpeg_ls_lossless.pixel_array b = self.emri_small.pixel_array @@ -207,6 +209,8 @@ class jpeg_ls_JPEG_LS_Tests_with_jpeg_ls(unittest.TestCase): "Decoded pixel data is not all {0} " "(mean == {1})".format(b.mean(), a.mean())) + assert a.flags.writeable + @pytest.mark.skipif( not test_jpeg_ls_decoder, diff 
--git a/pydicom/tests/test_numpy_pixel_data.py b/pydicom/tests/test_numpy_pixel_data.py index 09bd11813..054f7133c 100644 --- a/pydicom/tests/test_numpy_pixel_data.py +++ b/pydicom/tests/test_numpy_pixel_data.py @@ -509,6 +509,8 @@ class TestNumpy_NumpyHandler(object): ds.file_meta.TransferSyntaxUID = uid arr = ds.pixel_array + assert arr.flags.writeable + assert (600, 800) == arr.shape assert 244 == arr[0].min() == arr[0].max() assert (1, 246, 1) == tuple(arr[300, 491:494]) @@ -522,6 +524,8 @@ class TestNumpy_NumpyHandler(object): ds.file_meta.TransferSyntaxUID = uid arr = ds.pixel_array + assert arr.flags.writeable + assert (2, 600, 800) == arr.shape # Frame 1 assert 244 == arr[0, 0].min() == arr[0, 0].max() @@ -556,6 +560,8 @@ class TestNumpy_NumpyHandler(object): ds.file_meta.TransferSyntaxUID = uid arr = ds.pixel_array + assert arr.flags.writeable + assert (255, 0, 0) == tuple(arr[5, 50, :]) assert (255, 128, 128) == tuple(arr[15, 50, :]) assert (0, 255, 0) == tuple(arr[25, 50, :]) @@ -575,6 +581,8 @@ class TestNumpy_NumpyHandler(object): ds.file_meta.TransferSyntaxUID = uid arr = ds.pixel_array + assert arr.flags.writeable + # Frame 1 frame = arr[0] assert (255, 0, 0) == tuple(frame[5, 50, :]) @@ -624,6 +632,8 @@ class TestNumpy_NumpyHandler(object): ds.file_meta.TransferSyntaxUID = uid arr = ds.pixel_array + assert arr.flags.writeable + assert arr.max() == 1 assert arr.min() == 0 @@ -640,6 +650,8 @@ class TestNumpy_NumpyHandler(object): ds.file_meta.TransferSyntaxUID = uid arr = ds.pixel_array + assert arr.flags.writeable + assert arr.max() == 1 assert arr.min() == 0 @@ -699,6 +711,8 @@ class TestNumpy_NumpyHandler(object): ds.file_meta.TransferSyntaxUID = uid arr = ds.pixel_array + assert arr.flags.writeable + assert (422, 319, 361) == tuple(arr[0, 31:34]) assert (366, 363, 322) == tuple(arr[31, :3]) assert (1369, 1129, 862) == tuple(arr[-1, -3:]) @@ -711,6 +725,8 @@ class TestNumpy_NumpyHandler(object): ds.file_meta.TransferSyntaxUID = uid arr = ds.pixel_array + assert arr.flags.writeable + # Frame 1 assert (206, 197, 159) == tuple(arr[0, 0, 31:34]) assert (49, 78, 128) == tuple(arr[0, 31, :3]) @@ -732,6 +748,8 @@ class TestNumpy_NumpyHandler(object): ds.file_meta.TransferSyntaxUID = uid arr = ds.pixel_array + assert arr.flags.writeable + assert (65535, 0, 0) == tuple(arr[5, 50, :]) assert (65535, 32896, 32896) == tuple(arr[15, 50, :]) assert (0, 65535, 0) == tuple(arr[25, 50, :]) @@ -751,6 +769,8 @@ class TestNumpy_NumpyHandler(object): ds.file_meta.TransferSyntaxUID = uid arr = ds.pixel_array + assert arr.flags.writeable + # Frame 1 assert (65535, 0, 0) == tuple(arr[0, 5, 50, :]) assert (65535, 32896, 32896) == tuple(arr[0, 15, 50, :]) @@ -773,6 +793,8 @@ class TestNumpy_NumpyHandler(object): ds.file_meta.TransferSyntaxUID = uid arr = ds.pixel_array + assert arr.flags.writeable + assert (1249000, 1249000, 1250000) == tuple(arr[0, :3]) assert (1031000, 1029000, 1027000) == tuple(arr[4, 3:6]) assert (803000, 801000, 798000) == tuple(arr[-1, -3:]) @@ -785,6 +807,8 @@ class TestNumpy_NumpyHandler(object): ds.file_meta.TransferSyntaxUID = uid arr = ds.pixel_array + assert arr.flags.writeable + # Frame 1 assert (1249000, 1249000, 1250000) == tuple(arr[0, 0, :3]) assert (1031000, 1029000, 1027000) == tuple(arr[0, 4, 3:6]) @@ -806,6 +830,8 @@ class TestNumpy_NumpyHandler(object): ds.file_meta.TransferSyntaxUID = uid ar = ds.pixel_array + assert ar.flags.writeable + assert (4294967295, 0, 0) == tuple(ar[5, 50, :]) assert (4294967295, 2155905152, 2155905152) == tuple(ar[15, 50, :]) assert 
(0, 4294967295, 0) == tuple(ar[25, 50, :]) @@ -825,6 +851,8 @@ class TestNumpy_NumpyHandler(object): ds.file_meta.TransferSyntaxUID = uid arr = ds.pixel_array + assert arr.flags.writeable + # Frame 1 assert (4294967295, 0, 0) == tuple(arr[0, 5, 50, :]) assert (4294967295, 2155905152, 2155905152) == tuple( @@ -877,6 +905,15 @@ class TestNumpy_NumpyHandler(object): assert ds.pixel_array.max() == 1 + def test_read_only(self): + """Test for #717, returned array read-only.""" + ds = dcmread(EXPL_8_1_1F) + arr = ds.pixel_array + assert 0 != arr[0, 0] + arr[0, 0] = 0 + assert 0 == arr[0, 0] + assert arr.flags.writeable + # Tests for numpy_handler module with Numpy available @pytest.mark.skipif(not HAVE_NP, reason='Numpy is not available') @@ -906,6 +943,24 @@ class TestNumpy_GetPixelData(object): match=' the transfer syntax is not supported'): get_pixeldata(ds) + def test_bad_length_raises(self): + """Test bad pixel data length raises exception.""" + ds = dcmread(EXPL_8_1_1F) + # Too short + ds.PixelData = ds.PixelData[:-1] + msg = ( + r"The length of the pixel data in the dataset doesn't match the " + r"expected amount \(479999 vs. 480000 bytes\). The dataset may be " + r"corrupted or there may be an issue with the pixel data handler." + ) + with pytest.raises(ValueError, match=msg): + get_pixeldata(ds) + + # Too long + ds.PixelData += b'\x00\x00' + with pytest.raises(ValueError, match=r"480001 vs. 480000 bytes"): + get_pixeldata(ds) + def test_change_photometric_interpretation(self): """Test get_pixeldata changes PhotometricInterpretation if required.""" def to_rgb(ds): @@ -928,6 +983,29 @@ class TestNumpy_GetPixelData(object): NP_HANDLER.should_change_PhotometricInterpretation_to_RGB = orig_fn + def test_array_read_only(self): + """Test returning a read only array for BitsAllocated > 8.""" + ds = dcmread(EXPL_8_1_1F) + arr = get_pixeldata(ds, read_only=False) + assert arr.flags.writeable + assert 0 != arr[10] + arr[10] = 0 + assert 0 == arr[10] + + arr = get_pixeldata(ds, read_only=True) + assert not arr.flags.writeable + with pytest.raises(ValueError, match="is read-only"): + arr[10] = 0 + + def test_array_read_only_bit_packed(self): + """Test returning a read only array for BitsAllocated = 1.""" + ds = dcmread(EXPL_1_1_1F) + arr = get_pixeldata(ds, read_only=False) + assert arr.flags.writeable + + arr = get_pixeldata(ds, read_only=True) + assert arr.flags.writeable + REFERENCE_PACK_UNPACK = [ (b'', []), diff --git a/pydicom/tests/test_pillow_pixel_data.py b/pydicom/tests/test_pillow_pixel_data.py index 25e39cff0..55514dfe0 100644 --- a/pydicom/tests/test_pillow_pixel_data.py +++ b/pydicom/tests/test_pillow_pixel_data.py @@ -307,12 +307,16 @@ class Test_JPEG2000Tests_with_pillow(object): b = self.mr_small.pixel_array assert np.array_equal(a, b) + assert a.flags.writeable + def test_emri_JPEG2000PixelArray(self): """Test decoding JPEG2K with pillow handler succeeds.""" a = self.emri_jpeg_2k_lossless.pixel_array b = self.emri_small.pixel_array assert np.array_equal(a, b) + assert a.flags.writeable + def test_jpeg2000_lossy(self): """Test decoding JPEG2K lossy with pillow handler fails.""" with pytest.raises(NotImplementedError): @@ -358,6 +362,9 @@ class Test_JPEGlossyTests_with_pillow(object): assert "YBR_FULL_422" == self.color_3d_jpeg.PhotometricInterpretation a = self.color_3d_jpeg.pixel_array + + assert a.flags.writeable + assert (120, 480, 640, 3) == a.shape # this test points were manually identified in Osirix viewer assert (41, 41, 41) == tuple(a[3, 159, 290, :]) @@ -552,6 +559,8 @@ def 
test_PI_RGB(test_with_pillow, assert t.PhotometricInterpretation == PhotometricInterpretation a = t.pixel_array assert a.shape == (100, 100, 3) + + assert a.flags.writeable """ This complete test never gave a different result than just the 10 point test below diff --git a/pydicom/tests/test_rle_pixel_data.py b/pydicom/tests/test_rle_pixel_data.py index b94f0f430..266903930 100644 --- a/pydicom/tests/test_rle_pixel_data.py +++ b/pydicom/tests/test_rle_pixel_data.py @@ -397,6 +397,8 @@ class TestNumpy_RLEHandler(object): ref = _get_pixel_array(OB_EXPL_LITTLE_1F) arr = ds.pixel_array + assert arr.flags.writeable + assert np.array_equal(arr, ref) assert (600, 800) == arr.shape assert 244 == arr[0].min() == arr[0].max() @@ -413,6 +415,8 @@ class TestNumpy_RLEHandler(object): ref = _get_pixel_array(OB_EXPL_LITTLE_2F) arr = ds.pixel_array + assert arr.flags.writeable + assert np.array_equal(arr, ref) assert (2, 600, 800) == arr.shape assert 244 == arr[0, 0].min() == arr[0, 0].max() @@ -432,6 +436,8 @@ class TestNumpy_RLEHandler(object): ref = _get_pixel_array(SC_EXPL_LITTLE_1F) arr = ds.pixel_array + assert arr.flags.writeable + assert np.array_equal(arr, ref) assert (255, 0, 0) == tuple(arr[5, 50, :]) assert (255, 128, 128) == tuple(arr[15, 50, :]) @@ -454,6 +460,8 @@ class TestNumpy_RLEHandler(object): ref = _get_pixel_array(SC_EXPL_LITTLE_2F) arr = ds.pixel_array + assert arr.flags.writeable + assert np.array_equal(arr, ref) # Frame 1 @@ -483,6 +491,8 @@ class TestNumpy_RLEHandler(object): ref = _get_pixel_array(MR_EXPL_LITTLE_1F) arr = ds.pixel_array + assert arr.flags.writeable + assert np.array_equal(arr, ref) assert (64, 64) == arr.shape @@ -500,6 +510,8 @@ class TestNumpy_RLEHandler(object): ref = _get_pixel_array(EMRI_EXPL_LITTLE_10F) arr = ds.pixel_array + assert arr.flags.writeable + assert np.array_equal(arr, ref) assert (10, 64, 64) == arr.shape @@ -528,6 +540,8 @@ class TestNumpy_RLEHandler(object): arr = ds.pixel_array ref = _get_pixel_array(SC_EXPL_LITTLE_16_1F) + assert arr.flags.writeable + assert np.array_equal(ds.pixel_array, ref) assert (65535, 0, 0) == tuple(arr[5, 50, :]) @@ -551,6 +565,8 @@ class TestNumpy_RLEHandler(object): arr = ds.pixel_array ref = _get_pixel_array(SC_EXPL_LITTLE_16_2F) + assert arr.flags.writeable + assert np.array_equal(ds.pixel_array, ref) # Frame 1 @@ -579,6 +595,8 @@ class TestNumpy_RLEHandler(object): ref = _get_pixel_array(RTDOSE_EXPL_LITTLE_1F) arr = ds.pixel_array + assert arr.flags.writeable + assert np.array_equal(arr, ref) assert (10, 10) == arr.shape assert (1249000, 1249000, 1250000) == tuple(arr[0, :3]) @@ -595,6 +613,8 @@ class TestNumpy_RLEHandler(object): ref = _get_pixel_array(RTDOSE_EXPL_LITTLE_15F) arr = ds.pixel_array + assert arr.flags.writeable + assert np.array_equal(arr, ref) assert (15, 10, 10) == arr.shape @@ -623,6 +643,8 @@ class TestNumpy_RLEHandler(object): arr = ds.pixel_array ref = _get_pixel_array(SC_EXPL_LITTLE_32_1F) + assert arr.flags.writeable + assert np.array_equal(ds.pixel_array, ref) assert (4294967295, 0, 0) == tuple(arr[5, 50, :]) @@ -646,6 +668,8 @@ class TestNumpy_RLEHandler(object): arr = ds.pixel_array ref = _get_pixel_array(SC_EXPL_LITTLE_32_2F) + assert arr.flags.writeable + assert np.array_equal(ds.pixel_array, ref) # Frame 1
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 6 }
1.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "numpy>=1.16.0", "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 coverage==6.2 importlib-metadata==4.8.3 iniconfig==1.1.1 numpy==1.19.5 packaging==21.3 pluggy==1.0.0 py==1.11.0 -e git+https://github.com/pydicom/pydicom.git@38d9eaa4416971b306f86a9e80381503efb79f7b#egg=pydicom pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: pydicom channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - coverage==6.2 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - numpy==1.19.5 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/pydicom
[ "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_8bit_1sample_1frame", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_8bit_1sample_2frame", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_8bit_3sample_1frame", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_8bit_3sample_2frame", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_little_16bit_1sample_1frame", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_little_16bit_1sample_10frame", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_little_16bit_3sample_1frame", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_little_16bit_3sample_2frame", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_little_32bit_1sample_1frame", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_little_32bit_1sample_15frame", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_little_32bit_3sample_1frame", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_little_32bit_3sample_2frame", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_read_only", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetPixelData::test_array_read_only", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetPixelData::test_array_read_only_bit_packed" ]
[]
[ "pydicom/tests/test_gdcm_pixel_data.py::GDCM_JPEG_LS_Tests_no_gdcm::test_JPEG_LS_PixelArray", "pydicom/tests/test_gdcm_pixel_data.py::GDCM_JPEG_LS_Tests_no_gdcm::test_emri_JPEG_LS_PixelArray", "pydicom/tests/test_gdcm_pixel_data.py::GDCM_JPEG2000Tests_no_gdcm::test_JPEG2000", "pydicom/tests/test_gdcm_pixel_data.py::GDCM_JPEG2000Tests_no_gdcm::test_JPEG2000PixelArray", "pydicom/tests/test_gdcm_pixel_data.py::GDCM_JPEG2000Tests_no_gdcm::test_emri_JPEG2000PixelArray", "pydicom/tests/test_gdcm_pixel_data.py::GDCM_JPEG2000Tests_no_gdcm::test_jpeg2000_lossy", "pydicom/tests/test_gdcm_pixel_data.py::GDCM_JPEGlossyTests_no_gdcm::test_JPEGBaselineColor3DPixelArray", "pydicom/tests/test_gdcm_pixel_data.py::GDCM_JPEGlossyTests_no_gdcm::test_JPEGlossy", "pydicom/tests/test_gdcm_pixel_data.py::GDCM_JPEGlossyTests_no_gdcm::test_JPEGlossyPixelArray", "pydicom/tests/test_gdcm_pixel_data.py::GDCM_JPEGlosslessTests_no_gdcm::testJPEGlossless", "pydicom/tests/test_gdcm_pixel_data.py::GDCM_JPEGlosslessTests_no_gdcm::testJPEGlosslessPixelArray", "pydicom/tests/test_jpeg_ls_pixel_data.py::jpeg_ls_JPEG_LS_Tests_no_jpeg_ls::test_JPEG_LS_PixelArray", "pydicom/tests/test_jpeg_ls_pixel_data.py::jpeg_ls_JPEG2000Tests_no_jpeg_ls::test_JPEG2000PixelArray", "pydicom/tests/test_jpeg_ls_pixel_data.py::jpeg_ls_JPEG2000Tests_no_jpeg_ls::test_emri_JPEG2000PixelArray", "pydicom/tests/test_jpeg_ls_pixel_data.py::jpeg_ls_JPEGlossyTests_no_jpeg_ls::testJPEGBaselineColor3DPixelArray", "pydicom/tests/test_jpeg_ls_pixel_data.py::jpeg_ls_JPEGlossyTests_no_jpeg_ls::testJPEGlossy", "pydicom/tests/test_jpeg_ls_pixel_data.py::jpeg_ls_JPEGlossyTests_no_jpeg_ls::testJPEGlossyPixelArray", "pydicom/tests/test_jpeg_ls_pixel_data.py::jpeg_ls_JPEGlosslessTests_no_jpeg_ls::testJPEGlossless", "pydicom/tests/test_jpeg_ls_pixel_data.py::jpeg_ls_JPEGlosslessTests_no_jpeg_ls::testJPEGlosslessPixelArray", "pydicom/tests/test_numpy_pixel_data.py::test_unsupported_syntaxes", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NoNumpyHandler::test_environment", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NoNumpyHandler::test_can_access_supported_dataset", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NoNumpyHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/SC_rgb_jpeg_dcmtk.dcm-data0]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NoNumpyHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/JPEG-lossy.dcm-data1]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NoNumpyHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/SC_rgb_jpeg_gdcm.dcm-data2]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NoNumpyHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/MR_small_jpeg_ls_lossless.dcm-data3]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NoNumpyHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/emri_small_jpeg_2k_lossless.dcm-data4]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NoNumpyHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/JPEG2000.dcm-data5]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NoNumpyHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/MR_small_RLE.dcm-data6]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NoNumpyHandler::test_pixel_array_raises", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_environment", 
"pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_unsupported_syntax_raises", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_dataset_pixel_array_handler_needs_convert", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/SC_rgb_jpeg_dcmtk.dcm-data0]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/JPEG-lossy.dcm-data1]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/SC_rgb_jpeg_gdcm.dcm-data2]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/MR_small_jpeg_ls_lossless.dcm-data3]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/emri_small_jpeg_2k_lossless.dcm-data4]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/JPEG2000.dcm-data5]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/MR_small_RLE.dcm-data6]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_pixel_array_8bit_un_signed", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_pixel_array_16bit_un_signed", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_pixel_array_32bit_un_signed", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_8bit_3sample_1frame_odd_size", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_properties[/pydicom/pydicom/data/test_files/liver_1frame.dcm-data0]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_properties[/pydicom/pydicom/data/test_files/liver.dcm-data1]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_properties[/pydicom/pydicom/data/test_files/OBXXXX1A.dcm-data2]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_properties[/pydicom/pydicom/data/test_files/SC_rgb_small_odd.dcm-data3]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_properties[/pydicom/pydicom/data/test_files/OBXXXX1A_2frame.dcm-data4]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_properties[/pydicom/pydicom/data/test_files/SC_rgb.dcm-data5]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_properties[/pydicom/pydicom/data/test_files/SC_rgb_2frame.dcm-data6]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_properties[/pydicom/pydicom/data/test_files/MR_small.dcm-data7]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_properties[/pydicom/pydicom/data/test_files/emri_small.dcm-data8]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_properties[/pydicom/pydicom/data/test_files/SC_rgb_16bit_2frame.dcm-data9]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_properties[/pydicom/pydicom/data/test_files/rtdose_1frame.dcm-data10]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_properties[/pydicom/pydicom/data/test_files/rtdose.dcm-data11]", 
"pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_properties[/pydicom/pydicom/data/test_files/SC_rgb_32bit_2frame.dcm-data12]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_little_1bit_1sample_1frame", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_little_1bit_1sample_3frame", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_big_endian_datasets[/pydicom/pydicom/data/test_files/liver_1frame.dcm-/pydicom/pydicom/data/test_files/liver_expb_1frame.dcm]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_big_endian_datasets[/pydicom/pydicom/data/test_files/liver.dcm-/pydicom/pydicom/data/test_files/liver_expb.dcm]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_big_endian_datasets[/pydicom/pydicom/data/test_files/OBXXXX1A.dcm-/pydicom/pydicom/data/test_files/OBXXXX1A_expb.dcm]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_big_endian_datasets[/pydicom/pydicom/data/test_files/OBXXXX1A_2frame.dcm-/pydicom/pydicom/data/test_files/OBXXXX1A_expb_2frame.dcm]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_big_endian_datasets[/pydicom/pydicom/data/test_files/SC_rgb.dcm-/pydicom/pydicom/data/test_files/SC_rgb_expb.dcm]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_big_endian_datasets[/pydicom/pydicom/data/test_files/SC_rgb_2frame.dcm-/pydicom/pydicom/data/test_files/SC_rgb_expb_2frame.dcm]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_big_endian_datasets[/pydicom/pydicom/data/test_files/MR_small.dcm-/pydicom/pydicom/data/test_files/MR_small_expb.dcm]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_big_endian_datasets[/pydicom/pydicom/data/test_files/emri_small.dcm-/pydicom/pydicom/data/test_files/emri_small_big_endian.dcm]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_big_endian_datasets[/pydicom/pydicom/data/test_files/SC_rgb_16bit_2frame.dcm-/pydicom/pydicom/data/test_files/SC_rgb_expb_16bit_2frame.dcm]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_big_endian_datasets[/pydicom/pydicom/data/test_files/rtdose_1frame.dcm-/pydicom/pydicom/data/test_files/rtdose_expb_1frame.dcm]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_big_endian_datasets[/pydicom/pydicom/data/test_files/rtdose.dcm-/pydicom/pydicom/data/test_files/rtdose_expb.dcm]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_big_endian_datasets[/pydicom/pydicom/data/test_files/SC_rgb_32bit_2frame.dcm-/pydicom/pydicom/data/test_files/SC_rgb_expb_32bit_2frame.dcm]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_NumpyHandler::test_endianness_not_set", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetPixelData::test_no_pixel_data_raises", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetPixelData::test_unknown_pixel_representation_raises", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetPixelData::test_unsupported_syntaxes_raises", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetPixelData::test_bad_length_raises", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetPixelData::test_change_photometric_interpretation", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[-output0]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\x00-output1]", 
"pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\x01-output2]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\x02-output3]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\x04-output4]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\x08-output5]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\x10-output6]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[@-output8]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\x80-output9]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\xaa-output10]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\xf0-output11]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\x0f-output12]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\xff-output13]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\x00\\x00-output14]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\x00\\x01-output15]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\x00\\x80-output16]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\x00\\xff-output17]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\x01\\x80-output18]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\x80\\x80-output19]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_UnpackBits::test_unpack[\\xff\\x80-output20]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[-input0]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\x00-input1]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\x01-input2]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\x02-input3]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\x04-input4]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\x08-input5]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\x10-input6]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[@-input8]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\x80-input9]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\xaa-input10]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\xf0-input11]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\x0f-input12]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\xff-input13]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\x00\\x00-input14]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\x00\\x01-input15]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\x00\\x80-input16]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\x00\\xff-input17]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\x01\\x80-input18]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\x80\\x80-input19]", 
"pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack[\\xff\\x80-input20]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_non_binary_input", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_non_array_input", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[\\x00@-input0]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[\\x00", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[\\x00\\x10-input2]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[\\x00\\x08-input3]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[\\x00\\x04-input4]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[\\x00\\x02-input5]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[\\x00\\x01-input6]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[\\x80-input7]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[@-input8]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[\\x10-input10]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[\\x08-input11]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[\\x04-input12]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[\\x02-input13]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[\\x01-input14]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_pack_partial[-input15]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_PackBits::test_functional", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape0-1-length0]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape1-1-length1]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape2-1-length2]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape3-1-length3]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape4-1-length4]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape5-1-length5]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape6-1-length6]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape7-1-length7]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape8-1-length8]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape9-8-length9]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape10-8-length10]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape11-8-length11]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape12-8-length12]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape13-8-length13]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape14-8-length14]", 
"pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape15-16-length15]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape16-16-length16]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape17-16-length17]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape18-16-length18]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape19-16-length19]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape20-32-length20]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape21-32-length21]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape22-32-length22]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape23-32-length23]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape24-32-length24]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape25-1-length25]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape26-1-length26]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape27-1-length27]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape28-1-length28]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape29-1-length29]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape30-1-length30]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape31-1-length31]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape32-1-length32]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape33-1-length33]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape34-8-length34]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape35-8-length35]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape36-8-length36]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape37-8-length37]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape38-8-length38]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape39-8-length39]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape40-16-length40]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape41-16-length41]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape42-16-length42]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape43-32-length43]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape44-32-length44]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape45-32-length45]", 
"pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape46-1-length46]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape47-1-length47]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape48-1-length48]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape49-1-length49]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape50-1-length50]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape51-1-length51]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape52-1-length52]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape53-1-length53]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape54-1-length54]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape55-8-length55]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape56-8-length56]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape57-8-length57]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape58-16-length58]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape59-16-length59]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape60-16-length60]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape61-32-length61]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape62-32-length62]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_bytes[shape63-32-length63]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape0-1-length0]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape1-1-length1]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape2-1-length2]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape3-1-length3]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape4-1-length4]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape5-1-length5]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape6-1-length6]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape7-1-length7]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape8-1-length8]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape9-8-length9]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape10-8-length10]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape11-8-length11]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape12-8-length12]", 
"pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape13-8-length13]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape14-8-length14]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape15-16-length15]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape16-16-length16]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape17-16-length17]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape18-16-length18]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape19-16-length19]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape20-32-length20]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape21-32-length21]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape22-32-length22]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape23-32-length23]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape24-32-length24]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape25-1-length25]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape26-1-length26]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape27-1-length27]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape28-1-length28]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape29-1-length29]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape30-1-length30]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape31-1-length31]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape32-1-length32]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape33-1-length33]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape34-8-length34]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape35-8-length35]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape36-8-length36]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape37-8-length37]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape38-8-length38]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape39-8-length39]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape40-16-length40]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape41-16-length41]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape42-16-length42]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape43-32-length43]", 
"pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape44-32-length44]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape45-32-length45]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape46-1-length46]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape47-1-length47]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape48-1-length48]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape49-1-length49]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape50-1-length50]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape51-1-length51]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape52-1-length52]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape53-1-length53]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape54-1-length54]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape55-8-length55]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape56-8-length56]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape57-8-length57]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape58-16-length58]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape59-16-length59]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape60-16-length60]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape61-32-length61]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape62-32-length62]", "pydicom/tests/test_numpy_pixel_data.py::TestNumpy_GetExpectedLength::test_length_in_pixels[shape63-32-length63]", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEGLS_no_pillow::test_JPEG_LS_PixelArray", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEGLS_no_pillow::test_emri_JPEG_LS_PixelArray", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEG2000Tests_no_pillow::testJPEG2000", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEG2000Tests_no_pillow::testJPEG2000PixelArray", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEG2000Tests_no_pillow::test_emri_JPEG2000PixelArray", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEG2000Tests_no_pillow::test_jpeg2000_lossy", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEGlossyTests_no_pillow::testJPEGlossy", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEGlossyTests_no_pillow::testJPEGlossyPixelArray", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEGlossyTests_no_pillow::testJPEGBaselineColor3DPixelArray", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEGlosslessTests_no_pillow::testJPEGlossless", "pydicom/tests/test_pillow_pixel_data.py::Test_JPEGlosslessTests_no_pillow::testJPEGlosslessPixelArray", "pydicom/tests/test_rle_pixel_data.py::test_unsupported_syntaxes", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_NoRLEHandler::test_environment", 
"pydicom/tests/test_rle_pixel_data.py::TestNumpy_NoRLEHandler::test_can_access_supported_dataset", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_NoRLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/rtdose_1frame.dcm-data0]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_NoRLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/SC_rgb.dcm-data1]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_NoRLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/image_dfl.dcm-data2]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_NoRLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/SC_rgb_expb_2frame.dcm-data3]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_NoRLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/SC_rgb_jpeg_dcmtk.dcm-data4]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_NoRLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/JPEG-lossy.dcm-data5]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_NoRLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/SC_rgb_jpeg_gdcm.dcm-data6]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_NoRLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/MR_small_jpeg_ls_lossless.dcm-data7]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_NoRLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/emri_small_jpeg_2k_lossless.dcm-data8]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_NoRLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/JPEG2000.dcm-data9]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_NoRLEHandler::test_pixel_array_raises", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_environment", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_unsupported_syntax_raises", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/rtdose_1frame.dcm-data0]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/SC_rgb.dcm-data1]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/image_dfl.dcm-data2]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/SC_rgb_expb_2frame.dcm-data3]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/SC_rgb_jpeg_dcmtk.dcm-data4]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/JPEG-lossy.dcm-data5]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/SC_rgb_jpeg_gdcm.dcm-data6]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/MR_small_jpeg_ls_lossless.dcm-data7]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/emri_small_jpeg_2k_lossless.dcm-data8]", 
"pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_can_access_unsupported_dataset[/pydicom/pydicom/data/test_files/JPEG2000.dcm-data9]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_pixel_array_signed", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_pixel_array_1bit_raises", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_pixel_array_8bit_1sample_1f", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_pixel_array_8bit_1sample_2f", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_pixel_array_8bit_3sample_1f", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_pixel_array_8bit_3sample_2f", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_pixel_array_16bit_1sample_1f", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_pixel_array_16bit_1sample_10f", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_pixel_array_16bit_3sample_1f", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_pixel_array_16bit_3sample_2f", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_pixel_array_32bit_1sample_1f", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_pixel_array_32bit_1sample_15f", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_pixel_array_32bit_3sample_1f", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEHandler::test_pixel_array_32bit_3sample_2f", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_GetPixelData::test_no_pixel_data_raises", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_GetPixelData::test_unknown_pixel_representation_raises", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_GetPixelData::test_unsupported_syntaxes_raises", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_GetPixelData::test_change_photometric_interpretation", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_GetPixelData::test_little_endian_segment_order", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEParseHeader::test_invalid_header_length", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEParseHeader::test_invalid_nr_segments_raises", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEParseHeader::test_parse_header[0-offsets0]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEParseHeader::test_parse_header[1-offsets1]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEParseHeader::test_parse_header[2-offsets2]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEParseHeader::test_parse_header[8-offsets3]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEParseHeader::test_parse_header[14-offsets4]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEParseHeader::test_parse_header[15-offsets5]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_unsupported_bits_allocated_raises", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_nr_segments_raises[\\x00\\x00\\x00\\x00-1-8]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_nr_segments_raises[\\x02\\x00\\x00\\x00-1-8]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_nr_segments_raises[\\x02\\x00\\x00\\x00-3-8]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_nr_segments_raises[\\x04\\x00\\x00\\x00-3-8]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_nr_segments_raises[\\x01\\x00\\x00\\x00-1-16]", 
"pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_nr_segments_raises[\\x03\\x00\\x00\\x00-1-16]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_nr_segments_raises[\\x05\\x00\\x00\\x00-3-16]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_nr_segments_raises[\\x07\\x00\\x00\\x00-3-16]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_nr_segments_raises[\\x03\\x00\\x00\\x00-1-32]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_nr_segments_raises[\\x05\\x00\\x00\\x00-1-32]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_nr_segments_raises[\\x0b\\x00\\x00\\x00-3-32]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_nr_segments_raises[\\r\\x00\\x00\\x00-3-32]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_nr_segments_raises[\\x07\\x00\\x00\\x00-1-64]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_nr_segments_raises[\\t\\x00\\x00\\x00-1-64]", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_invalid_frame_data_raises", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_8bit_1sample", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_8bit_3sample", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_16bit_1sample", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_16bit_3sample", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_32bit_1sample", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeFrame::test_32bit_3sample", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeSegment::test_noop", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeSegment::test_literal", "pydicom/tests/test_rle_pixel_data.py::TestNumpy_RLEDecodeSegment::test_copy" ]
[]
MIT License
3,079
[ "pydicom/pixel_data_handlers/numpy_handler.py", "pydicom/pixel_data_handlers/jpeg_ls_handler.py", "doc/whatsnew/v1.2.0.rst", "pydicom/pixel_data_handlers/pillow_handler.py", "pydicom/pixel_data_handlers/gdcm_handler.py", "pydicom/multival.py" ]
[ "pydicom/pixel_data_handlers/numpy_handler.py", "pydicom/pixel_data_handlers/jpeg_ls_handler.py", "doc/whatsnew/v1.2.0.rst", "pydicom/pixel_data_handlers/pillow_handler.py", "pydicom/pixel_data_handlers/gdcm_handler.py", "pydicom/multival.py" ]
tox-dev__tox-989
3b91d63c9c5a089ba8ae9cbaec20bdf52ecf25cf
2018-09-16 19:37:18
cf6afcecaca22df7b509facaea43c09a15570f75
diff --git a/changelog/824.feature.rst b/changelog/824.feature.rst new file mode 100644 index 00000000..aae99136 --- /dev/null +++ b/changelog/824.feature.rst @@ -0,0 +1,1 @@ +use the os environment variable ``TOX_SKIP_ENV`` to filter out tox environment names from the run list (set by :confval:`envlist`) - by :user:`gaborbernat` diff --git a/changelog/838.feature.rst b/changelog/838.feature.rst new file mode 100644 index 00000000..b90ff843 --- /dev/null +++ b/changelog/838.feature.rst @@ -0,0 +1,1 @@ +always set ``PIP_USER=0`` (do not install into the user site package, but inside the virtual environment created) and ``PIP_NO_DEPS=0`` (installing without dependencies can cause broken package installations) inside tox - by :user:`gaborbernat` diff --git a/doc/config.rst b/doc/config.rst index 89ce7a3a..7c33646a 100644 --- a/doc/config.rst +++ b/doc/config.rst @@ -76,6 +76,13 @@ and will first lookup global tox settings in this section: * environment variable ``TOXENV`` * ``tox.ini`` file's ``envlist`` + .. versionadded:: 3.4.0 + + What tox environments are ran during the tox invocation can be further filtered + via the operating system environment variable ``TOX_SKIP_ENV`` regular expression + (e.g. ``py27.*`` means **don't** evaluate environments that start with the key ``py27``). + Skipped environments will be logged at level two verbosity level. + .. confval:: ignore_basepython_conflict=True|False(default) .. versionadded:: 3.1.0 diff --git a/src/tox/session.py b/src/tox/session.py index 7da1efcd..dc25a277 100644 --- a/src/tox/session.py +++ b/src/tox/session.py @@ -381,7 +381,7 @@ class Session: self._spec2pkg = {} self._name2venv = {} try: - self.venvlist = [self.getvenv(x) for x in self.config.envlist] + self.venvlist = [self.getvenv(x) for x in self.evaluated_env_list()] except LookupError: raise SystemExit(1) except tox.exception.ConfigError as e: @@ -389,6 +389,18 @@ class Session: raise SystemExit(1) self._actions = [] + def evaluated_env_list(self): + tox_env_filter = os.environ.get("TOX_SKIP_ENV") + tox_env_filter_re = re.compile(tox_env_filter) if tox_env_filter is not None else None + for name in self.config.envlist: + if tox_env_filter_re is not None and tox_env_filter_re.match(name): + msg = "skip environment {}, matches filter {!r}".format( + name, tox_env_filter_re.pattern + ) + self.report.verbosity1(msg) + continue + yield name + @property def hook(self): return self.config.pluginmanager.hook diff --git a/src/tox/venv.py b/src/tox/venv.py index a10149a9..0209f915 100755 --- a/src/tox/venv.py +++ b/src/tox/venv.py @@ -5,6 +5,7 @@ import pipes import re import sys import warnings +from itertools import chain import py @@ -231,7 +232,7 @@ class VirtualEnv(object): setup_py = setupdir.join("setup.py") setup_cfg = setupdir.join("setup.cfg") args = [self.envconfig.envpython, str(setup_py), "--name"] - env = self._getenv() + env = self._get_os_environ() output = action.popen(args, cwd=setupdir, redirect=False, returnout=True, env=env) name = output.strip() args = [self.envconfig.envpython, "-c", "import sys; print(sys.path)"] @@ -297,30 +298,26 @@ class VirtualEnv(object): return options def run_install_command(self, packages, action, options=()): - argv = self.envconfig.install_command[:] - i = argv.index("{packages}") - argv[i : i + 1] = packages - if "{opts}" in argv: - i = argv.index("{opts}") - argv[i : i + 1] = list(options) + def expand(val): + # expand an install command + if val == "{packages}": + for package in packages: + yield package + elif val == "{opts}": + for 
opt in options: + yield opt + else: + yield val - for x in ("PIP_RESPECT_VIRTUALENV", "PIP_REQUIRE_VIRTUALENV", "__PYVENV_LAUNCHER__"): - os.environ.pop(x, None) + cmd = list(chain.from_iterable(expand(val) for val in self.envconfig.install_command)) - if "PYTHONPATH" not in self.envconfig.passenv: - # If PYTHONPATH not explicitly asked for, remove it. - if "PYTHONPATH" in os.environ: - self.session.report.warning( - "Discarding $PYTHONPATH from environment, to override " - "specify PYTHONPATH in 'passenv' in your configuration." - ) - os.environ.pop("PYTHONPATH") + self.ensure_pip_os_environ_ok() old_stdout = sys.stdout sys.stdout = codecs.getwriter("utf8")(sys.stdout) try: self._pcall( - argv, + cmd, cwd=self.envconfig.config.toxinidir, action=action, redirect=self.session.report.verbosity < 2, @@ -328,6 +325,24 @@ class VirtualEnv(object): finally: sys.stdout = old_stdout + def ensure_pip_os_environ_ok(self): + for key in ("PIP_RESPECT_VIRTUALENV", "PIP_REQUIRE_VIRTUALENV", "__PYVENV_LAUNCHER__"): + os.environ.pop(key, None) + if "PYTHONPATH" not in self.envconfig.passenv: + # If PYTHONPATH not explicitly asked for, remove it. + if "PYTHONPATH" in os.environ: + self.session.report.warning( + "Discarding $PYTHONPATH from environment, to override " + "specify PYTHONPATH in 'passenv' in your configuration." + ) + os.environ.pop("PYTHONPATH") + + # installing packages at user level may mean we're not installing inside the venv + os.environ["PIP_USER"] = "0" + + # installing without dependencies may lead to broken packages + os.environ["PIP_NO_DEPS"] = "0" + def _install(self, deps, extraopts=None, action=None): if not deps: return @@ -353,13 +368,13 @@ class VirtualEnv(object): options.extend(extraopts) self.run_install_command(packages=packages, options=options, action=action) - def _getenv(self, testcommand=False): - if testcommand: + def _get_os_environ(self, is_test_command=False): + if is_test_command: # for executing tests we construct a clean environment env = {} - for envname in self.envconfig.passenv: - if envname in os.environ: - env[envname] = os.environ[envname] + for env_key in self.envconfig.passenv: + if env_key in os.environ: + env[env_key] = os.environ[env_key] else: # for executing non-test commands we use the full # invocation environment @@ -377,7 +392,7 @@ class VirtualEnv(object): self.session.make_emptydir(self.envconfig.envtmpdir) self.envconfig.envtmpdir.ensure(dir=1) cwd = self.envconfig.changedir - env = self._getenv(testcommand=True) + env = self._get_os_environ(is_test_command=True) # Display PYTHONHASHSEED to assist with reproducibility. 
action.setactivity("runtests", "PYTHONHASHSEED={!r}".format(env.get("PYTHONHASHSEED"))) for i, argv in enumerate(self.envconfig.commands): @@ -405,7 +420,7 @@ class VirtualEnv(object): action=action, redirect=redirect, ignore_ret=ignore_ret, - testcommand=True, + is_test_command=True, ) except tox.exception.InvocationError as err: if self.envconfig.ignore_outcome: @@ -424,18 +439,28 @@ class VirtualEnv(object): raise def _pcall( - self, args, cwd, venv=True, testcommand=False, action=None, redirect=True, ignore_ret=False + self, + args, + cwd, + venv=True, + is_test_command=False, + action=None, + redirect=True, + ignore_ret=False, ): + # construct environment variables os.environ.pop("VIRTUALENV_PYTHON", None) + env = self._get_os_environ(is_test_command=is_test_command) + bin_dir = str(self.envconfig.envbindir) + env["PATH"] = os.pathsep.join([bin_dir, os.environ["PATH"]]) + self.session.report.verbosity2("setting PATH={}".format(env["PATH"])) - cwd.ensure(dir=1) + # get command args[0] = self.getcommandpath(args[0], venv, cwd) if sys.platform != "win32" and "TOX_LIMITED_SHEBANG" in os.environ: args = prepend_shebang_interpreter(args) - env = self._getenv(testcommand=testcommand) - bindir = str(self.envconfig.envbindir) - env["PATH"] = p = os.pathsep.join([bindir, os.environ["PATH"]]) - self.session.report.verbosity2("setting PATH={}".format(p)) + + cwd.ensure(dir=1) # ensure the cwd exists return action.popen(args, cwd=cwd, env=env, redirect=redirect, ignore_ret=ignore_ret)
bug: tox should run pip with PIP_USER=0 - [X] Minimal reproduceable example or detailed description, assign "bug" - [X] OS and `pip list` output tox cannot use pip to install in a venv if the user has a config file that defaults to --user installs (e.g. because they (I) intend to use the distro package manager when touching distro packages) -- because pip crashes in such settings. By setting the PIP_USER environment variable to 0, tox could override that setting to force the use of in-venv installs. Arch Linux Py3.6 (from distro) in a clean venv. pip list ``` Package Version ---------- ------- pip 9.0.3 pluggy 0.6.0 py 1.5.3 setuptools 39.0.1 six 1.11.0 tox 3.0.0 virtualenv 16.0.0 ``` foo/setup.py ``` from setuptools import setup; setup(name="foo") ``` foo/tox.ini ``` [tox] envlist = py36 ``` ~/.config/pip/pip.conf ``` [install] user = true ``` Running tox yields ``` GLOB sdist-make: /tmp/foo/setup.py py36 inst-nodeps: /tmp/foo/.tox/dist/foo-0.0.0.zip ERROR: invocation failed (exit code 1), logfile: /tmp/foo/.tox/py36/log/py36-3.log ERROR: actionid: py36 msg: installpkg cmdargs: ['/tmp/foo/.tox/py36/bin/pip', 'install', '-U', '--no-deps', '/tmp/foo/.tox/dist/foo-0.0.0.zip'] Can not perform a '--user' install. User site-packages are not visible in this virtualenv. Exception information: Traceback (most recent call last): File "/tmp/foo/.tox/py36/lib/python3.6/site-packages/pip/_internal/basecommand.py", line 228, in main status = self.run(options, args) File "/tmp/foo/.tox/py36/lib/python3.6/site-packages/pip/_internal/commands/install.py", line 218, in run "Can not perform a '--user' install. User site-packages " pip._internal.exceptions.InstallationError: Can not perform a '--user' install. User site-packages are not visible in this virtualenv. py36 installed: foo==0.0.0 _____________________________________________________________________________________________ summary ______________________________________________________________________________________________ ERROR: py36: InvocationError for command /tmp/foo/.tox/py36/bin/pip install -U --no-deps /tmp/foo/.tox/dist/foo-0.0.0.zip (see /tmp/foo/.tox/py36/log/py36-3.log) (exited with code 1) ```
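This issue is what the PIP_USER=0 part of the patch addresses: pip gives PIP_* environment variables higher precedence than pip.conf, so exporting PIP_USER=0 before the install command forces an in-venv install even when the user's config sets "user = true". A minimal sketch of that override, assuming a hypothetical virtualenv at ./venv:

import os
import subprocess

env = dict(os.environ)
env["PIP_USER"] = "0"      # override a pip.conf that defaults to --user installs
env["PIP_NO_DEPS"] = "0"   # keep dependency resolution enabled

subprocess.check_call(
    [os.path.join("venv", "bin", "pip"), "install", "."],  # hypothetical venv path
    env=env,
)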
tox-dev/tox
diff --git a/tests/unit/session/test_session.py b/tests/unit/session/test_session.py index 90b1b992..0e93f588 100644 --- a/tests/unit/session/test_session.py +++ b/tests/unit/session/test_session.py @@ -140,3 +140,73 @@ def test_skip_install_skip_package(cmd, initproj, mock_venv): ) result = cmd("--notest") assert result.ret == 0 + + [email protected]() +def venv_filter_project(initproj, cmd): + def func(*args): + initproj( + "pkg123-0.7", + filedefs={ + "tox.ini": """ + [tox] + envlist = {py27,py36}-{nocov,cov,diffcov}{,-extra} + skipsdist = true + + [testenv] + skip_install = true + commands = python -c 'print("{envname}")' + """ + }, + ) + result = cmd(*args) + assert result.ret == 0 + active = [i.name for i in result.session.venvlist] + return active, result + + yield func + + +def test_venv_filter_empty_all_active(venv_filter_project, monkeypatch): + monkeypatch.delenv("TOX_SKIP_ENV", raising=False) + active, result = venv_filter_project("-a") + assert result.outlines == [ + "py27-nocov", + "py27-nocov-extra", + "py27-cov", + "py27-cov-extra", + "py27-diffcov", + "py27-diffcov-extra", + "py36-nocov", + "py36-nocov-extra", + "py36-cov", + "py36-cov-extra", + "py36-diffcov", + "py36-diffcov-extra", + ] + assert active == result.outlines + + +def test_venv_filter_match_all_none_active(venv_filter_project, monkeypatch): + monkeypatch.setenv("TOX_SKIP_ENV", ".*") + active, result = venv_filter_project("-a") + assert not active + existing_envs = result.outlines + + _, result = venv_filter_project("-avv") + for name in existing_envs: + msg = "skip environment {}, matches filter '.*'".format(name) + assert msg in result.outlines + + +def test_venv_filter_match_some_some_active(venv_filter_project, monkeypatch): + monkeypatch.setenv("TOX_SKIP_ENV", "py27.*") + active, result = venv_filter_project("-avvv") + assert active == [ + "py36-nocov", + "py36-nocov-extra", + "py36-cov", + "py36-cov-extra", + "py36-diffcov", + "py36-diffcov-extra", + ] diff --git a/tests/unit/test_venv.py b/tests/unit/test_venv.py index 54a52511..5b830956 100644 --- a/tests/unit/test_venv.py +++ b/tests/unit/test_venv.py @@ -625,6 +625,8 @@ class TestVenvTest: assert "PIP_RESPECT_VIRTUALENV" not in os.environ assert "PIP_REQUIRE_VIRTUALENV" not in os.environ assert "__PYVENV_LAUNCHER__" not in os.environ + assert os.environ["PIP_USER"] == "0" + assert os.environ["PIP_NO_DEPS"] == "0" def test_pythonpath_usage(self, newmocksession, monkeypatch): monkeypatch.setenv("PYTHONPATH", "/my/awesome/library")
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 3 }
3.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-mock", "pytest-timeout", "pytest-xdist", "pytest-randomly" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 coverage==6.2 distlib==0.3.9 execnet==1.9.0 filelock==3.4.1 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 packaging==21.3 platformdirs==2.4.0 pluggy==0.13.1 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 pytest-mock==3.6.1 pytest-randomly==3.10.3 pytest-timeout==2.1.0 pytest-xdist==3.0.2 six==1.17.0 toml==0.10.2 tomli==1.2.3 -e git+https://github.com/tox-dev/tox.git@3b91d63c9c5a089ba8ae9cbaec20bdf52ecf25cf#egg=tox typing_extensions==4.1.1 virtualenv==20.17.1 zipp==3.6.0
name: tox channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - coverage==6.2 - distlib==0.3.9 - execnet==1.9.0 - filelock==3.4.1 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - packaging==21.3 - platformdirs==2.4.0 - pluggy==0.13.1 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - pytest-mock==3.6.1 - pytest-randomly==3.10.3 - pytest-timeout==2.1.0 - pytest-xdist==3.0.2 - six==1.17.0 - toml==0.10.2 - tomli==1.2.3 - typing-extensions==4.1.1 - virtualenv==20.17.1 - zipp==3.6.0 prefix: /opt/conda/envs/tox
[ "tests/unit/session/test_session.py::test_venv_filter_match_some_some_active", "tests/unit/session/test_session.py::test_venv_filter_match_all_none_active" ]
[ "tests/unit/test_venv.py::TestCreationConfig::test_python_recreation", "tests/unit/test_venv.py::TestVenvTest::test_envbindir_path", "tests/unit/test_venv.py::TestVenvTest::test_pythonpath_usage", "tests/unit/test_venv.py::test_develop_extras", "tests/unit/test_venv.py::test_install_deps_pre", "tests/unit/test_venv.py::test_create", "tests/unit/test_venv.py::test_install_python3", "tests/unit/test_venv.py::test_install_recreate", "tests/unit/test_venv.py::test_install_sdist_extras", "tests/unit/test_venv.py::test_install_deps_indexserver", "tests/unit/test_venv.py::test_installpkg_indexserver", "tests/unit/test_venv.py::test_install_deps_wildcard", "tests/unit/test_venv.py::test_install_command_verbosity[1-0]", "tests/unit/test_venv.py::test_env_variables_added_to_pcall", "tests/unit/test_venv.py::test_install_command_verbosity[0-0]", "tests/unit/test_venv.py::test_run_custom_install_command", "tests/unit/test_venv.py::test_install_command_verbosity[6-3]", "tests/unit/test_venv.py::test_install_command_verbosity[3-1]", "tests/unit/test_venv.py::test_installpkg_upgrade", "tests/unit/test_venv.py::test_install_command_verbosity[5-3]", "tests/unit/test_venv.py::test_install_command_verbosity[4-2]", "tests/unit/test_venv.py::test_install_command_verbosity[2-0]", "tests/unit/test_venv.py::test_installpkg_no_upgrade", "tests/unit/test_venv.py::test_run_install_command" ]
[ "tests/unit/session/test_session.py::test__resolve_pkg_doubledash", "tests/unit/session/test_session.py::test__resolve_pkg_with_alpha_version", "tests/unit/session/test_session.py::test__resolve_pkg_missing_directory_in_distshare", "tests/unit/session/test_session.py::test_tox_parallel_build_safe", "tests/unit/session/test_session.py::test_skip_sdist", "tests/unit/session/test_session.py::test__resolve_pkg_multiple_valid_versions", "tests/unit/session/test_session.py::test_venv_filter_empty_all_active", "tests/unit/session/test_session.py::test__resolve_pkg_with_invalid_version", "tests/unit/session/test_session.py::test_skip_install_skip_package", "tests/unit/session/test_session.py::test__resolve_pkg_missing_directory", "tests/unit/session/test_session.py::test_minversion", "tests/unit/test_venv.py::TestCreationConfig::test_develop_recreation", "tests/unit/test_venv.py::TestCreationConfig::test_dep_recreation", "tests/unit/test_venv.py::TestCreationConfig::test_matchingdependencies_latest", "tests/unit/test_venv.py::TestCreationConfig::test_basic", "tests/unit/test_venv.py::TestCreationConfig::test_matchingdependencies", "tests/unit/test_venv.py::TestCreationConfig::test_matchingdependencies_file", "tests/unit/test_venv.py::test_test_hashseed_is_in_output", "tests/unit/test_venv.py::test_env_variables_added_to_needs_reinstall", "tests/unit/test_venv.py::test_getdigest", "tests/unit/test_venv.py::test_install_command_not_installed", "tests/unit/test_venv.py::test_getsupportedinterpreter", "tests/unit/test_venv.py::test_install_error", "tests/unit/test_venv.py::test_install_command_not_installed_bash", "tests/unit/test_venv.py::test_install_command_whitelisted", "tests/unit/test_venv.py::test_commandpath_venv_precedence", "tests/unit/test_venv.py::test_create_sitepackages", "tests/unit/test_venv.py::test_test_runtests_action_command_is_in_output", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_empty_interpreter_ws", "tests/unit/test_venv.py::test_command_relative_issue36", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_interpreter_args", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_empty_interpreter", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_interpreter_ws", "tests/unit/test_venv.py::test_tox_testenv_create", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_real", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_empty_instance", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_non_utf8", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_interpreter_arg", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_long_example", "tests/unit/test_venv.py::test_tox_testenv_pre_post", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_interpreter_simple", "tests/unit/test_venv.py::test_ignore_outcome_failing_cmd" ]
[]
MIT License
3,080
[ "doc/config.rst", "changelog/838.feature.rst", "src/tox/venv.py", "src/tox/session.py", "changelog/824.feature.rst" ]
[ "doc/config.rst", "changelog/838.feature.rst", "src/tox/venv.py", "src/tox/session.py", "changelog/824.feature.rst" ]
rmeissner__py-eth-sig-utils-3
cb1927214594647f979ab1cdf25a871bfb96fd8c
2018-09-16 19:41:46
cb1927214594647f979ab1cdf25a871bfb96fd8c
diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..9f4cc33 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,15 @@ +language: python +cache: pip +python: + - "3.6" +dist: trusty +install: + - pip install -r requirements.txt +script: + - python -m unittest +deploy: + provider: script + script: bash scripts/deploy.sh + skip_cleanup: true + on: + tags: true \ No newline at end of file diff --git a/README.md b/README.md index b2f4298..33a9c0f 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,8 @@ # Python Ethereum Signing Utils +[![Build Status](https://travis-ci.org/rmeissner/py-eth-sig-utils.svg?branch=master)](https://travis-ci.org/rmeissner/py-eth-sig-utils) +[![PyPI version](https://badge.fury.io/py/py-eth-sig-utils.svg)](https://badge.fury.io/py/py-eth-sig-utils) + ### Type Data Hashes This utils contain methods to generate hashes of typed data based on [EIP-712](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-712.md). @@ -8,4 +11,14 @@ This utils contain methods to generate hashes of typed data based on [EIP-712](h 1. `virtualenv env -p python3` 1. `pip install -r requirements.txt` -1. `python test.py` \ No newline at end of file +1. `python -m unittest` + +### Deploy + +Library is automatically deployed if a tag is created. + +Manual deployment can be peformed with: +```bash +python setup.py sdist bdist_wheel +twine upload dist/* +``` \ No newline at end of file diff --git a/py_eth_sig_utils/__init__.py b/py_eth_sig_utils/__init__.py index 9d67973..5dfa209 100644 --- a/py_eth_sig_utils/__init__.py +++ b/py_eth_sig_utils/__init__.py @@ -1,5 +1,5 @@ name = "py_eth_sig_utils" -from .main import ( # NOQA - encode_typed_data, +from . import ( # NOQA + signing, ) \ No newline at end of file diff --git a/py_eth_sig_utils/eip712/__init__.py b/py_eth_sig_utils/eip712/__init__.py new file mode 100644 index 0000000..f1adeb4 --- /dev/null +++ b/py_eth_sig_utils/eip712/__init__.py @@ -0,0 +1,5 @@ +name = "eip712" + +from .encoding import ( # NOQA + encode_typed_data, +) \ No newline at end of file diff --git a/py_eth_sig_utils/main.py b/py_eth_sig_utils/eip712/encoding.py similarity index 96% rename from py_eth_sig_utils/main.py rename to py_eth_sig_utils/eip712/encoding.py index 6c4be08..1f73e36 100644 --- a/py_eth_sig_utils/main.py +++ b/py_eth_sig_utils/eip712/encoding.py @@ -63,4 +63,4 @@ def encode_typed_data(data): domainHash = create_struct_hash("EIP712Domain", domain, types) messageHash = create_struct_hash(primaryType, message, types) - return utils.encode_hex(utils.sha3(bytes.fromhex('19') + bytes.fromhex('01') + domainHash + messageHash)) \ No newline at end of file + return utils.sha3(bytes.fromhex('19') + bytes.fromhex('01') + domainHash + messageHash) \ No newline at end of file diff --git a/py_eth_sig_utils/signing.py b/py_eth_sig_utils/signing.py new file mode 100644 index 0000000..87fb627 --- /dev/null +++ b/py_eth_sig_utils/signing.py @@ -0,0 +1,22 @@ +from ethereum import utils +from eth_utils import big_endian_to_int +from .eip712 import encode_typed_data + +def signature_to_v_r_s(signature): + v = utils.safe_ord(signature[64]) + r = big_endian_to_int(signature[0:32]) + s = big_endian_to_int(signature[32:64]) + return v, r, s + +def v_r_s_to_signature(v, r, s): + return r.to_bytes(32, 'big') + s.to_bytes(32, 'big') + v.to_bytes(1, 'big') + +def sign_typed_data(data, private_key): + msg_hash = encode_typed_data(data) + return utils.ecsign(msg_hash, private_key) + +def recover_typed_data(data, v, r, s): + msg_hash = encode_typed_data(data) + public_key = 
utils.ecrecover_to_pub(msg_hash, v, r, s) + address_bytes = utils.sha3(public_key)[-20:] + return utils.checksum_encode(address_bytes) \ No newline at end of file diff --git a/scripts/deploy.sh b/scripts/deploy.sh new file mode 100644 index 0000000..377337a --- /dev/null +++ b/scripts/deploy.sh @@ -0,0 +1,9 @@ +#!/bin/bash +# fail if any commands fails +set -e + +python -m pip install --user --upgrade setuptools wheel +python -m pip install --user --upgrade twine + +python setup.py sdist bdist_wheel +twine upload -u $PYPI_USER -p $PYPI_PASS dist/* \ No newline at end of file diff --git a/setup.py b/setup.py index 55f2d46..3f56e5e 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ with open("README.md", "r") as fh: setuptools.setup( name="py_eth_sig_utils", - version="0.2.0", + version="0.3.0", author="Richard Meissner", author_email="[email protected]", description="Python Ethereum Signing Utils",
Recover Signed Messages

First off - thanks a ton for your work on this! Getting tooling around EIP-712 is going to be critical for it to be adopted so I applaud this effort! I wanted to see if you had any intention of implementing recovery / verification for EIP-712, so that you can verify signed messages, e.g. where they are generated on the client and sent to a backend.
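A usage sketch of the signing and recovery API introduced by the patch above. It assumes the `py_eth_sig_utils.signing` module layout from that patch; the typed-data payload and the `sha3('cow')` test key mirror the test suite shown above, so the recovered address is the well-known test account.

```python
from ethereum import utils
from py_eth_sig_utils import signing

# EIP-712 typed data (the "Mail" example from the test suite above)
data = {
    "types": {
        "EIP712Domain": [
            {"name": "name", "type": "string"},
            {"name": "version", "type": "string"},
            {"name": "chainId", "type": "uint256"},
            {"name": "verifyingContract", "type": "address"},
        ],
        "Person": [
            {"name": "name", "type": "string"},
            {"name": "wallet", "type": "address"},
        ],
        "Mail": [
            {"name": "from", "type": "Person"},
            {"name": "to", "type": "Person"},
            {"name": "contents", "type": "string"},
        ],
    },
    "primaryType": "Mail",
    "domain": {
        "name": "Ether Mail",
        "version": "1",
        "chainId": 1,
        "verifyingContract": "0xCcCCccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC",
    },
    "message": {
        "from": {"name": "Cow", "wallet": "0xCD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826"},
        "to": {"name": "Bob", "wallet": "0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB"},
        "contents": "Hello, Bob!",
    },
}

# Client side: sign the typed data with a private key (test key from the suite)
private_key = utils.sha3("cow")
v, r, s = signing.sign_typed_data(data, private_key)
signature = signing.v_r_s_to_signature(v, r, s)  # 65 bytes: r || s || v

# Backend side: recover the signer address from the serialized signature
recovered = signing.recover_typed_data(data, *signing.signature_to_v_r_s(signature))
assert recovered == "0xCD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826"
```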
rmeissner/py-eth-sig-utils
diff --git a/py_eth_sig_utils/test.py b/py_eth_sig_utils/test.py deleted file mode 100644 index a43e290..0000000 --- a/py_eth_sig_utils/test.py +++ /dev/null @@ -1,222 +0,0 @@ -from ethereum import utils -from main import * - -def testSignTypeData(): - print("Test Simple") - data = { - "types": { - "EIP712Domain": [ - { "name": 'name', "type": 'string' }, - { "name": 'version', "type": 'string' }, - { "name": 'chainId', "type": 'uint256' }, - { "name": 'verifyingContract', "type": 'address' }, - ], - "Person": [ - { "name": 'name', "type": 'string' }, - { "name": 'wallet', "type": 'address' } - ], - "Mail": [ - { "name": 'from', "type": 'Person' }, - { "name": 'to', "type": 'Person' }, - { "name": 'contents', "type": 'string' } - ] - }, - "primaryType": 'Mail', - "domain": { - "name": 'Ether Mail', - "version": '1', - "chainId": 1, - "verifyingContract": '0xCcCCccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC', - }, - "message": { - "from": { - "name": 'Cow', - "wallet": '0xCD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826', - }, - "to": { - "name": 'Bob', - "wallet": '0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB', - }, - "contents": 'Hello, Bob!', - }, - } - #print(create_schema("Mail", data["types"])) - assert 'Mail(Person from,Person to,string contents)Person(string name,address wallet)' == create_schema("Mail", data["types"]) - #print(utils.encode_hex(create_schema_hash("Mail", data["types"]))) - assert 'a0cedeb2dc280ba39b857546d74f5549c3a1d7bdc2dd96bf881f76108e23dac2' == utils.encode_hex(create_schema_hash("Mail", data["types"])) - #print(utils.encode_hex(encode_data(data["primaryType"], data["message"], data["types"]))) - assert 'a0cedeb2dc280ba39b857546d74f5549c3a1d7bdc2dd96bf881f76108e23dac2fc71e5fa27ff56c350aa531bc129ebdf613b772b6604664f5d8dbe21b85eb0c8cd54f074a4af31b4411ff6a60c9719dbd559c221c8ac3492d9d872b041d703d1b5aadf3154a261abdd9086fc627b61efca26ae5702701d05cd2305f7c52a2fc8' == utils.encode_hex(encode_data(data["primaryType"], data["message"], data["types"])) - #print(utils.encode_hex(create_struct_hash(data["primaryType"], data["message"], data["types"]))) - assert 'c52c0ee5d84264471806290a3f2c4cecfc5490626bf912d01f240d7a274b371e' == utils.encode_hex(create_struct_hash(data["primaryType"], data["message"], data["types"])) - #print(utils.encode_hex(create_struct_hash('EIP712Domain', data["domain"], data["types"]))) - assert 'f2cee375fa42b42143804025fc449deafd50cc031ca257e0b194a650a912090f' == utils.encode_hex(create_struct_hash('EIP712Domain', data["domain"], data["types"])) - #print(encode_typed_data(data)) - assert 'be609aee343fb3c4b28e1df9e632fca64fcfaede20f02e86244efddf30957bd2' == encode_typed_data(data) - -def testSignTypeDataArray(): - print("Test Arrays") - data = { - "types": { - "EIP712Domain": [ - { "name": 'name', "type": 'string' }, - { "name": 'version', "type": 'string' }, - { "name": 'chainId', "type": 'uint256' }, - { "name": 'verifyingContract', "type": 'address' }, - ], - "Person": [ - { "name": 'name', "type": 'string' }, - { "name": 'wallet', "type": 'address' } - ], - "Mail": [ - { "name": 'from', "type": 'Person' }, - { "name": 'to', "type": 'Person[]' }, - { "name": 'contents', "type": 'string' } - ] - }, - "primaryType": 'Mail', - "domain": { - "name": 'Ether Mail', - "version": '1', - "chainId": 1, - "verifyingContract": '0xCcCCccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC', - }, - "message": { - "from": { - "name": 'Cow', - "wallet": '0xCD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826', - }, - "to": [{ - "name": 'Bob', - "wallet": '0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB', - }], - 
"contents": 'Hello, Bob!', - }, - } - - #print(create_schema("Mail", data["types"])) - assert 'Mail(Person from,Person[] to,string contents)Person(string name,address wallet)' == create_schema("Mail", data["types"]) - #print(utils.encode_hex(create_schema_hash("Mail", data["types"]))) - assert 'dd57d9596af52b430ced3d5b52d4e3d5dccfdf3e0572db1dcf526baad311fbd1' == utils.encode_hex(create_schema_hash("Mail", data["types"])) - #print(utils.encode_hex(encode_data(data["primaryType"], data["message"], data["types"]))) - assert 'dd57d9596af52b430ced3d5b52d4e3d5dccfdf3e0572db1dcf526baad311fbd1fc71e5fa27ff56c350aa531bc129ebdf613b772b6604664f5d8dbe21b85eb0c8cd54f074a4af31b4411ff6a60c9719dbd559c221c8ac3492d9d872b041d703d1b5aadf3154a261abdd9086fc627b61efca26ae5702701d05cd2305f7c52a2fc8' == utils.encode_hex(encode_data(data["primaryType"], data["message"], data["types"])) - #print(utils.encode_hex(create_struct_hash(data["primaryType"], data["message"], data["types"]))) - assert '25192142931f380985072cdd991e37f65cf8253ba7a0e675b54163a1d133b8ca' == utils.encode_hex(create_struct_hash(data["primaryType"], data["message"], data["types"])) - #print(utils.encode_hex(create_struct_hash('EIP712Domain', data["domain"], data["types"]))) - assert 'f2cee375fa42b42143804025fc449deafd50cc031ca257e0b194a650a912090f' == utils.encode_hex(create_struct_hash('EIP712Domain', data["domain"], data["types"])) - #print(encode_typed_data(data)) - assert '0659335d0565297a1855731b0382d06cda439ea8e352de57c6f6499436a2b84e' == encode_typed_data(data) - -def testSignTypeDataArrayBytes(): - print("Test Arrays and Bytes") - data = { - "types": { - "EIP712Domain": [ - { "name": 'name', "type": 'string' }, - { "name": 'version', "type": 'string' }, - { "name": 'chainId', "type": 'uint256' }, - { "name": 'verifyingContract', "type": 'address' }, - ], - "Person": [ - { "name": 'name', "type": 'string' }, - { "name": 'wallet', "type": 'address' } - ], - "Mail": [ - { "name": 'from', "type": 'Person' }, - { "name": 'to', "type": 'Person[]' }, - { "name": 'contents', "type": 'string' }, - { "name": 'payload', "type": 'bytes' } - ] - }, - "primaryType": 'Mail', - "domain": { - "name": 'Ether Mail', - "version": '1', - "chainId": 1, - "verifyingContract": '0xCcCCccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC', - }, - "message": { - "from": { - "name": 'Cow', - "wallet": '0xCD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826', - }, - "to": [{ - "name": 'Bob', - "wallet": '0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB', - }], - "contents": 'Hello, Bob!', - "payload": '0x25192142931f380985072cdd991e37f65cf8253ba7a0e675b54163a1d133b8ca' - }, - } - - #print(create_schema("Mail", data["types"])) - assert 'Mail(Person from,Person[] to,string contents,bytes payload)Person(string name,address wallet)' == create_schema("Mail", data["types"]) - #print(utils.encode_hex(create_schema_hash("Mail", data["types"]))) - assert '3dddc94d13b9ebab8e68f1428610e81839fcd751bdee402b12d2b3de3aace1fd' == utils.encode_hex(create_schema_hash("Mail", data["types"])) - #print(utils.encode_hex(encode_data(data["primaryType"], data["message"], data["types"]))) - assert '3dddc94d13b9ebab8e68f1428610e81839fcd751bdee402b12d2b3de3aace1fdfc71e5fa27ff56c350aa531bc129ebdf613b772b6604664f5d8dbe21b85eb0c8cd54f074a4af31b4411ff6a60c9719dbd559c221c8ac3492d9d872b041d703d1b5aadf3154a261abdd9086fc627b61efca26ae5702701d05cd2305f7c52a2fc8fac776d21ae071a32c362d4c20ba6586779708a56cad3a78d01b37ecb5744298' == utils.encode_hex(encode_data(data["primaryType"], data["message"], data["types"])) - 
#print(utils.encode_hex(create_struct_hash(data["primaryType"], data["message"], data["types"]))) - assert '813d832c9580a8bb0d4e2f5e85eb4466ef05a6ddaae7c020800da77fb573fe4e' == utils.encode_hex(create_struct_hash(data["primaryType"], data["message"], data["types"])) - #print(utils.encode_hex(create_struct_hash('EIP712Domain', data["domain"], data["types"]))) - assert 'f2cee375fa42b42143804025fc449deafd50cc031ca257e0b194a650a912090f' == utils.encode_hex(create_struct_hash('EIP712Domain', data["domain"], data["types"])) - #print(encode_typed_data(data)) - assert '78b9817f84906558ebb57f26f079e66887f444978b98d09e68a3468d48492f85' == encode_typed_data(data) - -def testSignTypeDataBytes(): - print("Test Bytes") - data = { - "types": { - "EIP712Domain": [ - { "name": 'name', "type": 'string' }, - { "name": 'version', "type": 'string' }, - { "name": 'chainId', "type": 'uint256' }, - { "name": 'verifyingContract', "type": 'address' }, - ], - "Person": [ - { "name": 'name', "type": 'string' }, - { "name": 'wallet', "type": 'address' } - ], - "Mail": [ - { "name": 'from', "type": 'Person' }, - { "name": 'to', "type": 'Person' }, - { "name": 'contents', "type": 'string' }, - { "name": 'payload', "type": 'bytes' } - ] - }, - "primaryType": 'Mail', - "domain": { - "name": 'Ether Mail', - "version": '1', - "chainId": 1, - "verifyingContract": '0xCcCCccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC', - }, - "message": { - "from": { - "name": 'Cow', - "wallet": '0xCD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826', - }, - "to": { - "name": 'Bob', - "wallet": '0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB', - }, - "contents": 'Hello, Bob!', - "payload": '0x25192142931f380985072cdd991e37f65cf8253ba7a0e675b54163a1d133b8ca' - }, - } - - #print(create_schema("Mail", data["types"])) - assert 'Mail(Person from,Person to,string contents,bytes payload)Person(string name,address wallet)' == create_schema("Mail", data["types"]) - #print(utils.encode_hex(create_schema_hash("Mail", data["types"]))) - assert '43999c52db673245777eb64b0330105de064e52179581a340a9856c32372528e' == utils.encode_hex(create_schema_hash("Mail", data["types"])) - #print(utils.encode_hex(encode_data(data["primaryType"], data["message"], data["types"]))) - assert '43999c52db673245777eb64b0330105de064e52179581a340a9856c32372528efc71e5fa27ff56c350aa531bc129ebdf613b772b6604664f5d8dbe21b85eb0c8cd54f074a4af31b4411ff6a60c9719dbd559c221c8ac3492d9d872b041d703d1b5aadf3154a261abdd9086fc627b61efca26ae5702701d05cd2305f7c52a2fc8fac776d21ae071a32c362d4c20ba6586779708a56cad3a78d01b37ecb5744298' == utils.encode_hex(encode_data(data["primaryType"], data["message"], data["types"])) - #print(utils.encode_hex(create_struct_hash(data["primaryType"], data["message"], data["types"]))) - assert 'e004bdc1ca57ba9ad5ea8c81e54dcbdb3bfce2d1d5ad92113f0871fb2a6eb052' == utils.encode_hex(create_struct_hash(data["primaryType"], data["message"], data["types"])) - #print(utils.encode_hex(create_struct_hash('EIP712Domain', data["domain"], data["types"]))) - assert 'f2cee375fa42b42143804025fc449deafd50cc031ca257e0b194a650a912090f' == utils.encode_hex(create_struct_hash('EIP712Domain', data["domain"], data["types"])) - #print(encode_typed_data(data)) - assert 'b4aaf457227fec401db772ec22d2095d1235ee5d0833f56f59108c9ffc90fb4b' == encode_typed_data(data) - -testSignTypeData() -testSignTypeDataArray() -testSignTypeDataBytes() -testSignTypeDataArrayBytes() \ No newline at end of file diff --git a/py_eth_sig_utils/tests/__init__.py b/py_eth_sig_utils/tests/__init__.py new file mode 100644 index 0000000..f0321e9 
--- /dev/null +++ b/py_eth_sig_utils/tests/__init__.py @@ -0,0 +1,5 @@ +name = "tests" + +from . import ( # NOQA + test_eip712_encode, +) \ No newline at end of file diff --git a/py_eth_sig_utils/tests/test_eip712_encode.py b/py_eth_sig_utils/tests/test_eip712_encode.py new file mode 100644 index 0000000..6d7a329 --- /dev/null +++ b/py_eth_sig_utils/tests/test_eip712_encode.py @@ -0,0 +1,223 @@ +import unittest +from ethereum import utils +from ..eip712.encoding import * + +class TestEIP712(unittest.TestCase): + + def test_encode_type_data(self): + #print("Test Simple") + data = { + "types": { + "EIP712Domain": [ + { "name": 'name', "type": 'string' }, + { "name": 'version', "type": 'string' }, + { "name": 'chainId', "type": 'uint256' }, + { "name": 'verifyingContract', "type": 'address' }, + ], + "Person": [ + { "name": 'name', "type": 'string' }, + { "name": 'wallet', "type": 'address' } + ], + "Mail": [ + { "name": 'from', "type": 'Person' }, + { "name": 'to', "type": 'Person' }, + { "name": 'contents', "type": 'string' } + ] + }, + "primaryType": 'Mail', + "domain": { + "name": 'Ether Mail', + "version": '1', + "chainId": 1, + "verifyingContract": '0xCcCCccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC', + }, + "message": { + "from": { + "name": 'Cow', + "wallet": '0xCD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826', + }, + "to": { + "name": 'Bob', + "wallet": '0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB', + }, + "contents": 'Hello, Bob!', + }, + } + #print(create_schema("Mail", data["types"])) + self.assertEqual('Mail(Person from,Person to,string contents)Person(string name,address wallet)', create_schema("Mail", data["types"])) + #print(utils.encode_hex(create_schema_hash("Mail", data["types"]))) + self.assertEqual('a0cedeb2dc280ba39b857546d74f5549c3a1d7bdc2dd96bf881f76108e23dac2', utils.encode_hex(create_schema_hash("Mail", data["types"]))) + #print(utils.encode_hex(encode_data(data["primaryType"], data["message"], data["types"]))) + self.assertEqual('a0cedeb2dc280ba39b857546d74f5549c3a1d7bdc2dd96bf881f76108e23dac2fc71e5fa27ff56c350aa531bc129ebdf613b772b6604664f5d8dbe21b85eb0c8cd54f074a4af31b4411ff6a60c9719dbd559c221c8ac3492d9d872b041d703d1b5aadf3154a261abdd9086fc627b61efca26ae5702701d05cd2305f7c52a2fc8', utils.encode_hex(encode_data(data["primaryType"], data["message"], data["types"]))) + #print(utils.encode_hex(create_struct_hash(data["primaryType"], data["message"], data["types"]))) + self.assertEqual('c52c0ee5d84264471806290a3f2c4cecfc5490626bf912d01f240d7a274b371e', utils.encode_hex(create_struct_hash(data["primaryType"], data["message"], data["types"]))) + #print(utils.encode_hex(create_struct_hash('EIP712Domain', data["domain"], data["types"]))) + self.assertEqual('f2cee375fa42b42143804025fc449deafd50cc031ca257e0b194a650a912090f', utils.encode_hex(create_struct_hash('EIP712Domain', data["domain"], data["types"]))) + #print(encode_typed_data(data)) + self.assertEqual('be609aee343fb3c4b28e1df9e632fca64fcfaede20f02e86244efddf30957bd2', encode_typed_data(data).hex()) + + def test_encode_type_data_array(self): + #print("Test Arrays") + data = { + "types": { + "EIP712Domain": [ + { "name": 'name', "type": 'string' }, + { "name": 'version', "type": 'string' }, + { "name": 'chainId', "type": 'uint256' }, + { "name": 'verifyingContract', "type": 'address' }, + ], + "Person": [ + { "name": 'name', "type": 'string' }, + { "name": 'wallet', "type": 'address' } + ], + "Mail": [ + { "name": 'from', "type": 'Person' }, + { "name": 'to', "type": 'Person[]' }, + { "name": 'contents', "type": 'string' } + ] + }, + 
"primaryType": 'Mail', + "domain": { + "name": 'Ether Mail', + "version": '1', + "chainId": 1, + "verifyingContract": '0xCcCCccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC', + }, + "message": { + "from": { + "name": 'Cow', + "wallet": '0xCD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826', + }, + "to": [{ + "name": 'Bob', + "wallet": '0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB', + }], + "contents": 'Hello, Bob!', + }, + } + + #print(create_schema("Mail", data["types"])) + self.assertEqual('Mail(Person from,Person[] to,string contents)Person(string name,address wallet)', create_schema("Mail", data["types"])) + #print(utils.encode_hex(create_schema_hash("Mail", data["types"]))) + self.assertEqual('dd57d9596af52b430ced3d5b52d4e3d5dccfdf3e0572db1dcf526baad311fbd1', utils.encode_hex(create_schema_hash("Mail", data["types"]))) + #print(utils.encode_hex(encode_data(data["primaryType"], data["message"], data["types"]))) + self.assertEqual('dd57d9596af52b430ced3d5b52d4e3d5dccfdf3e0572db1dcf526baad311fbd1fc71e5fa27ff56c350aa531bc129ebdf613b772b6604664f5d8dbe21b85eb0c8cd54f074a4af31b4411ff6a60c9719dbd559c221c8ac3492d9d872b041d703d1b5aadf3154a261abdd9086fc627b61efca26ae5702701d05cd2305f7c52a2fc8', utils.encode_hex(encode_data(data["primaryType"], data["message"], data["types"]))) + #print(utils.encode_hex(create_struct_hash(data["primaryType"], data["message"], data["types"]))) + self.assertEqual('25192142931f380985072cdd991e37f65cf8253ba7a0e675b54163a1d133b8ca', utils.encode_hex(create_struct_hash(data["primaryType"], data["message"], data["types"]))) + #print(utils.encode_hex(create_struct_hash('EIP712Domain', data["domain"], data["types"]))) + self.assertEqual('f2cee375fa42b42143804025fc449deafd50cc031ca257e0b194a650a912090f', utils.encode_hex(create_struct_hash('EIP712Domain', data["domain"], data["types"]))) + #print(encode_typed_data(data)) + self.assertEqual('0659335d0565297a1855731b0382d06cda439ea8e352de57c6f6499436a2b84e', encode_typed_data(data).hex()) + + def test_encode_type_data_array_bytes(self): + #print("Test Arrays and Bytes") + data = { + "types": { + "EIP712Domain": [ + { "name": 'name', "type": 'string' }, + { "name": 'version', "type": 'string' }, + { "name": 'chainId', "type": 'uint256' }, + { "name": 'verifyingContract', "type": 'address' }, + ], + "Person": [ + { "name": 'name', "type": 'string' }, + { "name": 'wallet', "type": 'address' } + ], + "Mail": [ + { "name": 'from', "type": 'Person' }, + { "name": 'to', "type": 'Person[]' }, + { "name": 'contents', "type": 'string' }, + { "name": 'payload', "type": 'bytes' } + ] + }, + "primaryType": 'Mail', + "domain": { + "name": 'Ether Mail', + "version": '1', + "chainId": 1, + "verifyingContract": '0xCcCCccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC', + }, + "message": { + "from": { + "name": 'Cow', + "wallet": '0xCD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826', + }, + "to": [{ + "name": 'Bob', + "wallet": '0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB', + }], + "contents": 'Hello, Bob!', + "payload": '0x25192142931f380985072cdd991e37f65cf8253ba7a0e675b54163a1d133b8ca' + }, + } + + #print(create_schema("Mail", data["types"])) + self.assertEqual('Mail(Person from,Person[] to,string contents,bytes payload)Person(string name,address wallet)', create_schema("Mail", data["types"])) + #print(utils.encode_hex(create_schema_hash("Mail", data["types"]))) + self.assertEqual('3dddc94d13b9ebab8e68f1428610e81839fcd751bdee402b12d2b3de3aace1fd', utils.encode_hex(create_schema_hash("Mail", data["types"]))) + #print(utils.encode_hex(encode_data(data["primaryType"], data["message"], 
data["types"]))) + self.assertEqual('3dddc94d13b9ebab8e68f1428610e81839fcd751bdee402b12d2b3de3aace1fdfc71e5fa27ff56c350aa531bc129ebdf613b772b6604664f5d8dbe21b85eb0c8cd54f074a4af31b4411ff6a60c9719dbd559c221c8ac3492d9d872b041d703d1b5aadf3154a261abdd9086fc627b61efca26ae5702701d05cd2305f7c52a2fc8fac776d21ae071a32c362d4c20ba6586779708a56cad3a78d01b37ecb5744298', utils.encode_hex(encode_data(data["primaryType"], data["message"], data["types"]))) + #print(utils.encode_hex(create_struct_hash(data["primaryType"], data["message"], data["types"]))) + self.assertEqual('813d832c9580a8bb0d4e2f5e85eb4466ef05a6ddaae7c020800da77fb573fe4e', utils.encode_hex(create_struct_hash(data["primaryType"], data["message"], data["types"]))) + #print(utils.encode_hex(create_struct_hash('EIP712Domain', data["domain"], data["types"]))) + self.assertEqual('f2cee375fa42b42143804025fc449deafd50cc031ca257e0b194a650a912090f', utils.encode_hex(create_struct_hash('EIP712Domain', data["domain"], data["types"]))) + #print(encode_typed_data(data)) + self.assertEqual('78b9817f84906558ebb57f26f079e66887f444978b98d09e68a3468d48492f85', encode_typed_data(data).hex()) + + def test_sign_type_data_bytes(self): + #print("Test Bytes") + data = { + "types": { + "EIP712Domain": [ + { "name": 'name', "type": 'string' }, + { "name": 'version', "type": 'string' }, + { "name": 'chainId', "type": 'uint256' }, + { "name": 'verifyingContract', "type": 'address' }, + ], + "Person": [ + { "name": 'name', "type": 'string' }, + { "name": 'wallet', "type": 'address' } + ], + "Mail": [ + { "name": 'from', "type": 'Person' }, + { "name": 'to', "type": 'Person' }, + { "name": 'contents', "type": 'string' }, + { "name": 'payload', "type": 'bytes' } + ] + }, + "primaryType": 'Mail', + "domain": { + "name": 'Ether Mail', + "version": '1', + "chainId": 1, + "verifyingContract": '0xCcCCccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC', + }, + "message": { + "from": { + "name": 'Cow', + "wallet": '0xCD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826', + }, + "to": { + "name": 'Bob', + "wallet": '0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB', + }, + "contents": 'Hello, Bob!', + "payload": '0x25192142931f380985072cdd991e37f65cf8253ba7a0e675b54163a1d133b8ca' + }, + } + + #print(create_schema("Mail", data["types"])) + self.assertEqual('Mail(Person from,Person to,string contents,bytes payload)Person(string name,address wallet)', create_schema("Mail", data["types"])) + #print(utils.encode_hex(create_schema_hash("Mail", data["types"]))) + self.assertEqual('43999c52db673245777eb64b0330105de064e52179581a340a9856c32372528e', utils.encode_hex(create_schema_hash("Mail", data["types"]))) + #print(utils.encode_hex(encode_data(data["primaryType"], data["message"], data["types"]))) + self.assertEqual('43999c52db673245777eb64b0330105de064e52179581a340a9856c32372528efc71e5fa27ff56c350aa531bc129ebdf613b772b6604664f5d8dbe21b85eb0c8cd54f074a4af31b4411ff6a60c9719dbd559c221c8ac3492d9d872b041d703d1b5aadf3154a261abdd9086fc627b61efca26ae5702701d05cd2305f7c52a2fc8fac776d21ae071a32c362d4c20ba6586779708a56cad3a78d01b37ecb5744298', utils.encode_hex(encode_data(data["primaryType"], data["message"], data["types"]))) + #print(utils.encode_hex(create_struct_hash(data["primaryType"], data["message"], data["types"]))) + self.assertEqual('e004bdc1ca57ba9ad5ea8c81e54dcbdb3bfce2d1d5ad92113f0871fb2a6eb052', utils.encode_hex(create_struct_hash(data["primaryType"], data["message"], data["types"]))) + #print(utils.encode_hex(create_struct_hash('EIP712Domain', data["domain"], data["types"]))) + 
self.assertEqual('f2cee375fa42b42143804025fc449deafd50cc031ca257e0b194a650a912090f', utils.encode_hex(create_struct_hash('EIP712Domain', data["domain"], data["types"]))) + #print(encode_typed_data(data)) + self.assertEqual('b4aaf457227fec401db772ec22d2095d1235ee5d0833f56f59108c9ffc90fb4b', encode_typed_data(data).hex()) + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/py_eth_sig_utils/tests/test_sign_typed_data.py b/py_eth_sig_utils/tests/test_sign_typed_data.py new file mode 100644 index 0000000..9f9ff46 --- /dev/null +++ b/py_eth_sig_utils/tests/test_sign_typed_data.py @@ -0,0 +1,55 @@ +import unittest +from ethereum import utils +from ..signing import * + +class TestSignTypedData(unittest.TestCase): + + data = { + "types": { + "EIP712Domain": [ + { "name": 'name', "type": 'string' }, + { "name": 'version', "type": 'string' }, + { "name": 'chainId', "type": 'uint256' }, + { "name": 'verifyingContract', "type": 'address' }, + ], + "Person": [ + { "name": 'name', "type": 'string' }, + { "name": 'wallet', "type": 'address' } + ], + "Mail": [ + { "name": 'from', "type": 'Person' }, + { "name": 'to', "type": 'Person' }, + { "name": 'contents', "type": 'string' } + ] + }, + "primaryType": 'Mail', + "domain": { + "name": 'Ether Mail', + "version": '1', + "chainId": 1, + "verifyingContract": '0xCcCCccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC', + }, + "message": { + "from": { + "name": 'Cow', + "wallet": '0xCD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826', + }, + "to": { + "name": 'Bob', + "wallet": '0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB', + }, + "contents": 'Hello, Bob!', + }, + } + + def test_sign(self): + private_key = utils.sha3('cow') + signature = v_r_s_to_signature(*sign_typed_data(self.data, private_key)).hex() + self.assertEqual(signature, '4355c47d63924e8a72e509b65029052eb6c299d53a04e167c5775fd466751c9d07299936d304c153f6443dfa05f40ff007d72911b6f72307f996231605b915621c') + + def test_recover(self): + signer_address = recover_typed_data(self.data, *signature_to_v_r_s(bytes.fromhex('4355c47d63924e8a72e509b65029052eb6c299d53a04e167c5775fd466751c9d07299936d304c153f6443dfa05f40ff007d72911b6f72307f996231605b915621c'))) + self.assertEqual('0xCD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826', signer_address) + +if __name__ == '__main__': + unittest.main() \ No newline at end of file
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 3, "test_score": 2 }, "num_modified_files": 4 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
cached-property==1.5.2 coincurve==21.0.0 cytoolz==0.12.3 eth-abi==1.1.1 eth-hash==0.3.3 eth-typing==2.3.0 eth-utils==1.10.0 ethereum==2.3.2 exceptiongroup==1.2.2 future==1.0.0 iniconfig==2.1.0 mypy-extensions==1.0.0 packaging==24.2 parsimonious==0.8.0 pbkdf2==1.3 pluggy==1.5.0 py-ecc==5.2.0 -e git+https://github.com/rmeissner/py-eth-sig-utils.git@cb1927214594647f979ab1cdf25a871bfb96fd8c#egg=py_eth_sig_utils pycryptodome==3.22.0 pyethash==0.1.27 pysha3==1.0.2 pytest==8.3.5 PyYAML==6.0.2 repoze.lru==0.7 rlp==1.2.0 scrypt==0.8.27 six==1.17.0 tomli==2.2.1 toolz==1.0.0
name: py-eth-sig-utils channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - cached-property==1.5.2 - coincurve==21.0.0 - cytoolz==0.12.3 - eth-abi==1.1.1 - eth-hash==0.3.3 - eth-typing==2.3.0 - eth-utils==1.10.0 - ethereum==2.3.2 - exceptiongroup==1.2.2 - future==1.0.0 - iniconfig==2.1.0 - mypy-extensions==1.0.0 - packaging==24.2 - parsimonious==0.8.0 - pbkdf2==1.3 - pluggy==1.5.0 - py-ecc==5.2.0 - pycryptodome==3.22.0 - pyethash==0.1.27 - pysha3==1.0.2 - pytest==8.3.5 - pyyaml==6.0.2 - repoze-lru==0.7 - rlp==1.2.0 - scrypt==0.8.27 - six==1.17.0 - tomli==2.2.1 - toolz==1.0.0 prefix: /opt/conda/envs/py-eth-sig-utils
[ "py_eth_sig_utils/tests/test_eip712_encode.py::TestEIP712::test_encode_type_data", "py_eth_sig_utils/tests/test_eip712_encode.py::TestEIP712::test_encode_type_data_array", "py_eth_sig_utils/tests/test_eip712_encode.py::TestEIP712::test_encode_type_data_array_bytes", "py_eth_sig_utils/tests/test_eip712_encode.py::TestEIP712::test_sign_type_data_bytes", "py_eth_sig_utils/tests/test_sign_typed_data.py::TestSignTypedData::test_recover", "py_eth_sig_utils/tests/test_sign_typed_data.py::TestSignTypedData::test_sign" ]
[]
[]
[]
MIT License
3,081
[ "setup.py", "py_eth_sig_utils/eip712/__init__.py", "py_eth_sig_utils/signing.py", "py_eth_sig_utils/main.py", ".travis.yml", "README.md", "py_eth_sig_utils/__init__.py", "scripts/deploy.sh" ]
[ "setup.py", "py_eth_sig_utils/eip712/encoding.py", "py_eth_sig_utils/eip712/__init__.py", "py_eth_sig_utils/signing.py", ".travis.yml", "README.md", "py_eth_sig_utils/__init__.py", "scripts/deploy.sh" ]
tox-dev__tox-990
3b91d63c9c5a089ba8ae9cbaec20bdf52ecf25cf
2018-09-16 21:07:47
cf6afcecaca22df7b509facaea43c09a15570f75
diff --git a/changelog/824.feature.rst b/changelog/824.feature.rst new file mode 100644 index 00000000..aae99136 --- /dev/null +++ b/changelog/824.feature.rst @@ -0,0 +1,1 @@ +use the os environment variable ``TOX_SKIP_ENV`` to filter out tox environment names from the run list (set by :confval:`envlist`) - by :user:`gaborbernat` diff --git a/doc/config.rst b/doc/config.rst index 89ce7a3a..7c33646a 100644 --- a/doc/config.rst +++ b/doc/config.rst @@ -76,6 +76,13 @@ and will first lookup global tox settings in this section: * environment variable ``TOXENV`` * ``tox.ini`` file's ``envlist`` + .. versionadded:: 3.4.0 + + What tox environments are ran during the tox invocation can be further filtered + via the operating system environment variable ``TOX_SKIP_ENV`` regular expression + (e.g. ``py27.*`` means **don't** evaluate environments that start with the key ``py27``). + Skipped environments will be logged at level two verbosity level. + .. confval:: ignore_basepython_conflict=True|False(default) .. versionadded:: 3.1.0 diff --git a/src/tox/session.py b/src/tox/session.py index 7da1efcd..dc25a277 100644 --- a/src/tox/session.py +++ b/src/tox/session.py @@ -381,7 +381,7 @@ class Session: self._spec2pkg = {} self._name2venv = {} try: - self.venvlist = [self.getvenv(x) for x in self.config.envlist] + self.venvlist = [self.getvenv(x) for x in self.evaluated_env_list()] except LookupError: raise SystemExit(1) except tox.exception.ConfigError as e: @@ -389,6 +389,18 @@ class Session: raise SystemExit(1) self._actions = [] + def evaluated_env_list(self): + tox_env_filter = os.environ.get("TOX_SKIP_ENV") + tox_env_filter_re = re.compile(tox_env_filter) if tox_env_filter is not None else None + for name in self.config.envlist: + if tox_env_filter_re is not None and tox_env_filter_re.match(name): + msg = "skip environment {}, matches filter {!r}".format( + name, tox_env_filter_re.pattern + ) + self.report.verbosity1(msg) + continue + yield name + @property def hook(self): return self.config.pluginmanager.hook
option to skip a factor level

For example, with ``{py27,py36}-{nocov,cov,diffcov}``: @warsaw, in this case the user might want to not run all the non-diffcov envs.
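A minimal sketch of the env-list filtering the patch above adds: when `TOX_SKIP_ENV` is set, it is compiled as a regular expression and any environment name it matches is skipped. The standalone function below is illustrative; in tox this logic lives on the session object.

```python
import os
import re


def evaluated_env_list(envlist):
    """Yield only the tox environments that survive the TOX_SKIP_ENV filter.

    Mirrors the logic added in the patch above: if TOX_SKIP_ENV is set,
    treat it as a regular expression and drop every env name it matches.
    """
    pattern = os.environ.get("TOX_SKIP_ENV")
    skip_re = re.compile(pattern) if pattern is not None else None
    for name in envlist:
        if skip_re is not None and skip_re.match(name):
            continue
        yield name


envs = ["py27-nocov", "py27-diffcov", "py36-nocov", "py36-diffcov"]
os.environ["TOX_SKIP_ENV"] = r"py27.*"       # drop every py27 factor combination
print(list(evaluated_env_list(envs)))         # ['py36-nocov', 'py36-diffcov']
```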
tox-dev/tox
diff --git a/tests/unit/session/test_session.py b/tests/unit/session/test_session.py index 90b1b992..0e93f588 100644 --- a/tests/unit/session/test_session.py +++ b/tests/unit/session/test_session.py @@ -140,3 +140,73 @@ def test_skip_install_skip_package(cmd, initproj, mock_venv): ) result = cmd("--notest") assert result.ret == 0 + + [email protected]() +def venv_filter_project(initproj, cmd): + def func(*args): + initproj( + "pkg123-0.7", + filedefs={ + "tox.ini": """ + [tox] + envlist = {py27,py36}-{nocov,cov,diffcov}{,-extra} + skipsdist = true + + [testenv] + skip_install = true + commands = python -c 'print("{envname}")' + """ + }, + ) + result = cmd(*args) + assert result.ret == 0 + active = [i.name for i in result.session.venvlist] + return active, result + + yield func + + +def test_venv_filter_empty_all_active(venv_filter_project, monkeypatch): + monkeypatch.delenv("TOX_SKIP_ENV", raising=False) + active, result = venv_filter_project("-a") + assert result.outlines == [ + "py27-nocov", + "py27-nocov-extra", + "py27-cov", + "py27-cov-extra", + "py27-diffcov", + "py27-diffcov-extra", + "py36-nocov", + "py36-nocov-extra", + "py36-cov", + "py36-cov-extra", + "py36-diffcov", + "py36-diffcov-extra", + ] + assert active == result.outlines + + +def test_venv_filter_match_all_none_active(venv_filter_project, monkeypatch): + monkeypatch.setenv("TOX_SKIP_ENV", ".*") + active, result = venv_filter_project("-a") + assert not active + existing_envs = result.outlines + + _, result = venv_filter_project("-avv") + for name in existing_envs: + msg = "skip environment {}, matches filter '.*'".format(name) + assert msg in result.outlines + + +def test_venv_filter_match_some_some_active(venv_filter_project, monkeypatch): + monkeypatch.setenv("TOX_SKIP_ENV", "py27.*") + active, result = venv_filter_project("-avvv") + assert active == [ + "py36-nocov", + "py36-nocov-extra", + "py36-cov", + "py36-cov-extra", + "py36-diffcov", + "py36-diffcov-extra", + ]
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_added_files", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 2 }
3.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock" ], "pre_install": null, "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
certifi @ file:///croot/certifi_1671487769961/work/certifi coverage==7.2.7 distlib==0.3.9 exceptiongroup==1.2.2 execnet==2.0.2 filelock==3.12.2 importlib-metadata==6.7.0 iniconfig==2.0.0 packaging==24.0 platformdirs==4.0.0 pluggy==0.13.1 py==1.11.0 pytest==7.4.4 pytest-cov==4.1.0 pytest-mock==3.11.1 pytest-xdist==3.5.0 six==1.17.0 toml==0.10.2 tomli==2.0.1 -e git+https://github.com/tox-dev/tox.git@3b91d63c9c5a089ba8ae9cbaec20bdf52ecf25cf#egg=tox typing_extensions==4.7.1 virtualenv==20.26.6 zipp==3.15.0
name: tox channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.2.7 - distlib==0.3.9 - exceptiongroup==1.2.2 - execnet==2.0.2 - filelock==3.12.2 - importlib-metadata==6.7.0 - iniconfig==2.0.0 - packaging==24.0 - platformdirs==4.0.0 - pluggy==0.13.1 - py==1.11.0 - pytest==7.4.4 - pytest-cov==4.1.0 - pytest-mock==3.11.1 - pytest-xdist==3.5.0 - six==1.17.0 - toml==0.10.2 - tomli==2.0.1 - tox==3.3.1.dev22+g3b91d63c - typing-extensions==4.7.1 - virtualenv==20.26.6 - zipp==3.15.0 prefix: /opt/conda/envs/tox
[ "tests/unit/session/test_session.py::test_venv_filter_match_all_none_active", "tests/unit/session/test_session.py::test_venv_filter_match_some_some_active" ]
[]
[ "tests/unit/session/test_session.py::test__resolve_pkg_missing_directory", "tests/unit/session/test_session.py::test__resolve_pkg_missing_directory_in_distshare", "tests/unit/session/test_session.py::test__resolve_pkg_multiple_valid_versions", "tests/unit/session/test_session.py::test__resolve_pkg_with_invalid_version", "tests/unit/session/test_session.py::test__resolve_pkg_with_alpha_version", "tests/unit/session/test_session.py::test__resolve_pkg_doubledash", "tests/unit/session/test_session.py::test_minversion", "tests/unit/session/test_session.py::test_tox_parallel_build_safe", "tests/unit/session/test_session.py::test_skip_sdist", "tests/unit/session/test_session.py::test_skip_install_skip_package", "tests/unit/session/test_session.py::test_venv_filter_empty_all_active" ]
[]
MIT License
3,082
[ "changelog/824.feature.rst", "doc/config.rst", "src/tox/session.py" ]
[ "changelog/824.feature.rst", "doc/config.rst", "src/tox/session.py" ]
tox-dev__tox-991
3b91d63c9c5a089ba8ae9cbaec20bdf52ecf25cf
2018-09-16 22:06:58
cf6afcecaca22df7b509facaea43c09a15570f75
diff --git a/changelog/824.feature.rst b/changelog/824.feature.rst new file mode 100644 index 00000000..aae99136 --- /dev/null +++ b/changelog/824.feature.rst @@ -0,0 +1,1 @@ +use the os environment variable ``TOX_SKIP_ENV`` to filter out tox environment names from the run list (set by :confval:`envlist`) - by :user:`gaborbernat` diff --git a/changelog/838.feature.rst b/changelog/838.feature.rst new file mode 100644 index 00000000..b90ff843 --- /dev/null +++ b/changelog/838.feature.rst @@ -0,0 +1,1 @@ +always set ``PIP_USER=0`` (do not install into the user site package, but inside the virtual environment created) and ``PIP_NO_DEPS=0`` (installing without dependencies can cause broken package installations) inside tox - by :user:`gaborbernat` diff --git a/changelog/947.feature.rst b/changelog/947.feature.rst new file mode 100644 index 00000000..7fff5e71 --- /dev/null +++ b/changelog/947.feature.rst @@ -0,0 +1,1 @@ +allow injecting config value inside the ini file dependent of the fact that we're connected to an interactive shell or not - by :user:`gaborbernat` diff --git a/doc/config.rst b/doc/config.rst index 89ce7a3a..5f6ab273 100644 --- a/doc/config.rst +++ b/doc/config.rst @@ -76,6 +76,13 @@ and will first lookup global tox settings in this section: * environment variable ``TOXENV`` * ``tox.ini`` file's ``envlist`` + .. versionadded:: 3.4.0 + + What tox environments are ran during the tox invocation can be further filtered + via the operating system environment variable ``TOX_SKIP_ENV`` regular expression + (e.g. ``py27.*`` means **don't** evaluate environments that start with the key ``py27``). + Skipped environments will be logged at level two verbosity level. + .. confval:: ignore_basepython_conflict=True|False(default) .. versionadded:: 3.1.0 @@ -557,6 +564,17 @@ the above example is roughly equivalent to .. _`command positional substitution`: .. _`positional substitution`: +interactive shell substitution +++++++++++++++++++++++++++++++ + +It's possible to inject a config value only when tox is running in interactive shell (standard input): + + {tty:ON_VALUE:OFF_VALUE} + +The first value is the value to inject when the interactive terminal is available, +the second value is the value to use when it's not. The later on is optional. A good use case +for this is e.g. passing in the ``--pdb`` flag for pytest. 
+ substitutions for positional arguments in commands ++++++++++++++++++++++++++++++++++++++++++++++++++ diff --git a/src/tox/config.py b/src/tox/config.py index 99714e6d..16f2c606 100755 --- a/src/tox/config.py +++ b/src/tox/config.py @@ -1430,6 +1430,10 @@ class Replacer: if sub_type == "env": return self._replace_env(match) + if sub_type == "tty": + if is_interactive(): + return match.group("substitution_value") + return match.group("default_value") if sub_type is not None: raise tox.exception.ConfigError( "No support for the {} substitution type".format(sub_type) @@ -1437,16 +1441,16 @@ class Replacer: return self._replace_substitution(match) def _replace_env(self, match): - envkey = match.group("substitution_value") - if not envkey: + key = match.group("substitution_value") + if not key: raise tox.exception.ConfigError("env: requires an environment variable name") default = match.group("default_value") - envvalue = self.reader.get_environ_value(envkey) - if envvalue is not None: - return envvalue + value = self.reader.get_environ_value(key) + if value is not None: + return value if default is not None: return default - raise tox.exception.MissingSubstitution(envkey) + raise tox.exception.MissingSubstitution(key) def _substitute_from_other_section(self, key): if key.startswith("[") and "]" in key: @@ -1475,6 +1479,10 @@ class Replacer: return str(val) +def is_interactive(): + return sys.stdin.isatty() + + class _ArgvlistReader: @classmethod def getargvlist(cls, reader, value, replace=True): diff --git a/src/tox/session.py b/src/tox/session.py index 7da1efcd..dc25a277 100644 --- a/src/tox/session.py +++ b/src/tox/session.py @@ -381,7 +381,7 @@ class Session: self._spec2pkg = {} self._name2venv = {} try: - self.venvlist = [self.getvenv(x) for x in self.config.envlist] + self.venvlist = [self.getvenv(x) for x in self.evaluated_env_list()] except LookupError: raise SystemExit(1) except tox.exception.ConfigError as e: @@ -389,6 +389,18 @@ class Session: raise SystemExit(1) self._actions = [] + def evaluated_env_list(self): + tox_env_filter = os.environ.get("TOX_SKIP_ENV") + tox_env_filter_re = re.compile(tox_env_filter) if tox_env_filter is not None else None + for name in self.config.envlist: + if tox_env_filter_re is not None and tox_env_filter_re.match(name): + msg = "skip environment {}, matches filter {!r}".format( + name, tox_env_filter_re.pattern + ) + self.report.verbosity1(msg) + continue + yield name + @property def hook(self): return self.config.pluginmanager.hook diff --git a/src/tox/venv.py b/src/tox/venv.py index a10149a9..0209f915 100755 --- a/src/tox/venv.py +++ b/src/tox/venv.py @@ -5,6 +5,7 @@ import pipes import re import sys import warnings +from itertools import chain import py @@ -231,7 +232,7 @@ class VirtualEnv(object): setup_py = setupdir.join("setup.py") setup_cfg = setupdir.join("setup.cfg") args = [self.envconfig.envpython, str(setup_py), "--name"] - env = self._getenv() + env = self._get_os_environ() output = action.popen(args, cwd=setupdir, redirect=False, returnout=True, env=env) name = output.strip() args = [self.envconfig.envpython, "-c", "import sys; print(sys.path)"] @@ -297,30 +298,26 @@ class VirtualEnv(object): return options def run_install_command(self, packages, action, options=()): - argv = self.envconfig.install_command[:] - i = argv.index("{packages}") - argv[i : i + 1] = packages - if "{opts}" in argv: - i = argv.index("{opts}") - argv[i : i + 1] = list(options) + def expand(val): + # expand an install command + if val == "{packages}": + for 
package in packages: + yield package + elif val == "{opts}": + for opt in options: + yield opt + else: + yield val - for x in ("PIP_RESPECT_VIRTUALENV", "PIP_REQUIRE_VIRTUALENV", "__PYVENV_LAUNCHER__"): - os.environ.pop(x, None) + cmd = list(chain.from_iterable(expand(val) for val in self.envconfig.install_command)) - if "PYTHONPATH" not in self.envconfig.passenv: - # If PYTHONPATH not explicitly asked for, remove it. - if "PYTHONPATH" in os.environ: - self.session.report.warning( - "Discarding $PYTHONPATH from environment, to override " - "specify PYTHONPATH in 'passenv' in your configuration." - ) - os.environ.pop("PYTHONPATH") + self.ensure_pip_os_environ_ok() old_stdout = sys.stdout sys.stdout = codecs.getwriter("utf8")(sys.stdout) try: self._pcall( - argv, + cmd, cwd=self.envconfig.config.toxinidir, action=action, redirect=self.session.report.verbosity < 2, @@ -328,6 +325,24 @@ class VirtualEnv(object): finally: sys.stdout = old_stdout + def ensure_pip_os_environ_ok(self): + for key in ("PIP_RESPECT_VIRTUALENV", "PIP_REQUIRE_VIRTUALENV", "__PYVENV_LAUNCHER__"): + os.environ.pop(key, None) + if "PYTHONPATH" not in self.envconfig.passenv: + # If PYTHONPATH not explicitly asked for, remove it. + if "PYTHONPATH" in os.environ: + self.session.report.warning( + "Discarding $PYTHONPATH from environment, to override " + "specify PYTHONPATH in 'passenv' in your configuration." + ) + os.environ.pop("PYTHONPATH") + + # installing packages at user level may mean we're not installing inside the venv + os.environ["PIP_USER"] = "0" + + # installing without dependencies may lead to broken packages + os.environ["PIP_NO_DEPS"] = "0" + def _install(self, deps, extraopts=None, action=None): if not deps: return @@ -353,13 +368,13 @@ class VirtualEnv(object): options.extend(extraopts) self.run_install_command(packages=packages, options=options, action=action) - def _getenv(self, testcommand=False): - if testcommand: + def _get_os_environ(self, is_test_command=False): + if is_test_command: # for executing tests we construct a clean environment env = {} - for envname in self.envconfig.passenv: - if envname in os.environ: - env[envname] = os.environ[envname] + for env_key in self.envconfig.passenv: + if env_key in os.environ: + env[env_key] = os.environ[env_key] else: # for executing non-test commands we use the full # invocation environment @@ -377,7 +392,7 @@ class VirtualEnv(object): self.session.make_emptydir(self.envconfig.envtmpdir) self.envconfig.envtmpdir.ensure(dir=1) cwd = self.envconfig.changedir - env = self._getenv(testcommand=True) + env = self._get_os_environ(is_test_command=True) # Display PYTHONHASHSEED to assist with reproducibility. 
action.setactivity("runtests", "PYTHONHASHSEED={!r}".format(env.get("PYTHONHASHSEED"))) for i, argv in enumerate(self.envconfig.commands): @@ -405,7 +420,7 @@ class VirtualEnv(object): action=action, redirect=redirect, ignore_ret=ignore_ret, - testcommand=True, + is_test_command=True, ) except tox.exception.InvocationError as err: if self.envconfig.ignore_outcome: @@ -424,18 +439,28 @@ class VirtualEnv(object): raise def _pcall( - self, args, cwd, venv=True, testcommand=False, action=None, redirect=True, ignore_ret=False + self, + args, + cwd, + venv=True, + is_test_command=False, + action=None, + redirect=True, + ignore_ret=False, ): + # construct environment variables os.environ.pop("VIRTUALENV_PYTHON", None) + env = self._get_os_environ(is_test_command=is_test_command) + bin_dir = str(self.envconfig.envbindir) + env["PATH"] = os.pathsep.join([bin_dir, os.environ["PATH"]]) + self.session.report.verbosity2("setting PATH={}".format(env["PATH"])) - cwd.ensure(dir=1) + # get command args[0] = self.getcommandpath(args[0], venv, cwd) if sys.platform != "win32" and "TOX_LIMITED_SHEBANG" in os.environ: args = prepend_shebang_interpreter(args) - env = self._getenv(testcommand=testcommand) - bindir = str(self.envconfig.envbindir) - env["PATH"] = p = os.pathsep.join([bindir, os.environ["PATH"]]) - self.session.report.verbosity2("setting PATH={}".format(p)) + + cwd.ensure(dir=1) # ensure the cwd exists return action.popen(args, cwd=cwd, env=env, redirect=redirect, ignore_ret=ignore_ret)
Tox should be able to detect an interactive tty and alter some options based on this

It would be extremely useful if tox allowed users to detect an interactive tty and alter some execution options (like variables) accordingly. Example use case: adding `--pdb` to py.test arguments on an interactive tty. Any CI execution runs without an interactive TTY; one is present only on a developer terminal, which is exactly the case where we may want to use an interactive debugger.

Right now this is achievable using bash with something like:

```
if [[ -t 1 ]]; then
    tox
else
    tox -- --pdb
fi
```

As you can see, this is not only inconvenient but also creates more problems when you have multiple environments.

Reference: https://stackoverflow.com/questions/911168/how-to-detect-if-my-shell-script-is-running-through-a-pipe
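For context, a minimal Python sketch of the kind of TTY check this feature needs; the test patch below exercises it through `tox.config.is_interactive()`, but the helper shown here is illustrative only, not tox's actual implementation:

```python
import sys

def is_interactive():
    # True when stdout is attached to a terminal (a developer's shell),
    # False when output is piped or redirected, e.g. on a CI worker.
    return sys.stdout.isatty()

# branch an option on interactivity, mirroring the use case in the issue
extra_args = ["--pdb"] if is_interactive() else []
print("extra py.test arguments:", extra_args)
```

Run in a terminal this prints the extra flag; with stdout piped it prints an empty list, which is what a CI job would see.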
tox-dev/tox
diff --git a/tests/unit/session/test_session.py b/tests/unit/session/test_session.py index 90b1b992..0e93f588 100644 --- a/tests/unit/session/test_session.py +++ b/tests/unit/session/test_session.py @@ -140,3 +140,73 @@ def test_skip_install_skip_package(cmd, initproj, mock_venv): ) result = cmd("--notest") assert result.ret == 0 + + [email protected]() +def venv_filter_project(initproj, cmd): + def func(*args): + initproj( + "pkg123-0.7", + filedefs={ + "tox.ini": """ + [tox] + envlist = {py27,py36}-{nocov,cov,diffcov}{,-extra} + skipsdist = true + + [testenv] + skip_install = true + commands = python -c 'print("{envname}")' + """ + }, + ) + result = cmd(*args) + assert result.ret == 0 + active = [i.name for i in result.session.venvlist] + return active, result + + yield func + + +def test_venv_filter_empty_all_active(venv_filter_project, monkeypatch): + monkeypatch.delenv("TOX_SKIP_ENV", raising=False) + active, result = venv_filter_project("-a") + assert result.outlines == [ + "py27-nocov", + "py27-nocov-extra", + "py27-cov", + "py27-cov-extra", + "py27-diffcov", + "py27-diffcov-extra", + "py36-nocov", + "py36-nocov-extra", + "py36-cov", + "py36-cov-extra", + "py36-diffcov", + "py36-diffcov-extra", + ] + assert active == result.outlines + + +def test_venv_filter_match_all_none_active(venv_filter_project, monkeypatch): + monkeypatch.setenv("TOX_SKIP_ENV", ".*") + active, result = venv_filter_project("-a") + assert not active + existing_envs = result.outlines + + _, result = venv_filter_project("-avv") + for name in existing_envs: + msg = "skip environment {}, matches filter '.*'".format(name) + assert msg in result.outlines + + +def test_venv_filter_match_some_some_active(venv_filter_project, monkeypatch): + monkeypatch.setenv("TOX_SKIP_ENV", "py27.*") + active, result = venv_filter_project("-avvv") + assert active == [ + "py36-nocov", + "py36-nocov-extra", + "py36-cov", + "py36-cov-extra", + "py36-diffcov", + "py36-diffcov-extra", + ] diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py index 78afe57e..3943b032 100644 --- a/tests/unit/test_config.py +++ b/tests/unit/test_config.py @@ -2724,3 +2724,29 @@ def test_config_bad_config_type_specified(monkeypatch, tmpdir, capsys): msg = "\n".join(notes) + "\n" assert err == msg assert "ERROR:" not in out + + +def test_interactive_na(newconfig, monkeypatch): + monkeypatch.setattr(tox.config, "is_interactive", lambda: False) + config = newconfig( + """ + [testenv:py] + setenv = A = {tty:X:Y} + """ + ) + assert config.envconfigs["py"].setenv["A"] == "Y" + + +def test_interactive_available(newconfig, monkeypatch): + monkeypatch.setattr(tox.config, "is_interactive", lambda: True) + config = newconfig( + """ + [testenv:py] + setenv = A = {tty:X:Y} + """ + ) + assert config.envconfigs["py"].setenv["A"] == "X" + + +def test_interactive(): + tox.config.is_interactive() diff --git a/tests/unit/test_venv.py b/tests/unit/test_venv.py index 54a52511..5b830956 100644 --- a/tests/unit/test_venv.py +++ b/tests/unit/test_venv.py @@ -625,6 +625,8 @@ class TestVenvTest: assert "PIP_RESPECT_VIRTUALENV" not in os.environ assert "PIP_REQUIRE_VIRTUALENV" not in os.environ assert "__PYVENV_LAUNCHER__" not in os.environ + assert os.environ["PIP_USER"] == "0" + assert os.environ["PIP_NO_DEPS"] == "0" def test_pythonpath_usage(self, newmocksession, monkeypatch): monkeypatch.setenv("PYTHONPATH", "/my/awesome/library")
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 4 }
3.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-mock", "pytest-timeout", "pytest-xdist", "pytest-randomly" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 coverage==6.2 distlib==0.3.9 execnet==1.9.0 filelock==3.4.1 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 packaging==21.3 platformdirs==2.4.0 pluggy==0.13.1 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 pytest-mock==3.6.1 pytest-randomly==3.10.3 pytest-timeout==2.1.0 pytest-xdist==3.0.2 six==1.17.0 toml==0.10.2 tomli==1.2.3 -e git+https://github.com/tox-dev/tox.git@3b91d63c9c5a089ba8ae9cbaec20bdf52ecf25cf#egg=tox typing_extensions==4.1.1 virtualenv==20.17.1 zipp==3.6.0
name: tox channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - coverage==6.2 - distlib==0.3.9 - execnet==1.9.0 - filelock==3.4.1 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - packaging==21.3 - platformdirs==2.4.0 - pluggy==0.13.1 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - pytest-mock==3.6.1 - pytest-randomly==3.10.3 - pytest-timeout==2.1.0 - pytest-xdist==3.0.2 - six==1.17.0 - toml==0.10.2 - tomli==1.2.3 - typing-extensions==4.1.1 - virtualenv==20.17.1 - zipp==3.6.0 prefix: /opt/conda/envs/tox
[ "tests/unit/session/test_session.py::test_venv_filter_match_all_none_active", "tests/unit/session/test_session.py::test_venv_filter_match_some_some_active", "tests/unit/test_config.py::test_interactive_available", "tests/unit/test_config.py::test_interactive_na", "tests/unit/test_config.py::test_interactive" ]
[ "tests/unit/test_venv.py::TestCreationConfig::test_python_recreation", "tests/unit/test_venv.py::test_develop_extras", "tests/unit/test_venv.py::test_install_python3", "tests/unit/test_venv.py::test_install_recreate", "tests/unit/test_venv.py::test_install_sdist_extras", "tests/unit/test_venv.py::test_install_deps_pre", "tests/unit/test_venv.py::test_create", "tests/unit/test_venv.py::test_installpkg_indexserver", "tests/unit/test_venv.py::test_install_deps_indexserver", "tests/unit/test_venv.py::test_install_deps_wildcard", "tests/unit/test_venv.py::test_install_command_verbosity[3-1]", "tests/unit/test_venv.py::test_install_command_verbosity[2-0]", "tests/unit/test_venv.py::test_installpkg_no_upgrade", "tests/unit/test_venv.py::test_run_install_command", "tests/unit/test_venv.py::test_env_variables_added_to_pcall", "tests/unit/test_venv.py::test_install_command_verbosity[1-0]", "tests/unit/test_venv.py::test_install_command_verbosity[6-3]", "tests/unit/test_venv.py::test_installpkg_upgrade", "tests/unit/test_venv.py::test_install_command_verbosity[0-0]", "tests/unit/test_venv.py::test_run_custom_install_command", "tests/unit/test_venv.py::test_install_command_verbosity[4-2]", "tests/unit/test_venv.py::test_install_command_verbosity[5-3]", "tests/unit/test_venv.py::TestVenvTest::test_envbindir_path", "tests/unit/test_venv.py::TestVenvTest::test_pythonpath_usage", "tests/unit/test_config.py::TestVenvConfig::test_force_dep_with_url" ]
[ "tests/unit/test_venv.py::TestCreationConfig::test_develop_recreation", "tests/unit/test_venv.py::TestCreationConfig::test_matchingdependencies_latest", "tests/unit/test_venv.py::TestCreationConfig::test_matchingdependencies", "tests/unit/test_venv.py::TestCreationConfig::test_basic", "tests/unit/test_venv.py::TestCreationConfig::test_dep_recreation", "tests/unit/test_venv.py::TestCreationConfig::test_matchingdependencies_file", "tests/unit/test_venv.py::test_commandpath_venv_precedence", "tests/unit/test_venv.py::test_env_variables_added_to_needs_reinstall", "tests/unit/test_venv.py::test_install_command_whitelisted", "tests/unit/test_venv.py::test_test_hashseed_is_in_output", "tests/unit/test_venv.py::test_install_command_not_installed_bash", "tests/unit/test_venv.py::test_test_runtests_action_command_is_in_output", "tests/unit/test_venv.py::test_getsupportedinterpreter", "tests/unit/test_venv.py::test_install_command_not_installed", "tests/unit/test_venv.py::test_getdigest", "tests/unit/test_venv.py::test_create_sitepackages", "tests/unit/test_venv.py::test_install_error", "tests/unit/test_venv.py::test_command_relative_issue36", "tests/unit/test_venv.py::test_tox_testenv_pre_post", "tests/unit/test_venv.py::test_tox_testenv_create", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_interpreter_args", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_empty_interpreter_ws", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_interpreter_ws", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_empty_instance", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_interpreter_arg", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_real", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_non_utf8", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_interpreter_simple", "tests/unit/test_venv.py::test_ignore_outcome_failing_cmd", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_long_example", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_empty_interpreter", "tests/unit/session/test_session.py::test_tox_parallel_build_safe", "tests/unit/session/test_session.py::test_skip_sdist", "tests/unit/session/test_session.py::test__resolve_pkg_missing_directory", "tests/unit/session/test_session.py::test__resolve_pkg_with_alpha_version", "tests/unit/session/test_session.py::test__resolve_pkg_with_invalid_version", "tests/unit/session/test_session.py::test_skip_install_skip_package", "tests/unit/session/test_session.py::test__resolve_pkg_doubledash", "tests/unit/session/test_session.py::test__resolve_pkg_missing_directory_in_distshare", "tests/unit/session/test_session.py::test_minversion", "tests/unit/session/test_session.py::test_venv_filter_empty_all_active", "tests/unit/session/test_session.py::test__resolve_pkg_multiple_valid_versions", "tests/unit/test_config.py::TestIndexServer::test_multiple_homedir_relative_local_indexservers", "tests/unit/test_config.py::TestIndexServer::test_indexserver", "tests/unit/test_config.py::TestIndexServer::test_parse_indexserver", "tests/unit/test_config.py::TestIniParserPrefix::test_value_matches_prefixed_section_substitution", "tests/unit/test_config.py::TestIniParserPrefix::test_fallback_sections", "tests/unit/test_config.py::TestIniParserPrefix::test_value_doesn_match_prefixed_section_substitution", "tests/unit/test_config.py::TestIniParserPrefix::test_basic_section_access", "tests/unit/test_config.py::TestIniParserPrefix::test_other_section_substitution", 
"tests/unit/test_config.py::TestConfigTestEnv::test_factor_use_not_checked", "tests/unit/test_config.py::TestConfigTestEnv::test_envbindir_jython[jython]", "tests/unit/test_config.py::TestConfigTestEnv::test_take_dependencies_from_other_section", "tests/unit/test_config.py::TestConfigTestEnv::test_posargs_backslashed_or_quoted", "tests/unit/test_config.py::TestConfigTestEnv::test_pip_pre", "tests/unit/test_config.py::TestConfigTestEnv::test_sitepackages_switch", "tests/unit/test_config.py::TestConfigTestEnv::test_default_factors", "tests/unit/test_config.py::TestConfigTestEnv::test_pip_pre_cmdline_override", "tests/unit/test_config.py::TestConfigTestEnv::test_passenv_glob_from_global_env", "tests/unit/test_config.py::TestConfigTestEnv::test_passenv_as_space_separated_list[linux2]", "tests/unit/test_config.py::TestConfigTestEnv::test_single_value_from_other_secton", "tests/unit/test_config.py::TestConfigTestEnv::test_factors", "tests/unit/test_config.py::TestConfigTestEnv::test_passenv_as_space_separated_list[win32]", "tests/unit/test_config.py::TestConfigTestEnv::test_factors_groups_touch", "tests/unit/test_config.py::TestConfigTestEnv::test_envconfigs_based_on_factors", "tests/unit/test_config.py::TestConfigTestEnv::test_specific_command_overrides", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_double", "tests/unit/test_config.py::TestConfigTestEnv::test_envbindir_jython[pypy3]", "tests/unit/test_config.py::TestConfigTestEnv::test_envbindir", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_defaults", "tests/unit/test_config.py::TestConfigTestEnv::test_simple", "tests/unit/test_config.py::TestConfigTestEnv::test_install_command_setting", "tests/unit/test_config.py::TestConfigTestEnv::test_passenv_with_factor", "tests/unit/test_config.py::TestConfigTestEnv::test_default_factors_conflict_ignore", "tests/unit/test_config.py::TestConfigTestEnv::test_installpkg_tops_develop", "tests/unit/test_config.py::TestConfigTestEnv::test_install_command_must_contain_packages", "tests/unit/test_config.py::TestConfigTestEnv::test_take_dependencies_from_other_testenv[envlist0-deps0]", "tests/unit/test_config.py::TestConfigTestEnv::test_passenv_as_multiline_list[linux2]", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_error", "tests/unit/test_config.py::TestConfigTestEnv::test_changedir_override", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_noargs_issue240", "tests/unit/test_config.py::TestConfigTestEnv::test_rewrite_simple_posargs", "tests/unit/test_config.py::TestConfigTestEnv::test_passenv_from_global_env", "tests/unit/test_config.py::TestConfigTestEnv::test_ignore_errors", "tests/unit/test_config.py::TestConfigTestEnv::test_envbindir_jython[pypy]", "tests/unit/test_config.py::TestConfigTestEnv::test_ignore_outcome", "tests/unit/test_config.py::TestConfigTestEnv::test_passenv_as_multiline_list[win32]", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_nested_env_defaults", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_positional", "tests/unit/test_config.py::TestConfigTestEnv::test_multilevel_substitution", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_notfound_issue246", "tests/unit/test_config.py::TestConfigTestEnv::test_changedir", "tests/unit/test_config.py::TestConfigTestEnv::test_default_factors_conflict", "tests/unit/test_config.py::TestConfigTestEnv::test_period_in_factor", "tests/unit/test_config.py::TestConfigTestEnv::test_install_command_substitutions", 
"tests/unit/test_config.py::TestConfigTestEnv::test_recursive_substitution_cycle_fails", "tests/unit/test_config.py::TestConfigTestEnv::test_substitution_notfound_issue515", "tests/unit/test_config.py::TestConfigTestEnv::test_whitelist_externals", "tests/unit/test_config.py::TestConfigTestEnv::test_factors_in_boolean", "tests/unit/test_config.py::TestConfigTestEnv::test_defaults", "tests/unit/test_config.py::TestConfigTestEnv::test_default_factors_conflict_lying_name", "tests/unit/test_config.py::TestConfigTestEnv::test_factor_ops", "tests/unit/test_config.py::TestConfigTestEnv::test_take_dependencies_from_other_testenv[envlist1-deps1]", "tests/unit/test_config.py::TestConfigTestEnv::test_commentchars_issue33", "tests/unit/test_config.py::TestConfigTestEnv::test_factors_in_setenv", "tests/unit/test_config.py::TestConfigTestEnv::test_rewrite_posargs", "tests/unit/test_config.py::TestSetenv::test_getdict_lazy", "tests/unit/test_config.py::TestSetenv::test_setenv_recursive_direct", "tests/unit/test_config.py::TestSetenv::test_setenv_overrides", "tests/unit/test_config.py::TestSetenv::test_setenv_cross_section_subst_twice", "tests/unit/test_config.py::TestSetenv::test_setenv_with_envdir_and_basepython", "tests/unit/test_config.py::TestSetenv::test_setenv_default_os_environ", "tests/unit/test_config.py::TestSetenv::test_setenv_ordering_1", "tests/unit/test_config.py::TestSetenv::test_getdict_lazy_update", "tests/unit/test_config.py::TestSetenv::test_setenv_cross_section_subst_issue294", "tests/unit/test_config.py::TestSetenv::test_setenv_uses_other_setenv", "tests/unit/test_config.py::TestSetenv::test_setenv_cross_section_mixed", "tests/unit/test_config.py::TestSetenv::test_setenv_uses_os_environ", "tests/unit/test_config.py::TestGetcontextname::test_blank", "tests/unit/test_config.py::TestGetcontextname::test_hudson_legacy", "tests/unit/test_config.py::TestGetcontextname::test_jenkins", "tests/unit/test_config.py::TestParseEnv::test_parse_recreate", "tests/unit/test_config.py::TestConfigPackage::test_defaults_distshare", "tests/unit/test_config.py::TestConfigPackage::test_project_paths", "tests/unit/test_config.py::TestConfigPackage::test_defaults_changed_dir", "tests/unit/test_config.py::TestConfigPackage::test_defaults", "tests/unit/test_config.py::test_get_homedir", "tests/unit/test_config.py::test_env_spec[-e", "tests/unit/test_config.py::test_config_via_pyproject_legacy", "tests/unit/test_config.py::test_config_bad_config_type_specified", "tests/unit/test_config.py::test_plugin_require", "tests/unit/test_config.py::test_config_bad_pyproject_specified", "tests/unit/test_config.py::test_isolated_build_env_cannot_be_in_envlist", "tests/unit/test_config.py::TestParseconfig::test_explicit_config_path", "tests/unit/test_config.py::TestParseconfig::test_search_parents", "tests/unit/test_config.py::TestConfigConstSubstitutions::test_pathsep_regex", "tests/unit/test_config.py::TestConfigConstSubstitutions::test_replace_pathsep_unix[;]", "tests/unit/test_config.py::TestConfigConstSubstitutions::test_replace_pathsep_unix[:]", "tests/unit/test_config.py::TestIniParser::test_argvlist_windows_escaping", "tests/unit/test_config.py::TestIniParser::test_argvlist_quoting_in_command", "tests/unit/test_config.py::TestIniParser::test_getstring_environment_substitution_with_default", "tests/unit/test_config.py::TestIniParser::test_argvlist", "tests/unit/test_config.py::TestIniParser::test_argvlist_quoted_posargs", "tests/unit/test_config.py::TestIniParser::test_getbool", 
"tests/unit/test_config.py::TestIniParser::test_normal_env_sub_works", "tests/unit/test_config.py::TestIniParser::test_substitution_with_multiple_words", "tests/unit/test_config.py::TestIniParser::test_getstring_fallback_sections", "tests/unit/test_config.py::TestIniParser::test_getdict", "tests/unit/test_config.py::TestIniParser::test_posargs_are_added_escaped_issue310", "tests/unit/test_config.py::TestIniParser::test_value_doesn_match_section_substitution", "tests/unit/test_config.py::TestIniParser::test_positional_arguments_are_only_replaced_when_standing_alone", "tests/unit/test_config.py::TestIniParser::test_argvlist_comment_after_command", "tests/unit/test_config.py::TestIniParser::test_getargv", "tests/unit/test_config.py::TestIniParser::test_getpath", "tests/unit/test_config.py::TestIniParser::test_value_matches_section_substitution", "tests/unit/test_config.py::TestIniParser::test_argvlist_positional_substitution", "tests/unit/test_config.py::TestIniParser::test_getstring_other_section_substitution", "tests/unit/test_config.py::TestIniParser::test_missing_env_sub_populates_missing_subs", "tests/unit/test_config.py::TestIniParser::test_argvlist_command_contains_hash", "tests/unit/test_config.py::TestIniParser::test_argvlist_multiline", "tests/unit/test_config.py::TestIniParser::test_getstring_single", "tests/unit/test_config.py::TestIniParser::test_missing_env_sub_raises_config_error_in_non_testenv", "tests/unit/test_config.py::TestIniParser::test_getlist", "tests/unit/test_config.py::TestIniParser::test_missing_substitution", "tests/unit/test_config.py::TestIniParser::test_argvlist_posargs_with_quotes", "tests/unit/test_config.py::TestIniParser::test_getstring_substitution", "tests/unit/test_config.py::TestGlobalOptions::test_verbosity", "tests/unit/test_config.py::TestGlobalOptions::test_envlist_expansion", "tests/unit/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_cli_no_arg", "tests/unit/test_config.py::TestGlobalOptions::test_notest", "tests/unit/test_config.py::TestGlobalOptions::test_defaultenv_commandline", "tests/unit/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_cli_overrides_false", "tests/unit/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_cli_not_specified", "tests/unit/test_config.py::TestGlobalOptions::test_envlist_cross_product", "tests/unit/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_false", "tests/unit/test_config.py::TestGlobalOptions::test_quiet[args1-1]", "tests/unit/test_config.py::TestGlobalOptions::test_env_selection", "tests/unit/test_config.py::TestGlobalOptions::test_minversion", "tests/unit/test_config.py::TestGlobalOptions::test_defaultenv_partial_override", "tests/unit/test_config.py::TestGlobalOptions::test_quiet[args0-0]", "tests/unit/test_config.py::TestGlobalOptions::test_envlist_multiline", "tests/unit/test_config.py::TestGlobalOptions::test_substitution_jenkins_context", "tests/unit/test_config.py::TestGlobalOptions::test_py_venv", "tests/unit/test_config.py::TestGlobalOptions::test_quiet[args2-2]", "tests/unit/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_cli_overrides_true", "tests/unit/test_config.py::TestGlobalOptions::test_correct_basepython_chosen_from_default_factors", "tests/unit/test_config.py::TestGlobalOptions::test_sdist_specification", "tests/unit/test_config.py::TestGlobalOptions::test_quiet[args3-3]", "tests/unit/test_config.py::TestGlobalOptions::test_substitution_jenkins_default", 
"tests/unit/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_true", "tests/unit/test_config.py::TestVenvConfig::test_process_deps", "tests/unit/test_config.py::TestVenvConfig::test_config_parsing_minimal", "tests/unit/test_config.py::TestVenvConfig::test_envdir_set_manually_with_substitutions", "tests/unit/test_config.py::TestVenvConfig::test_is_same_dep", "tests/unit/test_config.py::TestVenvConfig::test_envdir_set_manually", "tests/unit/test_config.py::TestVenvConfig::test_force_dep_version", "tests/unit/test_config.py::TestVenvConfig::test_config_parsing_multienv", "tests/unit/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_from_other_section_posargs", "tests/unit/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_from_other_section_multiline", "tests/unit/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_from_other_section", "tests/unit/test_config.py::TestIniParserAgainstCommandsKey::test_command_env_substitution_global", "tests/unit/test_config.py::TestIniParserAgainstCommandsKey::test_command_env_substitution", "tests/unit/test_config.py::TestIniParserAgainstCommandsKey::test_regression_issue595", "tests/unit/test_config.py::TestIniParserAgainstCommandsKey::test_command_section_and_posargs_substitution", "tests/unit/test_config.py::TestCommandParser::test_command_parser_for_posargs", "tests/unit/test_config.py::TestCommandParser::test_command_parser_for_word", "tests/unit/test_config.py::TestCommandParser::test_command_parser_with_complex_word_set", "tests/unit/test_config.py::TestCommandParser::test_command_with_runs_of_whitespace", "tests/unit/test_config.py::TestCommandParser::test_commands_with_backslash", "tests/unit/test_config.py::TestCommandParser::test_command_parser_for_multiple_words", "tests/unit/test_config.py::TestCommandParser::test_command_with_split_line_in_subst_arguments", "tests/unit/test_config.py::TestCommandParser::test_command_parsing_for_issue_10", "tests/unit/test_config.py::TestCommandParser::test_command_parser_for_substitution_with_spaces", "tests/unit/test_config.py::TestHashseedOption::test_setenv", "tests/unit/test_config.py::TestHashseedOption::test_passing_integer", "tests/unit/test_config.py::TestHashseedOption::test_one_random_hashseed", "tests/unit/test_config.py::TestHashseedOption::test_setenv_in_one_testenv", "tests/unit/test_config.py::TestHashseedOption::test_noset_with_setenv", "tests/unit/test_config.py::TestHashseedOption::test_passing_no_argument", "tests/unit/test_config.py::TestHashseedOption::test_passing_string", "tests/unit/test_config.py::TestHashseedOption::test_passing_empty_string", "tests/unit/test_config.py::TestHashseedOption::test_noset", "tests/unit/test_config.py::TestHashseedOption::test_default", "tests/unit/test_config.py::TestConfigPlatform::test_config_parse_platform_with_factors[win]", "tests/unit/test_config.py::TestConfigPlatform::test_config_parse_platform_with_factors[osx]", "tests/unit/test_config.py::TestConfigPlatform::test_config_parse_platform_with_factors[lin]", "tests/unit/test_config.py::TestConfigPlatform::test_config_parse_platform_rex", "tests/unit/test_config.py::TestConfigPlatform::test_config_parse_platform", "tests/unit/test_config.py::TestCmdInvocation::test_version_no_plugins", "tests/unit/test_config.py::TestCmdInvocation::test_help", "tests/unit/test_config.py::TestCmdInvocation::test_version_with_normal_plugin", "tests/unit/test_config.py::TestCmdInvocation::test_override_workdir", 
"tests/unit/test_config.py::TestCmdInvocation::test_version_with_fileless_module", "tests/unit/test_config.py::TestCmdInvocation::test_version_simple", "tests/unit/test_config.py::TestCmdInvocation::test_showconfig_with_force_dep_version", "tests/unit/test_config.py::TestCmdInvocation::test_config_specific_ini", "tests/unit/test_config.py::TestCmdInvocation::test_no_tox_ini" ]
[]
MIT License
3,083
[ "doc/config.rst", "changelog/838.feature.rst", "src/tox/venv.py", "src/tox/session.py", "changelog/947.feature.rst", "changelog/824.feature.rst", "src/tox/config.py" ]
[ "doc/config.rst", "changelog/838.feature.rst", "src/tox/venv.py", "src/tox/session.py", "changelog/947.feature.rst", "changelog/824.feature.rst", "src/tox/config.py" ]
conan-io__conan-3560
bb404b34bc894032ac4c56ac03d2cf3e6a7ef3e9
2018-09-17 09:34:00
b02cce4e78d5982e00b66f80a683465b3c679033
diff --git a/conans/client/graph/graph.py b/conans/client/graph/graph.py index dabc5ff3e..16570b31c 100644 --- a/conans/client/graph/graph.py +++ b/conans/client/graph/graph.py @@ -275,7 +275,7 @@ class DepsGraph(object): if new_level: result.append(new_level) return result - + def nodes_to_build(self): ret = [] for level in self.by_levels(): diff --git a/conans/client/graph/graph_builder.py b/conans/client/graph/graph_builder.py index 587dd487b..1704247f7 100644 --- a/conans/client/graph/graph_builder.py +++ b/conans/client/graph/graph_builder.py @@ -56,14 +56,14 @@ class DepsGraphBuilder(object): if alias: req.conan_reference = alias - if not hasattr(conanfile, "_evaluated_requires"): - conanfile._evaluated_requires = conanfile.requires.copy() - elif conanfile.requires != conanfile._evaluated_requires: + if not hasattr(conanfile, "_conan_evaluated_requires"): + conanfile._conan_evaluated_requires = conanfile.requires.copy() + elif conanfile.requires != conanfile._conan_evaluated_requires: raise ConanException("%s: Incompatible requirements obtained in different " "evaluations of 'requirements'\n" " Previous requirements: %s\n" " New requirements: %s" - % (conanref, list(conanfile._evaluated_requires.values()), + % (conanref, list(conanfile._conan_evaluated_requires.values()), list(conanfile.requires.values()))) def _load_deps(self, node, down_reqs, dep_graph, public_deps, down_ref, down_options, @@ -179,10 +179,10 @@ class DepsGraphBuilder(object): # So it is necessary to save the "requires" state and restore it before a second # execution of requirements(). It is a shallow copy, if first iteration is # RequireResolve'd or overridden, the inner requirements are modified - if not hasattr(conanfile, "_original_requires"): - conanfile._original_requires = conanfile.requires.copy() + if not hasattr(conanfile, "_conan_original_requires"): + conanfile._conan_original_requires = conanfile.requires.copy() else: - conanfile.requires = conanfile._original_requires.copy() + conanfile.requires = conanfile._conan_original_requires.copy() with conanfile_exception_formatter(str(conanfile), "requirements"): conanfile.requirements() diff --git a/conans/client/graph/graph_manager.py b/conans/client/graph/graph_manager.py index 32bdb94a9..415e83107 100644 --- a/conans/client/graph/graph_manager.py +++ b/conans/client/graph/graph_manager.py @@ -100,8 +100,8 @@ class GraphManager(object): require.conan_reference = require.range_reference = reference else: conanfile.requires(str(reference)) - conanfile._user = reference.user - conanfile._channel = reference.channel + conanfile._conan_user = reference.user + conanfile._conan_channel = reference.channel # Computing the full dependency graph cache_settings = self._client_cache.settings.copy() diff --git a/conans/client/installer.py b/conans/client/installer.py index b0fe070eb..119df2cdd 100644 --- a/conans/client/installer.py +++ b/conans/client/installer.py @@ -407,7 +407,7 @@ class ConanInstaller(object): # Update the info but filtering the package values that not apply to the subtree # of this current node and its dependencies. 
subtree_libnames = [node.conan_ref.name for node in node_order] - for package_name, env_vars in conan_file._env_values.data.items(): + for package_name, env_vars in conan_file._conan_env_values.data.items(): for name, value in env_vars.items(): if not package_name or package_name in subtree_libnames or \ package_name == conan_file.name: diff --git a/conans/client/loader.py b/conans/client/loader.py index de3db350e..05d07feec 100644 --- a/conans/client/loader.py +++ b/conans/client/loader.py @@ -159,7 +159,7 @@ class ConanFileLoader(object): # imports method conanfile.imports = parser.imports_method(conanfile) - conanfile._env_values.update(processed_profile._env_values) + conanfile._conan_env_values.update(processed_profile._env_values) return conanfile def load_virtual(self, references, processed_profile, scope_options=True, diff --git a/conans/client/tools/win.py b/conans/client/tools/win.py index d864f8ea8..fb8790e99 100644 --- a/conans/client/tools/win.py +++ b/conans/client/tools/win.py @@ -489,4 +489,4 @@ def run_in_windows_bash(conanfile, bashcmd, cwd=None, subsystem=None, msys_mingw wincmd = '%s --login -c %s' % (bash_path, escape_windows_cmd(to_run)) conanfile.output.info('run_in_windows_bash: %s' % wincmd) # https://github.com/conan-io/conan/issues/2839 (subprocess=True) - return conanfile._runner(wincmd, output=conanfile.output, subprocess=True) + return conanfile._conan_runner(wincmd, output=conanfile.output, subprocess=True) diff --git a/conans/model/conan_file.py b/conans/model/conan_file.py index f025e5367..798790c00 100644 --- a/conans/model/conan_file.py +++ b/conans/model/conan_file.py @@ -109,9 +109,9 @@ class ConanFile(object): # an output stream (writeln, info, warn error) self.output = output # something that can run commands, as os.sytem - self._runner = runner - self._user = user - self._channel = channel + self._conan_runner = runner + self._conan_user = user + self._conan_channel = channel def initialize(self, settings, env, local=None): if isinstance(self.generators, str): @@ -147,15 +147,15 @@ class ConanFile(object): self.deps_user_info = DepsUserInfo() # user specified env variables - self._env_values = env.copy() # user specified -e + self._conan_env_values = env.copy() # user specified -e @property def env(self): - """Apply the self.deps_env_info into a copy of self._env_values (will prioritize the - self._env_values, user specified from profiles or -e first, then inherited)""" + """Apply the self.deps_env_info into a copy of self._conan_env_values (will prioritize the + self._conan_env_values, user specified from profiles or -e first, then inherited)""" # Cannot be lazy cached, because it's called in configure node, and we still don't have # the deps_env_info objects available - tmp_env_values = self._env_values.copy() + tmp_env_values = self._conan_env_values.copy() tmp_env_values.update(self.deps_env_info) ret, multiple = tmp_env_values.env_dicts(self.name) @@ -164,21 +164,21 @@ class ConanFile(object): @property def channel(self): - if not self._channel: - self._channel = os.getenv("CONAN_CHANNEL") - if not self._channel: + if not self._conan_channel: + self._conan_channel = os.getenv("CONAN_CHANNEL") + if not self._conan_channel: raise ConanException("CONAN_CHANNEL environment variable not defined, " "but self.channel is used in conanfile") - return self._channel + return self._conan_channel @property def user(self): - if not self._user: - self._user = os.getenv("CONAN_USERNAME") - if not self._user: + if not self._conan_user: + self._conan_user = 
os.getenv("CONAN_USERNAME") + if not self._conan_user: raise ConanException("CONAN_USERNAME environment variable not defined, " "but self.user is used in conanfile") - return self._user + return self._conan_user def collect_libs(self, folder="lib"): self.output.warn("Use 'self.collect_libs' is deprecated, " @@ -239,14 +239,15 @@ class ConanFile(object): ignore_errors=False, run_environment=False): def _run(): if not win_bash: - return self._runner(command, output, os.path.abspath(RUN_LOG_NAME), cwd) + return self._conan_runner(command, output, os.path.abspath(RUN_LOG_NAME), cwd) # FIXME: run in windows bash is not using output return tools.run_in_windows_bash(self, bashcmd=command, cwd=cwd, subsystem=subsystem, msys_mingw=msys_mingw) if run_environment: with tools.run_environment(self): if os_info.is_macos: - command = 'DYLD_LIBRARY_PATH="%s" %s' % (os.environ.get('DYLD_LIBRARY_PATH', ''), command) + command = 'DYLD_LIBRARY_PATH="%s" %s' % (os.environ.get('DYLD_LIBRARY_PATH', ''), + command) retcode = _run() else: retcode = _run() @@ -268,7 +269,7 @@ class ConanFile(object): raise ConanException("You need to create a method 'test' in your test/conanfile.py") def __repr__(self): - if self.name and self.version and self._channel and self._user: + if self.name and self.version and self._conan_channel and self._conan_user: return "%s/%s@%s/%s" % (self.name, self.version, self.user, self.channel) elif self.name and self.version: return "%s/%s@PROJECT" % (self.name, self.version)
Define policy for conan vs user ConanFile attributes and methods

To reduce possible future collisions when things are added to conan. E.g.:

- Use "_myattribute" for user defined attributes; Conan reserves all public members.
- Use "__myattribute" for user defined attributes. (What happens for inheritance via python_requires?)
- Etc.
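A minimal sketch of how the chosen policy can be verified, mirroring the assertion in the test patch further below; it assumes a conan checkout where Conan's own private members on `ConanFile` already carry the reserved `_conan` prefix:

```python
# Requires conan installed from the patched source tree.
from conans.model.conan_file import ConanFile

for member in vars(ConanFile):
    # single-underscore members belong to Conan and must use the _conan prefix;
    # dunder members and plain public names are left alone
    if member.startswith("_") and not member.startswith("__"):
        assert member.startswith("_conan"), member
```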
conan-io/conan
diff --git a/conans/test/model/conanfile_test.py b/conans/test/model/conanfile_test.py new file mode 100644 index 000000000..e515cf5a7 --- /dev/null +++ b/conans/test/model/conanfile_test.py @@ -0,0 +1,41 @@ +import unittest +from conans.model.conan_file import ConanFile +from conans.model.env_info import EnvValues +from conans.model.settings import Settings +from conans.test.utils.tools import TestClient + + +class ConanFileTest(unittest.TestCase): + def test_conanfile_naming(self): + for member in vars(ConanFile): + if member.startswith('_') and not member.startswith("__"): + self.assertTrue(member.startswith('_conan')) + + conanfile = ConanFile(None, None) + conanfile.initialize(Settings(), EnvValues()) + + for member in vars(conanfile): + if member.startswith('_') and not member.startswith("__"): + self.assertTrue(member.startswith('_conan')) + + def test_conanfile_naming_complete(self): + client = TestClient() + conanfile = """from conans import ConanFile +class Pkg(ConanFile): + pass + def package_info(self): + for member in Pkg.__dict__: + if member.startswith('_') and not member.startswith("__"): + assert(member.startswith('_conan')) + for member in vars(self): + if member.startswith('_') and not member.startswith("__"): + assert(member.startswith('_conan')) +""" + client.save({"conanfile.py": conanfile}) + client.run("create . PkgA/0.1@user/testing") + client.save({"conanfile.py": conanfile.replace("pass", + "requires = 'PkgA/0.1@user/testing'")}) + client.run("create . PkgB/0.1@user/testing") + client.save({"conanfile.py": conanfile.replace("pass", + "requires = 'PkgB/0.1@user/testing'")}) + client.run("create . PkgC/0.1@user/testing") diff --git a/conans/test/util/tools_test.py b/conans/test/util/tools_test.py index daf39682f..7b8f9e55d 100644 --- a/conans/test/util/tools_test.py +++ b/conans/test/util/tools_test.py @@ -864,9 +864,8 @@ ProgramFiles(x86)=C:\Program Files (x86) self.assertEqual(vcvars["PROCESSOR_REVISION"], "9e09") self.assertEqual(vcvars["ProgramFiles(x86)"], "C:\Program Files (x86)") + @unittest.skipUnless(platform.system() == "Windows", "Requires Windows") def run_in_bash_test(self): - if platform.system() != "Windows": - return class MockConanfile(object): def __init__(self): @@ -878,22 +877,22 @@ ProgramFiles(x86)=C:\Program Files (x86) def __call__(self, command, output, log_filepath=None, cwd=None, subprocess=False): # @UnusedVariable self.command = command - self._runner = MyRun() + self._conan_runner = MyRun() conanfile = MockConanfile() with patch.object(OSInfo, "bash_path", return_value='bash'): tools.run_in_windows_bash(conanfile, "a_command.bat", subsystem="cygwin") - self.assertIn("bash", conanfile._runner.command) - self.assertIn("--login -c", conanfile._runner.command) - self.assertIn("^&^& a_command.bat ^", conanfile._runner.command) + self.assertIn("bash", conanfile._conan_runner.command) + self.assertIn("--login -c", conanfile._conan_runner.command) + self.assertIn("^&^& a_command.bat ^", conanfile._conan_runner.command) with tools.environment_append({"CONAN_BASH_PATH": "path\\to\\mybash.exe"}): tools.run_in_windows_bash(conanfile, "a_command.bat", subsystem="cygwin") - self.assertIn('path\\to\\mybash.exe --login -c', conanfile._runner.command) + self.assertIn('path\\to\\mybash.exe --login -c', conanfile._conan_runner.command) with tools.environment_append({"CONAN_BASH_PATH": "path with spaces\\to\\mybash.exe"}): tools.run_in_windows_bash(conanfile, "a_command.bat", subsystem="cygwin") - self.assertIn('"path with spaces\\to\\mybash.exe" --login 
-c', conanfile._runner.command) + self.assertIn('"path with spaces\\to\\mybash.exe" --login -c', conanfile._conan_runner.command) # try to append more env vars conanfile = MockConanfile() @@ -901,7 +900,7 @@ ProgramFiles(x86)=C:\Program Files (x86) tools.run_in_windows_bash(conanfile, "a_command.bat", subsystem="cygwin", env={"PATH": "/other/path", "MYVAR": "34"}) self.assertIn('^&^& PATH=\\^"/cygdrive/other/path:/cygdrive/path/to/somewhere:$PATH\\^" ' - '^&^& MYVAR=34 ^&^& a_command.bat ^', conanfile._runner.command) + '^&^& MYVAR=34 ^&^& a_command.bat ^', conanfile._conan_runner.command) def download_retries_test(self): http_server = StoppableThreadBottle()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 7 }
1.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist" ], "pre_install": [ "apt-get update", "apt-get install -y gcc g++-multilib wget unzip" ], "python": "3.6", "reqs_path": [ "conans/requirements.txt", "conans/requirements_server.txt", "conans/requirements_dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
astroid==2.11.7 attrs==22.2.0 beautifulsoup4==4.12.3 bottle==0.12.25 certifi==2021.5.30 charset-normalizer==2.0.12 codecov==2.1.13 colorama==0.3.9 -e git+https://github.com/conan-io/conan.git@bb404b34bc894032ac4c56ac03d2cf3e6a7ef3e9#egg=conan coverage==4.2 deprecation==2.0.7 dill==0.3.4 distro==1.1.0 execnet==1.9.0 fasteners==0.19 future==0.16.0 idna==3.10 importlib-metadata==4.8.3 iniconfig==1.1.1 isort==5.10.1 lazy-object-proxy==1.7.1 mccabe==0.7.0 mock==1.3.0 node-semver==0.2.0 nose==1.3.7 packaging==21.3 parameterized==0.8.1 patch==1.16 pbr==6.1.1 platformdirs==2.4.0 pluggy==1.0.0 pluginbase==0.7 py==1.11.0 Pygments==2.14.0 PyJWT==1.7.1 pylint==2.13.9 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 pytest-xdist==3.0.2 PyYAML==3.13 requests==2.27.1 six==1.17.0 soupsieve==2.3.2.post1 tomli==1.2.3 typed-ast==1.5.5 typing_extensions==4.1.1 urllib3==1.26.20 waitress==2.0.0 WebOb==1.8.9 WebTest==2.0.35 wrapt==1.16.0 zipp==3.6.0
name: conan channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - astroid==2.11.7 - attrs==22.2.0 - beautifulsoup4==4.12.3 - bottle==0.12.25 - charset-normalizer==2.0.12 - codecov==2.1.13 - colorama==0.3.9 - coverage==4.2 - deprecation==2.0.7 - dill==0.3.4 - distro==1.1.0 - execnet==1.9.0 - fasteners==0.19 - future==0.16.0 - idna==3.10 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - isort==5.10.1 - lazy-object-proxy==1.7.1 - mccabe==0.7.0 - mock==1.3.0 - node-semver==0.2.0 - nose==1.3.7 - packaging==21.3 - parameterized==0.8.1 - patch==1.16 - pbr==6.1.1 - platformdirs==2.4.0 - pluggy==1.0.0 - pluginbase==0.7 - py==1.11.0 - pygments==2.14.0 - pyjwt==1.7.1 - pylint==2.13.9 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - pytest-xdist==3.0.2 - pyyaml==3.13 - requests==2.27.1 - six==1.17.0 - soupsieve==2.3.2.post1 - tomli==1.2.3 - typed-ast==1.5.5 - typing-extensions==4.1.1 - urllib3==1.26.20 - waitress==2.0.0 - webob==1.8.9 - webtest==2.0.35 - wrapt==1.16.0 - zipp==3.6.0 prefix: /opt/conda/envs/conan
[ "conans/test/model/conanfile_test.py::ConanFileTest::test_conanfile_naming" ]
[ "conans/test/model/conanfile_test.py::ConanFileTest::test_conanfile_naming_complete", "conans/test/util/tools_test.py::ToolsTest::test_get_env_in_conanfile", "conans/test/util/tools_test.py::ToolsTest::test_global_tools_overrided", "conans/test/util/tools_test.py::GitToolTest::test_clone_submodule_git" ]
[ "conans/test/util/tools_test.py::ReplaceInFileTest::test_replace_in_file", "conans/test/util/tools_test.py::ToolsTest::test_environment_nested", "conans/test/util/tools_test.py::GitToolTest::test_clone_existing_folder_git", "conans/test/util/tools_test.py::GitToolTest::test_clone_existing_folder_without_branch", "conans/test/util/tools_test.py::GitToolTest::test_clone_git", "conans/test/util/tools_test.py::GitToolTest::test_credentials", "conans/test/util/tools_test.py::GitToolTest::test_repo_root", "conans/test/util/tools_test.py::GitToolTest::test_verify_ssl" ]
[]
MIT License
3,085
[ "conans/client/tools/win.py", "conans/client/graph/graph_manager.py", "conans/client/graph/graph.py", "conans/client/loader.py", "conans/client/installer.py", "conans/client/graph/graph_builder.py", "conans/model/conan_file.py" ]
[ "conans/client/tools/win.py", "conans/client/graph/graph_manager.py", "conans/client/graph/graph.py", "conans/client/loader.py", "conans/client/installer.py", "conans/client/graph/graph_builder.py", "conans/model/conan_file.py" ]
wright-group__WrightTools-746
4cf127e9d431265dad6f42c48b5be05bc36e3cb7
2018-09-17 15:10:30
6e0c301b1f703527709a2669bbde785255254239
diff --git a/WrightTools/kit/_array.py b/WrightTools/kit/_array.py index dec8f19..e9ae20f 100644 --- a/WrightTools/kit/_array.py +++ b/WrightTools/kit/_array.py @@ -243,18 +243,52 @@ def share_nans(*arrs) -> tuple: return tuple([a + nans for a in arrs]) -def smooth_1D(arr, n=10) -> np.ndarray: - """Smooth 1D data by 'running average'. +def smooth_1D(arr, n=10, smooth_type="flat") -> np.ndarray: + """Smooth 1D data using a window function. + + Edge effects will be present. Parameters ---------- - n : int - number of points to average + arr : array_like + Input array, 1D. + n : int (optional) + Window length. + smooth_type : {'flat', 'hanning', 'hamming', 'bartlett', 'blackman'} (optional) + Type of window function to convolve data with. + 'flat' window will produce a moving average smoothing. + + Returns + ------- + array_like + Smoothed 1D array. """ - for i in range(n, len(arr) - n): - window = arr[i - n : i + n].copy() - arr[i] = window.mean() - return arr + + # check array input + if arr.ndim != 1: + raise wt_exceptions.DimensionalityError(1, arr.ndim) + if arr.size < n: + message = "Input array size must be larger than window size." + raise wt_exceptions.ValueError(message) + if n < 3: + return arr + # construct window array + if smooth_type == "flat": + w = np.ones(n, dtype=arr.dtype) + elif smooth_type == "hanning": + w = np.hanning(n) + elif smooth_type == "hamming": + w = np.hamming(n) + elif smooth_type == "bartlett": + w = np.bartlett(n) + elif smooth_type == "blackman": + w = np.blackman(n) + else: + message = "Given smooth_type, {0}, not available.".format(str(smooth_type)) + raise wt_exceptions.ValueError(message) + # convolve reflected array with window function + out = np.convolve(w / w.sum(), arr, mode="same") + return out def svd(a, i=None) -> tuple:
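A minimal NumPy sketch of the window-convolution smoothing that the patch above introduces; the window length and noise level are arbitrary illustration values:

```python
import numpy as np

x = np.linspace(0, 10, 1000)
noisy = np.sin(x) + 0.3 * (np.random.rand(1000) - 0.5)

n = 51
w = np.hanning(n)                       # window function ("hanning" case)
smoothed = np.convolve(w / w.sum(), noisy, mode="same")
```

Dividing the window by its sum keeps the smoothed signal on the same scale as the input; edge effects remain near the ends of the array, as the patch's docstring notes.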
test kit.smooth_1D

Write tests for `wt.kit.smooth_1D`.
wright-group/WrightTools
diff --git a/tests/kit/smooth_1D.py b/tests/kit/smooth_1D.py new file mode 100644 index 0000000..5e4e9b4 --- /dev/null +++ b/tests/kit/smooth_1D.py @@ -0,0 +1,35 @@ +"""Test kit.smooth_1D.""" + + +# --- import -------------------------------------------------------------------------------------- + + +import numpy as np + +import WrightTools as wt + + +# --- test ---------------------------------------------------------------------------------------- + + +def test_basic_smoothing_functionality(): + # create arrays + x = np.linspace(0, 10, 1000) + y = np.sin(x) + np.random.seed(seed=12) + r = np.random.rand(1000) - .5 + yr = y + r + # iterate through window types + windows = ["flat", "hanning", "hamming", "bartlett", "blackman"] + for w in windows: + out = wt.kit.smooth_1D(yr, n=101, smooth_type=w) + check_arr = out - y + check_arr = check_arr[50:-50] # get rid of edge effects + assert np.allclose(check_arr, 0, rtol=.2, atol=.2) + + +# --- run ----------------------------------------------------------------------------------------- + + +if __name__ == "__main__": + test_basic_smoothing_functionality()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 1 }
3.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y libfreetype6-dev libopenblas-dev" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
black==25.1.0 cfgv==3.4.0 click==8.1.8 contourpy==1.3.0 coverage==7.8.0 cycler==0.12.1 distlib==0.3.9 exceptiongroup==1.2.2 filelock==3.18.0 fonttools==4.56.0 h5py==3.13.0 identify==2.6.9 imageio==2.37.0 importlib_resources==6.5.2 iniconfig==2.1.0 kiwisolver==1.4.7 lazy_loader==0.4 matplotlib==3.9.4 mypy-extensions==1.0.0 networkx==3.2.1 nodeenv==1.9.1 numexpr==2.10.2 numpy==2.0.2 packaging==24.2 pathspec==0.12.1 pillow==11.1.0 platformdirs==4.3.7 pluggy==1.5.0 pre_commit==4.2.0 pydocstyle==6.3.0 pyparsing==3.2.3 pytest==8.3.5 pytest-cov==6.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==6.0.2 scikit-image==0.24.0 scipy==1.13.1 six==1.17.0 snowballstemmer==2.2.0 swebench_matterhorn @ file:///swebench_matterhorn tidy_headers==1.0.4 tifffile==2024.8.30 tomli==2.2.1 typing_extensions==4.13.0 virtualenv==20.29.3 -e git+https://github.com/wright-group/WrightTools.git@4cf127e9d431265dad6f42c48b5be05bc36e3cb7#egg=WrightTools zipp==3.21.0
name: WrightTools channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - black==25.1.0 - cfgv==3.4.0 - click==8.1.8 - contourpy==1.3.0 - coverage==7.8.0 - cycler==0.12.1 - distlib==0.3.9 - exceptiongroup==1.2.2 - filelock==3.18.0 - fonttools==4.56.0 - h5py==3.13.0 - identify==2.6.9 - imageio==2.37.0 - importlib-resources==6.5.2 - iniconfig==2.1.0 - kiwisolver==1.4.7 - lazy-loader==0.4 - matplotlib==3.9.4 - mypy-extensions==1.0.0 - networkx==3.2.1 - nodeenv==1.9.1 - numexpr==2.10.2 - numpy==2.0.2 - packaging==24.2 - pathspec==0.12.1 - pillow==11.1.0 - platformdirs==4.3.7 - pluggy==1.5.0 - pre-commit==4.2.0 - pydocstyle==6.3.0 - pyparsing==3.2.3 - pytest==8.3.5 - pytest-cov==6.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==6.0.2 - scikit-image==0.24.0 - scipy==1.13.1 - six==1.17.0 - snowballstemmer==2.2.0 - swebench-matterhorn==0.0.0 - tidy-headers==1.0.4 - tifffile==2024.8.30 - tomli==2.2.1 - typing-extensions==4.13.0 - virtualenv==20.29.3 - zipp==3.21.0 prefix: /opt/conda/envs/WrightTools
[ "tests/kit/smooth_1D.py::test_basic_smoothing_functionality" ]
[]
[]
[]
MIT License
3,086
[ "WrightTools/kit/_array.py" ]
[ "WrightTools/kit/_array.py" ]
pypa__twine-397
c977b44cf87e066125e9de496429f8b3f5c90bf4
2018-09-17 19:33:24
c977b44cf87e066125e9de496429f8b3f5c90bf4
theacodes: @sigmavirus24 you were more involved in the discussion, do you wanna take a look?
diff --git a/setup.cfg b/setup.cfg index 66d2ceb..925c448 100644 --- a/setup.cfg +++ b/setup.cfg @@ -16,6 +16,5 @@ requires-dist = requests-toolbelt >= 0.8.0 pkginfo >= 1.4.2 setuptools >= 0.7.0 - argparse; python_version == '2.6' pyblake2; extra == 'with-blake2' and python_version < '3.6' keyring; extra == 'keyring' diff --git a/setup.py b/setup.py index d90b6b6..b96202a 100644 --- a/setup.py +++ b/setup.py @@ -18,21 +18,6 @@ import sys import twine -install_requires = [ - "tqdm >= 4.14", - "pkginfo >= 1.4.2", - "readme_renderer >= 21.0", - "requests >= 2.5.0, != 2.15, != 2.16", - "requests-toolbelt >= 0.8.0", - "setuptools >= 0.7.0", -] - -if sys.version_info[:2] < (2, 7): - install_requires += [ - "argparse", - ] - - blake2_requires = [] if sys.version_info[:2] < (3, 6): @@ -91,7 +76,14 @@ setup( ], }, - install_requires=install_requires, + install_requires=[ + "pkginfo >= 1.4.2", + "readme_renderer >= 21.0", + "requests >= 2.5.0, != 2.15, != 2.16", + "requests-toolbelt >= 0.8.0", + "setuptools >= 0.7.0", + "tqdm >= 4.14", + ], extras_require={ 'with-blake2': blake2_requires, 'keyring': [ diff --git a/twine/__main__.py b/twine/__main__.py index 9eed3aa..fbf276d 100644 --- a/twine/__main__.py +++ b/twine/__main__.py @@ -17,13 +17,16 @@ from __future__ import unicode_literals import sys +import requests + +from twine import exceptions from twine.cli import dispatch def main(): try: return dispatch(sys.argv[1:]) - except Exception as exc: + except (exceptions.TwineException, requests.exceptions.HTTPError) as exc: return '{0}: {1}'.format( exc.__class__.__name__, exc.args[0], diff --git a/twine/commands/__init__.py b/twine/commands/__init__.py index 55bc1a5..1bb61ae 100644 --- a/twine/commands/__init__.py +++ b/twine/commands/__init__.py @@ -17,6 +17,8 @@ from __future__ import unicode_literals import glob import os.path +from twine import exceptions + __all__ = [] @@ -40,7 +42,7 @@ def _find_dists(dists): files = glob.glob(filename) # If nothing matches, files is [] if not files: - raise ValueError( + raise exceptions.InvalidDistribution( "Cannot find file (or expand pattern): '%s'" % filename ) # Otherwise, files will be filenames that exist diff --git a/twine/commands/register.py b/twine/commands/register.py index 30400f8..11e551a 100644 --- a/twine/commands/register.py +++ b/twine/commands/register.py @@ -16,8 +16,8 @@ from __future__ import absolute_import, unicode_literals, print_function import argparse import os.path -from twine import exceptions as exc from twine.package import PackageFile +from twine import exceptions from twine import settings @@ -28,7 +28,7 @@ def register(register_settings, package): repository = register_settings.create_repository() if not os.path.exists(package): - raise exc.PackageNotFound( + raise exceptions.PackageNotFound( '"{0}" does not exist on the file system.'.format(package) ) @@ -38,7 +38,7 @@ def register(register_settings, package): repository.close() if resp.is_redirect: - raise exc.RedirectDetected( + raise exceptions.RedirectDetected( ('"{0}" attempted to redirect to "{1}" during registration.' 
' Aborting...').format(repository_url, resp.headers["location"])) diff --git a/twine/commands/upload.py b/twine/commands/upload.py index 53fba25..f4cc59f 100644 --- a/twine/commands/upload.py +++ b/twine/commands/upload.py @@ -17,9 +17,9 @@ from __future__ import unicode_literals import argparse import os.path -import twine.exceptions as exc from twine.commands import _find_dists from twine.package import PackageFile +from twine import exceptions from twine import settings from twine import utils @@ -85,7 +85,7 @@ def upload(upload_settings, dists): # by PyPI should never happen in reality. This should catch malicious # redirects as well. if resp.is_redirect: - raise exc.RedirectDetected( + raise exceptions.RedirectDetected( ('"{0}" attempted to redirect to "{1}" during upload.' ' Aborting...').format(repository_url, resp.headers["location"])) diff --git a/twine/exceptions.py b/twine/exceptions.py index d227db6..9e35ea0 100644 --- a/twine/exceptions.py +++ b/twine/exceptions.py @@ -73,3 +73,15 @@ class InvalidSigningConfiguration(TwineException): """Both the sign and identity parameters must be present.""" pass + + +class InvalidConfiguration(TwineException): + """Raised when configuration is invalid.""" + + pass + + +class InvalidDistribution(TwineException): + """Raised when a distribution is invalid.""" + + pass diff --git a/twine/package.py b/twine/package.py index 4f07361..c9e5197 100644 --- a/twine/package.py +++ b/twine/package.py @@ -31,6 +31,7 @@ except ImportError: from twine.wheel import Wheel from twine.wininst import WinInst +from twine import exceptions DIST_TYPES = { "bdist_wheel": Wheel, @@ -78,7 +79,7 @@ class PackageFile(object): meta = DIST_TYPES[dtype](filename) break else: - raise ValueError( + raise exceptions.InvalidDistribution( "Unknown distribution format: '%s'" % os.path.basename(filename) ) @@ -151,7 +152,9 @@ class PackageFile(object): def add_gpg_signature(self, signature_filepath, signature_filename): if self.gpg_signature is not None: - raise ValueError('GPG Signature can only be added once') + raise exceptions.InvalidDistribution( + 'GPG Signature can only be added once' + ) with open(signature_filepath, "rb") as gpg: self.gpg_signature = (signature_filename, gpg.read()) diff --git a/twine/utils.py b/twine/utils.py index 1f36af2..83a1867 100644 --- a/twine/utils.py +++ b/twine/utils.py @@ -35,7 +35,7 @@ try: except ImportError: from urllib.parse import urlparse, urlunparse -import twine.exceptions +from twine import exceptions # Shim for raw_input in python3 if sys.version_info > (3,): @@ -109,7 +109,7 @@ def get_repository_from_config(config_file, repository, repository_url=None): "password": None, } if repository_url and "://" not in repository_url: - raise twine.exceptions.UnreachableRepositoryURLDetected( + raise exceptions.UnreachableRepositoryURLDetected( "Repository URL {0} has no protocol. Please add " "'https://'. 
\n".format(repository_url)) try: @@ -125,7 +125,7 @@ def get_repository_from_config(config_file, repository, repository_url=None): repo=repository, cfg=config_file ) - raise KeyError(msg) + raise exceptions.InvalidConfiguration(msg) _HOSTNAMES = set(["pypi.python.org", "testpypi.python.org", "upload.pypi.org", diff --git a/twine/wheel.py b/twine/wheel.py index cf6155d..8a52d6e 100644 --- a/twine/wheel.py +++ b/twine/wheel.py @@ -26,6 +26,8 @@ except ImportError: from pkginfo import distribution from pkginfo.distribution import Distribution +from twine import exceptions + # Monkeypatch Metadata 2.0 support distribution.HEADER_ATTRS_2_0 = distribution.HEADER_ATTRS_1_2 distribution.HEADER_ATTRS.update({"2.0": distribution.HEADER_ATTRS_2_0}) @@ -69,7 +71,9 @@ class Wheel(Distribution): def read(self): fqn = os.path.abspath(os.path.normpath(self.filename)) if not os.path.exists(fqn): - raise ValueError('No such file: %s' % fqn) + raise exceptions.InvalidDistribution( + 'No such file: %s' % fqn + ) if fqn.endswith('.whl'): archive = zipfile.ZipFile(fqn) @@ -78,7 +82,9 @@ class Wheel(Distribution): def read_file(name): return archive.read(name) else: - raise ValueError('Not a known archive format: %s' % fqn) + raise exceptions.InvalidDistribution( + 'Not a known archive format: %s' % fqn + ) try: for path in self.find_candidate_metadata_files(names): @@ -89,7 +95,9 @@ class Wheel(Distribution): finally: archive.close() - raise ValueError('No METADATA in archive: %s' % fqn) + raise exceptions.InvalidDistribution( + 'No METADATA in archive: %s' % fqn + ) def parse(self, data): super(Wheel, self).parse(data) diff --git a/twine/wininst.py b/twine/wininst.py index 6951070..5e8932a 100644 --- a/twine/wininst.py +++ b/twine/wininst.py @@ -7,6 +7,8 @@ import zipfile from pkginfo.distribution import Distribution +from twine import exceptions + wininst_file_re = re.compile(r".*py(?P<pyver>\d+\.\d+)\.exe$") @@ -28,7 +30,9 @@ class WinInst(Distribution): def read(self): fqn = os.path.abspath(os.path.normpath(self.filename)) if not os.path.exists(fqn): - raise ValueError('No such file: %s' % fqn) + raise exceptions.InvalidDistribution( + 'No such file: %s' % fqn + ) if fqn.endswith('.exe'): archive = zipfile.ZipFile(fqn) @@ -37,7 +41,9 @@ class WinInst(Distribution): def read_file(name): return archive.read(name) else: - raise ValueError('Not a known archive format: %s' % fqn) + raise exceptions.InvalidDistribution( + 'Not a known archive format: %s' % fqn + ) try: tuples = [x.split('/') for x in names @@ -51,4 +57,6 @@ class WinInst(Distribution): finally: archive.close() - raise ValueError('No PKG-INFO/.egg-info in archive: %s' % fqn) + raise exceptions.InvalidDistribution( + 'No PKG-INFO/.egg-info in archive: %s' % fqn + )
Consider reverting #248

While I understand the motivation for the changes introduced in #248 ("Rework handling and display of exceptions"), I think they should probably be reverted, and a different approach to solving #228 taken instead (for example, handling that specific edge case rather than applying the suppression to all exceptions). Suppressing the entire stack trace causes two problems:

#### 1. Difficult to debug user problems

For example, in #383, as a maintainer I have no idea where the exception is being raised from because the stack trace has been suppressed. The user has even less of an idea where to look to solve the problem themselves.

#### 2. Difficult to work on `twine`

As a contributor, I need to uncomment these changes in order to do development on twine, since I may occasionally make a mistake and raise an exception from somewhere. Not having the stack trace available slows me down as a contributor.

I'm happy to do the work as proposed, but wanted to raise this for discussion first before forging ahead. (cc @dstufft @sigmavirus24 @brainwane)
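To make the trade-off concrete, here is a small, self-contained sketch (not code from the twine repository; the exception class and helper functions are illustrative stand-ins) of the behaviour the patch above moves towards: only anticipated error types are condensed to a one-line message at the CLI entry point, while unexpected exceptions keep their full traceback.

```python
import sys


class TwineException(Exception):
    """Stand-in for twine's base class of user-explainable errors."""


def main(dispatch, argv):
    try:
        return dispatch(argv)
    except TwineException as exc:
        # Known failure mode: short, user-facing summary, no traceback.
        return '{0}: {1}'.format(exc.__class__.__name__, exc.args[0])
    # Anything else (e.g. a KeyError caused by a bug) propagates unchanged,
    # so the full stack trace reaches contributors and bug reports.


def fails_expectedly(argv):
    raise TwineException('invalid configuration')


def fails_unexpectedly(argv):
    raise KeyError('oops')


if __name__ == '__main__':
    # Prints "TwineException: invalid configuration" and exits cleanly.
    print(main(fails_expectedly, sys.argv[1:]))
    # Raises KeyError with a full traceback, which is what a contributor
    # debugging twine wants to see.
    main(fails_unexpectedly, sys.argv[1:])
```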
pypa/twine
diff --git a/tests/test_commands.py b/tests/test_commands.py index 44252d4..af556af 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -3,6 +3,7 @@ import os import pytest from twine.commands import _find_dists, _group_wheel_files_first +from twine import exceptions def test_ensure_wheel_files_uploaded_first(): @@ -34,7 +35,7 @@ def test_find_dists_expands_globs(): def test_find_dists_errors_on_invalid_globs(): - with pytest.raises(ValueError): + with pytest.raises(exceptions.InvalidDistribution): _find_dists(["twine/*.rb"]) diff --git a/tests/test_main.py b/tests/test_main.py index e7c6037..9c8d20c 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -11,11 +11,14 @@ # limitations under the License. from twine import __main__ as dunder_main +from twine import exceptions import pretend def test_exception_handling(monkeypatch): - replaced_dispatch = pretend.raiser(KeyError('foo')) + replaced_dispatch = pretend.raiser( + exceptions.InvalidConfiguration('foo') + ) monkeypatch.setattr(dunder_main, 'dispatch', replaced_dispatch) - assert dunder_main.main() == 'KeyError: foo' + assert dunder_main.main() == 'InvalidConfiguration: foo'
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_issue_reference", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 3, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 11 }
1.11
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "coverage", "pretend", "pyblake2" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "docs/requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 attrs==22.2.0 Babel==2.11.0 bleach==4.1.0 certifi==2021.5.30 cffi==1.15.1 charset-normalizer==2.0.12 colorama==0.4.5 coverage==6.2 cryptography==40.0.2 distlib==0.3.9 doc8==0.11.2 docutils==0.18.1 filelock==3.4.1 idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 jeepney==0.7.1 Jinja2==3.0.3 keyring==23.4.1 MarkupSafe==2.0.1 packaging==21.3 pbr==6.1.1 pkginfo==1.10.0 platformdirs==2.4.0 pluggy==1.0.0 pretend==1.0.9 py==1.11.0 pyblake2==1.1.2 pycparser==2.21 Pygments==2.14.0 pyparsing==3.1.4 pytest==7.0.1 pytz==2025.2 readme-renderer==34.0 releases==2.1.1 requests==2.27.1 requests-toolbelt==1.0.0 restructuredtext-lint==1.4.0 rfc3986==1.5.0 SecretStorage==3.3.3 semantic-version==2.6.0 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinx-rtd-theme==2.0.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 stevedore==3.5.2 toml==0.10.2 tomli==1.2.3 tox==3.28.0 tqdm==4.64.1 -e git+https://github.com/pypa/twine.git@c977b44cf87e066125e9de496429f8b3f5c90bf4#egg=twine typing_extensions==4.1.1 urllib3==1.26.20 virtualenv==20.17.1 webencodings==0.5.1 zipp==3.6.0
name: twine channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - attrs==22.2.0 - babel==2.11.0 - bleach==4.1.0 - cffi==1.15.1 - charset-normalizer==2.0.12 - colorama==0.4.5 - coverage==6.2 - cryptography==40.0.2 - distlib==0.3.9 - doc8==0.11.2 - docutils==0.18.1 - filelock==3.4.1 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - jeepney==0.7.1 - jinja2==3.0.3 - keyring==23.4.1 - markupsafe==2.0.1 - packaging==21.3 - pbr==6.1.1 - pkginfo==1.10.0 - platformdirs==2.4.0 - pluggy==1.0.0 - pretend==1.0.9 - py==1.11.0 - pyblake2==1.1.2 - pycparser==2.21 - pygments==2.14.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytz==2025.2 - readme-renderer==34.0 - releases==2.1.1 - requests==2.27.1 - requests-toolbelt==1.0.0 - restructuredtext-lint==1.4.0 - rfc3986==1.5.0 - secretstorage==3.3.3 - semantic-version==2.6.0 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinx-rtd-theme==2.0.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jquery==4.1 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - stevedore==3.5.2 - toml==0.10.2 - tomli==1.2.3 - tox==3.28.0 - tqdm==4.64.1 - typing-extensions==4.1.1 - urllib3==1.26.20 - virtualenv==20.17.1 - webencodings==0.5.1 - zipp==3.6.0 prefix: /opt/conda/envs/twine
[ "tests/test_commands.py::test_find_dists_errors_on_invalid_globs", "tests/test_main.py::test_exception_handling" ]
[]
[ "tests/test_commands.py::test_ensure_wheel_files_uploaded_first", "tests/test_commands.py::test_ensure_if_no_wheel_files", "tests/test_commands.py::test_find_dists_expands_globs", "tests/test_commands.py::test_find_dists_handles_real_files" ]
[]
Apache License 2.0
3,087
[ "twine/wininst.py", "twine/commands/upload.py", "setup.py", "twine/commands/register.py", "twine/utils.py", "twine/wheel.py", "setup.cfg", "twine/exceptions.py", "twine/commands/__init__.py", "twine/__main__.py", "twine/package.py" ]
[ "twine/wininst.py", "twine/commands/upload.py", "setup.py", "twine/commands/register.py", "twine/utils.py", "twine/wheel.py", "setup.cfg", "twine/exceptions.py", "twine/commands/__init__.py", "twine/__main__.py", "twine/package.py" ]
google__mobly-497
5fdd0397ec5b32ac61be7b47e1c35ce84943e87c
2018-09-17 20:26:25
95286a01a566e056d44acfa9577a45bc7f37f51d
diff --git a/mobly/controller_manager.py b/mobly/controller_manager.py new file mode 100644 index 0000000..db23688 --- /dev/null +++ b/mobly/controller_manager.py @@ -0,0 +1,207 @@ +# Copyright 2018 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Module for Mobly controller management.""" +import collections +import copy +import logging +import yaml + +from mobly import records +from mobly import signals + + +def verify_controller_module(module): + """Verifies a module object follows the required interface for + controllers. + + The interface is explained in the docstring of + `base_test.BaseTestClass.register_controller`. + + Args: + module: An object that is a controller module. This is usually + imported with import statements or loaded by importlib. + + Raises: + ControllerError: if the module does not match the Mobly controller + interface, or one of the required members is null. + """ + required_attributes = ('create', 'destroy', 'MOBLY_CONTROLLER_CONFIG_NAME') + for attr in required_attributes: + if not hasattr(module, attr): + raise signals.ControllerError( + 'Module %s missing required controller module attribute' + ' %s.' % (module.__name__, attr)) + if not getattr(module, attr): + raise signals.ControllerError( + 'Controller interface %s in %s cannot be null.' % + (attr, module.__name__)) + + +class ControllerManager(object): + """Manages the controller objects for Mobly. + + This manages the life cycles and info retrieval of all controller objects + used in a test. + """ + + def __init__(self, class_name, controller_configs): + # Controller object management. + self._controller_objects = collections.OrderedDict( + ) # controller_name: objects + self._controller_modules = {} # controller_name: module + self._class_name = class_name + self._controller_configs = controller_configs + + def register_controller(self, module, required=True, min_number=1): + """Loads a controller module and returns its loaded devices. + + This is to be used in a mobly test class. + + Args: + module: A module that follows the controller module interface. + required: A bool. If True, failing to register the specified + controller module raises exceptions. If False, the objects + failed to instantiate will be skipped. + min_number: An integer that is the minimum number of controller + objects to be created. Default is one, since you should not + register a controller module without expecting at least one + object. + + Returns: + A list of controller objects instantiated from controller_module, or + None if no config existed for this controller and it was not a + required controller. + + Raises: + ControllerError: + * The controller module has already been registered. + * The actual number of objects instantiated is less than the + * `min_number`. + * `required` is True and no corresponding config can be found. + * Any other error occurred in the registration process. 
+ """ + verify_controller_module(module) + # Use the module's name as the ref name + module_ref_name = module.__name__.split('.')[-1] + if module_ref_name in self._controller_objects: + raise signals.ControllerError( + 'Controller module %s has already been registered. It cannot ' + 'be registered again.' % module_ref_name) + # Create controller objects. + module_config_name = module.MOBLY_CONTROLLER_CONFIG_NAME + if module_config_name not in self._controller_configs: + if required: + raise signals.ControllerError( + 'No corresponding config found for %s' % + module_config_name) + logging.warning( + 'No corresponding config found for optional controller %s', + module_config_name) + return None + try: + # Make a deep copy of the config to pass to the controller module, + # in case the controller module modifies the config internally. + original_config = self._controller_configs[module_config_name] + controller_config = copy.deepcopy(original_config) + objects = module.create(controller_config) + except: + logging.exception( + 'Failed to initialize objects for controller %s, abort!', + module_config_name) + raise + if not isinstance(objects, list): + raise signals.ControllerError( + 'Controller module %s did not return a list of objects, abort.' + % module_ref_name) + # Check we got enough controller objects to continue. + actual_number = len(objects) + if actual_number < min_number: + module.destroy(objects) + raise signals.ControllerError( + 'Expected to get at least %d controller objects, got %d.' % + (min_number, actual_number)) + # Save a shallow copy of the list for internal usage, so tests can't + # affect internal registry by manipulating the object list. + self._controller_objects[module_ref_name] = copy.copy(objects) + logging.debug('Found %d objects for controller %s', len(objects), + module_config_name) + self._controller_modules[module_ref_name] = module + return objects + + def unregister_controllers(self): + """Destroy controller objects and clear internal registry. + + This will be called after each test class. + """ + # TODO(xpconanfan): actually record these errors instead of just + # logging them. + for name, module in self._controller_modules.items(): + logging.debug('Destroying %s.', name) + try: + module.destroy(self._controller_objects[name]) + except: + logging.exception('Exception occurred destroying %s.', name) + self._controller_objects = collections.OrderedDict() + self._controller_modules = {} + + def _create_controller_info_record(self, controller_module_name): + """Creates controller info record for a particular controller type. + + Info is retrieved from all the controller objects spawned from the + specified module, using the controller module's `get_info` function. + + Args: + controller_module_name: string, the name of the controller module + to retrieve info from. + + Returns: + A records.ControllerInfoRecord object. + """ + module = self._controller_modules[controller_module_name] + controller_info = None + try: + controller_info = module.get_info( + copy.copy(self._controller_objects[controller_module_name])) + except AttributeError: + logging.warning('No optional debug info found for controller ' + '%s. To provide it, implement `get_info`.', + controller_module_name) + try: + yaml.dump(controller_info) + except TypeError: + logging.warning('The info of controller %s in class "%s" is not ' + 'YAML serializable! 
Coercing it to string.', + controller_module_name, self._class_name) + controller_info = str(controller_info) + return records.ControllerInfoRecord( + self._class_name, module.MOBLY_CONTROLLER_CONFIG_NAME, + controller_info) + + def get_controller_info_records(self): + """Get the info records for all the controller objects in the manager. + + New info records for each controller object are created for every call + so the latest info is included. + + Returns: + List of records.ControllerInfoRecord objects. Each opject conatins + the info of a type of controller + """ + info_records = [] + for controller_module_name in self._controller_objects.keys(): + record = self._create_controller_info_record( + controller_module_name) + if record: + info_records.append(record) + return info_records diff --git a/mobly/records.py b/mobly/records.py index 171c0ca..1dec134 100644 --- a/mobly/records.py +++ b/mobly/records.py @@ -539,28 +539,16 @@ class TestResult(object): else: self.error.append(record) - def add_controller_info(self, controller_name, controller_info, - test_class): - """Adds controller info to results. + def add_controller_info_record(self, controller_info_record): + """Adds a controller info record to results. - This can be called multiple times for each + This can be called multiple times for each test class. Args: - controller_name: string, name of the controller. - controller_info: yaml serializable info about the controller. - test_class: string, a tag for identifying a class. This should be - the test class's own `TAG` attribute. + controller_info_record: ControllerInfoRecord object to be added to + the result. """ - info = controller_info - try: - yaml.dump(controller_info) - except TypeError: - logging.warning('The info of controller %s in class "%s" is not ' - 'YAML serializable! Coercing it to string.', - controller_name, test_class) - info = str(controller_info) - self.controller_info.append( - ControllerInfoRecord(test_class, controller_name, info)) + self.controller_info.append(controller_info_record) def add_class_error(self, test_record): """Add a record to indicate a test class has failed before any test
Create a controller manager

The management of controller object life cycles should probably be entirely self-contained. Moving the registration mechanism from the test runner to the base test class improved the situation, but the core problem is still unsolved: the management logic is loosely spread across class variables and methods in the base class, where it is tangled up with the base class's own internal logic. This is difficult to maintain. We should have a proper `ControllerManager`.
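For illustration, here is a rough usage sketch of the `ControllerManager` that the patch above introduces, written from a test class's point of view. The manager API (constructor, `register_controller`, `get_controller_info_records`, `unregister_controllers`) is taken from the diff; the `mock_controller` module and the config values are placeholders borrowed from the unit tests below, so this only runs inside a mobly checkout.

```python
from mobly import controller_manager

# Any module that exposes create/destroy/MOBLY_CONTROLLER_CONFIG_NAME works;
# the test-only mock controller is used here purely as a placeholder.
from tests.lib import mock_controller

configs = {mock_controller.MOBLY_CONTROLLER_CONFIG_NAME: ['magic1', 'magic2']}
manager = controller_manager.ControllerManager(
    class_name='MyTestClass', controller_configs=configs)

# The test class delegates life-cycle handling instead of keeping its own
# registry of controller objects and modules.
devices = manager.register_controller(mock_controller)
try:
    pass  # ... run tests against `devices` ...
finally:
    # Collect info records before teardown so they can go into the results.
    records = manager.get_controller_info_records()
    manager.unregister_controllers()
```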
google/mobly
diff --git a/mobly/base_test.py b/mobly/base_test.py index 045fee8..5dc5e8f 100644 --- a/mobly/base_test.py +++ b/mobly/base_test.py @@ -21,6 +21,7 @@ import logging from future.utils import raise_with_traceback +from mobly import controller_manager from mobly import expects from mobly import records from mobly import runtime_test_info @@ -46,30 +47,6 @@ class Error(Exception): """Raised for exceptions that occured in BaseTestClass.""" -def _verify_controller_module(module): - """Verifies a module object follows the required interface for - controllers. - - Args: - module: An object that is a controller module. This is usually - imported with import statements or loaded by importlib. - - Raises: - ControllerError: if the module does not match the Mobly controller - interface, or one of the required members is null. - """ - required_attributes = ('create', 'destroy', 'MOBLY_CONTROLLER_CONFIG_NAME') - for attr in required_attributes: - if not hasattr(module, attr): - raise signals.ControllerError( - 'Module %s missing required controller module attribute' - ' %s.' % (module.__name__, attr)) - if not getattr(module, attr): - raise signals.ControllerError( - 'Controller interface %s in %s cannot be null.' % - (attr, module.__name__)) - - class BaseTestClass(object): """Base class for all test classes to inherit from. @@ -97,8 +74,6 @@ class BaseTestClass(object): log_path: string, specifies the root directory for all logs written by a test run. test_bed_name: string, the name of the test bed used by a test run. - controller_configs: dict, configs used for instantiating controller - objects. user_params: dict, custom parameters from user, to be consumed by the test logic. """ @@ -125,7 +100,6 @@ class BaseTestClass(object): self.TAG = self._class_name # Set params. self.log_path = configs.log_path - self.controller_configs = configs.controller_configs self.test_bed_name = configs.test_bed_name self.user_params = configs.user_params self.results = records.TestResult() @@ -133,10 +107,8 @@ class BaseTestClass(object): # Deprecated, use `self.current_test_info.name`. self.current_test_name = None self._generated_test_table = collections.OrderedDict() - # Controller object management. - self._controller_registry = collections.OrderedDict( - ) # controller_name: objects - self._controller_modules = {} # controller_name: module + self._controller_manager = controller_manager.ControllerManager( + class_name=self.TAG, controller_configs=configs.controller_configs) def __enter__(self): return self @@ -269,86 +241,15 @@ class BaseTestClass(object): * `required` is True and no corresponding config can be found. * Any other error occurred in the registration process. """ - _verify_controller_module(module) - # Use the module's name as the ref name - module_ref_name = module.__name__.split('.')[-1] - if module_ref_name in self._controller_registry: - raise signals.ControllerError( - 'Controller module %s has already been registered. It cannot ' - 'be registered again.' % module_ref_name) - # Create controller objects. 
- create = module.create - module_config_name = module.MOBLY_CONTROLLER_CONFIG_NAME - if module_config_name not in self.controller_configs: - if required: - raise signals.ControllerError( - 'No corresponding config found for %s' % - module_config_name) - logging.warning( - 'No corresponding config found for optional controller %s', - module_config_name) - return None - try: - # Make a deep copy of the config to pass to the controller module, - # in case the controller module modifies the config internally. - original_config = self.controller_configs[module_config_name] - controller_config = copy.deepcopy(original_config) - objects = create(controller_config) - except: - logging.exception( - 'Failed to initialize objects for controller %s, abort!', - module_config_name) - raise - if not isinstance(objects, list): - raise signals.ControllerError( - 'Controller module %s did not return a list of objects, abort.' - % module_ref_name) - # Check we got enough controller objects to continue. - actual_number = len(objects) - if actual_number < min_number: - module.destroy(objects) - raise signals.ControllerError( - 'Expected to get at least %d controller objects, got %d.' % - (min_number, actual_number)) - # Save a shallow copy of the list for internal usage, so tests can't - # affect internal registry by manipulating the object list. - self._controller_registry[module_ref_name] = copy.copy(objects) - logging.debug('Found %d objects for controller %s', len(objects), - module_config_name) - self._controller_modules[module_ref_name] = module - return objects - - def _unregister_controllers(self): - """Destroy controller objects and clear internal registry. - - This will be called after each test class. - """ - # TODO(xpconanfan): actually record these errors instead of just - # logging them. - for name, module in self._controller_modules.items(): - try: - logging.debug('Destroying %s.', name) - module.destroy(self._controller_registry[name]) - except: - logging.exception('Exception occurred destroying %s.', name) - self._controller_registry = collections.OrderedDict() - self._controller_modules = {} + return self._controller_manager.register_controller( + module, required, min_number) def _record_controller_info(self): # Collect controller information and write to test result. - for module_ref_name, objects in self._controller_registry.items(): - module = self._controller_modules[module_ref_name] - try: - controller_info = module.get_info(copy.copy(objects)) - except AttributeError: - logging.warning('No optional debug info found for controller ' - '%s. To provide it, implement `get_info`.', - module_ref_name) - continue - self.results.add_controller_info( - controller_name=module.MOBLY_CONTROLLER_CONFIG_NAME, - controller_info=controller_info, - test_class=self.TAG) + for record in self._controller_manager.get_controller_info_records(): + self.results.add_controller_info_record(record) + self.summary_writer.dump( + record.to_dict(), records.TestSummaryEntryType.CONTROLLER_INFO) def _setup_generated_tests(self): """Proxy function to guarantee the base implementation of @@ -423,6 +324,10 @@ class BaseTestClass(object): self.results.add_class_error(record) self.summary_writer.dump(record.to_dict(), records.TestSummaryEntryType.RECORD) + finally: + # Write controller info and summary to summary file. 
+ self._record_controller_info() + self._controller_manager.unregister_controllers() def teardown_class(self): """Teardown function that will be called after all the selected tests in @@ -905,14 +810,7 @@ class BaseTestClass(object): setattr(e, 'results', self.results) raise e finally: - # Write controller info and summary to summary file. - self._record_controller_info() - for controller_info in self.results.controller_info: - self.summary_writer.dump( - controller_info.to_dict(), - records.TestSummaryEntryType.CONTROLLER_INFO) self._teardown_class() - self._unregister_controllers() logging.info('Summary for test class %s: %s', self.TAG, self.results.summary_str()) diff --git a/tests/mobly/base_test_test.py b/tests/mobly/base_test_test.py index f6349ea..ff866bb 100755 --- a/tests/mobly/base_test_test.py +++ b/tests/mobly/base_test_test.py @@ -267,14 +267,25 @@ class BaseTestTest(unittest.TestCase): on_fail_call_check.assert_called_once_with("haha") def test_teardown_class_fail_by_exception(self): + mock_test_config = self.mock_test_cls_configs.copy() + mock_ctrlr_config_name = mock_controller.MOBLY_CONTROLLER_CONFIG_NAME + mock_ctrlr_2_config_name = mock_second_controller.MOBLY_CONTROLLER_CONFIG_NAME + my_config = [{'serial': 'xxxx', 'magic': 'Magic'}] + mock_test_config.controller_configs[mock_ctrlr_config_name] = my_config + mock_test_config.controller_configs[ + mock_ctrlr_2_config_name] = copy.copy(my_config) + class MockBaseTest(base_test.BaseTestClass): + def setup_class(self): + self.register_controller(mock_controller) + def test_something(self): pass def teardown_class(self): raise Exception(MSG_EXPECTED_EXCEPTION) - bt_cls = MockBaseTest(self.mock_test_cls_configs) + bt_cls = MockBaseTest(mock_test_config) bt_cls.run() test_record = bt_cls.results.passed[0] class_record = bt_cls.results.error[0] @@ -287,6 +298,53 @@ class BaseTestTest(unittest.TestCase): expected_summary = ('Error 1, Executed 1, Failed 0, Passed 1, ' 'Requested 1, Skipped 0') self.assertEqual(bt_cls.results.summary_str(), expected_summary) + # Verify the controller info is recorded correctly. + info = bt_cls.results.controller_info[0] + self.assertEqual(info.test_class, 'MockBaseTest') + self.assertEqual(info.controller_name, 'MagicDevice') + self.assertEqual(info.controller_info, [{ + 'MyMagic': { + 'magic': 'Magic' + } + }]) + + def test_teardown_class_raise_abort_all(self): + mock_test_config = self.mock_test_cls_configs.copy() + mock_ctrlr_config_name = mock_controller.MOBLY_CONTROLLER_CONFIG_NAME + mock_ctrlr_2_config_name = mock_second_controller.MOBLY_CONTROLLER_CONFIG_NAME + my_config = [{'serial': 'xxxx', 'magic': 'Magic'}] + mock_test_config.controller_configs[mock_ctrlr_config_name] = my_config + mock_test_config.controller_configs[ + mock_ctrlr_2_config_name] = copy.copy(my_config) + + class MockBaseTest(base_test.BaseTestClass): + def setup_class(self): + self.register_controller(mock_controller) + + def test_something(self): + pass + + def teardown_class(self): + raise asserts.abort_all(MSG_EXPECTED_EXCEPTION) + + bt_cls = MockBaseTest(mock_test_config) + with self.assertRaisesRegex(signals.TestAbortAll, + MSG_EXPECTED_EXCEPTION): + bt_cls.run() + test_record = bt_cls.results.passed[0] + self.assertTrue(bt_cls.results.is_all_pass) + expected_summary = ('Error 0, Executed 1, Failed 0, Passed 1, ' + 'Requested 1, Skipped 0') + self.assertEqual(bt_cls.results.summary_str(), expected_summary) + # Verify the controller info is recorded correctly. 
+ info = bt_cls.results.controller_info[0] + self.assertEqual(info.test_class, 'MockBaseTest') + self.assertEqual(info.controller_name, 'MagicDevice') + self.assertEqual(info.controller_info, [{ + 'MyMagic': { + 'magic': 'Magic' + } + }]) def test_setup_test_fail_by_exception(self): mock_on_fail = mock.Mock() @@ -1836,113 +1894,6 @@ class BaseTestTest(unittest.TestCase): } }]) - def test_register_controller_no_config(self): - bt_cls = MockEmptyBaseTest(self.mock_test_cls_configs) - with self.assertRaisesRegex(signals.ControllerError, - 'No corresponding config found for'): - bt_cls.register_controller(mock_controller) - - def test_register_controller_no_config_for_not_required(self): - bt_cls = MockEmptyBaseTest(self.mock_test_cls_configs) - self.assertIsNone( - bt_cls.register_controller(mock_controller, required=False)) - - def test_register_controller_dup_register(self): - """Verifies correctness of registration, internal tally of controllers - objects, and the right error happen when a controller module is - registered twice. - """ - mock_test_config = self.mock_test_cls_configs.copy() - mock_ctrlr_config_name = mock_controller.MOBLY_CONTROLLER_CONFIG_NAME - mock_test_config.controller_configs = { - mock_ctrlr_config_name: ['magic1', 'magic2'] - } - bt_cls = MockEmptyBaseTest(mock_test_config) - bt_cls.register_controller(mock_controller) - registered_name = 'mock_controller' - self.assertTrue(registered_name in bt_cls._controller_registry) - mock_ctrlrs = bt_cls._controller_registry[registered_name] - self.assertEqual(mock_ctrlrs[0].magic, 'magic1') - self.assertEqual(mock_ctrlrs[1].magic, 'magic2') - self.assertTrue(bt_cls._controller_modules[registered_name]) - expected_msg = 'Controller module .* has already been registered.' - with self.assertRaisesRegex(signals.ControllerError, expected_msg): - bt_cls.register_controller(mock_controller) - - def test_register_controller_no_get_info(self): - mock_test_config = self.mock_test_cls_configs.copy() - mock_ctrlr_config_name = mock_controller.MOBLY_CONTROLLER_CONFIG_NAME - get_info = getattr(mock_controller, 'get_info') - delattr(mock_controller, 'get_info') - try: - mock_test_config.controller_configs = { - mock_ctrlr_config_name: ['magic1', 'magic2'] - } - bt_cls = MockEmptyBaseTest(mock_test_config) - bt_cls.register_controller(mock_controller) - self.assertEqual(bt_cls.results.controller_info, []) - finally: - setattr(mock_controller, 'get_info', get_info) - - def test_register_controller_return_value(self): - mock_test_config = self.mock_test_cls_configs.copy() - mock_ctrlr_config_name = mock_controller.MOBLY_CONTROLLER_CONFIG_NAME - mock_test_config.controller_configs = { - mock_ctrlr_config_name: ['magic1', 'magic2'] - } - bt_cls = MockEmptyBaseTest(mock_test_config) - magic_devices = bt_cls.register_controller(mock_controller) - self.assertEqual(magic_devices[0].magic, 'magic1') - self.assertEqual(magic_devices[1].magic, 'magic2') - - def test_register_controller_change_return_value(self): - mock_test_config = self.mock_test_cls_configs.copy() - mock_ctrlr_config_name = mock_controller.MOBLY_CONTROLLER_CONFIG_NAME - mock_test_config.controller_configs = { - mock_ctrlr_config_name: ['magic1', 'magic2'] - } - bt_cls = MockEmptyBaseTest(mock_test_config) - magic_devices = bt_cls.register_controller(mock_controller) - magic1 = magic_devices.pop(0) - self.assertIs(magic1, - bt_cls._controller_registry['mock_controller'][0]) - self.assertEqual( - len(bt_cls._controller_registry['mock_controller']), 2) - - def 
test_register_controller_less_than_min_number(self): - mock_test_config = self.mock_test_cls_configs.copy() - mock_ctrlr_config_name = mock_controller.MOBLY_CONTROLLER_CONFIG_NAME - mock_test_config.controller_configs = { - mock_ctrlr_config_name: ['magic1', 'magic2'] - } - bt_cls = MockEmptyBaseTest(mock_test_config) - expected_msg = 'Expected to get at least 3 controller objects, got 2.' - with self.assertRaisesRegex(signals.ControllerError, expected_msg): - bt_cls.register_controller(mock_controller, min_number=3) - - def test_verify_controller_module(self): - base_test._verify_controller_module(mock_controller) - - def test_verify_controller_module_null_attr(self): - try: - tmp = mock_controller.MOBLY_CONTROLLER_CONFIG_NAME - mock_controller.MOBLY_CONTROLLER_CONFIG_NAME = None - msg = 'Controller interface .* in .* cannot be null.' - with self.assertRaisesRegex(signals.ControllerError, msg): - base_test._verify_controller_module(mock_controller) - finally: - mock_controller.MOBLY_CONTROLLER_CONFIG_NAME = tmp - - def test_verify_controller_module_missing_attr(self): - try: - tmp = mock_controller.MOBLY_CONTROLLER_CONFIG_NAME - delattr(mock_controller, 'MOBLY_CONTROLLER_CONFIG_NAME') - msg = 'Module .* missing required controller module attribute' - with self.assertRaisesRegex(signals.ControllerError, msg): - base_test._verify_controller_module(mock_controller) - finally: - setattr(mock_controller, 'MOBLY_CONTROLLER_CONFIG_NAME', tmp) - if __name__ == "__main__": unittest.main() diff --git a/tests/mobly/controller_manager_test.py b/tests/mobly/controller_manager_test.py new file mode 100755 index 0000000..7c9f1b6 --- /dev/null +++ b/tests/mobly/controller_manager_test.py @@ -0,0 +1,206 @@ +# Copyright 2018 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Unit tests for controller manager.""" +import mock + +from future.tests.base import unittest + +from mobly import controller_manager +from mobly import signals + +from tests.lib import mock_controller + + +class ControllerManagerTest(unittest.TestCase): + """Unit tests for Mobly's ControllerManager.""" + + def test_verify_controller_module(self): + controller_manager.verify_controller_module(mock_controller) + + def test_verify_controller_module_null_attr(self): + try: + tmp = mock_controller.MOBLY_CONTROLLER_CONFIG_NAME + mock_controller.MOBLY_CONTROLLER_CONFIG_NAME = None + msg = 'Controller interface .* in .* cannot be null.' 
+ with self.assertRaisesRegex(signals.ControllerError, msg): + controller_manager.verify_controller_module(mock_controller) + finally: + mock_controller.MOBLY_CONTROLLER_CONFIG_NAME = tmp + + def test_verify_controller_module_missing_attr(self): + try: + tmp = mock_controller.MOBLY_CONTROLLER_CONFIG_NAME + delattr(mock_controller, 'MOBLY_CONTROLLER_CONFIG_NAME') + msg = 'Module .* missing required controller module attribute' + with self.assertRaisesRegex(signals.ControllerError, msg): + controller_manager.verify_controller_module(mock_controller) + finally: + setattr(mock_controller, 'MOBLY_CONTROLLER_CONFIG_NAME', tmp) + + def test_register_controller_no_config(self): + c_manager = controller_manager.ControllerManager('SomeClass', {}) + with self.assertRaisesRegex(signals.ControllerError, + 'No corresponding config found for'): + c_manager.register_controller(mock_controller) + + def test_register_controller_no_config_for_not_required(self): + c_manager = controller_manager.ControllerManager('SomeClass', {}) + self.assertIsNone( + c_manager.register_controller(mock_controller, required=False)) + + def test_register_controller_dup_register(self): + """Verifies correctness of registration, internal tally of controllers + objects, and the right error happen when a controller module is + registered twice. + """ + mock_ctrlr_config_name = mock_controller.MOBLY_CONTROLLER_CONFIG_NAME + controller_configs = {mock_ctrlr_config_name: ['magic1', 'magic2']} + c_manager = controller_manager.ControllerManager( + 'SomeClass', controller_configs) + c_manager.register_controller(mock_controller) + registered_name = 'mock_controller' + self.assertTrue(registered_name in c_manager._controller_objects) + mock_ctrlrs = c_manager._controller_objects[registered_name] + self.assertEqual(mock_ctrlrs[0].magic, 'magic1') + self.assertEqual(mock_ctrlrs[1].magic, 'magic2') + self.assertTrue(c_manager._controller_modules[registered_name]) + expected_msg = 'Controller module .* has already been registered.' + with self.assertRaisesRegex(signals.ControllerError, expected_msg): + c_manager.register_controller(mock_controller) + + def test_register_controller_return_value(self): + mock_ctrlr_config_name = mock_controller.MOBLY_CONTROLLER_CONFIG_NAME + controller_configs = {mock_ctrlr_config_name: ['magic1', 'magic2']} + c_manager = controller_manager.ControllerManager( + 'SomeClass', controller_configs) + magic_devices = c_manager.register_controller(mock_controller) + self.assertEqual(magic_devices[0].magic, 'magic1') + self.assertEqual(magic_devices[1].magic, 'magic2') + + def test_register_controller_change_return_value(self): + mock_ctrlr_config_name = mock_controller.MOBLY_CONTROLLER_CONFIG_NAME + controller_configs = {mock_ctrlr_config_name: ['magic1', 'magic2']} + c_manager = controller_manager.ControllerManager( + 'SomeClass', controller_configs) + magic_devices = c_manager.register_controller(mock_controller) + magic1 = magic_devices.pop(0) + self.assertIs(magic1, + c_manager._controller_objects['mock_controller'][0]) + self.assertEqual( + len(c_manager._controller_objects['mock_controller']), 2) + + def test_register_controller_less_than_min_number(self): + mock_ctrlr_config_name = mock_controller.MOBLY_CONTROLLER_CONFIG_NAME + controller_configs = {mock_ctrlr_config_name: ['magic1', 'magic2']} + c_manager = controller_manager.ControllerManager( + 'SomeClass', controller_configs) + expected_msg = 'Expected to get at least 3 controller objects, got 2.' 
+ with self.assertRaisesRegex(signals.ControllerError, expected_msg): + c_manager.register_controller(mock_controller, min_number=3) + + @mock.patch('yaml.dump', side_effect=TypeError('ha')) + def test_get_controller_info_record_not_serializable(self, _): + mock_ctrlr_config_name = mock_controller.MOBLY_CONTROLLER_CONFIG_NAME + controller_configs = {mock_ctrlr_config_name: ['magic1', 'magic2']} + c_manager = controller_manager.ControllerManager( + 'SomeClass', controller_configs) + c_manager.register_controller(mock_controller) + record = c_manager.get_controller_info_records()[0] + actual_controller_info = record.controller_info + self.assertEqual(actual_controller_info, + "[{'MyMagic': 'magic1'}, {'MyMagic': 'magic2'}]") + + def test_controller_record_exists_without_get_info(self): + mock_ctrlr_config_name = mock_controller.MOBLY_CONTROLLER_CONFIG_NAME + controller_configs = {mock_ctrlr_config_name: ['magic1', 'magic2']} + c_manager = controller_manager.ControllerManager( + 'SomeClass', controller_configs) + get_info = getattr(mock_controller, 'get_info') + delattr(mock_controller, 'get_info') + try: + c_manager.register_controller(mock_controller) + record = c_manager.get_controller_info_records()[0] + self.assertIsNone(record.controller_info) + self.assertEqual(record.test_class, 'SomeClass') + self.assertEqual(record.controller_name, 'MagicDevice') + finally: + setattr(mock_controller, 'get_info', get_info) + + @mock.patch('tests.lib.mock_controller.get_info') + def test_get_controller_info_records_empty(self, mock_get_info_func): + mock_get_info_func.return_value = None + mock_ctrlr_config_name = mock_controller.MOBLY_CONTROLLER_CONFIG_NAME + controller_configs = {mock_ctrlr_config_name: ['magic1', 'magic2']} + c_manager = controller_manager.ControllerManager( + 'SomeClass', controller_configs) + c_manager.register_controller(mock_controller) + record = c_manager.get_controller_info_records()[0] + self.assertIsNone(record.controller_info) + self.assertEqual(record.test_class, 'SomeClass') + self.assertEqual(record.controller_name, 'MagicDevice') + + def test_get_controller_info_records(self): + mock_ctrlr_config_name = mock_controller.MOBLY_CONTROLLER_CONFIG_NAME + controller_configs = {mock_ctrlr_config_name: ['magic1', 'magic2']} + c_manager = controller_manager.ControllerManager( + 'SomeClass', controller_configs) + c_manager.register_controller(mock_controller) + record = c_manager.get_controller_info_records()[0] + record_dict = record.to_dict() + record_dict.pop('Timestamp') + self.assertEqual( + record_dict, { + 'Controller Info': [{ + 'MyMagic': 'magic1' + }, { + 'MyMagic': 'magic2' + }], + 'Controller Name': 'MagicDevice', + 'Test Class': 'SomeClass' + }) + + def test_get_controller_info_without_registration(self): + mock_ctrlr_config_name = mock_controller.MOBLY_CONTROLLER_CONFIG_NAME + controller_configs = {mock_ctrlr_config_name: ['magic1', 'magic2']} + c_manager = controller_manager.ControllerManager( + 'SomeClass', controller_configs) + self.assertFalse(c_manager.get_controller_info_records()) + + @mock.patch('tests.lib.mock_controller.destroy') + def test_unregister_controller(self, mock_destroy_func): + mock_ctrlr_config_name = mock_controller.MOBLY_CONTROLLER_CONFIG_NAME + controller_configs = {mock_ctrlr_config_name: ['magic1', 'magic2']} + c_manager = controller_manager.ControllerManager( + 'SomeClass', controller_configs) + objects = c_manager.register_controller(mock_controller) + c_manager.unregister_controllers() + 
mock_destroy_func.assert_called_once_with(objects) + self.assertFalse(c_manager._controller_objects) + self.assertFalse(c_manager._controller_modules) + + @mock.patch('tests.lib.mock_controller.destroy') + def test_unregister_controller_without_registration( + self, mock_destroy_func): + mock_ctrlr_config_name = mock_controller.MOBLY_CONTROLLER_CONFIG_NAME + controller_configs = {mock_ctrlr_config_name: ['magic1', 'magic2']} + c_manager = controller_manager.ControllerManager( + 'SomeClass', controller_configs) + c_manager.unregister_controllers() + mock_destroy_func.assert_not_called() + self.assertFalse(c_manager._controller_objects) + self.assertFalse(c_manager._controller_modules) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/mobly/records_test.py b/tests/mobly/records_test.py index a967b8b..9648637 100755 --- a/tests/mobly/records_test.py +++ b/tests/mobly/records_test.py @@ -239,15 +239,18 @@ class RecordsTest(unittest.TestCase): record1.test_pass(s) tr1 = records.TestResult() tr1.add_record(record1) - tr1.add_controller_info('SomeClass', 'MockDevice', - ['magicA', 'magicB']) + controller_info = records.ControllerInfoRecord( + 'SomeClass', 'MockDevice', ['magicA', 'magicB']) + tr1.add_controller_info_record(controller_info) record2 = records.TestResultRecord(self.tn) record2.test_begin() s = signals.TestPass(self.details, self.json_extra) record2.test_pass(s) tr2 = records.TestResult() tr2.add_record(record2) - tr2.add_controller_info('SomeClass', 'MockDevice', ['magicC']) + controller_info = records.ControllerInfoRecord( + 'SomeClass', 'MockDevice', ['magicC']) + tr2.add_controller_info_record(controller_info) tr2 += tr1 self.assertTrue(tr2.passed, [tr1, tr2]) self.assertTrue(tr2.controller_info, {'MockDevice': ['magicC']}) @@ -413,25 +416,17 @@ class RecordsTest(unittest.TestCase): self.assertIsNot(er, new_er) self.assertDictEqual(er.to_dict(), new_er.to_dict()) - def test_add_controller_info(self): + def test_add_controller_info_record(self): tr = records.TestResult() self.assertFalse(tr.controller_info) - tr.add_controller_info('MockDevice', ['magicA', 'magicB'], 'MyTest') + controller_info = records.ControllerInfoRecord( + 'SomeClass', 'MockDevice', ['magicA', 'magicB']) + tr.add_controller_info_record(controller_info) self.assertTrue(tr.controller_info[0]) self.assertEqual(tr.controller_info[0].controller_name, 'MockDevice') self.assertEqual(tr.controller_info[0].controller_info, ['magicA', 'magicB']) - @mock.patch('yaml.dump', side_effect=TypeError('ha')) - def test_add_controller_info_not_serializable(self, mock_yaml_dump): - tr = records.TestResult() - self.assertFalse(tr.controller_info) - tr.add_controller_info('MockDevice', ['magicA', 'magicB'], 'MyTest') - self.assertTrue(tr.controller_info[0]) - self.assertEqual(tr.controller_info[0].controller_name, 'MockDevice') - self.assertEqual(tr.controller_info[0].controller_info, - "['magicA', 'magicB']") - if __name__ == '__main__': unittest.main()
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_added_files", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 1 }
1.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "mock", "pytest", "pytz" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
exceptiongroup==1.2.2 future==1.0.0 iniconfig==2.1.0 -e git+https://github.com/google/mobly.git@5fdd0397ec5b32ac61be7b47e1c35ce84943e87c#egg=mobly mock==5.2.0 packaging==24.2 pluggy==1.5.0 portpicker==1.6.0 psutil==7.0.0 pyserial==3.5 pytest==8.3.5 pytz==2025.2 PyYAML==6.0.2 timeout-decorator==0.5.0 tomli==2.2.1
name: mobly channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - exceptiongroup==1.2.2 - future==1.0.0 - iniconfig==2.1.0 - mock==5.2.0 - packaging==24.2 - pluggy==1.5.0 - portpicker==1.6.0 - psutil==7.0.0 - pyserial==3.5 - pytest==8.3.5 - pytz==2025.2 - pyyaml==6.0.2 - timeout-decorator==0.5.0 - tomli==2.2.1 prefix: /opt/conda/envs/mobly
[ "tests/mobly/base_test_test.py::BaseTestTest::test_abort_all_in_on_fail", "tests/mobly/base_test_test.py::BaseTestTest::test_abort_all_in_on_fail_from_setup_class", "tests/mobly/base_test_test.py::BaseTestTest::test_abort_all_in_setup_class", "tests/mobly/base_test_test.py::BaseTestTest::test_abort_all_in_setup_test", "tests/mobly/base_test_test.py::BaseTestTest::test_abort_all_in_teardown_class", "tests/mobly/base_test_test.py::BaseTestTest::test_abort_all_in_test", "tests/mobly/base_test_test.py::BaseTestTest::test_abort_class_in_on_fail", "tests/mobly/base_test_test.py::BaseTestTest::test_abort_class_in_setup_test", "tests/mobly/base_test_test.py::BaseTestTest::test_abort_class_in_test", "tests/mobly/base_test_test.py::BaseTestTest::test_abort_class_setup_class", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_fail", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_fail_with_msg", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_pass", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_noop", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_wrong_error", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_wrong_regex", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_pass", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_fail_with_noop", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_fail_with_wrong_error", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_pass", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_true", "tests/mobly/base_test_test.py::BaseTestTest::test_both_teardown_and_test_body_raise_exceptions", "tests/mobly/base_test_test.py::BaseTestTest::test_cli_test_selection_fail_by_convention", "tests/mobly/base_test_test.py::BaseTestTest::test_cli_test_selection_override_self_tests_list", "tests/mobly/base_test_test.py::BaseTestTest::test_current_test_info", "tests/mobly/base_test_test.py::BaseTestTest::test_current_test_info_in_setup_class", "tests/mobly/base_test_test.py::BaseTestTest::test_current_test_name", "tests/mobly/base_test_test.py::BaseTestTest::test_default_execution_of_all_tests", "tests/mobly/base_test_test.py::BaseTestTest::test_exception_objects_in_record", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_equal", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_false", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_in_teardown_test", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_multiple_fails", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_no_op", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_no_raises_custom_msg", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_no_raises_default_msg", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_true", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_true_and_assert_true", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_two_tests", "tests/mobly/base_test_test.py::BaseTestTest::test_explicit_pass", "tests/mobly/base_test_test.py::BaseTestTest::test_explicit_pass_but_teardown_test_raises_an_exception", "tests/mobly/base_test_test.py::BaseTestTest::test_fail", "tests/mobly/base_test_test.py::BaseTestTest::test_failure_in_procedure_functions_is_recorded", "tests/mobly/base_test_test.py::BaseTestTest::test_failure_to_call_procedure_function_is_recorded", 
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_call_outside_of_setup_generated_tests", "tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_dup_test_name", "tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_run", "tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_selected_run", "tests/mobly/base_test_test.py::BaseTestTest::test_implicit_pass", "tests/mobly/base_test_test.py::BaseTestTest::test_missing_requested_test_func", "tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_cannot_modify_original_record", "tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_both_test_and_teardown_test_fails", "tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_setup_class_fails_by_exception", "tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_setup_test_fails_by_exception", "tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_teardown_test_fails", "tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_test_fails", "tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_raise_exception", "tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_triggered_by_setup_class_failure_then_fail_too", "tests/mobly/base_test_test.py::BaseTestTest::test_on_pass_cannot_modify_original_record", "tests/mobly/base_test_test.py::BaseTestTest::test_on_pass_raise_exception", "tests/mobly/base_test_test.py::BaseTestTest::test_procedure_function_gets_correct_record", "tests/mobly/base_test_test.py::BaseTestTest::test_promote_extra_errors_to_termination_signal", "tests/mobly/base_test_test.py::BaseTestTest::test_record_controller_info", "tests/mobly/base_test_test.py::BaseTestTest::test_self_tests_list", "tests/mobly/base_test_test.py::BaseTestTest::test_self_tests_list_fail_by_convention", "tests/mobly/base_test_test.py::BaseTestTest::test_setup_and_teardown_execution_count", "tests/mobly/base_test_test.py::BaseTestTest::test_setup_class_fail_by_exception", "tests/mobly/base_test_test.py::BaseTestTest::test_setup_generated_tests_failure", "tests/mobly/base_test_test.py::BaseTestTest::test_setup_test_fail_by_exception", "tests/mobly/base_test_test.py::BaseTestTest::test_setup_test_fail_by_test_signal", "tests/mobly/base_test_test.py::BaseTestTest::test_skip", "tests/mobly/base_test_test.py::BaseTestTest::test_skip_if", "tests/mobly/base_test_test.py::BaseTestTest::test_skip_in_setup_test", "tests/mobly/base_test_test.py::BaseTestTest::test_teardown_class_fail_by_exception", "tests/mobly/base_test_test.py::BaseTestTest::test_teardown_class_raise_abort_all", "tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_assert_fail", "tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_setup_test_fails", "tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_test_fails", "tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_test_pass", "tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_raise_exception", "tests/mobly/base_test_test.py::BaseTestTest::test_uncaught_exception", "tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_basic", "tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_None", "tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite", "tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite_by_optional_param_list", 
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite_by_required_param_list", "tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional", "tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional_missing", "tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional_with_default", "tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_required", "tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_required_missing", "tests/mobly/controller_manager_test.py::ControllerManagerTest::test_controller_record_exists_without_get_info", "tests/mobly/controller_manager_test.py::ControllerManagerTest::test_get_controller_info_record_not_serializable", "tests/mobly/controller_manager_test.py::ControllerManagerTest::test_get_controller_info_records", "tests/mobly/controller_manager_test.py::ControllerManagerTest::test_get_controller_info_records_empty", "tests/mobly/controller_manager_test.py::ControllerManagerTest::test_get_controller_info_without_registration", "tests/mobly/controller_manager_test.py::ControllerManagerTest::test_register_controller_change_return_value", "tests/mobly/controller_manager_test.py::ControllerManagerTest::test_register_controller_dup_register", "tests/mobly/controller_manager_test.py::ControllerManagerTest::test_register_controller_less_than_min_number", "tests/mobly/controller_manager_test.py::ControllerManagerTest::test_register_controller_no_config", "tests/mobly/controller_manager_test.py::ControllerManagerTest::test_register_controller_no_config_for_not_required", "tests/mobly/controller_manager_test.py::ControllerManagerTest::test_register_controller_return_value", "tests/mobly/controller_manager_test.py::ControllerManagerTest::test_unregister_controller", "tests/mobly/controller_manager_test.py::ControllerManagerTest::test_unregister_controller_without_registration", "tests/mobly/controller_manager_test.py::ControllerManagerTest::test_verify_controller_module", "tests/mobly/controller_manager_test.py::ControllerManagerTest::test_verify_controller_module_missing_attr", "tests/mobly/controller_manager_test.py::ControllerManagerTest::test_verify_controller_module_null_attr", "tests/mobly/records_test.py::RecordsTest::test_add_controller_info_record", "tests/mobly/records_test.py::RecordsTest::test_exception_record_deepcopy", "tests/mobly/records_test.py::RecordsTest::test_is_all_pass", "tests/mobly/records_test.py::RecordsTest::test_is_all_pass_negative", "tests/mobly/records_test.py::RecordsTest::test_is_all_pass_with_add_class_error", "tests/mobly/records_test.py::RecordsTest::test_is_test_executed", "tests/mobly/records_test.py::RecordsTest::test_result_add_class_error_with_special_error", "tests/mobly/records_test.py::RecordsTest::test_result_add_class_error_with_test_signal", "tests/mobly/records_test.py::RecordsTest::test_result_add_operator_success", "tests/mobly/records_test.py::RecordsTest::test_result_add_operator_type_mismatch", "tests/mobly/records_test.py::RecordsTest::test_result_record_fail_none", "tests/mobly/records_test.py::RecordsTest::test_result_record_fail_stacktrace", "tests/mobly/records_test.py::RecordsTest::test_result_record_fail_with_float_extra", "tests/mobly/records_test.py::RecordsTest::test_result_record_fail_with_json_extra", "tests/mobly/records_test.py::RecordsTest::test_result_record_fail_with_unicode_exception", "tests/mobly/records_test.py::RecordsTest::test_result_record_fail_with_unicode_test_signal", 
"tests/mobly/records_test.py::RecordsTest::test_result_record_pass_none", "tests/mobly/records_test.py::RecordsTest::test_result_record_pass_with_float_extra", "tests/mobly/records_test.py::RecordsTest::test_result_record_pass_with_json_extra", "tests/mobly/records_test.py::RecordsTest::test_result_record_skip_none", "tests/mobly/records_test.py::RecordsTest::test_result_record_skip_with_float_extra", "tests/mobly/records_test.py::RecordsTest::test_result_record_skip_with_json_extra" ]
[ "tests/mobly/base_test_test.py::BaseTestTest::test_write_user_data", "tests/mobly/records_test.py::RecordsTest::test_summary_user_data", "tests/mobly/records_test.py::RecordsTest::test_summary_write_dump", "tests/mobly/records_test.py::RecordsTest::test_summary_write_dump_with_unicode" ]
[]
[]
Apache License 2.0
3,088
[ "mobly/records.py", "mobly/controller_manager.py" ]
[ "mobly/records.py", "mobly/controller_manager.py" ]
faucetsdn__chewie-39
79e0c35e0089af91349fcd233d6419a25b28c25a
2018-09-18 03:31:11
84338da6a8fac3ad64fe5f69b1264805d98e986b
Bairdo: there's an error, don't merge yet Bairdo: #41 fixes the error. This can now be merged. KitL: I'm not particularly familiar with Chewie, but from the description it looks as though it would have been good to include a test. Is that possible? Bairdo: Yeah, it should. chewie.py is currently not tested here (in this repo) at all; I will look into it shortly.
diff --git a/chewie/chewie.py b/chewie/chewie.py index a046688..cfbb661 100644 --- a/chewie/chewie.py +++ b/chewie/chewie.py @@ -99,19 +99,19 @@ class Chewie: def auth_failure(self, src_mac, port_id): """failure shim between faucet and chewie - Args: - src_mac (MacAddress): the mac of the failed supplicant - port_id (MacAddress): the 'mac' identifier of what switch port - the failure is on""" + Args: + src_mac (MacAddress): the mac of the failed supplicant + port_id (MacAddress): the 'mac' identifier of what switch port + the failure is on""" if self.failure_handler: self.failure_handler(src_mac, port_id) def auth_logoff(self, src_mac, port_id): """logoff shim between faucet and chewie - Args: - src_mac (MacAddress): the mac of the logoff supplicant - port_id (MacAddress): the 'mac' identifier of what switch port - the logoff is on""" + Args: + src_mac (MacAddress): the mac of the logoff supplicant + port_id (MacAddress): the 'mac' identifier of what switch port + the logoff is on""" if self.logoff_handler: self.logoff_handler(src_mac, port_id) @@ -144,7 +144,7 @@ class Chewie: message, dst_mac = MessageParser.ethernet_parse(packed_message) self.logger.info("eap EAP(): %s", message) self.logger.info("Received message: %s" % message.__dict__) - sm = self.get_state_machine(message.src_mac) + sm = self.get_state_machine(message.src_mac, dst_mac) event = EventMessageReceived(message, dst_mac) sm.event(event) except Exception as e: @@ -160,7 +160,7 @@ class Chewie: try: while True: sleep(0) - eap_message, src_mac, username, state = self.radius_output_messages.get() + eap_message, src_mac, username, state, port_id = self.radius_output_messages.get() self.logger.info("got eap to send to radius.. mac: %s %s, username: %s", type(src_mac), src_mac, username) state_dict = None @@ -169,7 +169,7 @@ class Chewie: self.logger.info("Sending to RADIUS eap message %s with state %s", eap_message.__dict__, state_dict) radius_packet_id = self.get_next_radius_packet_id() - self.packet_id_to_mac[radius_packet_id] = src_mac + self.packet_id_to_mac[radius_packet_id] = {'src_mac': src_mac, 'port_id': port_id} # message is eap. needs to be wrapped into a radius packet. request_authenticator = os.urandom(16) self.packet_id_to_request_authenticator[radius_packet_id] = request_authenticator @@ -258,17 +258,21 @@ class Chewie: Returns: FullEAPStateMachine """ - return self.get_state_machine(self.packet_id_to_mac[packet_id]) + return self.get_state_machine(**self.packet_id_to_mac[packet_id]) - def get_state_machine(self, src_mac): + def get_state_machine(self, src_mac, port_id): """Gets or creates if it does not already exist an FullEAPStateMachine for the src_mac. Args: - src_mac (MACAddress): who's to get. + src_mac (MacAddress): who's to get. + port_id (MacAddress): ID of the port where the src_mac is. Returns: FullEAPStateMachine """ - sm = self.state_machines.get(src_mac, None) + port_sms = self.state_machines.get(str(port_id), None) + if port_sms is None: + self.state_machines[str(port_id)] = {} + sm = self.state_machines[str(port_id)].get(src_mac, None) if not sm: sm = FullEAPStateMachine(self.eap_output_messages, self.radius_output_messages, src_mac, self.timer_scheduler, self.auth_success, @@ -276,7 +280,7 @@ class Chewie: sm.eapRestart = True # TODO what if port is not actually enabled, but then how did they auth? 
sm.portEnabled = True - self.state_machines[src_mac] = sm + self.state_machines[str(port_id)][src_mac] = sm return sm def get_next_radius_packet_id(self): diff --git a/chewie/eap_state_machine.py b/chewie/eap_state_machine.py index aba5f5a..ac60dab 100644 --- a/chewie/eap_state_machine.py +++ b/chewie/eap_state_machine.py @@ -712,7 +712,8 @@ class FullEAPStateMachine: if self.aaaEapRespData.code == Eap.RESPONSE: self.radius_output_messages.put((self.aaaEapRespData, self.src_mac, self.aaaIdentity.identity, - self.radius_state_attribute)) + self.radius_state_attribute, + self.port_id_mac)) self.sent_count += 1 self.set_timer() self.aaaEapResp = False
We could verify that all EAP packets in a sequence come from the same port. If we have 2 hosts with the same MAC (e.g. 1 good, 1 malicious) and they try to authenticate, it could be possible to authenticate on the malicious port when the good host succeeds, if the malicious host sends an EAP packet before the RADIUS access-accept has been received and thus sets sm.port_id_mac. Basically we want to stop this. There are 2 obvious ways: 1. Make a state machine tied to a MAC and port, so in the above case there would be 2 state machines. 2. Some sort of verification logic. Option 1 looks easier to implement, and will also handle the case of 2 good hosts, where both would authenticate.
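The merged patch above implements option 1 by nesting the state-machine store as port_id -> src_mac -> state machine. Below is a minimal standalone sketch of that keying scheme; the `StateMachine` and `StateMachineStore` classes here are hypothetical stand-ins, not chewie's real `FullEAPStateMachine` constructor or `Chewie.get_state_machine` API.

```python
from collections import defaultdict


class StateMachine:
    """Stand-in for chewie's FullEAPStateMachine; the real constructor takes more arguments."""

    def __init__(self, src_mac, port_id):
        self.src_mac = src_mac
        self.port_id = port_id


class StateMachineStore:
    """Option 1: one state machine per (port, MAC) pair, stored as nested dicts."""

    def __init__(self):
        # port_id (as str) -> {src_mac -> StateMachine}
        self._machines = defaultdict(dict)

    def get(self, src_mac, port_id):
        port_sms = self._machines[str(port_id)]
        if src_mac not in port_sms:
            port_sms[src_mac] = StateMachine(src_mac, port_id)
        return port_sms[src_mac]


store = StateMachineStore()
good = store.get("12:34:56:78:9a:bc", "00:00:00:00:00:01")
bad = store.get("12:34:56:78:9a:bc", "00:00:00:00:00:02")
assert good is not bad  # the same MAC on two different ports now gets two state machines
```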
faucetsdn/chewie
diff --git a/test/test_chewie.py b/test/test_chewie.py new file mode 100644 index 0000000..931cdda --- /dev/null +++ b/test/test_chewie.py @@ -0,0 +1,76 @@ +"""Unittests for chewie/chewie.py""" + +import logging +import unittest + +from chewie.chewie import Chewie + + +def auth_handler(chewie, client_mac, port_id_mac): # pylint: disable=unused-argument + """dummy handler for successful authentications""" + print('Successful auth from MAC %s on port: %s' % (str(client_mac), str(port_id_mac))) + + +def failure_handler(chewie, client_mac, port_id_mac): # pylint: disable=unused-argument + """dummy handler for failed authentications""" + print('failure from MAC %s on port: %s' % (str(client_mac), str(port_id_mac))) + + +def logoff_handler(chewie, client_mac, port_id_mac): # pylint: disable=unused-argument + """dummy handler for logoffs""" + print('logoff from MAC %s on port: %s' % (str(client_mac), str(port_id_mac))) + + +class ChewieTestCase(unittest.TestCase): + """Main chewie.py test class""" + + def setUp(self): + logger = logging.getLogger() + + self.chewie = Chewie('lo', logger, + auth_handler, failure_handler, logoff_handler, + '127.0.0.1', 1812, 'SECRET', + '44:44:44:44:44:44') + + def test_get_sm(self): + """Tests Chewie.get_state_machine()""" + self.assertEqual(len(self.chewie.state_machines), 0) + # creates the sm if it doesn't exist + sm = self.chewie.get_state_machine('12:34:56:78:9a:bc', # pylint: disable=invalid-name + '00:00:00:00:00:01') + + self.assertEqual(len(self.chewie.state_machines), 1) + + self.assertIs(sm, self.chewie.get_state_machine('12:34:56:78:9a:bc', + '00:00:00:00:00:01')) + + self.assertIsNot(sm, self.chewie.get_state_machine('12:34:56:78:9a:bc', + '00:00:00:00:00:02')) + self.assertIsNot(sm, self.chewie.get_state_machine('ab:cd:ef:12:34:56', + '00:00:00:00:00:01')) + + # 2 ports + self.assertEqual(len(self.chewie.state_machines), 2) + # port 1 has 2 macs + self.assertEqual(len(self.chewie.state_machines['00:00:00:00:00:01']), 2) + # port 2 has 1 mac + self.assertEqual(len(self.chewie.state_machines['00:00:00:00:00:02']), 1) + + def test_get_sm_by_packet_id(self): + """Tests Chewie.get_sm_by_packet_id()""" + self.chewie.packet_id_to_mac[56] = {'src_mac': '12:34:56:78:9a:bc', + 'port_id': '00:00:00:00:00:01'} + sm = self.chewie.get_state_machine('12:34:56:78:9a:bc', # pylint: disable=invalid-name + '00:00:00:00:00:01') + + self.assertIs(self.chewie.get_state_machine_from_radius_packet_id(56), + sm) + with self.assertRaises(KeyError): + self.chewie.get_state_machine_from_radius_packet_id(20) + + def test_get_next_radius_packet_id(self): + """Tests Chewie.get_next_radius_packet_id()""" + for i in range(0, 260): + _i = i % 256 + self.assertEqual(self.chewie.get_next_radius_packet_id(), + _i)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 2 }
0.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "coverage", "pylint", "pytest-cov", "pytype" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
astroid==3.3.9 attrs==25.3.0 -e git+https://github.com/faucetsdn/chewie.git@79e0c35e0089af91349fcd233d6419a25b28c25a#egg=chewie coverage==7.8.0 dill==0.3.9 dnspython==2.7.0 eventlet==0.39.1 exceptiongroup==1.2.2 greenlet==3.1.1 immutabledict==4.2.1 importlab==0.8.1 iniconfig==2.1.0 isort==6.0.1 Jinja2==3.1.6 libcst==1.7.0 MarkupSafe==3.0.2 mccabe==0.7.0 msgspec==0.19.0 netils==0.0.1 networkx==3.1 ninja==1.11.1.4 packaging==24.2 platformdirs==4.3.7 pluggy==1.5.0 pycnite==2024.7.31 pydot==3.0.4 pylint==3.3.6 pyparsing==3.2.3 pytest==8.3.5 pytest-cov==6.0.0 pytype==2024.9.13 PyYAML==6.0.2 tabulate==0.9.0 toml==0.10.2 tomli==2.2.1 tomlkit==0.13.2 typing_extensions==4.13.0
name: chewie channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - astroid==3.3.9 - attrs==25.3.0 - coverage==7.8.0 - dill==0.3.9 - dnspython==2.7.0 - eventlet==0.39.1 - exceptiongroup==1.2.2 - greenlet==3.1.1 - immutabledict==4.2.1 - importlab==0.8.1 - iniconfig==2.1.0 - isort==6.0.1 - jinja2==3.1.6 - libcst==1.7.0 - markupsafe==3.0.2 - mccabe==0.7.0 - msgspec==0.19.0 - netils==0.0.1 - networkx==3.1 - ninja==1.11.1.4 - packaging==24.2 - platformdirs==4.3.7 - pluggy==1.5.0 - pycnite==2024.7.31 - pydot==3.0.4 - pylint==3.3.6 - pyparsing==3.2.3 - pytest==8.3.5 - pytest-cov==6.0.0 - pytype==2024.9.13 - pyyaml==6.0.2 - tabulate==0.9.0 - toml==0.10.2 - tomli==2.2.1 - tomlkit==0.13.2 - typing-extensions==4.13.0 prefix: /opt/conda/envs/chewie
[ "test/test_chewie.py::ChewieTestCase::test_get_sm", "test/test_chewie.py::ChewieTestCase::test_get_sm_by_packet_id" ]
[]
[ "test/test_chewie.py::ChewieTestCase::test_get_next_radius_packet_id" ]
[]
Apache License 2.0
3,089
[ "chewie/eap_state_machine.py", "chewie/chewie.py" ]
[ "chewie/eap_state_machine.py", "chewie/chewie.py" ]
Rambatino__CHAID-88
17a4cbaed359b644e7ef34a93b44c38a2e24874e
2018-09-18 04:05:05
17a4cbaed359b644e7ef34a93b44c38a2e24874e
diff --git a/CHAID/tree.py b/CHAID/tree.py index 17e192a..86508e2 100644 --- a/CHAID/tree.py +++ b/CHAID/tree.py @@ -221,7 +221,7 @@ class Tree(object): def print_tree(self): """ prints the tree out """ - self.to_tree().show() + self.to_tree().show(line_type='ascii') def node_predictions(self): """ Determines which rows fall into which node """ @@ -264,7 +264,7 @@ class Tree(object): """ if isinstance(self.observed, ContinuousColumn): return ValueError("Cannot make model predictions on a continuous scale") - pred = np.zeros(self.data_size) + pred = np.zeros(self.data_size).astype('object') for node in self: if node.is_terminal: pred[node.indices] = max(node.members, key=node.members.get) @@ -275,4 +275,5 @@ class Tree(object): Calculates the fraction of risk associated with the model predictions """ - return 1 - float((self.model_predictions() == self.observed.arr).sum()) / self.data_size + sub_observed = np.array([self.observed.metadata[i] for i in self.observed.arr]) + return 1 - float((self.model_predictions() == sub_observed).sum()) / self.data_size
model_predictions fails with categorical dependant variables If the dependent variable is categorical, where categories are strings, the method model_predictions fails. The problem is that the the pred array is initialized as: pred = np.zeros(self.data_size) and that enforces predictions to be numerical. In order to solve that, the model_predictions could be rewritten to something like the following: pred = [None] * self.data_size for node in self: if node.is_terminal: max_val = max(node.members, key=node.members.get) for i in node.indices: pred[i] = max_val return pred Best regards
Rambatino/CHAID
diff --git a/tests/test_tree.py b/tests/test_tree.py index fcb5b60..776ab6c 100644 --- a/tests/test_tree.py +++ b/tests/test_tree.py @@ -570,3 +570,41 @@ class TestContinuousDependentVariable(TestCase): tree = CHAID.Tree.from_numpy(self.ndarr, self.normal_arr, alpha_merge=0.999, max_depth=5, min_child_node_size=11, dep_variable_type='continuous', weights=self.wt) assert round(tree.tree_store[0].p, 4) == 0.3681 assert len(tree.tree_store) == 5 + + +class TestStringCategoricalDependentVariableForModelPrediction(TestCase): + """ Test to make sure we can handle string categorical dependent varaibles """ + def setUp(self): + """ + Setup data for test case + """ + self.region = np.array([ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 2, 3, 2, 2, 2, + 3, 2, 4, 4, 2, 4, 4, 4, 2, 2, 2, 2, 3, 2, 3, 2, 3, 2, 2, 2]) + self.age = np.array([ + 3, 4, 4, 3, 2, 4, 2, 3, 3, 2, 2, 3, 4, 3, 4, 2, 2, 3, 2, 3, + 2, 4, 4, 3, 2, 3, 1, 2, 4, 4, 3, 4, 4, 3, 2, 4, 2, 3, 3, 2, + 2, 3, 4, 3, 4, 2, 2, 3, 2, 3, 2, 4, 4, 3, 2, 3, 1, 2, 4, 4]) + self.gender = np.array([ + 1, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 2, + 2, 2, 2, 1, 2, 1, 2, 1, 2, 2, 1, 2, 1, 2, 2, 2, 2, 2, 1, 2, + 2, 2, 2, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 1, 2, 1, 2, 1, 2, 2]) + self.lover = np.array(['lover'] * 25 + ['non-lover'] * 35) + self.tree = CHAID.Tree.from_numpy( + np.vstack([self.region, self.age, self.gender]).transpose(), + self.lover, + alpha_merge=0.05 + ) + + def test_string_dependent_categorical_variable_for_model_prediction(self): + assert (self.tree.model_predictions() == np.array(['lover'] * 30 + ['non-lover'] * 30)).all() + + def test_risk_still_works(self): + int_lover = np.array([1] * 25 + [0] * 35) + other_tree = CHAID.Tree.from_numpy( + np.vstack([self.region, self.age, self.gender]).transpose(), + int_lover, + alpha_merge=0.05 + ) + assert self.tree.risk() == other_tree.risk()
{ "commit_name": "merge_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 1 }
5.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[test]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-cov", "tox", "tox-pyenv", "detox" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 -e git+https://github.com/Rambatino/CHAID.git@17a4cbaed359b644e7ef34a93b44c38a2e24874e#egg=CHAID charset-normalizer==2.0.12 codecov==2.1.13 coverage==6.2 Cython==3.0.12 detox==0.19 distlib==0.3.9 dnspython==2.2.1 eventlet==0.33.3 filelock==3.4.1 greenlet==2.0.2 idna==3.10 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work numpy==1.19.5 packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pandas==1.1.5 platformdirs==2.4.0 pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 pytest-cov==4.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 requests==2.27.1 savReaderWriter==3.4.2 scipy==1.5.4 six==1.17.0 toml @ file:///tmp/build/80754af9/toml_1616166611790/work tomli==1.2.3 tox==3.6.1 tox-pyenv==1.1.0 treelib==1.7.1 typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work urllib3==1.26.20 virtualenv==20.17.1 zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: CHAID channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - charset-normalizer==2.0.12 - codecov==2.1.13 - coverage==6.2 - cython==3.0.12 - detox==0.19 - distlib==0.3.9 - dnspython==2.2.1 - eventlet==0.33.3 - filelock==3.4.1 - greenlet==2.0.2 - idna==3.10 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - numpy==1.19.5 - pandas==1.1.5 - platformdirs==2.4.0 - pytest-cov==4.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - requests==2.27.1 - savreaderwriter==3.4.2 - scipy==1.5.4 - six==1.17.0 - tomli==1.2.3 - tox==3.6.1 - tox-pyenv==1.1.0 - treelib==1.7.1 - urllib3==1.26.20 - virtualenv==20.17.1 prefix: /opt/conda/envs/CHAID
[ "tests/test_tree.py::TestStringCategoricalDependentVariableForModelPrediction::test_risk_still_works", "tests/test_tree.py::TestStringCategoricalDependentVariableForModelPrediction::test_string_dependent_categorical_variable_for_model_prediction" ]
[]
[ "tests/test_tree.py::TestClassificationRules::test_all_paths", "tests/test_tree.py::TestClassificationRules::test_single_path", "tests/test_tree.py::test_best_split_unique_values", "tests/test_tree.py::test_spliting_identical_values", "tests/test_tree.py::test_best_split_with_combination", "tests/test_tree.py::test_best_split_with_combination_combining_if_too_small", "tests/test_tree.py::test_new_columns_constructor", "tests/test_tree.py::TestSurrogate::test_surrgate_detection", "tests/test_tree.py::TestSurrogate::test_surrogate_default_min_p", "tests/test_tree.py::test_p_and_chi_values", "tests/test_tree.py::test_p_and_chi_values_when_weighting_applied", "tests/test_tree.py::test_correct_dof", "tests/test_tree.py::test_zero_subbed_weighted_ndarry", "tests/test_tree.py::test_min_child_node_size_is_30", "tests/test_tree.py::test_to_tree_returns_a_tree", "tests/test_tree.py::test_max_depth_returns_correct_invalid_message", "tests/test_tree.py::test_node_predictions", "tests/test_tree.py::TestTreeGenerated::test_deletion", "tests/test_tree.py::TestTreeGenerated::test_iter", "tests/test_tree.py::TestTreeGenerated::test_modification", "tests/test_tree.py::TestComplexStructures::test_p_and_chi_values_selectivity", "tests/test_tree.py::TestBugFixes::test_incorrect_weighted_counts", "tests/test_tree.py::TestBugFixes::test_splits_shouldnt_carry_on_splitting_below_min_child_node_size", "tests/test_tree.py::TestBugFixes::test_unicode_printing", "tests/test_tree.py::TestStoppingRules::test_min_child_node_size_does_not_stop_for_unweighted_case", "tests/test_tree.py::TestStoppingRules::test_min_child_node_size_does_not_stop_for_weighted_case", "tests/test_tree.py::TestStoppingRules::test_min_child_node_size_does_stop_for_unweighted_case", "tests/test_tree.py::TestStoppingRules::test_min_child_node_size_does_stop_for_weighted_case", "tests/test_tree.py::TestContinuousDependentVariable::test_bartlett_significance", "tests/test_tree.py::TestContinuousDependentVariable::test_continuous_dependent_variable", "tests/test_tree.py::TestContinuousDependentVariable::test_continuous_dependent_variable_with_weighting" ]
[]
Apache License 2.0
3,090
[ "CHAID/tree.py" ]
[ "CHAID/tree.py" ]
scieloorg__articles_meta-144
9f784d5eb030514da9891cea06c69013d2ae7913
2018-09-18 11:31:46
dd0f291b6bf6d43336fa0ca6fd6d1cebc6990829
diff --git a/articlemeta/export_rsps.py b/articlemeta/export_rsps.py index 6c93b1e..a64585a 100644 --- a/articlemeta/export_rsps.py +++ b/articlemeta/export_rsps.py @@ -1095,12 +1095,12 @@ class XMLArticleMetaCountsPipe(plumber.Pipe): try: startpage = int(raw.start_page) - except ValueError: + except (ValueError, TypeError): startpage = None try: endpage = int(raw.end_page) - except ValueError: + except (ValueError, TypeError): endpage = None pages = 0
Handle the TypeError exception for raw.start_page. Production log:
```
9/17/2018 8:33:21 PM yield self.transform(data)
9/17/2018 8:33:21 PM File "/usr/local/lib/python3.5/site-packages/articlemeta/export_rsps.py", line 1097, in transform
9/17/2018 8:33:21 PM startpage = int(raw.start_page)
9/17/2018 8:33:21 PM TypeError: int() argument must be a string, a bytes-like object or a number, not 'NoneType'
```
Handle the exception of type TypeError.
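A minimal illustration of the pattern the patch above applies; the helper name to_page_number exists only for this sketch, the real code inlines the try/except around raw.start_page and raw.end_page:

```python
def to_page_number(value):
    # int() raises ValueError for non-numeric strings and TypeError for None,
    # which is the crash seen in the production log above.
    try:
        return int(value)
    except (ValueError, TypeError):
        return None


assert to_page_number("12") == 12
assert to_page_number("v. 3") is None  # ValueError
assert to_page_number(None) is None    # TypeError
```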
scieloorg/articles_meta
diff --git a/tests/test_export_rsps.py b/tests/test_export_rsps.py index d0a586c..b36bc6a 100644 --- a/tests/test_export_rsps.py +++ b/tests/test_export_rsps.py @@ -1456,6 +1456,24 @@ class ExportTests(unittest.TestCase): self.assertEqual(0, int(count)) + def test_xml_article_meta_counts_pages_pages_is_none_pipe(self): + fakexylosearticle = Article({'article': {}}) + + pxml = ET.Element('article') + pxml.append(ET.Element('front')) + + front = pxml.find('front') + front.append(ET.Element('article-meta')) + + data = [fakexylosearticle, pxml] + + xmlarticle = export_rsps.XMLArticleMetaCountsPipe() + raw, xml = xmlarticle.transform(data) + + count = xml.find('./front/article-meta/counts/page-count').get('count') + + self.assertEqual(0, int(count)) + def test_xml_article_meta_counts_pages_invalid_pages_first_gt_last_pipe(self): pxml = ET.Element('article') pxml.append(ET.Element('front'))
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 0, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 1 }
1.37
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio" ], "pre_install": [ "apt-get update", "apt-get install -y gcc libxml2-dev libxslt-dev git" ], "python": "3.5", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
-e git+https://github.com/scieloorg/articles_meta.git@9f784d5eb030514da9891cea06c69013d2ae7913#egg=articlemeta attrs==22.2.0 certifi==2018.1.18 chardet==3.0.4 coverage==6.2 crossrefapi==1.3.0 execnet==1.9.0 hupper==1.0 idna==2.6 importlib-metadata==4.8.3 iniconfig==1.1.1 legendarium==2.0.2 lxml==4.1.1 packaging==21.3 PasteDeploy==1.5.2 picles.plumber==0.11 plaster==1.0 plaster-pastedeploy==0.4.2 pluggy==1.0.0 ply==3.10 py==1.11.0 pymongo==3.6.1 pyparsing==3.1.4 pyramid==1.9.1 pytest==7.0.1 pytest-asyncio==0.16.0 pytest-cov==4.0.0 pytest-mock==3.6.1 pytest-xdist==3.0.2 raven==6.5.0 repoze.lru==0.7 requests==2.18.4 thriftpy==0.3.9 -e git+https://github.com/scieloorg/thriftpy-wrap@00e4c3ea4f20bfb2414eef7690d769bde308c0a4#egg=thriftpywrap tomli==1.2.3 translationstring==1.4 typing_extensions==4.1.1 urllib3==1.22 venusian==1.1.0 WebOb==1.8.0rc1 xylose==1.34.0 zipp==3.6.0 zope.deprecation==4.3.0 zope.interface==4.4.3
name: articles_meta channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - certifi==2018.1.18 - chardet==3.0.4 - coverage==6.2 - crossrefapi==1.3.0 - execnet==1.9.0 - hupper==1.0 - idna==2.6 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - legendarium==2.0.2 - lxml==4.1.1 - packaging==21.3 - pastedeploy==1.5.2 - picles-plumber==0.11 - plaster==1.0 - plaster-pastedeploy==0.4.2 - pluggy==1.0.0 - ply==3.10 - py==1.11.0 - pymongo==3.6.1 - pyparsing==3.1.4 - pyramid==1.9.1 - pytest==7.0.1 - pytest-asyncio==0.16.0 - pytest-cov==4.0.0 - pytest-mock==3.6.1 - pytest-xdist==3.0.2 - raven==6.5.0 - repoze-lru==0.7 - requests==2.18.4 - thriftpy==0.3.9 - tomli==1.2.3 - translationstring==1.4 - typing-extensions==4.1.1 - urllib3==1.22 - venusian==1.1.0 - webob==1.8.0rc1 - xylose==1.34.0 - zipp==3.6.0 - zope-deprecation==4.3.0 - zope-interface==4.4.3 prefix: /opt/conda/envs/articles_meta
[ "tests/test_export_rsps.py::ExportTests::test_xml_article_meta_counts_pages_pages_is_none_pipe" ]
[]
[ "tests/test_export_rsps.py::XMLCitationTests::test_xml_citation_article_title_pipe", "tests/test_export_rsps.py::XMLCitationTests::test_xml_citation_article_title_without_data_pipe", "tests/test_export_rsps.py::XMLCitationTests::test_xml_citation_date_pipe", "tests/test_export_rsps.py::XMLCitationTests::test_xml_citation_date_with_year_and_month_and_day_pipe", "tests/test_export_rsps.py::XMLCitationTests::test_xml_citation_date_with_year_and_month_pipe", "tests/test_export_rsps.py::XMLCitationTests::test_xml_citation_date_without_data_pipe", "tests/test_export_rsps.py::XMLCitationTests::test_xml_citation_element_citation_pipe", "tests/test_export_rsps.py::XMLCitationTests::test_xml_citation_fpage_pipe", "tests/test_export_rsps.py::XMLCitationTests::test_xml_citation_fpage_without_data_pipe", "tests/test_export_rsps.py::XMLCitationTests::test_xml_citation_id_as_str_pipe", "tests/test_export_rsps.py::XMLCitationTests::test_xml_citation_issue_pipe", "tests/test_export_rsps.py::XMLCitationTests::test_xml_citation_issue_without_data_pipe", "tests/test_export_rsps.py::XMLCitationTests::test_xml_citation_lpage_pipe", "tests/test_export_rsps.py::XMLCitationTests::test_xml_citation_lpage_without_data_pipe", "tests/test_export_rsps.py::XMLCitationTests::test_xml_citation_person_group_given_names_pipe", "tests/test_export_rsps.py::XMLCitationTests::test_xml_citation_person_group_len_pipe", "tests/test_export_rsps.py::XMLCitationTests::test_xml_citation_person_group_surname_pipe", "tests/test_export_rsps.py::XMLCitationTests::test_xml_citation_person_group_without_data_pipe", "tests/test_export_rsps.py::XMLCitationTests::test_xml_citation_setup_pipe", "tests/test_export_rsps.py::XMLCitationTests::test_xml_citation_source_pipe", "tests/test_export_rsps.py::XMLCitationTests::test_xml_citation_source_without_data_pipe", "tests/test_export_rsps.py::XMLCitationTests::test_xml_citation_volume_pipe", "tests/test_export_rsps.py::XMLCitationTests::test_xml_citation_volume_without_data_pipe", "tests/test_export_rsps.py::ExportTests::test_setuppipe_attributes_dtd_version", "tests/test_export_rsps.py::ExportTests::test_setuppipe_attributes_specific_use", "tests/test_export_rsps.py::ExportTests::test_setuppipe_element_name", "tests/test_export_rsps.py::ExportTests::test_xml_article_body_without_data_pipe", "tests/test_export_rsps.py::ExportTests::test_xml_article_meta_article_id_doi_pipe", "tests/test_export_rsps.py::ExportTests::test_xml_article_meta_article_id_doi_without_data_pipe", "tests/test_export_rsps.py::ExportTests::test_xml_article_meta_article_id_publisher_pipe", "tests/test_export_rsps.py::ExportTests::test_xml_article_meta_counts_citations_pipe", "tests/test_export_rsps.py::ExportTests::test_xml_article_meta_counts_pages_invalid_pages_first_gt_last_pipe", "tests/test_export_rsps.py::ExportTests::test_xml_article_meta_counts_pages_invalid_pages_pipe", "tests/test_export_rsps.py::ExportTests::test_xml_article_meta_counts_pages_pipe", "tests/test_export_rsps.py::ExportTests::test_xml_article_meta_permission_pipe", "tests/test_export_rsps.py::ExportTests::test_xml_citations_count_pipe", "tests/test_export_rsps.py::ExportTests::test_xml_citations_without_data_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_affiliation_address_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_affiliation_country_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_affiliation_index_pipe", 
"tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_affiliation_institution_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_affiliation_without_data_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_article_categories_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_article_categories_without_data_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_contrib_group_author_names_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_contrib_group_author_roles_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_contrib_group_author_without_xrefs_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_contrib_group_author_xrefs_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_contrib_group_without_data_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_general_info_elocation_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_general_info_first_page_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_general_info_issue_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_general_info_last_page_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_general_info_pub_month_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_general_info_pub_year_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_general_info_suppl__vol_10_issue_1_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_general_info_suppl__vol_10_issue_20_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_general_info_suppl__vol_10_issue_20_suppl_10_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_general_info_suppl_issue_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_general_info_suppl_vol_0_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_general_info_suppl_vol_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_general_info_volume_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_general_info_without_elocation_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_general_info_without_first_page_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_general_info_without_issue_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_general_info_without_last_page_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_general_info_without_volume_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_keywords_languages_data_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_keywords_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_keywords_without_data_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_original_language_abstract_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_original_language_abstract_without_data_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_title_group_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_translated_abstract_without_data_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_translated_title_group_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_meta_translated_title_group_without_data_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlarticle_pipe", 
"tests/test_export_rsps.py::ExportTests::test_xmlclose_pipe", "tests/test_export_rsps.py::ExportTests::test_xmlfront_pipe", "tests/test_export_rsps.py::ExportTests::test_xmljournal_id_pipe", "tests/test_export_rsps.py::ExportTests::test_xmljournal_meta_abbrev_journal_title_pipe", "tests/test_export_rsps.py::ExportTests::test_xmljournal_meta_electronic_issn_pipe", "tests/test_export_rsps.py::ExportTests::test_xmljournal_meta_journal_title_group_pipe", "tests/test_export_rsps.py::ExportTests::test_xmljournal_meta_print_issn_pipe", "tests/test_export_rsps.py::ExportTests::test_xmljournal_meta_publisher_pipe" ]
[]
BSD 2-Clause "Simplified" License
3,091
[ "articlemeta/export_rsps.py" ]
[ "articlemeta/export_rsps.py" ]
wright-group__WrightTools-748
4cf127e9d431265dad6f42c48b5be05bc36e3cb7
2018-09-18 15:15:28
6e0c301b1f703527709a2669bbde785255254239
diff --git a/WrightTools/kit/_array.py b/WrightTools/kit/_array.py index dec8f19..e9ae20f 100644 --- a/WrightTools/kit/_array.py +++ b/WrightTools/kit/_array.py @@ -243,18 +243,52 @@ def share_nans(*arrs) -> tuple: return tuple([a + nans for a in arrs]) -def smooth_1D(arr, n=10) -> np.ndarray: - """Smooth 1D data by 'running average'. +def smooth_1D(arr, n=10, smooth_type="flat") -> np.ndarray: + """Smooth 1D data using a window function. + + Edge effects will be present. Parameters ---------- - n : int - number of points to average + arr : array_like + Input array, 1D. + n : int (optional) + Window length. + smooth_type : {'flat', 'hanning', 'hamming', 'bartlett', 'blackman'} (optional) + Type of window function to convolve data with. + 'flat' window will produce a moving average smoothing. + + Returns + ------- + array_like + Smoothed 1D array. """ - for i in range(n, len(arr) - n): - window = arr[i - n : i + n].copy() - arr[i] = window.mean() - return arr + + # check array input + if arr.ndim != 1: + raise wt_exceptions.DimensionalityError(1, arr.ndim) + if arr.size < n: + message = "Input array size must be larger than window size." + raise wt_exceptions.ValueError(message) + if n < 3: + return arr + # construct window array + if smooth_type == "flat": + w = np.ones(n, dtype=arr.dtype) + elif smooth_type == "hanning": + w = np.hanning(n) + elif smooth_type == "hamming": + w = np.hamming(n) + elif smooth_type == "bartlett": + w = np.bartlett(n) + elif smooth_type == "blackman": + w = np.blackman(n) + else: + message = "Given smooth_type, {0}, not available.".format(str(smooth_type)) + raise wt_exceptions.ValueError(message) + # convolve reflected array with window function + out = np.convolve(w / w.sum(), arr, mode="same") + return out def svd(a, i=None) -> tuple:
more robust clip test The current test for clip fails regularly, but stochastically. The function itself works, but the test sometimes ends up clipping all values.
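The accompanying test change (in the test patch further down this record) removes the randomness by pinning the clip threshold to a value derived from the data. A small sketch of that idea, using numpy's clip as a stand-in for the WrightTools Dataset.clip method actually under test:

```python
import numpy as np

signal = np.linspace(-1.0, 1.0, 11)

# Deterministic threshold, mirroring the updated test:
# half of the maximum offset by the minimum, instead of a random fraction
# that changes on every run.
new_max = 0.5 * signal.max() + signal.min()

clipped = np.clip(signal, None, new_max)
assert clipped.max() <= new_max
```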
wright-group/WrightTools
diff --git a/tests/dataset/clip.py b/tests/dataset/clip.py index 705894e..c1a0a89 100644 --- a/tests/dataset/clip.py +++ b/tests/dataset/clip.py @@ -3,9 +3,6 @@ # --- import -------------------------------------------------------------------------------------- - -import random - import WrightTools as wt from WrightTools import datasets @@ -16,7 +13,7 @@ from WrightTools import datasets def test_w1_wa(): p = datasets.PyCMDS.w1_wa_000 data = wt.data.from_PyCMDS(p) - new_max = random.random() * 0.5 * data.array_signal.max() + data.array_signal.min() + new_max = 0.5 * data.array_signal.max() + data.array_signal.min() data.array_signal.clip(max=new_max) assert data.array_signal.max() <= new_max data.close() diff --git a/tests/kit/smooth_1D.py b/tests/kit/smooth_1D.py new file mode 100644 index 0000000..5e4e9b4 --- /dev/null +++ b/tests/kit/smooth_1D.py @@ -0,0 +1,35 @@ +"""Test kit.smooth_1D.""" + + +# --- import -------------------------------------------------------------------------------------- + + +import numpy as np + +import WrightTools as wt + + +# --- test ---------------------------------------------------------------------------------------- + + +def test_basic_smoothing_functionality(): + # create arrays + x = np.linspace(0, 10, 1000) + y = np.sin(x) + np.random.seed(seed=12) + r = np.random.rand(1000) - .5 + yr = y + r + # iterate through window types + windows = ["flat", "hanning", "hamming", "bartlett", "blackman"] + for w in windows: + out = wt.kit.smooth_1D(yr, n=101, smooth_type=w) + check_arr = out - y + check_arr = check_arr[50:-50] # get rid of edge effects + assert np.allclose(check_arr, 0, rtol=.2, atol=.2) + + +# --- run ----------------------------------------------------------------------------------------- + + +if __name__ == "__main__": + test_basic_smoothing_functionality()
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 3, "test_score": 1 }, "num_modified_files": 1 }
3.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y libfreetype6-dev libopenblas-dev" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
black==25.1.0 cfgv==3.4.0 click==8.1.8 contourpy==1.3.0 coverage==7.8.0 cycler==0.12.1 distlib==0.3.9 exceptiongroup==1.2.2 filelock==3.18.0 fonttools==4.56.0 h5py==3.13.0 identify==2.6.9 imageio==2.37.0 importlib_resources==6.5.2 iniconfig==2.1.0 kiwisolver==1.4.7 lazy_loader==0.4 matplotlib==3.9.4 mypy-extensions==1.0.0 networkx==3.2.1 nodeenv==1.9.1 numexpr==2.10.2 numpy==2.0.2 packaging==24.2 pathspec==0.12.1 pillow==11.1.0 platformdirs==4.3.7 pluggy==1.5.0 pre_commit==4.2.0 pydocstyle==6.3.0 pyparsing==3.2.3 pytest==8.3.5 pytest-cov==6.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==6.0.2 scikit-image==0.24.0 scipy==1.13.1 six==1.17.0 snowballstemmer==2.2.0 swebench_matterhorn @ file:///swebench_matterhorn tidy_headers==1.0.4 tifffile==2024.8.30 tomli==2.2.1 typing_extensions==4.13.0 virtualenv==20.29.3 -e git+https://github.com/wright-group/WrightTools.git@4cf127e9d431265dad6f42c48b5be05bc36e3cb7#egg=WrightTools zipp==3.21.0
name: WrightTools channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - black==25.1.0 - cfgv==3.4.0 - click==8.1.8 - contourpy==1.3.0 - coverage==7.8.0 - cycler==0.12.1 - distlib==0.3.9 - exceptiongroup==1.2.2 - filelock==3.18.0 - fonttools==4.56.0 - h5py==3.13.0 - identify==2.6.9 - imageio==2.37.0 - importlib-resources==6.5.2 - iniconfig==2.1.0 - kiwisolver==1.4.7 - lazy-loader==0.4 - matplotlib==3.9.4 - mypy-extensions==1.0.0 - networkx==3.2.1 - nodeenv==1.9.1 - numexpr==2.10.2 - numpy==2.0.2 - packaging==24.2 - pathspec==0.12.1 - pillow==11.1.0 - platformdirs==4.3.7 - pluggy==1.5.0 - pre-commit==4.2.0 - pydocstyle==6.3.0 - pyparsing==3.2.3 - pytest==8.3.5 - pytest-cov==6.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==6.0.2 - scikit-image==0.24.0 - scipy==1.13.1 - six==1.17.0 - snowballstemmer==2.2.0 - swebench-matterhorn==0.0.0 - tidy-headers==1.0.4 - tifffile==2024.8.30 - tomli==2.2.1 - typing-extensions==4.13.0 - virtualenv==20.29.3 - zipp==3.21.0 prefix: /opt/conda/envs/WrightTools
[ "tests/kit/smooth_1D.py::test_basic_smoothing_functionality" ]
[ "tests/dataset/clip.py::test_w1_wa" ]
[]
[]
MIT License
3,092
[ "WrightTools/kit/_array.py" ]
[ "WrightTools/kit/_array.py" ]
tox-dev__tox-1000
cf6afcecaca22df7b509facaea43c09a15570f75
2018-09-18 20:57:38
cf6afcecaca22df7b509facaea43c09a15570f75
codecov[bot]: # [Codecov](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=h1) Report > Merging [#1000](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=desc) into [master](https://codecov.io/gh/tox-dev/tox/commit/70bfb8300e604d8b7297a9458608e326934d49de?src=pr&el=desc) will **decrease** coverage by `5.53%`. > The diff coverage is `89.47%`. [![Impacted file tree graph](https://codecov.io/gh/tox-dev/tox/pull/1000/graphs/tree.svg?width=650&token=DYodAwDCZ5&height=150&src=pr)](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #1000 +/- ## ========================================== - Coverage 69.78% 64.25% -5.54% ========================================== Files 14 14 Lines 3406 2960 -446 Branches 453 359 -94 ========================================== - Hits 2377 1902 -475 - Misses 965 975 +10 - Partials 64 83 +19 ``` | [Impacted Files](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [src/tox/config.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC9jb25maWcucHk=) | `57.95% <ø> (-7.62%)` | :arrow_down: | | [src/tox/package.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC9wYWNrYWdlLnB5) | `51.28% <ø> (-7.64%)` | :arrow_down: | | [src/tox/session.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC9zZXNzaW9uLnB5) | `74.86% <50%> (-2.87%)` | :arrow_down: | | [src/tox/venv.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC92ZW52LnB5) | `73.69% <94.11%> (-2.19%)` | :arrow_down: | | [src/tox/interpreters.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC9pbnRlcnByZXRlcnMucHk=) | `67.42% <0%> (-15.31%)` | :arrow_down: | | [src/tox/\_quickstart.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC9fcXVpY2tzdGFydC5weQ==) | `31.28% <0%> (-11.58%)` | :arrow_down: | | [src/tox/exception.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC9leGNlcHRpb24ucHk=) | `69.04% <0%> (-9.68%)` | :arrow_down: | | [src/tox/constants.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC9jb25zdGFudHMucHk=) | `53.48% <0%> (-6.09%)` | :arrow_down: | | ... and [5 more](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree-more) | | ------ [Continue to review full report at Codecov](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=footer). Last update [70bfb83...d1d646f](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments). codecov[bot]: # [Codecov](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=h1) Report > Merging [#1000](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=desc) into [master](https://codecov.io/gh/tox-dev/tox/commit/70bfb8300e604d8b7297a9458608e326934d49de?src=pr&el=desc) will **increase** coverage by `21.9%`. > The diff coverage is `84.61%`. 
[![Impacted file tree graph](https://codecov.io/gh/tox-dev/tox/pull/1000/graphs/tree.svg?width=650&token=DYodAwDCZ5&height=150&src=pr)](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #1000 +/- ## ========================================== + Coverage 69.78% 91.68% +21.9% ========================================== Files 14 14 Lines 3406 2563 -843 Branches 453 431 -22 ========================================== - Hits 2377 2350 -27 + Misses 965 141 -824 - Partials 64 72 +8 ``` | [Impacted Files](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [src/tox/package.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC9wYWNrYWdlLnB5) | `89.34% <0%> (+30.42%)` | :arrow_up: | | [src/tox/config.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC9jb25maWcucHk=) | `96.07% <100%> (+30.5%)` | :arrow_up: | | [src/tox/session.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC9zZXNzaW9uLnB5) | `91.36% <50%> (+13.63%)` | :arrow_up: | | [src/tox/venv.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC92ZW52LnB5) | `92.63% <95%> (+16.74%)` | :arrow_up: | | [src/tox/interpreters.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC9pbnRlcnByZXRlcnMucHk=) | `69.76% <0%> (-12.97%)` | :arrow_down: | | [src/tox/\_pytestplugin.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC9fcHl0ZXN0cGx1Z2luLnB5) | `91.93% <0%> (+11.44%)` | :arrow_up: | | [src/tox/result.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC9yZXN1bHQucHk=) | `96.07% <0%> (+15.75%)` | :arrow_up: | | [src/tox/exception.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC9leGNlcHRpb24ucHk=) | `100% <0%> (+21.27%)` | :arrow_up: | | ... and [5 more](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree-more) | | ------ [Continue to review full report at Codecov](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=footer). Last update [70bfb83...d1d646f](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments). codecov[bot]: # [Codecov](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=h1) Report > Merging [#1000](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=desc) into [master](https://codecov.io/gh/tox-dev/tox/commit/70bfb8300e604d8b7297a9458608e326934d49de?src=pr&el=desc) will **decrease** coverage by `1.17%`. > The diff coverage is `81.48%`. 
[![Impacted file tree graph](https://codecov.io/gh/tox-dev/tox/pull/1000/graphs/tree.svg?width=650&token=DYodAwDCZ5&height=150&src=pr)](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #1000 +/- ## ========================================== - Coverage 69.78% 68.61% -1.18% ========================================== Files 14 14 Lines 3406 3435 +29 Branches 453 458 +5 ========================================== - Hits 2377 2357 -20 - Misses 965 1012 +47 - Partials 64 66 +2 ``` | [Impacted Files](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [src/tox/package.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC9wYWNrYWdlLnB5) | `57.67% <0%> (-1.25%)` | :arrow_down: | | [src/tox/config.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC9jb25maWcucHk=) | `64.95% <100%> (-0.62%)` | :arrow_down: | | [src/tox/session.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC9zZXNzaW9uLnB5) | `77.41% <50%> (-0.32%)` | :arrow_down: | | [src/tox/venv.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC92ZW52LnB5) | `75.34% <90.47%> (-0.55%)` | :arrow_down: | | [src/tox/interpreters.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC9pbnRlcnByZXRlcnMucHk=) | `64.74% <0%> (-17.99%)` | :arrow_down: | ------ [Continue to review full report at Codecov](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=footer). Last update [70bfb83...d1d646f](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments). codecov[bot]: # [Codecov](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=h1) Report > Merging [#1000](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=desc) into [master](https://codecov.io/gh/tox-dev/tox/commit/70bfb8300e604d8b7297a9458608e326934d49de?src=pr&el=desc) will **decrease** coverage by `1.17%`. > The diff coverage is `81.48%`. 
[![Impacted file tree graph](https://codecov.io/gh/tox-dev/tox/pull/1000/graphs/tree.svg?width=650&token=DYodAwDCZ5&height=150&src=pr)](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #1000 +/- ## ========================================== - Coverage 69.78% 68.61% -1.18% ========================================== Files 14 14 Lines 3406 3435 +29 Branches 453 458 +5 ========================================== - Hits 2377 2357 -20 - Misses 965 1012 +47 - Partials 64 66 +2 ``` | [Impacted Files](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [src/tox/package.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC9wYWNrYWdlLnB5) | `57.67% <0%> (-1.25%)` | :arrow_down: | | [src/tox/config.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC9jb25maWcucHk=) | `64.95% <100%> (-0.62%)` | :arrow_down: | | [src/tox/session.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC9zZXNzaW9uLnB5) | `77.41% <50%> (-0.32%)` | :arrow_down: | | [src/tox/venv.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC92ZW52LnB5) | `75.34% <90.47%> (-0.55%)` | :arrow_down: | | [src/tox/interpreters.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC9pbnRlcnByZXRlcnMucHk=) | `64.74% <0%> (-17.99%)` | :arrow_down: | ------ [Continue to review full report at Codecov](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=footer). Last update [70bfb83...d1d646f](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments). codecov[bot]: # [Codecov](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=h1) Report > Merging [#1000](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=desc) into [master](https://codecov.io/gh/tox-dev/tox/commit/70bfb8300e604d8b7297a9458608e326934d49de?src=pr&el=desc) will **decrease** coverage by `1.17%`. > The diff coverage is `81.48%`. 
[![Impacted file tree graph](https://codecov.io/gh/tox-dev/tox/pull/1000/graphs/tree.svg?width=650&token=DYodAwDCZ5&height=150&src=pr)](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #1000 +/- ## ========================================== - Coverage 69.78% 68.61% -1.18% ========================================== Files 14 14 Lines 3406 3435 +29 Branches 453 458 +5 ========================================== - Hits 2377 2357 -20 - Misses 965 1012 +47 - Partials 64 66 +2 ``` | [Impacted Files](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [src/tox/package.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC9wYWNrYWdlLnB5) | `57.67% <0%> (-1.25%)` | :arrow_down: | | [src/tox/config.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC9jb25maWcucHk=) | `64.95% <100%> (-0.62%)` | :arrow_down: | | [src/tox/session.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC9zZXNzaW9uLnB5) | `77.41% <50%> (-0.32%)` | :arrow_down: | | [src/tox/venv.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC92ZW52LnB5) | `75.34% <90.47%> (-0.55%)` | :arrow_down: | | [src/tox/interpreters.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC9pbnRlcnByZXRlcnMucHk=) | `64.74% <0%> (-17.99%)` | :arrow_down: | ------ [Continue to review full report at Codecov](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=footer). Last update [70bfb83...d1d646f](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments). codecov[bot]: # [Codecov](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=h1) Report > Merging [#1000](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=desc) into [master](https://codecov.io/gh/tox-dev/tox/commit/70bfb8300e604d8b7297a9458608e326934d49de?src=pr&el=desc) will **decrease** coverage by `0.21%`. > The diff coverage is `81.48%`. 
[![Impacted file tree graph](https://codecov.io/gh/tox-dev/tox/pull/1000/graphs/tree.svg?width=650&token=DYodAwDCZ5&height=150&src=pr)](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #1000 +/- ## ========================================== - Coverage 69.78% 69.57% -0.22% ========================================== Files 14 14 Lines 3406 3435 +29 Branches 453 458 +5 ========================================== + Hits 2377 2390 +13 - Misses 965 980 +15 - Partials 64 65 +1 ``` | [Impacted Files](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [src/tox/package.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC9wYWNrYWdlLnB5) | `57.67% <0%> (-1.25%)` | :arrow_down: | | [src/tox/config.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC9jb25maWcucHk=) | `65.1% <100%> (-0.47%)` | :arrow_down: | | [src/tox/session.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC9zZXNzaW9uLnB5) | `77.72% <50%> (ø)` | :arrow_up: | | [src/tox/venv.py](https://codecov.io/gh/tox-dev/tox/pull/1000/diff?src=pr&el=tree#diff-c3JjL3RveC92ZW52LnB5) | `76.26% <90.47%> (+0.38%)` | :arrow_up: | ------ [Continue to review full report at Codecov](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=footer). Last update [70bfb83...d1d646f](https://codecov.io/gh/tox-dev/tox/pull/1000?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments). 
asottile: jfc codecov SETTLE DOWN :laughing: gaborbernat: @asottile While trying to write tests for this and making them pass discovered two feature request that helped me understand what's happening - reasoning for venv recreation, - only update-ing ``.tox.config1`` when it changes, not always; And two user affecting bugs: - PEP-517 packaging - and command quoting being incorrect. And two internal bugs (thing happened to work by chance, but not by design): - python md5 being zero when there were dependencies because of variable re-usage; - and pragma no cover being ignored. As such this PR became a sort of longer one. However due to the iterative nature of these would be troublesome to split it up. As such I would appreciate to review and merge it as one.
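The two internal changes gaborbernat mentions — explaining *why* a virtual environment gets re-created, and only rewriting the stored per-environment config when it actually changed — are easiest to see in miniature. The sketch below is hypothetical and self-contained, using plain dicts and JSON in place of tox's `CreationConfig`; the file name and keys are illustrative, and only the compare-then-conditionally-write idea mirrors the diff that follows.

```python
# Hypothetical sketch: report why an environment cannot be reused, and only
# rewrite the stored config file when it differs from the live one.
import json
import os


def matches_with_reason(previous, live):
    """Return (True, None) on a match, else (False, human-readable reason)."""
    if previous is None:
        return False, "no previous config"
    for key in sorted(set(previous) | set(live)):
        if previous.get(key) != live.get(key):
            return False, "attr {} {!r}!={!r}".format(key, previous.get(key), live.get(key))
    return True, None


def finish(path, live):
    """Persist ``live`` only if it differs from what is already stored."""
    previous = None
    if os.path.exists(path):
        with open(path) as handle:
            previous = json.load(handle)
    ok, reason = matches_with_reason(previous, live)
    if not ok:
        print("cannot reuse:", reason)  # the PR surfaces this reason at -vv
        with open(path, "w") as handle:
            json.dump(live, handle)


if __name__ == "__main__":
    finish("env-config.json", {"python": "python3.7", "usedevelop": False})
```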
diff --git a/.gitignore b/.gitignore index c80af931..83b4c440 100644 --- a/.gitignore +++ b/.gitignore @@ -23,6 +23,8 @@ coverage.xml htmlcov .idea +.vscode + .eggs/ py27/ diff --git a/changelog/1003.bugfix.rst b/changelog/1003.bugfix.rst new file mode 100644 index 00000000..7303a0b3 --- /dev/null +++ b/changelog/1003.bugfix.rst @@ -0,0 +1,1 @@ +PEP-517 packaging fails with sdist already exists, fixed via ensuring the dist folder is empty before invoking the backend and `pypa/setuptools#1481 <https://github.com/pypa/setuptools/pull/1481>`_ - by :user:`gaborbernat` diff --git a/changelog/1004.feature.rst b/changelog/1004.feature.rst new file mode 100644 index 00000000..8e6f044a --- /dev/null +++ b/changelog/1004.feature.rst @@ -0,0 +1,1 @@ +``-vv`` will print out why a virtual environment is re-created whenever this operation is triggered - by :user:`gaborbernat` diff --git a/changelog/167.feature.rst b/changelog/167.feature.rst new file mode 100644 index 00000000..0323c743 --- /dev/null +++ b/changelog/167.feature.rst @@ -0,0 +1,3 @@ +add :confval:`commands_pre` and :confval:`commands_post` that run before and after running +the :confval:`commands` (setup runs always, commands only if setup suceeds, teardown always - all +run until the first failing command) - by :user:`gaborbernat` diff --git a/doc/config.rst b/doc/config.rst index 69db342c..61b6214a 100644 --- a/doc/config.rst +++ b/doc/config.rst @@ -171,15 +171,17 @@ Complete list of settings that you can put into ``testenv*`` sections: .. confval:: commands=ARGVLIST - The commands to be called for testing. Each line is interpreted as one command; - however a command can be split over multiple lines by ending the line with the ``\`` - character. + The commands to be called for testing. Only execute if :confval:`commands_pre` succeed. + + Each line is interpreted as one command; however a command can be split over + multiple lines by ending the line with the ``\`` character. Commands will execute one by one in sequential fashion until one of them fails (their exit code is non-zero) or all of them succeed. The exit code of a command may be ignored (meaning they are always considered successful) by prefixing the command with a dash (``-``) - this is similar to how ``make`` recipe lines work. The outcome of the environment is considered successful - only if all commands succeeded (exit code ignored via the ``-`` or success exit code value of zero). + only if all commands (these + setup + teardown) succeeded (exit code ignored via the + ``-`` or success exit code value of zero). :note: the virtual environment binary path (the ``bin`` folder within) is prepended to the os ``PATH``, meaning commands will first try to resolve to an executable from within the @@ -187,6 +189,17 @@ Complete list of settings that you can put into ``testenv*`` sections: translates as the virtual environments ``python`` (having the same runtime version as the :confval:`basepython`), and ``pip`` translates as the virtual environments ``pip``. +.. confval:: commands_pre=ARGVLIST + + Commands to run before running the :confval:`commands`. + All evaluation and configuration logic applies from :confval:`commands`. + +.. confval:: commands_post=ARGVLIST + + Commands to run after running the :confval:`commands`. Execute regardless of the outcome of + both :confval:`commands` and :confval:`commands_pre`. + All evaluation and configuration logic applies from :confval:`commands`. + .. confval:: install_command=ARGV .. 
versionadded:: 1.6 diff --git a/pyproject.toml b/pyproject.toml index 89839548..ed7af662 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [build-system] requires = [ - "setuptools >= 35.0.2", + "setuptools >= 40.0.4", "setuptools_scm >= 2.0.0, <4", "wheel >= 0.29.0", ] diff --git a/setup.py b/setup.py index d9443290..90bd6d0a 100644 --- a/setup.py +++ b/setup.py @@ -110,7 +110,7 @@ def main(): ] + [ ("Programming Language :: Python :: {}".format(x)) - for x in "2 2.7 3 3.4 3.5 3.6".split() + for x in "2 2.7 3 3.4 3.5 3.6 3.7".split() ], ) diff --git a/src/tox/config.py b/src/tox/config.py index 62cb3768..e560c221 100755 --- a/src/tox/config.py +++ b/src/tox/config.py @@ -772,6 +772,20 @@ def tox_addoption(parser): help="each line specifies a test command and can use substitution.", ) + parser.add_testenv_attribute( + name="commands_pre", + type="argvlist", + default="", + help="each line specifies a setup command action and can use substitution.", + ) + + parser.add_testenv_attribute( + name="commands_post", + type="argvlist", + default="", + help="each line specifies a teardown command and can use substitution.", + ) + parser.add_testenv_attribute( "ignore_outcome", type="bool", diff --git a/src/tox/package.py b/src/tox/package.py index 7f8d24a3..eb0ade34 100644 --- a/src/tox/package.py +++ b/src/tox/package.py @@ -183,7 +183,12 @@ def perform_isolated_build(build_info, package_venv, session, config): build_info.backend_module, build_info.backend_object, "sdist", str(config.distdir) ) ) + + # need to start with an empty (but existing) source distribution folder + if config.distdir.exists(): + config.distdir.remove(rec=1, ignore_errors=True) config.distdir.ensure_dir() + result = package_venv._pcall( [package_venv.envconfig.envpython, "-c", script], returnout=True, diff --git a/src/tox/session.py b/src/tox/session.py index 95716060..55ff0a87 100644 --- a/src/tox/session.py +++ b/src/tox/session.py @@ -178,7 +178,7 @@ class Action(object): popen.action = self self._popenlist.append(popen) try: - self.report.logpopen(popen, env=env) + self.report.logpopen(popen, cmd_args_shell) try: if resultjson and not redirect: if popen.stderr is not None: @@ -290,13 +290,12 @@ class Reporter(object): else: return Verbosity.DEBUG - def logpopen(self, popen, env): + def logpopen(self, popen, cmd_args_shell): """ log information about the action.popen() created process. 
""" - cmd = " ".join(map(str, popen.args)) if popen.outpath: - self.verbosity1(" {}$ {} >{}".format(popen.cwd, cmd, popen.outpath)) + self.verbosity1(" {}$ {} >{}".format(popen.cwd, cmd_args_shell, popen.outpath)) else: - self.verbosity1(" {}$ {} ".format(popen.cwd, cmd)) + self.verbosity1(" {}$ {} ".format(popen.cwd, cmd_args_shell)) def logaction_start(self, action): msg = "{} {}".format(action.msg, " ".join(map(str, action.args))) @@ -589,7 +588,8 @@ class Session: if venv.status: return self.hook.tox_runtest_pre(venv=venv) - self.hook.tox_runtest(venv=venv, redirect=redirect) + if venv.status == 0: + self.hook.tox_runtest(venv=venv, redirect=redirect) self.hook.tox_runtest_post(venv=venv) else: venv.status = "skipped tests" diff --git a/src/tox/venv.py b/src/tox/venv.py index 8358cd1f..a89f96b1 100755 --- a/src/tox/venv.py +++ b/src/tox/venv.py @@ -1,5 +1,5 @@ -import ast import codecs +import json import os import pipes import re @@ -41,7 +41,6 @@ class CreationConfig: try: lines = path.readlines(cr=0) value = lines.pop(0).split(None, 1) - md5, python = value version, sitepackages, usedevelop, alwayscopy = lines.pop(0).split(None, 4) sitepackages = bool(int(sitepackages)) usedevelop = bool(int(usedevelop)) @@ -50,25 +49,31 @@ class CreationConfig: for line in lines: md5, depstring = line.split(None, 1) deps.append((md5, depstring)) + md5, python = value return CreationConfig(md5, python, version, sitepackages, usedevelop, deps, alwayscopy) except Exception: return None + def matches_with_reason(self, other, deps_matches_subset=False): + for attr in ("md5", "python", "version", "sitepackages", "usedevelop", "alwayscopy"): + left = getattr(self, attr) + right = getattr(other, attr) + if left != right: + return False, "attr {} {!r}!={!r}".format(attr, left, right) + self_deps = set(self.deps) + other_deps = set(other.deps) + if self_deps != other_deps: + if deps_matches_subset: + diff = other_deps - self_deps + if not diff: + return False, "missing in previous {!r}".format(diff) + else: + return False, "{!r}!={!r}".format(self_deps, other_deps) + return True, None + def matches(self, other, deps_matches_subset=False): - return ( - other - and self.md5 == other.md5 - and self.python == other.python - and self.version == other.version - and self.sitepackages == other.sitepackages - and self.usedevelop == other.usedevelop - and self.alwayscopy == other.alwayscopy - and ( - all(d in self.deps for d in other.deps) - if deps_matches_subset is True - else self.deps == other.deps - ) - ) + outcome, _ = self.matches_with_reason(other, deps_matches_subset) + return outcome class VirtualEnv(object): @@ -164,15 +169,19 @@ class VirtualEnv(object): if status string is empty, all is ok. 
""" rconfig = CreationConfig.readconfig(self.path_config) - if ( - not self.envconfig.recreate - and rconfig - and rconfig.matches( - self._getliveconfig(), getattr(self.envconfig, "deps_matches_subset", False) - ) - ): + if self.envconfig.recreate: + reason = "-r flag" + else: + if rconfig is None: + reason = "no previous config {}".format(self.path_config) + else: + live_config = self._getliveconfig() + deps_subset_match = getattr(self.envconfig, "deps_matches_subset", False) + outcome, reason = rconfig.matches_with_reason(live_config, deps_subset_match) + if reason is None: action.info("reusing", self.envconfig.envdir) return + action.info("cannot reuse", reason) if rconfig is None: action.setactivity("create", self.envconfig.envdir) else: @@ -189,7 +198,6 @@ class VirtualEnv(object): def _getliveconfig(self): python = self.envconfig.python_info.executable - md5 = getdigest(python) version = tox.__version__ sitepackages = self.envconfig.sitepackages develop = self.envconfig.usedevelop @@ -199,6 +207,7 @@ class VirtualEnv(object): raw_dep = dep.name md5 = getdigest(raw_dep) deps.append((md5, raw_dep)) + md5 = getdigest(python) return CreationConfig(md5, python, version, sitepackages, develop, deps, alwayscopy) def _getresolvedeps(self): @@ -226,7 +235,13 @@ class VirtualEnv(object): return re.match(self.envconfig.platform, sys.platform) def finish(self): - self._getliveconfig().writeconfig(self.path_config) + previous_config = CreationConfig.readconfig(self.path_config) + live_config = self._getliveconfig() + if previous_config is None or not previous_config.matches(live_config): + self.session.report.verbosity1( + "write config to {} as {!r}".format(self.path_config, live_config) + ) + live_config.writeconfig(self.path_config) def _needs_reinstall(self, setupdir, action): setup_py = setupdir.join("setup.py") @@ -234,12 +249,18 @@ class VirtualEnv(object): args = [self.envconfig.envpython, str(setup_py), "--name"] env = self._get_os_environ() output = action.popen(args, cwd=setupdir, redirect=False, returnout=True, env=env) - name = output.strip() - args = [self.envconfig.envpython, "-c", "import sys; print(sys.path)"] + name = next( + (i for i in output.split("\n") if i and not i.startswith("pydev debugger:")), "" + ) + args = [ + self.envconfig.envpython, + "-c", + "import sys; import json; print(json.dumps(sys.path))", + ] out = action.popen(args, redirect=False, returnout=True, env=env) try: - sys_path = ast.literal_eval(out.strip()) - except SyntaxError: + sys_path = json.loads(out) + except ValueError: sys_path = [] egg_info_fname = ".".join((name, "egg-info")) for d in reversed(sys_path): @@ -386,22 +407,34 @@ class VirtualEnv(object): env["VIRTUAL_ENV"] = str(self.path) return env - def test(self, redirect=False): - with self.session.newaction(self, "runtests") as action: - self.status = 0 - self.session.make_emptydir(self.envconfig.envtmpdir) - self.envconfig.envtmpdir.ensure(dir=1) + def test( + self, + redirect=False, + name="runtests", + commands=None, + ignore_outcome=None, + ignore_errors=None, + display_hash_seed=False, + ): + if commands is None: + commands = self.envconfig.commands + if ignore_outcome is None: + ignore_outcome = self.envconfig.ignore_outcome + if ignore_errors is None: + ignore_errors = self.envconfig.ignore_errors + with self.session.newaction(self, name) as action: cwd = self.envconfig.changedir - env = self._get_os_environ(is_test_command=True) - # Display PYTHONHASHSEED to assist with reproducibility. 
- action.setactivity("runtests", "PYTHONHASHSEED={!r}".format(env.get("PYTHONHASHSEED"))) - for i, argv in enumerate(self.envconfig.commands): + if display_hash_seed: + env = self._get_os_environ(is_test_command=True) + # Display PYTHONHASHSEED to assist with reproducibility. + action.setactivity(name, "PYTHONHASHSEED={!r}".format(env.get("PYTHONHASHSEED"))) + for i, argv in enumerate(commands): # have to make strings as _pcall changes argv[0] to a local() # happens if the same environment is invoked twice message = "commands[{}] | {}".format( i, " ".join([pipes.quote(str(x)) for x in argv]) ) - action.setactivity("runtests", message) + action.setactivity(name, message) # check to see if we need to ignore the return code # if so, we need to alter the command line arguments if argv[0].startswith("-"): @@ -423,7 +456,7 @@ class VirtualEnv(object): is_test_command=True, ) except tox.exception.InvocationError as err: - if self.envconfig.ignore_outcome: + if ignore_outcome: msg = "command failed but result from testenv is ignored\ncmd:" self.session.report.warning("{} {}".format(msg, err)) self.status = "ignored failed command" @@ -431,7 +464,7 @@ class VirtualEnv(object): self.session.report.error(str(err)) self.status = "commands failed" - if not self.envconfig.ignore_errors: + if not ignore_errors: break # Don't process remaining commands except KeyboardInterrupt: self.status = "keyboardinterrupt" @@ -532,6 +565,32 @@ def tox_runtest(venv, redirect): return True # Return non-None to indicate plugin has completed [email protected] +def tox_runtest_pre(venv): + venv.status = 0 + venv.session.make_emptydir(venv.envconfig.envtmpdir) + venv.envconfig.envtmpdir.ensure(dir=1) + venv.test( + name="run-test-pre", + commands=venv.envconfig.commands_pre, + redirect=False, + ignore_outcome=False, + ignore_errors=False, + display_hash_seed=True, + ) + + [email protected] +def tox_runtest_post(venv): + venv.test( + name="run-test-post", + commands=venv.envconfig.commands_post, + redirect=False, + ignore_outcome=False, + ignore_errors=False, + ) + + @tox.hookimpl def tox_runenvreport(venv, action): # write out version dependency information diff --git a/tox.ini b/tox.ini index 53311495..f14c5bf0 100644 --- a/tox.ini +++ b/tox.ini @@ -107,7 +107,20 @@ branch = true [coverage:report] skip_covered = True show_missing = True -exclude_lines = if __name__ == ["']__main__["']: +exclude_lines = + # Have to re-enable the standard pragma + \#\s*pragma: no cover + # We optionally substitute this + ${COVERAGE_IGNORE_WINDOWS} + + # Don't complain if tests don't hit defensive assertion code: + ^\s*raise AssertionError\b + ^\s*raise NotImplementedError\b + ^\s*return NotImplemented\b + ^\s*raise$ + + # Don't complain if non-runnable code isn't run: + ^if __name__ == ['"]__main__['"]:$ [coverage:paths] source = src/tox
Have pre- and post-test runs in config. New summary: Plugins can already run pre- and post-test commands via https://tox.readthedocs.io/en/latest/plugins.html#tox.hookspecs.tox_runtest_post and https://tox.readthedocs.io/en/latest/plugins.html#tox.hookspecs.tox_runtest_pre. We should allow users to declare this inside their config file too, so they don't have to write a tox plugin for one-time behaviours (e.g. setup/cleanup operations for dependencies).
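For context, below is a minimal sketch of the plugin-hook route the summary refers to. The hook names (`tox_runtest_pre` / `tox_runtest_post`) come from the linked plugin docs and also appear in the patch above; the subprocess calls are purely illustrative, and such a module would still need to be registered as a tox plugin (e.g. via a `tox` entry point) — exactly the overhead the new `commands_pre` / `commands_post` testenv options added by this PR are meant to avoid.

```python
# Hypothetical plugin module: run a command before and after the testenv's
# commands using the existing hooks. Illustrative only -- the point of the
# issue is to avoid needing a plugin like this for one-off setup/cleanup.
import subprocess

import tox


@tox.hookimpl
def tox_runtest_pre(venv):
    # runs before the environment's `commands`
    subprocess.check_call(["python", "-c", "print('pre')"])


@tox.hookimpl
def tox_runtest_post(venv):
    # runs after `commands`, regardless of their outcome
    subprocess.check_call(["python", "-c", "print('post')"])
```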
tox-dev/tox
diff --git a/src/tox/_pytestplugin.py b/src/tox/_pytestplugin.py index f0f35ec7..c79a5fd1 100644 --- a/src/tox/_pytestplugin.py +++ b/src/tox/_pytestplugin.py @@ -69,8 +69,8 @@ def cmd(request, capfd, monkeypatch): request.addfinalizer(py.path.local().chdir) def run(*argv): - key = str(b"PYTHONPATH") - python_paths = (i for i in (str(os.getcwd()), os.getenv(key)) if i) + key = str("PYTHONPATH") + python_paths = (i for i in (os.getcwd(), os.getenv(key)) if i) monkeypatch.setenv(key, os.pathsep.join(python_paths)) with RunResult(capfd, argv) as result: @@ -403,34 +403,95 @@ def mock_venv(monkeypatch): Note: because we inherit, to keep things sane you must call the py environment and only that; and cannot install any packages. """ + # first ensure we have a clean python path + monkeypatch.delenv("PYTHONPATH", raising=False) + + # object to collect some data during the execution + class Result(object): + def __init__(self): + self.popens = [] + self.cwd = None + self.session = None + + res = Result() + + # convince tox that the current running virtual environment is already the env we would create class ProxyCurrentPython: @classmethod def readconfig(cls, path): - assert path.dirname.endswith("{}py".format(os.sep)) - return CreationConfig( - md5=getdigest(sys.executable), - python=sys.executable, - version=tox.__version__, - sitepackages=False, - usedevelop=False, - deps=[], - alwayscopy=False, - ) + if path.dirname.endswith("{}py".format(os.sep)): + return CreationConfig( + md5=getdigest(sys.executable), + python=sys.executable, + version=tox.__version__, + sitepackages=False, + usedevelop=False, + deps=[], + alwayscopy=False, + ) + elif path.dirname.endswith("{}.package".format(os.sep)): + return CreationConfig( + md5=getdigest(sys.executable), + python=sys.executable, + version=tox.__version__, + sitepackages=False, + usedevelop=False, + deps=[(getdigest(""), "setuptools >= 35.0.2"), (getdigest(""), "wheel")], + alwayscopy=False, + ) + assert False # pragma: no cover monkeypatch.setattr(CreationConfig, "readconfig", ProxyCurrentPython.readconfig) + # provide as Python the current python executable def venv_lookup(venv, name): assert name == "python" + venv.envconfig.envdir = py.path.local(sys.executable).join("..", "..") return sys.executable monkeypatch.setattr(VirtualEnv, "_venv_lookup", venv_lookup) + # don't allow overriding the tox config data for the host Python + def finish_venv(self): + return + + monkeypatch.setattr(VirtualEnv, "finish", finish_venv) + + # we lie that it's an environment with no packages in it @tox.hookimpl def tox_runenvreport(venv, action): return [] monkeypatch.setattr(venv, "tox_runenvreport", tox_runenvreport) + # intercept the build session to save it and we intercept the popen invocations + prev_build = tox.session.build_session + + def build_session(config): + res.session = prev_build(config) + res._popen = res.session.popen + monkeypatch.setattr(res.session, "popen", popen) + return res.session + + monkeypatch.setattr(tox.session, "build_session", build_session) + + # collect all popen calls + def popen(cmd, **kwargs): + # we don't want to perform installation of new packages, just replace with an always ok cmd + if "pip" in cmd and "install" in cmd: + cmd = ["python", "-c", "print({!r})".format(cmd)] + activity_id = res.session._actions[-1].id + activity_name = res.session._actions[-1].activity + try: + ret = res._popen(cmd, **kwargs) + except tox.exception.InvocationError as exception: # pragma: no cover + ret = exception # pragma: no cover + finally: + 
res.popens.append((activity_id, activity_name, kwargs.get("env"), ret, cmd)) + return ret + + return res + @pytest.fixture(scope="session") def current_tox_py(): diff --git a/tests/unit/session/test_session.py b/tests/unit/session/test_session.py index 565728d5..2436b1e6 100644 --- a/tests/unit/session/test_session.py +++ b/tests/unit/session/test_session.py @@ -1,5 +1,7 @@ import os import re +import sys +import textwrap import uuid from threading import Thread @@ -304,3 +306,72 @@ def assert_popen_env(res): if tox_id != "tox": assert env["TOX_ENV_NAME"] == tox_id assert env["TOX_ENV_DIR"] == os.path.join(res.cwd, ".tox", tox_id) + + +def test_command_prev_post_ok(cmd, initproj, mock_venv): + initproj( + "pkg_command_test_123-0.7", + filedefs={ + "tox.ini": """ + [tox] + envlist = py + + [testenv] + commands_pre = python -c 'print("pre")' + commands = python -c 'print("command")' + commands_post = python -c 'print("post")' + """ + }, + ) + result = cmd() + assert result.ret == 0 + expected = textwrap.dedent( + """ + py run-test-pre: commands[0] | python -c 'print("pre")' + pre + py runtests: commands[0] | python -c 'print("command")' + command + py run-test-post: commands[0] | python -c 'print("post")' + post + ___________________________________ summary ___________________________________{} + py: commands succeeded + congratulations :) + """.format( + "_" if sys.platform != "win32" else "" + ) + ) + actual = result.out.replace(os.linesep, "\n") + assert expected in actual + + +def test_command_prev_fail_command_skip_post_run(cmd, initproj, mock_venv): + initproj( + "pkg_command_test_123-0.7", + filedefs={ + "tox.ini": """ + [tox] + envlist = py + + [testenv] + commands_pre = python -c 'raise SystemExit(2)' + commands = python -c 'print("command")' + commands_post = python -c 'print("post")' + """ + }, + ) + result = cmd() + assert result.ret == 1 + expected = textwrap.dedent( + """ + py run-test-pre: commands[0] | python -c 'raise SystemExit(2)' + ERROR: InvocationError for command '{} -c raise SystemExit(2)' (exited with code 2) + py run-test-post: commands[0] | python -c 'print("post")' + post + ___________________________________ summary ___________________________________{} + ERROR: py: commands failed + """.format( + sys.executable.replace("\\", "\\\\"), "_" if sys.platform != "win32" else "" + ) + ) + actual = result.out.replace(os.linesep, "\n") + assert expected in actual diff --git a/tests/unit/test_package.py b/tests/unit/test_package.py index ea885544..81605e86 100644 --- a/tests/unit/test_package.py +++ b/tests/unit/test_package.py @@ -229,3 +229,28 @@ def test_package_isolated_toml_bad_backend(initproj, cmd): build-backend = [] """, ) + + +def test_dist_exists_version_change(mock_venv, initproj, cmd): + base = initproj( + "package_toml-{}".format("0.1"), + filedefs={ + "tox.ini": """ + [tox] + isolated_build = true + """, + "pyproject.toml": """ + [build-system] + requires = ["setuptools >= 35.0.2"] + build-backend = 'setuptools.build_meta' + """, + }, + ) + result = cmd("-e", "py") + assert result.ret == 0, result.out + + new_code = base.join("setup.py").read_text("utf-8").replace("0.1", "0.2") + base.join("setup.py").write_text(new_code, "utf-8") + + result = cmd("-e", "py") + assert result.ret == 0, result.out diff --git a/tests/unit/test_venv.py b/tests/unit/test_venv.py index 5b830956..6c89119f 100644 --- a/tests/unit/test_venv.py +++ b/tests/unit/test_venv.py @@ -344,8 +344,8 @@ def test_test_hashseed_is_in_output(newmocksession, monkeypatch): venv = 
mocksession.getenv("python") action = mocksession.newaction(venv, "update") venv.update(action) - venv.test() - mocksession.report.expect("verbosity0", "runtests: PYTHONHASHSEED='{}'".format(seed)) + tox.venv.tox_runtest_pre(venv) + mocksession.report.expect("verbosity0", "run-test-pre: PYTHONHASHSEED='{}'".format(seed)) def test_test_runtests_action_command_is_in_output(newmocksession): @@ -389,6 +389,7 @@ def test_install_command_not_installed(newmocksession): """, ) venv = mocksession.getenv("python") + venv.status = 0 venv.test() mocksession.report.expect("warning", "*test command found but not*") assert venv.status == 0 diff --git a/tests/unit/test_z_cmdline.py b/tests/unit/test_z_cmdline.py index 2bcd57ae..95e628cb 100644 --- a/tests/unit/test_z_cmdline.py +++ b/tests/unit/test_z_cmdline.py @@ -639,6 +639,7 @@ def test_alwayscopy_default(initproj, cmd): assert "virtualenv --always-copy" not in result.out [email protected]("sys.platform == 'win32'") def test_empty_activity_ignored(initproj, cmd): initproj( "example123", @@ -655,6 +656,7 @@ def test_empty_activity_ignored(initproj, cmd): assert "installed:" not in result.out [email protected]("sys.platform == 'win32'") def test_empty_activity_shown_verbose(initproj, cmd): initproj( "example123",
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 9 }
3.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock" ], "pre_install": null, "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
certifi @ file:///croot/certifi_1671487769961/work/certifi
coverage==7.2.7
distlib==0.3.9
exceptiongroup==1.2.2
execnet==2.0.2
filelock==3.12.2
importlib-metadata==6.7.0
iniconfig==2.0.0
packaging==24.0
platformdirs==4.0.0
pluggy==0.13.1
py==1.11.0
pytest==7.4.4
pytest-cov==4.1.0
pytest-mock==3.11.1
pytest-xdist==3.5.0
six==1.17.0
toml==0.10.2
tomli==2.0.1
-e git+https://github.com/tox-dev/tox.git@cf6afcecaca22df7b509facaea43c09a15570f75#egg=tox
typing_extensions==4.7.1
virtualenv==20.26.6
zipp==3.15.0
name: tox
channels:
  - defaults
  - https://repo.anaconda.com/pkgs/main
  - https://repo.anaconda.com/pkgs/r
  - conda-forge
dependencies:
  - _libgcc_mutex=0.1=main
  - _openmp_mutex=5.1=1_gnu
  - ca-certificates=2025.2.25=h06a4308_0
  - certifi=2022.12.7=py37h06a4308_0
  - ld_impl_linux-64=2.40=h12ee557_0
  - libffi=3.4.4=h6a678d5_1
  - libgcc-ng=11.2.0=h1234567_1
  - libgomp=11.2.0=h1234567_1
  - libstdcxx-ng=11.2.0=h1234567_1
  - ncurses=6.4=h6a678d5_0
  - openssl=1.1.1w=h7f8727e_0
  - pip=22.3.1=py37h06a4308_0
  - python=3.7.16=h7a1cb2a_0
  - readline=8.2=h5eee18b_0
  - setuptools=65.6.3=py37h06a4308_0
  - sqlite=3.45.3=h5eee18b_0
  - tk=8.6.14=h39e8969_0
  - wheel=0.38.4=py37h06a4308_0
  - xz=5.6.4=h5eee18b_1
  - zlib=1.2.13=h5eee18b_1
  - pip:
    - coverage==7.2.7
    - distlib==0.3.9
    - exceptiongroup==1.2.2
    - execnet==2.0.2
    - filelock==3.12.2
    - importlib-metadata==6.7.0
    - iniconfig==2.0.0
    - packaging==24.0
    - platformdirs==4.0.0
    - pluggy==0.13.1
    - py==1.11.0
    - pytest==7.4.4
    - pytest-cov==4.1.0
    - pytest-mock==3.11.1
    - pytest-xdist==3.5.0
    - six==1.17.0
    - toml==0.10.2
    - tomli==2.0.1
    - tox==3.3.1.dev32+gcf6afcec
    - typing-extensions==4.7.1
    - virtualenv==20.26.6
    - zipp==3.15.0
prefix: /opt/conda/envs/tox
[ "tests/unit/session/test_session.py::test_command_prev_post_ok", "tests/unit/session/test_session.py::test_command_prev_fail_command_skip_post_run", "tests/unit/test_venv.py::test_test_hashseed_is_in_output" ]
[ "tests/unit/test_package.py::test_package_isolated_toml_no_build_system", "tests/unit/test_package.py::test_package_isolated_toml_no_requires", "tests/unit/test_package.py::test_package_isolated_toml_no_backend", "tests/unit/test_package.py::test_package_isolated_toml_bad_requires", "tests/unit/test_package.py::test_package_isolated_toml_bad_backend", "tests/unit/test_venv.py::test_create", "tests/unit/test_venv.py::test_install_deps_wildcard", "tests/unit/test_venv.py::test_install_deps_indexserver", "tests/unit/test_venv.py::test_install_deps_pre", "tests/unit/test_venv.py::test_installpkg_indexserver", "tests/unit/test_venv.py::test_install_recreate", "tests/unit/test_venv.py::test_install_sdist_extras", "tests/unit/test_venv.py::test_develop_extras", "tests/unit/test_venv.py::test_install_python3", "tests/unit/test_venv.py::TestCreationConfig::test_python_recreation", "tests/unit/test_venv.py::TestVenvTest::test_envbindir_path", "tests/unit/test_venv.py::TestVenvTest::test_pythonpath_usage", "tests/unit/test_venv.py::test_env_variables_added_to_pcall", "tests/unit/test_venv.py::test_installpkg_no_upgrade", "tests/unit/test_venv.py::test_install_command_verbosity[0-0]", "tests/unit/test_venv.py::test_install_command_verbosity[1-0]", "tests/unit/test_venv.py::test_install_command_verbosity[2-0]", "tests/unit/test_venv.py::test_install_command_verbosity[3-1]", "tests/unit/test_venv.py::test_install_command_verbosity[4-2]", "tests/unit/test_venv.py::test_install_command_verbosity[5-3]", "tests/unit/test_venv.py::test_install_command_verbosity[6-3]", "tests/unit/test_venv.py::test_installpkg_upgrade", "tests/unit/test_venv.py::test_run_install_command", "tests/unit/test_venv.py::test_run_custom_install_command", "tests/unit/test_z_cmdline.py::test_report_protocol", "tests/unit/test_z_cmdline.py::test_venv_special_chars_issue252" ]
[ "tests/unit/session/test_session.py::test__resolve_pkg_missing_directory", "tests/unit/session/test_session.py::test__resolve_pkg_missing_directory_in_distshare", "tests/unit/session/test_session.py::test__resolve_pkg_multiple_valid_versions", "tests/unit/session/test_session.py::test__resolve_pkg_with_invalid_version", "tests/unit/session/test_session.py::test__resolve_pkg_with_alpha_version", "tests/unit/session/test_session.py::test__resolve_pkg_doubledash", "tests/unit/session/test_session.py::test_minversion", "tests/unit/session/test_session.py::test_tox_parallel_build_safe", "tests/unit/session/test_session.py::test_skip_sdist", "tests/unit/session/test_session.py::test_skip_install_skip_package", "tests/unit/session/test_session.py::test_venv_filter_empty_all_active", "tests/unit/session/test_session.py::test_venv_filter_match_all_none_active", "tests/unit/session/test_session.py::test_venv_filter_match_some_some_active", "tests/unit/session/test_session.py::test_tox_env_var_flags_inserted_non_isolated", "tests/unit/session/test_session.py::test_tox_env_var_flags_inserted_isolated", "tests/unit/test_package.py::test_make_sdist", "tests/unit/test_package.py::test_make_sdist_distshare", "tests/unit/test_package.py::test_sdistonly", "tests/unit/test_package.py::test_separate_sdist_no_sdistfile", "tests/unit/test_package.py::test_separate_sdist", "tests/unit/test_package.py::test_sdist_latest", "tests/unit/test_package.py::test_installpkg", "tests/unit/test_package.py::test_package_isolated_no_pyproject_toml", "tests/unit/test_package.py::test_dist_exists_version_change", "tests/unit/test_venv.py::test_getdigest", "tests/unit/test_venv.py::test_getsupportedinterpreter", "tests/unit/test_venv.py::test_commandpath_venv_precedence", "tests/unit/test_venv.py::test_create_sitepackages", "tests/unit/test_venv.py::test_env_variables_added_to_needs_reinstall", "tests/unit/test_venv.py::test_test_runtests_action_command_is_in_output", "tests/unit/test_venv.py::test_install_error", "tests/unit/test_venv.py::test_install_command_not_installed", "tests/unit/test_venv.py::test_install_command_whitelisted", "tests/unit/test_venv.py::test_install_command_not_installed_bash", "tests/unit/test_venv.py::TestCreationConfig::test_basic", "tests/unit/test_venv.py::TestCreationConfig::test_matchingdependencies", "tests/unit/test_venv.py::TestCreationConfig::test_matchingdependencies_file", "tests/unit/test_venv.py::TestCreationConfig::test_matchingdependencies_latest", "tests/unit/test_venv.py::TestCreationConfig::test_dep_recreation", "tests/unit/test_venv.py::TestCreationConfig::test_develop_recreation", "tests/unit/test_venv.py::test_command_relative_issue36", "tests/unit/test_venv.py::test_ignore_outcome_failing_cmd", "tests/unit/test_venv.py::test_tox_testenv_create", "tests/unit/test_venv.py::test_tox_testenv_pre_post", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_empty_instance", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_empty_interpreter", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_empty_interpreter_ws", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_non_utf8", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_interpreter_simple", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_interpreter_ws", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_interpreter_arg", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_interpreter_args", 
"tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_real", "tests/unit/test_venv.py::test_tox_testenv_interpret_shebang_long_example", "tests/unit/test_z_cmdline.py::TestSession::test_log_pcall", "tests/unit/test_z_cmdline.py::TestSession::test_summary_status", "tests/unit/test_z_cmdline.py::TestSession::test_getvenv", "tests/unit/test_z_cmdline.py::test_notoxini_help_still_works", "tests/unit/test_z_cmdline.py::test_notoxini_help_ini_still_works", "tests/unit/test_z_cmdline.py::test_envdir_equals_toxini_errors_out", "tests/unit/test_z_cmdline.py::test_run_custom_install_command_error", "tests/unit/test_z_cmdline.py::test_unknown_interpreter_and_env", "tests/unit/test_z_cmdline.py::test_unknown_interpreter", "tests/unit/test_z_cmdline.py::test_skip_platform_mismatch", "tests/unit/test_z_cmdline.py::test_skip_unknown_interpreter", "tests/unit/test_z_cmdline.py::test_skip_unknown_interpreter_result_json", "tests/unit/test_z_cmdline.py::test_unknown_dep", "tests/unit/test_z_cmdline.py::test_unknown_environment", "tests/unit/test_z_cmdline.py::test_minimal_setup_py_empty", "tests/unit/test_z_cmdline.py::test_minimal_setup_py_comment_only", "tests/unit/test_z_cmdline.py::test_minimal_setup_py_non_functional", "tests/unit/test_z_cmdline.py::test_sdist_fails", "tests/unit/test_z_cmdline.py::test_no_setup_py_exits", "tests/unit/test_z_cmdline.py::test_package_install_fails", "tests/unit/test_z_cmdline.py::test_toxuone_env", "tests/unit/test_z_cmdline.py::test_different_config_cwd", "tests/unit/test_z_cmdline.py::test_json", "tests/unit/test_z_cmdline.py::test_developz", "tests/unit/test_z_cmdline.py::test_usedevelop", "tests/unit/test_z_cmdline.py::test_usedevelop_mixed", "tests/unit/test_z_cmdline.py::test_test_usedevelop[.]", "tests/unit/test_z_cmdline.py::test_test_usedevelop[src]", "tests/unit/test_z_cmdline.py::test_alwayscopy", "tests/unit/test_z_cmdline.py::test_alwayscopy_default", "tests/unit/test_z_cmdline.py::test_empty_activity_ignored", "tests/unit/test_z_cmdline.py::test_empty_activity_shown_verbose", "tests/unit/test_z_cmdline.py::test_test_piphelp", "tests/unit/test_z_cmdline.py::test_notest", "tests/unit/test_z_cmdline.py::test_PYC", "tests/unit/test_z_cmdline.py::test_env_VIRTUALENV_PYTHON", "tests/unit/test_z_cmdline.py::test_envsitepackagesdir", "tests/unit/test_z_cmdline.py::test_envsitepackagesdir_skip_missing_issue280", "tests/unit/test_z_cmdline.py::test_verbosity[]", "tests/unit/test_z_cmdline.py::test_verbosity[-v]", "tests/unit/test_z_cmdline.py::test_verbosity[-vv]", "tests/unit/test_z_cmdline.py::test_envtmpdir", "tests/unit/test_z_cmdline.py::test_missing_env_fails", "tests/unit/test_z_cmdline.py::test_tox_console_script", "tests/unit/test_z_cmdline.py::test_tox_quickstart_script", "tests/unit/test_z_cmdline.py::test_tox_cmdline_no_args", "tests/unit/test_z_cmdline.py::test_tox_cmdline_args", "tests/unit/test_z_cmdline.py::test_exit_code[0]", "tests/unit/test_z_cmdline.py::test_exit_code[6]" ]
[]
MIT License
3,093
[ "changelog/1004.feature.rst", "doc/config.rst", "changelog/1003.bugfix.rst", "setup.py", "src/tox/package.py", "src/tox/venv.py", "src/tox/session.py", ".gitignore", "changelog/167.feature.rst", "pyproject.toml", "tox.ini", "src/tox/config.py" ]
[ "changelog/1004.feature.rst", "doc/config.rst", "changelog/1003.bugfix.rst", "setup.py", "src/tox/package.py", "src/tox/venv.py", "src/tox/session.py", ".gitignore", "changelog/167.feature.rst", "pyproject.toml", "tox.ini", "src/tox/config.py" ]
pydicom__deid-69
f046a885f44f2b65e92b2e31719e95aacc9d059c
2018-09-18 21:38:37
2c23ec2c4443e4c8d23adc026b5dc1fde9f905c3
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9fa0995..0a00e65 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,6 +14,7 @@ and **Merged pull requests**. Critical items to know are:
 Referenced versions in headers are tagged on Github, in parentheses are for pypi.
 
 ## [vxx](https://github.com/pydicom/deid/tree/master) (master)
+ - need to clean up temporary directory (mkdtemp), issue #68 (0.1.18)
  - fixing issue #65, save for compressed data (0.1.17)
  - matplotlib must be less than or equal to 2.1.2 for install (0.1.16)
  - fixing bug with clean coordinate flipping rectangle
diff --git a/deid/dicom/pixels/clean.py b/deid/dicom/pixels/clean.py
index b01417e..57e0642 100644
--- a/deid/dicom/pixels/clean.py
+++ b/deid/dicom/pixels/clean.py
@@ -24,10 +24,10 @@ SOFTWARE.
 
 from deid.config import DeidRecipe
 from deid.logger import bot
+from deid.utils import get_temporary_name
 from pydicom import read_file
 import matplotlib
 matplotlib.use('pdf')
-import tempfile
 import os
 import re
 
@@ -52,7 +52,7 @@ class DicomCleaner():
                  force=True):
 
         if output_folder is None:
-            output_folder = tempfile.mkdtemp()
+            output_folder = get_temporary_name(prefix="clean")
 
         if font is None:
             font = self.default_font()
@@ -141,15 +141,25 @@ class DicomCleaner():
 
     def _get_clean_name(self, output_folder, extension='dcm'):
         '''return a full path to an output file, with custom folder and
-           extension
+           extension. If the output folder isn't yet created, make it.
+
+           Parameters
+           ==========
+           output_folder: the output folder to create, will be created if doesn't
+                          exist.
+           extension: the extension of the file to create a name for, should
+                      not start with "."
         '''
         if output_folder is None:
             output_folder = self.output_folder
 
+        if not os.path.exists(output_folder):
+            bot.debug('Creating output folder %s' % output_folder)
+            os.mkdir(output_folder)
+
         basename = re.sub('[.]dicom|[.]dcm', '', os.path.basename(self.dicom_file))
         return "%s/cleaned-%s.%s" %(output_folder, basename, extension)
 
-
     def save_png(self, output_folder=None, image_type="cleaned", title=None):
         '''save an original or cleaned dicom as png to disk.
            Default image_format is "cleaned" and can be set
@@ -157,8 +167,8 @@ class DicomCleaner():
            flagged) the cleaned image is just a copy of original
         '''
         from matplotlib import pyplot as plt
-
-        if hasattr(self,image_type):
+
+        if hasattr(self, image_type):
             png_file = self._get_clean_name(output_folder, 'png')
             plt = self.get_figure(image_type=image_type, title=title)
             plt.savefig(png_file)
diff --git a/deid/utils.py b/deid/utils.py
index 636a454..4af24b1 100644
--- a/deid/utils.py
+++ b/deid/utils.py
@@ -29,7 +29,7 @@ import json
 import os
 import re
 import requests
-import json
+import tempfile
 from deid.logger import bot
 from collections import OrderedDict
 import sys
@@ -40,9 +40,9 @@ if sys.version_info[0] < 3:
     from exceptions import OSError
 
 
-######################################################################################
+################################################################################
 # Local commands and requests
-######################################################################################
+################################################################################
 
 def get_installdir():
     '''get_installdir returns the installation directory of the application
@@ -50,10 +50,30 @@ def get_installdir():
     return os.path.abspath(os.path.dirname(__file__))
 
 
+def get_temporary_name(prefix=None, ext=None):
+    '''get a temporary name, can be used for a directory or file. This does so
+       without creating the file, and adds an optional prefix
+
+       Parameters
+       ==========
+       prefix: if defined, add the prefix after deid
+       ext: if defined, return the file extension appended. Do not specify "."
+    '''
+    deid_prefix = 'deid-'
+    if prefix:
+        deid_prefix = 'deid-%s-' % prefix
+
+    tmpname = os.path.join(tempfile.gettempdir(),
+                           '%s%s' % (deid_prefix,
+                           next(tempfile._get_candidate_names())))
+    if ext:
+        tmpname = '%s.%s' % (tmpname, ext)
+    return tmpname
+
-############################################################################
-## FILE OPERATIONS #########################################################
-############################################################################
+################################################################################
+## FILE OPERATIONS #############################################################
+################################################################################
 
 
 def write_file(filename,content,mode="w"):
     '''write_file will open a file, "filename" and write content, "content"
diff --git a/deid/version.py b/deid/version.py
index da29ba8..e6987c8 100644
--- a/deid/version.py
+++ b/deid/version.py
@@ -1,5 +1,5 @@
 '''
-Copyright (c) 2017 Vanessa Sochat
+Copyright (c) 2017-2018 Vanessa Sochat
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -20,7 +20,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 '''
 
-__version__ = "0.1.17"
+__version__ = "0.1.18"
 AUTHOR = 'Vanessa Sochat'
 AUTHOR_EMAIL = '[email protected]'
 NAME = 'deid'
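For orientation, a small usage sketch of the get_temporary_name() helper introduced by the patch above. This is illustrative only: it assumes the patched deid package is importable, and the example paths in the comments are hypothetical because the random suffix comes from tempfile's candidate-name generator.

# Illustrative sketch, not part of the patch: exercises the new helper.
import os

from deid.utils import get_temporary_name

folder = get_temporary_name(prefix="clean")   # e.g. /tmp/deid-clean-a1b2c3d4 (hypothetical)
print(os.path.exists(folder))                 # False: only a name is generated, nothing on disk
os.mkdir(folder)                              # the caller creates the directory...
os.rmdir(folder)                              # ...and is responsible for removing it again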
A runaway temp folder appears to be leaked for every clean operation. In clean.py, lines 54-55, a temp folder is created and never deleted:

    if output_folder is None:
        output_folder = tempfile.mkdtemp()

The user of mkdtemp() is responsible for deleting the temporary directory and its contents when done with it.
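For readers unfamiliar with the tempfile contract quoted above, a minimal sketch of the leak and of the usual cleanup patterns. This is generic Python standard-library usage, not deid code.

# Minimal sketch of the mkdtemp() leak and its standard remedies.
import shutil
import tempfile

# Leaky pattern: every call leaves a directory behind under the system temp dir.
workdir = tempfile.mkdtemp()

# Remedy 1: the caller removes the directory explicitly when done.
shutil.rmtree(workdir)

# Remedy 2: a context manager that removes the directory automatically.
with tempfile.TemporaryDirectory() as workdir:
    pass  # work with files inside workdir here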
pydicom/deid
diff --git a/deid/tests/test_utils.py b/deid/tests/test_utils.py
index 9d6fd37..6cc5905 100644
--- a/deid/tests/test_utils.py
+++ b/deid/tests/test_utils.py
@@ -28,6 +28,7 @@ SOFTWARE.
 '''
 
 from deid.utils import get_installdir
+
 from numpy.testing import (
     assert_array_equal,
     assert_almost_equal,
@@ -52,6 +53,19 @@ class TestUtils(unittest.TestCase):
         shutil.rmtree(self.tmpdir)
         print("\n######################END########################")
 
+    def test_get_temporary_name(self):
+        '''test_get_temporary_name will test the generation of a temporary
+           file name.
+        '''
+        from deid.utils import get_temporary_name
+        print("Testing utils.get_temporary_name...")
+        tmpname = get_temporary_name()
+        self.assertTrue(not os.path.exists(tmpname))
+        self.assertTrue('deid' in tmpname)
+        tmpname = get_temporary_name(prefix='clean')
+        self.assertTrue('deid-clean' in tmpname)
+        tmpname = get_temporary_name(ext='.dcm')
+        self.assertTrue(tmpname.endswith('.dcm'))
 
     def test_write_read_files(self):
         '''test_write_read_files will test the functions
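To run only the test added by this patch, one option (assuming pytest and an editable install of deid from the repository root, mirroring the install_config and FAIL_TO_PASS fields below) is:

# Runs just the new test; the node ID matches the FAIL_TO_PASS entry below.
import pytest

pytest.main([
    "deid/tests/test_utils.py::TestUtils::test_get_temporary_name",
    "-rA",        # summary of all test outcomes
    "--tb=line",  # one-line tracebacks, as in the dataset's test_cmd
])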
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 4 }
0.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pylint", "pydicom", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.5", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
astroid==2.11.7
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
charset-normalizer==2.0.12
cycler==0.11.0
-e git+https://github.com/pydicom/deid.git@f046a885f44f2b65e92b2e31719e95aacc9d059c#egg=deid
dill==0.3.4
idna==3.10
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
isort==5.10.1
lazy-object-proxy==1.7.1
matplotlib==2.1.2
mccabe==0.7.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
numpy==1.19.5
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pydicom==2.3.1
Pygments==2.14.0
pylint==2.13.9
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.27.1
retrying==1.3.4
simplejson==3.20.1
six==1.17.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
typed-ast==1.5.5
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.21.1
validator.py==1.3.0
wrapt==1.16.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: deid
channels:
  - defaults
  - https://repo.anaconda.com/pkgs/main
  - https://repo.anaconda.com/pkgs/r
  - conda-forge
dependencies:
  - _libgcc_mutex=0.1=main
  - _openmp_mutex=5.1=1_gnu
  - attrs=21.4.0=pyhd3eb1b0_0
  - ca-certificates=2025.2.25=h06a4308_0
  - certifi=2021.5.30=py36h06a4308_0
  - importlib-metadata=4.8.1=py36h06a4308_0
  - importlib_metadata=4.8.1=hd3eb1b0_0
  - iniconfig=1.1.1=pyhd3eb1b0_0
  - ld_impl_linux-64=2.40=h12ee557_0
  - libffi=3.3=he6710b0_2
  - libgcc-ng=11.2.0=h1234567_1
  - libgomp=11.2.0=h1234567_1
  - libstdcxx-ng=11.2.0=h1234567_1
  - more-itertools=8.12.0=pyhd3eb1b0_0
  - ncurses=6.4=h6a678d5_0
  - openssl=1.1.1w=h7f8727e_0
  - packaging=21.3=pyhd3eb1b0_0
  - pip=21.2.2=py36h06a4308_0
  - pluggy=0.13.1=py36h06a4308_0
  - py=1.11.0=pyhd3eb1b0_0
  - pyparsing=3.0.4=pyhd3eb1b0_0
  - pytest=6.2.4=py36h06a4308_2
  - python=3.6.13=h12debd9_1
  - readline=8.2=h5eee18b_0
  - setuptools=58.0.4=py36h06a4308_0
  - sqlite=3.45.3=h5eee18b_0
  - tk=8.6.14=h39e8969_0
  - toml=0.10.2=pyhd3eb1b0_0
  - typing_extensions=4.1.1=pyh06a4308_0
  - wheel=0.37.1=pyhd3eb1b0_0
  - xz=5.6.4=h5eee18b_1
  - zipp=3.6.0=pyhd3eb1b0_0
  - zlib=1.2.13=h5eee18b_1
  - pip:
    - astroid==2.11.7
    - charset-normalizer==2.0.12
    - cycler==0.11.0
    - dill==0.3.4
    - idna==3.10
    - isort==5.10.1
    - lazy-object-proxy==1.7.1
    - matplotlib==2.1.2
    - mccabe==0.7.0
    - numpy==1.19.5
    - platformdirs==2.4.0
    - pydicom==2.3.1
    - pygments==2.14.0
    - pylint==2.13.9
    - python-dateutil==2.9.0.post0
    - pytz==2025.2
    - requests==2.27.1
    - retrying==1.3.4
    - simplejson==3.20.1
    - six==1.17.0
    - tomli==1.2.3
    - typed-ast==1.5.5
    - urllib3==1.21.1
    - validator-py==1.3.0
    - wrapt==1.16.0
prefix: /opt/conda/envs/deid
[ "deid/tests/test_utils.py::TestUtils::test_get_temporary_name" ]
[]
[ "deid/tests/test_utils.py::TestUtils::test_get_installdir", "deid/tests/test_utils.py::TestUtils::test_recursive_find", "deid/tests/test_utils.py::TestUtils::test_recursive_find_as_list", "deid/tests/test_utils.py::TestUtils::test_write_read_files" ]
[]
MIT License
3,094
[ "deid/utils.py", "deid/version.py", "CHANGELOG.md", "deid/dicom/pixels/clean.py" ]
[ "deid/utils.py", "deid/version.py", "CHANGELOG.md", "deid/dicom/pixels/clean.py" ]