instance_id
stringlengths
10
57
base_commit
stringlengths
40
40
created_at
stringdate
2014-04-30 14:58:36
2025-04-30 20:14:11
environment_setup_commit
stringlengths
40
40
hints_text
stringlengths
0
273k
patch
stringlengths
251
7.06M
problem_statement
stringlengths
11
52.5k
repo
stringlengths
7
53
test_patch
stringlengths
231
997k
meta
dict
version
stringclasses
851 values
install_config
dict
requirements
stringlengths
93
34.2k
environment
stringlengths
760
20.5k
FAIL_TO_PASS
listlengths
1
9.39k
FAIL_TO_FAIL
listlengths
0
2.69k
PASS_TO_PASS
listlengths
0
7.87k
PASS_TO_FAIL
listlengths
0
192
license_name
stringclasses
55 values
__index_level_0__
int64
0
21.4k
before_filepaths
listlengths
1
105
after_filepaths
listlengths
1
105
mjs__imapclient-291
f02732d6a55f3f3b55ea20a5451693d7869cd419
2017-09-26 11:50:15
f2f27e5cd6fe83ec68ba8087fb74930727ac4159
NicolasLM: @mjs There is an undocumented `ParseError` in `imapclient.response_parser`. Should it be replaced by this new error? mlorant: Could you add the following test in `test_imapclient.py` ? ``` def test_tagged_response_with_parse_error(self): client = self.client client._imap.tagged_commands = {sentinel.tag: None} client._imap._get_response = lambda: b'NOT-A-STAR 99 EXISTS' with self.assertRaises(ProtocolException): client._consume_until_tagged_response(sentinel.tag, b'IDLE') ``` It may not be perfect (With a star, the command does not finish since the response is not complete) but as long as it raises the ProtocolError... NicolasLM: Test case added, thanks. mjs: @NicolasLM : Yes, please have the parser raise ProtocolError instead of ParseError. I've looked at where ParseError is used and that makes sense. NicolasLM: Done.
diff --git a/doc/src/releases.rst b/doc/src/releases.rst index e2a07dd..95d087c 100644 --- a/doc/src/releases.rst +++ b/doc/src/releases.rst @@ -21,6 +21,8 @@ Changed are not used anymore. - More precise exceptions available in `imapclient.exceptions` are raised when an error happens +- `imapclient.exceptions.ProtocolError` is now raised when the reply from a + remote server violates the IMAP protocol. - GMail labels are now strings instead of bytes in Python 3. Fixed diff --git a/imapclient/exceptions.py b/imapclient/exceptions.py index 1130da1..26fd51c 100644 --- a/imapclient/exceptions.py +++ b/imapclient/exceptions.py @@ -36,3 +36,7 @@ class InvalidCriteriaError(IMAPClientError): A command using a search criteria failed, probably due to a syntax error in the criteria string. """ + + +class ProtocolError(IMAPClientError): + """The server replied with a response that violates the IMAP protocol.""" diff --git a/imapclient/imapclient.py b/imapclient/imapclient.py index 58fae81..fea196b 100644 --- a/imapclient/imapclient.py +++ b/imapclient/imapclient.py @@ -24,7 +24,7 @@ from . 
import tls from .datetime_util import datetime_to_INTERNALDATE, format_criteria_date from .imap_utf7 import encode as encode_utf7, decode as decode_utf7 from .response_parser import parse_response, parse_message_list, parse_fetch_response -from .util import to_bytes, to_unicode +from .util import to_bytes, to_unicode, assert_imap_protocol xrange = moves.xrange if PY3: @@ -1526,7 +1526,7 @@ def _maybe_int_to_bytes(val): def _parse_untagged_response(text): - assert text.startswith(b'* ') + assert_imap_protocol(text.startswith(b'* ')) text = text[2:] if text.startswith((b'OK ', b'NO ')): return tuple(text.split(b' ', 1)) diff --git a/imapclient/response_lexer.py b/imapclient/response_lexer.py index 88e09fb..2504c65 100644 --- a/imapclient/response_lexer.py +++ b/imapclient/response_lexer.py @@ -13,6 +13,8 @@ from __future__ import unicode_literals import six +from .util import assert_imap_protocol + __all__ = ["TokenSource"] CTRL_CHARS = frozenset(c for c in range(32)) @@ -97,7 +99,7 @@ class Lexer(object): if nextchar in whitespace: yield token elif nextchar == DOUBLE_QUOTE: - assert not token + assert_imap_protocol(not token) token.append(nextchar) token.extend(read_until(stream_i, nextchar)) yield token @@ -138,7 +140,7 @@ class LiteralHandlingIter: # A 'record' with a string which includes a literal marker, and # the literal itself. self.src_text = resp_record[0] - assert self.src_text.endswith(b"}"), self.src_text + assert_imap_protocol(self.src_text.endswith(b"}"), self.src_text) self.literal = resp_record[1] else: # just a line with no literals. 
diff --git a/imapclient/response_parser.py b/imapclient/response_parser.py index 1b65ad4..4331be6 100644 --- a/imapclient/response_parser.py +++ b/imapclient/response_parser.py @@ -22,14 +22,11 @@ import six from .datetime_util import parse_to_datetime from .response_lexer import TokenSource from .response_types import BodyData, Envelope, Address, SearchIds +from .exceptions import ProtocolError xrange = six.moves.xrange -__all__ = ['parse_response', 'parse_message_list', 'ParseError'] - - -class ParseError(ValueError): - pass +__all__ = ['parse_response', 'parse_message_list'] def parse_response(data): @@ -93,11 +90,11 @@ def gen_parsed_response(text): try: for token in src: yield atom(src, token) - except ParseError: + except ProtocolError: raise except ValueError: _, err, _ = sys.exc_info() - raise ParseError("%s: %s" % (str(err), token)) + raise ProtocolError("%s: %s" % (str(err), token)) def parse_fetch_response(text, normalise_times=True, uid_is_key=True): @@ -121,12 +118,12 @@ def parse_fetch_response(text, normalise_times=True, uid_is_key=True): try: msg_response = six.next(response) except StopIteration: - raise ParseError('unexpected EOF') + raise ProtocolError('unexpected EOF') if not isinstance(msg_response, tuple): - raise ParseError('bad response type: %s' % repr(msg_response)) + raise ProtocolError('bad response type: %s' % repr(msg_response)) if len(msg_response) % 2: - raise ParseError('uneven number of response items: %s' % repr(msg_response)) + raise ProtocolError('uneven number of response items: %s' % repr(msg_response)) # always return the sequence of the message, so it is available # even if we return keyed by UID. 
@@ -159,7 +156,7 @@ def _int_or_error(value, error_text): try: return int(value) except (TypeError, ValueError): - raise ParseError('%s: %s' % (error_text, repr(value))) + raise ProtocolError('%s: %s' % (error_text, repr(value))) def _convert_INTERNALDATE(date_string, normalise_times=True): @@ -212,9 +209,9 @@ def atom(src, token): literal_len = int(token[1:-1]) literal_text = src.current_literal if literal_text is None: - raise ParseError('No literal corresponds to %r' % token) + raise ProtocolError('No literal corresponds to %r' % token) if len(literal_text) != literal_len: - raise ParseError('Expecting literal of size %d, got %d' % ( + raise ProtocolError('Expecting literal of size %d, got %d' % ( literal_len, len(literal_text))) return literal_text elif len(token) >= 2 and (token[:1] == token[-1:] == b'"'): @@ -232,7 +229,7 @@ def parse_tuple(src): return tuple(out) out.append(atom(src, token)) # no terminator - raise ParseError('Tuple incomplete before "(%s"' % _fmt_tuple(out)) + raise ProtocolError('Tuple incomplete before "(%s"' % _fmt_tuple(out)) def _fmt_tuple(t): diff --git a/imapclient/util.py b/imapclient/util.py index 3314ea5..aa89115 100644 --- a/imapclient/util.py +++ b/imapclient/util.py @@ -7,6 +7,8 @@ from __future__ import unicode_literals import logging from six import binary_type, text_type +from . import exceptions + logger = logging.getLogger(__name__) @@ -27,3 +29,11 @@ def to_bytes(s, charset='ascii'): if isinstance(s, text_type): return s.encode(charset) return s + + +def assert_imap_protocol(condition, message=None): + if not condition: + msg = "Server replied with a response that violates the IMAP protocol" + if message: + msg += "{}: {}".format(msg, message) + raise exceptions.ProtocolError(msg)
Avoid using assert when parsing server response Some servers are not totally compliant with the IMAP protocol or sometimes give buggy output like: `"TEXT" "PLAIN" ("charset " " "utf-8"")`. I believe IMAPClient should not `assert` when parsing the response from a server. Assert makes it hard to recover from such errors. A custom IMAPClient exception would be more helpful to the user.
mjs/imapclient
diff --git a/tests/test_imapclient.py b/tests/test_imapclient.py index f8f458f..f56ca5b 100644 --- a/tests/test_imapclient.py +++ b/tests/test_imapclient.py @@ -12,7 +12,9 @@ import logging import six -from imapclient.exceptions import CapabilityError, IMAPClientError +from imapclient.exceptions import ( + CapabilityError, IMAPClientError, ProtocolError +) from imapclient.imapclient import IMAPlibLoggerAdapter from imapclient.fixed_offset import FixedOffset from imapclient.testable_imapclient import TestableIMAPClient as IMAPClient @@ -651,3 +653,14 @@ class TestContextManager(IMAPClientTest): with self.assertRaises(ValueError): with self.client as _: raise ValueError("Error raised inside the context manager") + + +class TestProtocolError(IMAPClientTest): + + def test_tagged_response_with_parse_error(self): + client = self.client + client._imap.tagged_commands = {sentinel.tag: None} + client._imap._get_response = lambda: b'NOT-A-STAR 99 EXISTS' + + with self.assertRaises(ProtocolError): + client._consume_until_tagged_response(sentinel.tag, b'IDLE') \ No newline at end of file diff --git a/tests/test_response_parser.py b/tests/test_response_parser.py index 946cd16..04694fc 100644 --- a/tests/test_response_parser.py +++ b/tests/test_response_parser.py @@ -16,9 +16,9 @@ from imapclient.response_parser import ( parse_response, parse_message_list, parse_fetch_response, - ParseError, ) from imapclient.response_types import Envelope, Address +from imapclient.exceptions import ProtocolError from tests.util import unittest from .util import patch @@ -160,7 +160,7 @@ class TestParseResponse(unittest.TestCase): def _test_parse_error(self, to_parse, expected_msg): if not isinstance(to_parse, list): to_parse = [to_parse] - self.assertRaisesRegex(ParseError, expected_msg, + self.assertRaisesRegex(ProtocolError, expected_msg, parse_response, to_parse) @@ -200,13 +200,13 @@ class TestParseFetchResponse(unittest.TestCase): self.assertEqual(parse_fetch_response([None]), {}) def 
test_bad_msgid(self): - self.assertRaises(ParseError, parse_fetch_response, [b'abc ()']) + self.assertRaises(ProtocolError, parse_fetch_response, [b'abc ()']) def test_bad_data(self): - self.assertRaises(ParseError, parse_fetch_response, [b'2 WHAT']) + self.assertRaises(ProtocolError, parse_fetch_response, [b'2 WHAT']) def test_missing_data(self): - self.assertRaises(ParseError, parse_fetch_response, [b'2']) + self.assertRaises(ProtocolError, parse_fetch_response, [b'2']) def test_simple_pairs(self): self.assertEqual(parse_fetch_response([b'23 (ABC 123 StUfF "hello")']), @@ -215,8 +215,8 @@ class TestParseFetchResponse(unittest.TestCase): b'SEQ': 23}}) def test_odd_pairs(self): - self.assertRaises(ParseError, parse_fetch_response, [b'(ONE)']) - self.assertRaises(ParseError, parse_fetch_response, [b'(ONE TWO THREE)']) + self.assertRaises(ProtocolError, parse_fetch_response, [b'(ONE)']) + self.assertRaises(ProtocolError, parse_fetch_response, [b'(ONE TWO THREE)']) def test_UID(self): self.assertEqual(parse_fetch_response([b'23 (UID 76)']), @@ -230,7 +230,7 @@ class TestParseFetchResponse(unittest.TestCase): b'SEQ': 23}}) def test_bad_UID(self): - self.assertRaises(ParseError, parse_fetch_response, [b'(UID X)']) + self.assertRaises(ProtocolError, parse_fetch_response, [b'(UID X)']) def test_FLAGS(self): self.assertEqual(parse_fetch_response([b'23 (FLAGS (\Seen Stuff))']), diff --git a/tests/test_util_functions.py b/tests/test_util_functions.py index b3becb8..ba3f271 100644 --- a/tests/test_util_functions.py +++ b/tests/test_util_functions.py @@ -4,15 +4,17 @@ from __future__ import unicode_literals -from imapclient.exceptions import InvalidCriteriaError +from imapclient.exceptions import InvalidCriteriaError, ProtocolError from imapclient.imapclient import ( join_message_ids, _normalise_search_criteria, normalise_text_list, seq_to_parenstr, seq_to_parenstr_upper, - _quoted + _quoted, + _parse_untagged_response ) +from imapclient.util import assert_imap_protocol from 
tests.util import unittest @@ -166,3 +168,17 @@ class Test_normalise_search_criteria(unittest.TestCase): def test_empty(self): self.assertRaises(InvalidCriteriaError, _normalise_search_criteria, '', None) + + +class TestAssertIMAPProtocol(unittest.TestCase): + + def test_assert_imap_protocol(self): + assert_imap_protocol(True) + with self.assertRaises(ProtocolError): + assert_imap_protocol(False) + + + def test_assert_imap_protocol_with_message(self): + assert_imap_protocol(True, 'foo') + with self.assertRaises(ProtocolError): + assert_imap_protocol(False, 'foo')
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 6 }
1.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest pytest-cov", "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 coverage==6.2 -e git+https://github.com/mjs/imapclient.git@f02732d6a55f3f3b55ea20a5451693d7869cd419#egg=IMAPClient importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 pytest-cov==4.0.0 six==1.17.0 toml @ file:///tmp/build/80754af9/toml_1616166611790/work tomli==1.2.3 typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: imapclient channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==6.2 - pytest-cov==4.0.0 - six==1.17.0 - tomli==1.2.3 prefix: /opt/conda/envs/imapclient
[ "tests/test_imapclient.py::TestListFolders::test_blanks", "tests/test_imapclient.py::TestListFolders::test_empty_response", "tests/test_imapclient.py::TestListFolders::test_folder_encode_off", "tests/test_imapclient.py::TestListFolders::test_funky_characters", "tests/test_imapclient.py::TestListFolders::test_list_folders", "tests/test_imapclient.py::TestListFolders::test_list_folders_NO", "tests/test_imapclient.py::TestListFolders::test_list_sub_folders", "tests/test_imapclient.py::TestListFolders::test_list_sub_folders_NO", "tests/test_imapclient.py::TestListFolders::test_mixed", "tests/test_imapclient.py::TestListFolders::test_quoted_specials", "tests/test_imapclient.py::TestListFolders::test_simple", "tests/test_imapclient.py::TestListFolders::test_unquoted_numeric_folder_name", "tests/test_imapclient.py::TestListFolders::test_unquoted_numeric_folder_name_parsed_as_long", "tests/test_imapclient.py::TestListFolders::test_utf7_decoding", "tests/test_imapclient.py::TestListFolders::test_without_quotes", "tests/test_imapclient.py::TestSelectFolder::test_normal", "tests/test_imapclient.py::TestSelectFolder::test_unselect", "tests/test_imapclient.py::TestAppend::test_with_msg_time", "tests/test_imapclient.py::TestAppend::test_without_msg_time", "tests/test_imapclient.py::TestAclMethods::test_getacl", "tests/test_imapclient.py::TestAclMethods::test_setacl", "tests/test_imapclient.py::TestIdleAndNoop::test_consume_until_tagged_response", "tests/test_imapclient.py::TestIdleAndNoop::test_idle", "tests/test_imapclient.py::TestIdleAndNoop::test_idle_check_blocking", "tests/test_imapclient.py::TestIdleAndNoop::test_idle_check_timeout", "tests/test_imapclient.py::TestIdleAndNoop::test_idle_check_with_data", "tests/test_imapclient.py::TestIdleAndNoop::test_idle_done", "tests/test_imapclient.py::TestIdleAndNoop::test_noop", "tests/test_imapclient.py::TestDebugLogging::test_IMAP_is_patched", "tests/test_imapclient.py::TestTimeNormalisation::test_default", 
"tests/test_imapclient.py::TestTimeNormalisation::test_pass_through", "tests/test_imapclient.py::TestNamespace::test_complex", "tests/test_imapclient.py::TestNamespace::test_folder_decoding", "tests/test_imapclient.py::TestNamespace::test_other_only", "tests/test_imapclient.py::TestNamespace::test_simple", "tests/test_imapclient.py::TestNamespace::test_without_folder_decoding", "tests/test_imapclient.py::TestCapabilities::test_caching", "tests/test_imapclient.py::TestCapabilities::test_has_capability", "tests/test_imapclient.py::TestCapabilities::test_post_auth_request", "tests/test_imapclient.py::TestCapabilities::test_preauth", "tests/test_imapclient.py::TestCapabilities::test_server_returned_capability_after_auth", "tests/test_imapclient.py::TestCapabilities::test_with_starttls", "tests/test_imapclient.py::TestId::test_id", "tests/test_imapclient.py::TestId::test_invalid_parameters", "tests/test_imapclient.py::TestId::test_no_support", "tests/test_imapclient.py::TestRawCommand::test_complex", "tests/test_imapclient.py::TestRawCommand::test_embedded_literal", "tests/test_imapclient.py::TestRawCommand::test_failed_continuation_wait", "tests/test_imapclient.py::TestRawCommand::test_invalid_input_type", "tests/test_imapclient.py::TestRawCommand::test_literal_at_end", "tests/test_imapclient.py::TestRawCommand::test_multiple_literals", "tests/test_imapclient.py::TestRawCommand::test_not_uid", "tests/test_imapclient.py::TestRawCommand::test_plain", "tests/test_imapclient.py::TestExpunge::test_expunge", "tests/test_imapclient.py::TestExpunge::test_id_expunge", "tests/test_imapclient.py::TestShutdown::test_shutdown", "tests/test_imapclient.py::TestContextManager::test_context_manager", "tests/test_imapclient.py::TestContextManager::test_context_manager_fail_closing", "tests/test_imapclient.py::TestContextManager::test_exception_inside_context_manager", "tests/test_imapclient.py::TestProtocolError::test_tagged_response_with_parse_error", 
"tests/test_response_parser.py::TestParseResponse::test_bad_literal", "tests/test_response_parser.py::TestParseResponse::test_bad_quoting", "tests/test_response_parser.py::TestParseResponse::test_complex_mixed", "tests/test_response_parser.py::TestParseResponse::test_deeper_nest_tuple", "tests/test_response_parser.py::TestParseResponse::test_empty_tuple", "tests/test_response_parser.py::TestParseResponse::test_envelopey", "tests/test_response_parser.py::TestParseResponse::test_envelopey_quoted", "tests/test_response_parser.py::TestParseResponse::test_incomplete_tuple", "tests/test_response_parser.py::TestParseResponse::test_int", "tests/test_response_parser.py::TestParseResponse::test_int_and_tuple", "tests/test_response_parser.py::TestParseResponse::test_literal", "tests/test_response_parser.py::TestParseResponse::test_literal_with_more", "tests/test_response_parser.py::TestParseResponse::test_nested_tuple", "tests/test_response_parser.py::TestParseResponse::test_nil", "tests/test_response_parser.py::TestParseResponse::test_quoted_specials", "tests/test_response_parser.py::TestParseResponse::test_square_brackets", "tests/test_response_parser.py::TestParseResponse::test_string", "tests/test_response_parser.py::TestParseResponse::test_tuple", "tests/test_response_parser.py::TestParseResponse::test_unquoted", "tests/test_response_parser.py::TestParseMessageList::test_basic", "tests/test_response_parser.py::TestParseMessageList::test_modseq", "tests/test_response_parser.py::TestParseMessageList::test_modseq_interleaved", "tests/test_response_parser.py::TestParseMessageList::test_modseq_no_space", "tests/test_response_parser.py::TestParseMessageList::test_one_id", "tests/test_response_parser.py::TestParseFetchResponse::test_Address_str", "tests/test_response_parser.py::TestParseFetchResponse::test_Address_str_ignores_encoding_error", "tests/test_response_parser.py::TestParseFetchResponse::test_BODY", 
"tests/test_response_parser.py::TestParseFetchResponse::test_BODYSTRUCTURE", "tests/test_response_parser.py::TestParseFetchResponse::test_BODY_HEADER_FIELDS", "tests/test_response_parser.py::TestParseFetchResponse::test_ENVELOPE", "tests/test_response_parser.py::TestParseFetchResponse::test_ENVELOPE_with_empty_addresses", "tests/test_response_parser.py::TestParseFetchResponse::test_ENVELOPE_with_invalid_date", "tests/test_response_parser.py::TestParseFetchResponse::test_ENVELOPE_with_no_date", "tests/test_response_parser.py::TestParseFetchResponse::test_FLAGS", "tests/test_response_parser.py::TestParseFetchResponse::test_INTERNALDATE", "tests/test_response_parser.py::TestParseFetchResponse::test_INTERNALDATE_normalised", "tests/test_response_parser.py::TestParseFetchResponse::test_UID", "tests/test_response_parser.py::TestParseFetchResponse::test_bad_UID", "tests/test_response_parser.py::TestParseFetchResponse::test_bad_data", "tests/test_response_parser.py::TestParseFetchResponse::test_bad_msgid", "tests/test_response_parser.py::TestParseFetchResponse::test_basic", "tests/test_response_parser.py::TestParseFetchResponse::test_literals", "tests/test_response_parser.py::TestParseFetchResponse::test_literals_and_keys_with_square_brackets", "tests/test_response_parser.py::TestParseFetchResponse::test_missing_data", "tests/test_response_parser.py::TestParseFetchResponse::test_mixed_types", "tests/test_response_parser.py::TestParseFetchResponse::test_multiple_messages", "tests/test_response_parser.py::TestParseFetchResponse::test_none_special_case", "tests/test_response_parser.py::TestParseFetchResponse::test_not_uid_is_key", "tests/test_response_parser.py::TestParseFetchResponse::test_odd_pairs", "tests/test_response_parser.py::TestParseFetchResponse::test_partial_fetch", "tests/test_response_parser.py::TestParseFetchResponse::test_same_message_appearing_multiple_times", "tests/test_response_parser.py::TestParseFetchResponse::test_simple_pairs", 
"tests/test_util_functions.py::Test_normalise_text_list::test_binary", "tests/test_util_functions.py::Test_normalise_text_list::test_iter", "tests/test_util_functions.py::Test_normalise_text_list::test_list", "tests/test_util_functions.py::Test_normalise_text_list::test_mixed_list", "tests/test_util_functions.py::Test_normalise_text_list::test_tuple", "tests/test_util_functions.py::Test_normalise_text_list::test_unicode", "tests/test_util_functions.py::Test_seq_to_parenstr::test_binary", "tests/test_util_functions.py::Test_seq_to_parenstr::test_iter", "tests/test_util_functions.py::Test_seq_to_parenstr::test_list", "tests/test_util_functions.py::Test_seq_to_parenstr::test_mixed_list", "tests/test_util_functions.py::Test_seq_to_parenstr::test_tuple", "tests/test_util_functions.py::Test_seq_to_parenstr::test_unicode", "tests/test_util_functions.py::Test_seq_to_parenstr_upper::test_binary", "tests/test_util_functions.py::Test_seq_to_parenstr_upper::test_iter", "tests/test_util_functions.py::Test_seq_to_parenstr_upper::test_list", "tests/test_util_functions.py::Test_seq_to_parenstr_upper::test_mixed_list", "tests/test_util_functions.py::Test_seq_to_parenstr_upper::test_tuple", "tests/test_util_functions.py::Test_seq_to_parenstr_upper::test_unicode", "tests/test_util_functions.py::Test_join_message_ids::test_binary", "tests/test_util_functions.py::Test_join_message_ids::test_binary_non_numeric", "tests/test_util_functions.py::Test_join_message_ids::test_int", "tests/test_util_functions.py::Test_join_message_ids::test_iter", "tests/test_util_functions.py::Test_join_message_ids::test_mixed_list", "tests/test_util_functions.py::Test_join_message_ids::test_tuple", "tests/test_util_functions.py::Test_join_message_ids::test_unicode", "tests/test_util_functions.py::Test_join_message_ids::test_unicode_non_numeric", "tests/test_util_functions.py::Test_normalise_search_criteria::test_None", "tests/test_util_functions.py::Test_normalise_search_criteria::test_binary", 
"tests/test_util_functions.py::Test_normalise_search_criteria::test_binary_with_charset", "tests/test_util_functions.py::Test_normalise_search_criteria::test_empty", "tests/test_util_functions.py::Test_normalise_search_criteria::test_ints", "tests/test_util_functions.py::Test_normalise_search_criteria::test_list", "tests/test_util_functions.py::Test_normalise_search_criteria::test_mixed_list", "tests/test_util_functions.py::Test_normalise_search_criteria::test_no_quoting_when_criteria_given_as_string", "tests/test_util_functions.py::Test_normalise_search_criteria::test_quoting", "tests/test_util_functions.py::Test_normalise_search_criteria::test_tuple", "tests/test_util_functions.py::Test_normalise_search_criteria::test_unicode", "tests/test_util_functions.py::Test_normalise_search_criteria::test_unicode_with_charset", "tests/test_util_functions.py::TestAssertIMAPProtocol::test_assert_imap_protocol", "tests/test_util_functions.py::TestAssertIMAPProtocol::test_assert_imap_protocol_with_message" ]
[ "tests/test_imapclient.py::TestDebugLogging::test_redacted_password" ]
[]
[]
BSD License
1,702
[ "doc/src/releases.rst", "imapclient/imapclient.py", "imapclient/response_lexer.py", "imapclient/response_parser.py", "imapclient/util.py", "imapclient/exceptions.py" ]
[ "doc/src/releases.rst", "imapclient/imapclient.py", "imapclient/response_lexer.py", "imapclient/response_parser.py", "imapclient/util.py", "imapclient/exceptions.py" ]
OpenMined__PySyft-254
6c84afb0d4d541039bdcad4357cc7b62a3d24084
2017-09-26 21:01:03
06ce023225dd613d8fb14ab2046135b93ab22376
diff --git a/syft/tensor.py b/syft/tensor.py index 2bb335111e..a5f21740c9 100644 --- a/syft/tensor.py +++ b/syft/tensor.py @@ -725,19 +725,72 @@ class TensorBase(object): else: return [TensorBase(x) for x in np.array_split(self.data, n, dim)] - def gt(self, t): + def gt(self, other): """Returns a new Tensor having boolean True values where an element of the calling tensor is greater than the second Tensor, False otherwise. The second Tensor can be a number or a tensor whose shape is broadcastable with the calling Tensor.""" - if self.encrypted: + other = _ensure_tensorbase(other) + if self.encrypted or other.encrypted: return NotImplemented - return TensorBase(np.greater(self.data, _ensure_tensorbase(t).data)) + return TensorBase(np.greater(self.data, other.data)) - def gt_(self, t): + def gt_(self, other): """Writes in-place, boolean True values where an element of the calling tensor is greater than the second Tensor, False otherwise. The second Tensor can be a number or a tensor whose shape is broadcastable with the calling Tensor.""" - if self.encrypted: + other = _ensure_tensorbase(other) + if self.encrypted or other.encrypted: + return NotImplemented + self.data = np.greater(self.data, other.data) + return self + + def lt(self, other): + """Returns a new Tensor having boolean True values where an element of the calling tensor is less than the second Tensor, False otherwise. + The second Tensor can be a number or a tensor whose shape is broadcastable with the calling Tensor.""" + other = _ensure_tensorbase(other) + if self.encrypted or other.encrypted: + return NotImplemented + return TensorBase(np.less(self.data, other.data)) + + def lt_(self, other): + """Writes in-place, boolean True values where an element of the calling tensor is less than the second Tensor, False otherwise. 
+ The second Tensor can be a number or a tensor whose shape is broadcastable with the calling Tensor.""" + other = _ensure_tensorbase(other) + if self.encrypted or other.encrypted: + return NotImplemented + self.data = np.less(self.data, other.data) + return self + + def ge(self, other): + """Returns a new Tensor having boolean True values where an element of the calling tensor is greater or equal than the second Tensor, False otherwise. + The second Tensor can be a number or a tensor whose shape is broadcastable with the calling Tensor.""" + other = _ensure_tensorbase(other) + if self.encrypted or other.encrypted: + return NotImplemented + return TensorBase(np.greater_equal(self.data, other.data)) + + def ge_(self, other): + """Writes in-place, boolean True values where an element of the calling tensor is greater or equal than the second Tensor, False otherwise. + The second Tensor can be a number or a tensor whose shape is broadcastable with the calling Tensor.""" + other = _ensure_tensorbase(other) + if self.encrypted or other.encrypted: + return NotImplemented + self.data = np.greater_equal(self.data, other.data) + return self + + def le(self, other): + """Returns a new Tensor having boolean True values where an element of the calling tensor is less or equal than the second Tensor, False otherwise. + The second Tensor can be a number or a tensor whose shape is broadcastable with the calling Tensor.""" + other = _ensure_tensorbase(other) + if self.encrypted or other.encrypted: + return NotImplemented + return TensorBase(np.less_equal(self.data, other.data)) + + def le_(self, other): + """Writes in-place, boolean True values where an element of the calling tensor is less or equal than the second Tensor, False otherwise. 
+ The second Tensor can be a number or a tensor whose shape is broadcastable with the calling Tensor.""" + other = _ensure_tensorbase(other) + if self.encrypted or other.encrypted: return NotImplemented - self.data = np.greater(self.data, _ensure_tensorbase(t).data) + self.data = np.less_equal(self.data, other.data) return self def bernoulli(self, p):
Implement Default ge Functionality for Base Tensor Type **User Story A:** As a Data Scientist using Syft's Base Tensor type, we want to implement a default method for computing operations on a Tensor of arbitrary type. For this ticket to be complete, ge() should return a new tensor and ge_() should perform the operation inline. For a reference on the operation this performs check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation. **Acceptance Criteria:** - If the Base Tensor type's attribute "encrypted" is set to True, it should return a NotImplemented error. - a unit test demonstrating the correct operation on the Base Tensor type implemented over int and float Tensors. - inline documentation in the python code. For inspiration on inline documentation, please check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation for this operator.
OpenMined/PySyft
diff --git a/tests/test_tensor.py b/tests/test_tensor.py index 62fc6336bb..55a16ceab9 100644 --- a/tests/test_tensor.py +++ b/tests/test_tensor.py @@ -560,28 +560,107 @@ class chunkTests(unittest.TestCase): self.assertEqual(t2.shape(), t3.shape()) -class gtTests(unittest.TestCase): +class inequalityTest(unittest.TestCase): + def setUp(self): + self.a1 = np.array([-2, -1, 0, 1, 2]) + self.a2 = np.array([-4, -1, 5, 2, 2]) + + self.t1 = TensorBase(self.a1) + self.t2 = TensorBase(self.a2) + + self.enc = TensorBase(self.a1, encrypted=True) + + +class gtTests(inequalityTest): def testGtWithTensor(self): - t1 = TensorBase(np.arange(10)) - t2 = TensorBase(np.arange(10)[-1::-1]) - truth_values = t1.gt(t2) - self.assertEqual(truth_values, [False, False, False, False, False, True, True, True, True, True]) + self.assertEqual(self.t1.gt(self.t2), self.a1 > self.a2) def testGtWithNumber(self): - t1 = TensorBase(np.arange(10)) - truth_values = t1.gt(-1) - self.assertEqual(truth_values, [True] * 10) + self.assertEqual(self.t1.gt(1), self.a1 > 1) def testGtInPlaceWithTensor(self): - t1 = TensorBase(np.arange(10)) - t2 = TensorBase(np.arange(10)[-1::-1]) - t1.gt_(t2) - self.assertEqual(t1, [False, False, False, False, False, True, True, True, True, True]) + self.t1.gt_(self.t2) + self.assertEqual(self.t1, self.a1 > self.a2) def testGtInPlaceWithNumber(self): - t1 = TensorBase(np.arange(10)) - t1.gt_(-1) - self.assertEqual(t1, [True] * 10) + self.t1.gt_(1) + self.assertEqual(self.t1, self.a1 > 1) + + def testWithEncrypted(self): + res = self.t1.gt(self.enc) + self.assertEqual(res, NotImplemented) + + res = self.enc.gt(self.t1) + self.assertEqual(res, NotImplemented) + + +class geTests(inequalityTest): + def testGeWithTensor(self): + self.assertEqual(self.t1.ge(self.t2), self.a1 >= self.a2) + + def testGeWithNumber(self): + self.assertEqual(self.t1.ge(1), self.a1 >= 1) + + def testGeInPlaceWithTensor(self): + self.t1.ge_(self.t2) + self.assertEqual(self.t1, self.a1 >= self.a2) + + def 
testGeInPlaceWithNumber(self): + self.t1.ge_(1) + self.assertEqual(self.t1, self.a1 >= 1) + + def testWithEncrypted(self): + res = self.t1.ge(self.enc) + self.assertEqual(res, NotImplemented) + + res = self.enc.ge(self.t1) + self.assertEqual(res, NotImplemented) + + +class ltTests(inequalityTest): + def testLtWithTensor(self): + self.assertEqual(self.t1.lt(self.t2), self.a1 < self.a2) + + def testLtWithNumber(self): + self.assertEqual(self.t1.lt(1), self.a1 < 1) + + def testLtInPlaceWithTensor(self): + self.t1.lt_(self.t2) + self.assertEqual(self.t1, self.a1 < self.a2) + + def testLtInPlaceWithNumber(self): + self.t1.lt_(1) + self.assertEqual(self.t1, self.a1 < 1) + + def testWithEncrypted(self): + res = self.t1.lt(self.enc) + self.assertEqual(res, NotImplemented) + + res = self.enc.lt(self.t1) + self.assertEqual(res, NotImplemented) + + +class leTests(inequalityTest): + def testLeWithTensor(self): + self.assertEqual(self.t1.le(self.t2), self.a1 <= self.a2) + + def testLeWithNumber(self): + self.assertEqual(self.t1.le(1), self.a1 <= 1) + + def testLeInPlaceWithTensor(self): + self.t1.le_(self.t2) + self.assertEqual(self.t1, self.a1 <= self.a2) + + def testLeInPlaceWithNumber(self): + self.t1.le_(1) + self.assertEqual(self.t1, self.a1 <= 1) + + def testWithEncrypted(self): + res = self.t1.le(self.enc) + self.assertEqual(res, NotImplemented) + + res = self.enc.le(self.t1) + self.assertEqual(res, NotImplemented) class bernoulliTests(unittest.TestCase):
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 1 }
PySyft/hydrogen
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "line_profiler", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y musl-dev g++ libgmp3-dev libmpfr-dev ca-certificates libmpc-dev" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
args==0.1.0 attrs==22.2.0 certifi==2021.5.30 clint==0.5.1 flake8==5.0.4 importlib-metadata==4.2.0 iniconfig==1.1.1 line-profiler==4.1.3 mccabe==0.7.0 numpy==1.19.5 packaging==21.3 phe==1.5.0 pluggy==1.0.0 py==1.11.0 pycodestyle==2.9.1 pyflakes==2.5.0 pyparsing==3.1.4 pyRserve==1.0.4 pytest==7.0.1 pytest-flake8==1.1.1 scipy==1.5.4 -e git+https://github.com/OpenMined/PySyft.git@6c84afb0d4d541039bdcad4357cc7b62a3d24084#egg=syft tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: PySyft channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - args==0.1.0 - attrs==22.2.0 - clint==0.5.1 - flake8==5.0.4 - importlib-metadata==4.2.0 - iniconfig==1.1.1 - line-profiler==4.1.3 - mccabe==0.7.0 - numpy==1.19.5 - packaging==21.3 - phe==1.5.0 - pluggy==1.0.0 - py==1.11.0 - pycodestyle==2.9.1 - pyflakes==2.5.0 - pyparsing==3.1.4 - pyrserve==1.0.4 - pytest==7.0.1 - pytest-flake8==1.1.1 - scipy==1.5.4 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/PySyft
[ "tests/test_tensor.py::gtTests::testWithEncrypted", "tests/test_tensor.py::geTests::testGeInPlaceWithNumber", "tests/test_tensor.py::geTests::testGeInPlaceWithTensor", "tests/test_tensor.py::geTests::testGeWithNumber", "tests/test_tensor.py::geTests::testGeWithTensor", "tests/test_tensor.py::geTests::testWithEncrypted", "tests/test_tensor.py::ltTests::testLtInPlaceWithNumber", "tests/test_tensor.py::ltTests::testLtInPlaceWithTensor", "tests/test_tensor.py::ltTests::testLtWithNumber", "tests/test_tensor.py::ltTests::testLtWithTensor", "tests/test_tensor.py::ltTests::testWithEncrypted", "tests/test_tensor.py::leTests::testLeInPlaceWithNumber", "tests/test_tensor.py::leTests::testLeInPlaceWithTensor", "tests/test_tensor.py::leTests::testLeWithNumber", "tests/test_tensor.py::leTests::testLeWithTensor", "tests/test_tensor.py::leTests::testWithEncrypted" ]
[]
[ "tests/test_tensor.py::DimTests::testAsView", "tests/test_tensor.py::DimTests::testDimOne", "tests/test_tensor.py::DimTests::testResize", "tests/test_tensor.py::DimTests::testResizeAs", "tests/test_tensor.py::DimTests::testSize", "tests/test_tensor.py::DimTests::testView", "tests/test_tensor.py::AddTests::testInplace", "tests/test_tensor.py::AddTests::testScalar", "tests/test_tensor.py::AddTests::testSimple", "tests/test_tensor.py::CeilTests::testCeil", "tests/test_tensor.py::CeilTests::testCeil_", "tests/test_tensor.py::ZeroTests::testZero", "tests/test_tensor.py::FloorTests::testFloor_", "tests/test_tensor.py::SubTests::testInplace", "tests/test_tensor.py::SubTests::testScalar", "tests/test_tensor.py::SubTests::testSimple", "tests/test_tensor.py::MaxTests::testAxis", "tests/test_tensor.py::MaxTests::testNoDim", "tests/test_tensor.py::MultTests::testInplace", "tests/test_tensor.py::MultTests::testScalar", "tests/test_tensor.py::MultTests::testSimple", "tests/test_tensor.py::DivTests::testInplace", "tests/test_tensor.py::DivTests::testScalar", "tests/test_tensor.py::DivTests::testSimple", "tests/test_tensor.py::AbsTests::testabs", "tests/test_tensor.py::AbsTests::testabs_", "tests/test_tensor.py::ShapeTests::testShape", "tests/test_tensor.py::SqrtTests::testSqrt", "tests/test_tensor.py::SqrtTests::testSqrt_", "tests/test_tensor.py::SumTests::testDimIsNotNoneInt", "tests/test_tensor.py::SumTests::testDimNoneInt", "tests/test_tensor.py::EqualTests::testEqOp", "tests/test_tensor.py::EqualTests::testEqual", "tests/test_tensor.py::EqualTests::testIneqOp", "tests/test_tensor.py::EqualTests::testNotEqual", "tests/test_tensor.py::IndexTests::testIndexing", "tests/test_tensor.py::sigmoidTests::testSigmoid", "tests/test_tensor.py::addmm::testaddmm1d", "tests/test_tensor.py::addmm::testaddmm2d", "tests/test_tensor.py::addmm::testaddmm_1d", "tests/test_tensor.py::addmm::testaddmm_2d", "tests/test_tensor.py::addcmulTests::testaddcmul1d", 
"tests/test_tensor.py::addcmulTests::testaddcmul2d", "tests/test_tensor.py::addcmulTests::testaddcmul_1d", "tests/test_tensor.py::addcmulTests::testaddcmul_2d", "tests/test_tensor.py::addcdivTests::testaddcdiv1d", "tests/test_tensor.py::addcdivTests::testaddcdiv2d", "tests/test_tensor.py::addcdivTests::testaddcdiv_1d", "tests/test_tensor.py::addcdivTests::testaddcdiv_2d", "tests/test_tensor.py::addmvTests::testaddmv", "tests/test_tensor.py::addmvTests::testaddmv_", "tests/test_tensor.py::addbmmTests::testaddbmm", "tests/test_tensor.py::addbmmTests::testaddbmm_", "tests/test_tensor.py::baddbmmTests::testbaddbmm", "tests/test_tensor.py::baddbmmTests::testbaddbmm_", "tests/test_tensor.py::transposeTests::testT", "tests/test_tensor.py::transposeTests::testT_", "tests/test_tensor.py::transposeTests::testTranspose", "tests/test_tensor.py::transposeTests::testTranspose_", "tests/test_tensor.py::unsqueezeTests::testUnsqueeze", "tests/test_tensor.py::unsqueezeTests::testUnsqueeze_", "tests/test_tensor.py::expTests::testexp", "tests/test_tensor.py::expTests::testexp_", "tests/test_tensor.py::fracTests::testfrac", "tests/test_tensor.py::fracTests::testfrac_", "tests/test_tensor.py::rsqrtTests::testrsqrt", "tests/test_tensor.py::rsqrtTests::testrsqrt_", "tests/test_tensor.py::signTests::testsign", "tests/test_tensor.py::signTests::testsign_", "tests/test_tensor.py::numpyTests::testnumpy", "tests/test_tensor.py::reciprocalTests::testreciprocal", "tests/test_tensor.py::reciprocalTests::testrsqrt_", "tests/test_tensor.py::logTests::testLog", "tests/test_tensor.py::logTests::testLog1p", "tests/test_tensor.py::logTests::testLog1p_", "tests/test_tensor.py::logTests::testLog_", "tests/test_tensor.py::clampTests::testClampFloat", "tests/test_tensor.py::clampTests::testClampFloatInPlace", "tests/test_tensor.py::clampTests::testClampInt", "tests/test_tensor.py::clampTests::testClampIntInPlace", "tests/test_tensor.py::cloneTests::testClone", "tests/test_tensor.py::chunkTests::testChunk", 
"tests/test_tensor.py::chunkTests::testChunkSameSize", "tests/test_tensor.py::gtTests::testGtInPlaceWithNumber", "tests/test_tensor.py::gtTests::testGtInPlaceWithTensor", "tests/test_tensor.py::gtTests::testGtWithNumber", "tests/test_tensor.py::gtTests::testGtWithTensor", "tests/test_tensor.py::bernoulliTests::testBernoulli", "tests/test_tensor.py::bernoulliTests::testBernoulli_", "tests/test_tensor.py::uniformTests::testUniform", "tests/test_tensor.py::uniformTests::testUniform_", "tests/test_tensor.py::fillTests::testFill_", "tests/test_tensor.py::topkTests::testTopK", "tests/test_tensor.py::tolistTests::testToList", "tests/test_tensor.py::traceTests::testTrace", "tests/test_tensor.py::roundTests::testRound", "tests/test_tensor.py::roundTests::testRound_", "tests/test_tensor.py::repeatTests::testRepeat", "tests/test_tensor.py::powTests::testPow", "tests/test_tensor.py::powTests::testPow_", "tests/test_tensor.py::prodTests::testProd", "tests/test_tensor.py::randomTests::testRandom_", "tests/test_tensor.py::nonzeroTests::testNonZero", "tests/test_tensor.py::cumprodTest::testCumprod", "tests/test_tensor.py::cumprodTest::testCumprod_", "tests/test_tensor.py::splitTests::testSplit", "tests/test_tensor.py::squeezeTests::testSqueeze", "tests/test_tensor.py::expandAsTests::testExpandAs", "tests/test_tensor.py::meanTests::testMean", "tests/test_tensor.py::notEqualTests::testNe", "tests/test_tensor.py::notEqualTests::testNe_", "tests/test_tensor.py::index_selectTests::testIndex_select", "tests/test_tensor.py::gatherTests::testGatherNumerical1", "tests/test_tensor.py::gatherTests::testGatherNumerical2", "tests/test_tensor.py::scatterTests::testScatter_DimOutOfRange", "tests/test_tensor.py::scatterTests::testScatter_IndexOutOfRange", "tests/test_tensor.py::scatterTests::testScatter_IndexType", "tests/test_tensor.py::scatterTests::testScatter_Numerical0", "tests/test_tensor.py::scatterTests::testScatter_Numerical1", 
"tests/test_tensor.py::scatterTests::testScatter_Numerical2", "tests/test_tensor.py::scatterTests::testScatter_Numerical3", "tests/test_tensor.py::scatterTests::testScatter_Numerical4", "tests/test_tensor.py::scatterTests::testScatter_Numerical5", "tests/test_tensor.py::scatterTests::testScatter_Numerical6", "tests/test_tensor.py::scatterTests::testScatter_index_src_dimension_mismatch", "tests/test_tensor.py::remainderTests::testRemainder", "tests/test_tensor.py::remainderTests::testRemainder_", "tests/test_tensor.py::remainderTests::testRemainder_broadcasting", "tests/test_tensor.py::masked_scatter_Tests::testMasked_scatter_1", "tests/test_tensor.py::masked_scatter_Tests::testMasked_scatter_braodcasting1", "tests/test_tensor.py::masked_scatter_Tests::testMasked_scatter_braodcasting2", "tests/test_tensor.py::masked_fill_Tests::testMasked_fill_", "tests/test_tensor.py::masked_fill_Tests::testMasked_fill_broadcasting", "tests/test_tensor.py::eqTests::testEqInPlaceWithNumber", "tests/test_tensor.py::eqTests::testEqInPlaceWithTensor", "tests/test_tensor.py::eqTests::testEqWithNumber", "tests/test_tensor.py::eqTests::testEqWithTensor" ]
[]
Apache License 2.0
1,703
[ "syft/tensor.py" ]
[ "syft/tensor.py" ]
pre-commit__pre-commit-hooks-240
efdceb4e40cda10780f4646ec944f55b5786190d
2017-09-27 14:48:20
efdceb4e40cda10780f4646ec944f55b5786190d
diff --git a/pre_commit_hooks/mixed_line_ending.py b/pre_commit_hooks/mixed_line_ending.py index 301c654..a163726 100644 --- a/pre_commit_hooks/mixed_line_ending.py +++ b/pre_commit_hooks/mixed_line_ending.py @@ -55,7 +55,8 @@ def fix_filename(filename, fix): else: target_ending = FIX_TO_LINE_ENDING[fix] # find if there are lines with *other* endings - del counts[target_ending] + # It's possible there's no line endings of the target type + counts.pop(target_ending, None) other_endings = bool(sum(counts.values())) if other_endings: _fix(filename, contents, target_ending)
mixed-line-ending -- KeyError: b'\n' `pre-commit 1.1.2` ```bash Mixed line ending........................................................Failed hookid: mixed-line-ending Traceback (most recent call last): File "/home/gary/.cache/pre-commit/repos71slzol/py_env-python3.6/bin/mixed-line-ending", line 11, in <module> load_entry_point('pre-commit-hooks==0.9.4', 'console_scripts', 'mixed-line-ending')() File "/home/gary/.cache/pre-commit/repos71slzol/py_env-python3.6/lib/python3.6/site-packages/pre_commit_hooks/mixed_line_ending.py", line 78, in main retv |= fix_filename(filename, args.fix) File "/home/gary/.cache/pre-commit/repos71slzol/py_env-python3.6/lib/python3.6/site-packages/pre_commit_hooks/mixed_line_ending.py", line 58, in fix_filename del counts[target_ending] KeyError: b'\n' ``` ```yaml - repo: git://github.com/pre-commit/pre-commit-hooks sha: v0.9.4 hooks: - id: mixed-line-ending args: [--fix=lf] ```
pre-commit/pre-commit-hooks
diff --git a/tests/mixed_line_ending_test.py b/tests/mixed_line_ending_test.py index 808295b..23837cd 100644 --- a/tests/mixed_line_ending_test.py +++ b/tests/mixed_line_ending_test.py @@ -101,3 +101,13 @@ def test_fix_crlf(tmpdir): assert ret == 1 assert path.read_binary() == b'foo\r\nbar\r\nbaz\r\n' + + +def test_fix_lf_all_crlf(tmpdir): + """Regression test for #239""" + path = tmpdir.join('input.txt') + path.write_binary(b'foo\r\nbar\r\n') + ret = main(('--fix=lf', path.strpath)) + + assert ret == 1 + assert path.read_binary() == b'foo\nbar\n'
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 1 }
0.9
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements-dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 autopep8==2.0.4 certifi==2021.5.30 cfgv==3.3.1 coverage==6.2 distlib==0.3.9 execnet==1.9.0 filelock==3.4.1 flake8==2.5.5 identify==2.4.4 importlib-metadata==4.2.0 importlib-resources==5.2.3 iniconfig==1.1.1 mccabe==0.4.0 mock==5.2.0 nodeenv==1.6.0 packaging==21.3 pep8==1.7.1 platformdirs==2.4.0 pluggy==1.0.0 pre-commit==2.17.0 -e git+https://github.com/pre-commit/pre-commit-hooks.git@efdceb4e40cda10780f4646ec944f55b5786190d#egg=pre_commit_hooks py==1.11.0 pycodestyle==2.10.0 pyflakes==1.0.0 pyparsing==3.1.4 pytest==7.0.1 pytest-asyncio==0.16.0 pytest-cov==4.0.0 pytest-mock==3.6.1 pytest-xdist==3.0.2 PyYAML==6.0.1 simplejson==3.20.1 six==1.17.0 toml==0.10.2 tomli==1.2.3 typing_extensions==4.1.1 virtualenv==20.16.2 zipp==3.6.0
name: pre-commit-hooks channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - autopep8==2.0.4 - cfgv==3.3.1 - coverage==6.2 - distlib==0.3.9 - execnet==1.9.0 - filelock==3.4.1 - flake8==2.5.5 - identify==2.4.4 - importlib-metadata==4.2.0 - importlib-resources==5.2.3 - iniconfig==1.1.1 - mccabe==0.4.0 - mock==5.2.0 - nodeenv==1.6.0 - packaging==21.3 - pep8==1.7.1 - platformdirs==2.4.0 - pluggy==1.0.0 - pre-commit==2.17.0 - py==1.11.0 - pycodestyle==2.10.0 - pyflakes==1.0.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-asyncio==0.16.0 - pytest-cov==4.0.0 - pytest-mock==3.6.1 - pytest-xdist==3.0.2 - pyyaml==6.0.1 - simplejson==3.20.1 - six==1.17.0 - toml==0.10.2 - tomli==1.2.3 - typing-extensions==4.1.1 - virtualenv==20.16.2 - zipp==3.6.0 prefix: /opt/conda/envs/pre-commit-hooks
[ "tests/mixed_line_ending_test.py::test_fix_lf_all_crlf" ]
[]
[ "tests/mixed_line_ending_test.py::test_mixed_line_ending_fixes_auto[foo\\r\\nbar\\nbaz\\n-foo\\nbar\\nbaz\\n]", "tests/mixed_line_ending_test.py::test_mixed_line_ending_fixes_auto[foo\\r\\nbar\\nbaz\\r\\n-foo\\r\\nbar\\r\\nbaz\\r\\n]", "tests/mixed_line_ending_test.py::test_mixed_line_ending_fixes_auto[foo\\rbar\\nbaz\\r-foo\\rbar\\rbaz\\r]", "tests/mixed_line_ending_test.py::test_mixed_line_ending_fixes_auto[foo\\r\\nbar\\n-foo\\nbar\\n]", "tests/mixed_line_ending_test.py::test_mixed_line_ending_fixes_auto[foo\\rbar\\n-foo\\nbar\\n]", "tests/mixed_line_ending_test.py::test_mixed_line_ending_fixes_auto[foo\\r\\nbar\\r-foo\\r\\nbar\\r\\n]", "tests/mixed_line_ending_test.py::test_mixed_line_ending_fixes_auto[foo\\r\\nbar\\nbaz\\r-foo\\nbar\\nbaz\\n]", "tests/mixed_line_ending_test.py::test_non_mixed_no_newline_end_of_file", "tests/mixed_line_ending_test.py::test_mixed_no_newline_end_of_file", "tests/mixed_line_ending_test.py::test_line_endings_ok[--fix=auto-foo\\r\\nbar\\r\\nbaz\\r\\n]", "tests/mixed_line_ending_test.py::test_line_endings_ok[--fix=auto-foo\\rbar\\rbaz\\r]", "tests/mixed_line_ending_test.py::test_line_endings_ok[--fix=auto-foo\\nbar\\nbaz\\n]", "tests/mixed_line_ending_test.py::test_line_endings_ok[--fix=crlf-foo\\r\\nbar\\r\\nbaz\\r\\n]", "tests/mixed_line_ending_test.py::test_line_endings_ok[--fix=lf-foo\\nbar\\nbaz\\n]", "tests/mixed_line_ending_test.py::test_no_fix_does_not_modify", "tests/mixed_line_ending_test.py::test_fix_lf", "tests/mixed_line_ending_test.py::test_fix_crlf" ]
[]
MIT License
1,704
[ "pre_commit_hooks/mixed_line_ending.py" ]
[ "pre_commit_hooks/mixed_line_ending.py" ]
mkdocs__mkdocs-1294
b8123ffb86da6c2eef114db0c2eea7c6315a281a
2017-09-27 18:07:01
84906a7a6c936719539339b2f46658c1a561527f
diff --git a/mkdocs/utils/__init__.py b/mkdocs/utils/__init__.py index eb9f7a42..2b335111 100644 --- a/mkdocs/utils/__init__.py +++ b/mkdocs/utils/__init__.py @@ -103,12 +103,15 @@ def reduce_list(data_set): def copy_file(source_path, output_path): """ Copy source_path to output_path, making sure any parent directories exist. - """ + The output_path may be a directory. + """ output_dir = os.path.dirname(output_path) if not os.path.exists(output_dir): os.makedirs(output_dir) - shutil.copy(source_path, output_path) + if os.path.isdir(output_path): + output_path = os.path.join(output_path, os.path.basename(source_path)) + shutil.copyfile(source_path, output_path) def write_file(content, output_path):
Permission denied when copying a template from a read-only path There are two things uncommon about my setup that cause the error. 1. I have installed mkdocs using [Nix](https://nixos.org/nix/). Long story short, all the mkdocs files (the ones in `lib/python2.7/site-packages/mkdocs`) have mode 0444 (that is, read-only). 2. I have `theme_dir` set in `mkdocs.yml` and I use it to overwrite one of the theme files, namely `js/highlight.pack.js`. This is what I get: ~~~~ $ mkdocs build WARNING - Config value: 'extra_javascript'. Warning: The following files have been automatically included in the documentation build and will be added to the HTML: highlight/theme/js/highlight.pack.js. This behavior is deprecated. In version 1.0 and later they will need to be explicitly listed in the 'extra_javascript' config setting. INFO - Cleaning site directory INFO - Building documentation to directory: <project path>/build/site Traceback (most recent call last): File "/nix/store/2zkwsgan90gl63pqnq01vrdrpf11fm1m-mkdocs-0.16.3/bin/.mkdocs-wrapped", line 12, in <module> sys.exit(cli()) File "/nix/store/cjhms7xja78pbh5gnh9ii7hlxizq2iy7-python2.7-click-6.7/lib/python2.7/site-packages/click/core.py", line 722, in __call__ return self.main(*args, **kwargs) File "/nix/store/cjhms7xja78pbh5gnh9ii7hlxizq2iy7-python2.7-click-6.7/lib/python2.7/site-packages/click/core.py", line 697, in main rv = self.invoke(ctx) File "/nix/store/cjhms7xja78pbh5gnh9ii7hlxizq2iy7-python2.7-click-6.7/lib/python2.7/site-packages/click/core.py", line 1066, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/nix/store/cjhms7xja78pbh5gnh9ii7hlxizq2iy7-python2.7-click-6.7/lib/python2.7/site-packages/click/core.py", line 895, in invoke return ctx.invoke(self.callback, **ctx.params) File "/nix/store/cjhms7xja78pbh5gnh9ii7hlxizq2iy7-python2.7-click-6.7/lib/python2.7/site-packages/click/core.py", line 535, in invoke return callback(*args, **kwargs) File 
"/nix/store/2zkwsgan90gl63pqnq01vrdrpf11fm1m-mkdocs-0.16.3/lib/python2.7/site-packages/mkdocs/__main__.py", line 156, in build_command ), dirty=not clean) File "/nix/store/2zkwsgan90gl63pqnq01vrdrpf11fm1m-mkdocs-0.16.3/lib/python2.7/site-packages/mkdocs/commands/build.py", line 373, in build theme_dir, config['site_dir'], exclude=['*.py', '*.pyc', '*.html'], dirty=dirty File "/nix/store/2zkwsgan90gl63pqnq01vrdrpf11fm1m-mkdocs-0.16.3/lib/python2.7/site-packages/mkdocs/utils/__init__.py", line 175, in copy_media_files copy_file(source_path, output_path) File "/nix/store/2zkwsgan90gl63pqnq01vrdrpf11fm1m-mkdocs-0.16.3/lib/python2.7/site-packages/mkdocs/utils/__init__.py", line 110, in copy_file shutil.copy(source_path, output_path) File "/nix/store/w8zld7z4gq4b36z0szgrh6yv5zi30915-python-2.7.13/lib/python2.7/shutil.py", line 119, in copy copyfile(src, dst) File "/nix/store/w8zld7z4gq4b36z0szgrh6yv5zi30915-python-2.7.13/lib/python2.7/shutil.py", line 83, in copyfile with open(dst, 'wb') as fdst: IOError: [Errno 13] Permission denied: u'<project path>/build/site/js/highlight.pack.js' $ ls -l build/site/js/ total 396 -r--r--r-- 1 kirelagin staff 300764 Sep 26 16:03 highlight.pack.js -r--r--r-- 1 kirelagin staff 84245 Sep 26 16:03 jquery-2.1.1.min.js -r--r--r-- 1 kirelagin staff 11084 Sep 26 16:03 modernizr-2.8.3.min.js -r--r--r-- 1 kirelagin staff 2676 Sep 26 16:03 theme.js $ ls -ld build/site/js/ drwxr-xr-x 6 kirelagin staff 204 Sep 26 16:03 build/site/js/ ~~~~ What happens is, the built-in theme files get copied with their permissions preserved, so `site/js/highlight.pack.js` ends up having mode 0444. Next mkdocs tries to overwrite this file with the one from the `theme_dir` and at this point `shutil.copyfile` fails, because that’s how it works. I’m not really sure what to do with that. Probably, catching the exception and adjusting the permissions would make sense.
mkdocs/mkdocs
diff --git a/mkdocs/tests/utils/utils_tests.py b/mkdocs/tests/utils/utils_tests.py index 5ffe24fd..fec697f7 100644 --- a/mkdocs/tests/utils/utils_tests.py +++ b/mkdocs/tests/utils/utils_tests.py @@ -6,6 +6,9 @@ from __future__ import unicode_literals import mock import os import unittest +import tempfile +import shutil +import stat from mkdocs import nav, utils, exceptions from mkdocs.tests.base import dedent @@ -248,3 +251,71 @@ class UtilsTests(unittest.TestCase): config = utils.yaml_load(yaml_src) self.assertTrue(isinstance(config['key'], utils.text_type)) self.assertTrue(isinstance(config['key2'][0], utils.text_type)) + + def test_copy_files(self): + src_paths = [ + 'foo.txt', + 'bar.txt', + 'baz.txt', + ] + dst_paths = [ + 'foo.txt', + 'foo/', # ensure src filename is appended + 'foo/bar/baz.txt' # ensure missing dirs are created + ] + expected = [ + 'foo.txt', + 'foo/bar.txt', + 'foo/bar/baz.txt', + ] + + src_dir = tempfile.mkdtemp() + dst_dir = tempfile.mkdtemp() + + try: + for i, src in enumerate(src_paths): + src = os.path.join(src_dir, src) + with open(src, 'w') as f: + f.write('content') + dst = os.path.join(dst_dir, dst_paths[i]) + utils.copy_file(src, dst) + self.assertTrue(os.path.isfile(os.path.join(dst_dir, expected[i]))) + finally: + shutil.rmtree(src_dir) + shutil.rmtree(dst_dir) + + def test_copy_files_without_permissions(self): + src_paths = [ + 'foo.txt', + 'bar.txt', + 'baz.txt', + ] + expected = [ + 'foo.txt', + 'bar.txt', + 'baz.txt', + ] + + src_dir = tempfile.mkdtemp() + dst_dir = tempfile.mkdtemp() + + try: + for i, src in enumerate(src_paths): + src = os.path.join(src_dir, src) + with open(src, 'w') as f: + f.write('content') + # Set src file to read-only + os.chmod(src, stat.S_IRUSR) + utils.copy_file(src, dst_dir) + self.assertTrue(os.path.isfile(os.path.join(dst_dir, expected[i]))) + self.assertNotEqual(os.stat(src).st_mode, os.stat(os.path.join(dst_dir, expected[i])).st_mode) + # While src was read-only, dst must remain writable + 
self.assertTrue(os.access(os.path.join(dst_dir, expected[i]), os.W_OK)) + finally: + for src in src_paths: + # Undo read-only so we can delete temp files + src = os.path.join(src_dir, src) + if os.path.exists(src): + os.chmod(src, stat.S_IRUSR | stat.S_IWUSR) + shutil.rmtree(src_dir) + shutil.rmtree(dst_dir)
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 1 }
0.16
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "mock" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/project.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
click==8.1.8 coverage==7.8.0 exceptiongroup==1.2.2 importlib_metadata==8.6.1 iniconfig==2.1.0 Jinja2==3.1.6 livereload==2.7.1 Markdown==3.7 MarkupSafe==3.0.2 -e git+https://github.com/mkdocs/mkdocs.git@b8123ffb86da6c2eef114db0c2eea7c6315a281a#egg=mkdocs mock==5.2.0 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 pytest-cov==6.0.0 PyYAML==6.0.2 tomli==2.2.1 tornado==6.4.2 zipp==3.21.0
name: mkdocs channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - click==8.1.8 - coverage==7.8.0 - exceptiongroup==1.2.2 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - jinja2==3.1.6 - livereload==2.7.1 - markdown==3.7 - markupsafe==3.0.2 - mock==5.2.0 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - pytest-cov==6.0.0 - pyyaml==6.0.2 - tomli==2.2.1 - tornado==6.4.2 - zipp==3.21.0 prefix: /opt/conda/envs/mkdocs
[ "mkdocs/tests/utils/utils_tests.py::UtilsTests::test_copy_files_without_permissions" ]
[ "mkdocs/tests/utils/utils_tests.py::UtilsTests::test_create_media_urls", "mkdocs/tests/utils/utils_tests.py::UtilsTests::test_create_relative_media_url_sub_index", "mkdocs/tests/utils/utils_tests.py::UtilsTests::test_create_relative_media_url_sub_index_windows" ]
[ "mkdocs/tests/utils/utils_tests.py::UtilsTests::test_copy_files", "mkdocs/tests/utils/utils_tests.py::UtilsTests::test_get_theme_dir", "mkdocs/tests/utils/utils_tests.py::UtilsTests::test_get_theme_dir_importerror", "mkdocs/tests/utils/utils_tests.py::UtilsTests::test_get_theme_dir_keyerror", "mkdocs/tests/utils/utils_tests.py::UtilsTests::test_get_themes", "mkdocs/tests/utils/utils_tests.py::UtilsTests::test_get_themes_error", "mkdocs/tests/utils/utils_tests.py::UtilsTests::test_get_themes_warning", "mkdocs/tests/utils/utils_tests.py::UtilsTests::test_html_path", "mkdocs/tests/utils/utils_tests.py::UtilsTests::test_is_html_file", "mkdocs/tests/utils/utils_tests.py::UtilsTests::test_is_markdown_file", "mkdocs/tests/utils/utils_tests.py::UtilsTests::test_nest_paths", "mkdocs/tests/utils/utils_tests.py::UtilsTests::test_reduce_list", "mkdocs/tests/utils/utils_tests.py::UtilsTests::test_unicode_yaml", "mkdocs/tests/utils/utils_tests.py::UtilsTests::test_url_path" ]
[]
BSD 2-Clause "Simplified" License
1,705
[ "mkdocs/utils/__init__.py" ]
[ "mkdocs/utils/__init__.py" ]
Rambatino__CHAID-83
6a29f62c0ca5cee05eaa9ab512071d5048a85123
2017-09-27 19:11:35
17a4cbaed359b644e7ef34a93b44c38a2e24874e
codecov[bot]: # [Codecov](https://codecov.io/gh/Rambatino/CHAID/pull/83?src=pr&el=h1) Report > Merging [#83](https://codecov.io/gh/Rambatino/CHAID/pull/83?src=pr&el=desc) into [master](https://codecov.io/gh/Rambatino/CHAID/commit/6a29f62c0ca5cee05eaa9ab512071d5048a85123?src=pr&el=desc) will **decrease** coverage by `0.48%`. > The diff coverage is `93.33%`. [![Impacted file tree graph](https://codecov.io/gh/Rambatino/CHAID/pull/83/graphs/tree.svg?token=SkOhpCa986&src=pr&height=150&width=650)](https://codecov.io/gh/Rambatino/CHAID/pull/83?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #83 +/- ## ========================================== - Coverage 92.67% 92.18% -0.49% ========================================== Files 7 7 Lines 505 512 +7 ========================================== + Hits 468 472 +4 - Misses 37 40 +3 ``` | [Impacted Files](https://codecov.io/gh/Rambatino/CHAID/pull/83?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [CHAID/stats.py](https://codecov.io/gh/Rambatino/CHAID/pull/83?src=pr&el=tree#diff-Q0hBSUQvc3RhdHMucHk=) | `98.4% <90%> (-0.21%)` | :arrow_down: | | [CHAID/column.py](https://codecov.io/gh/Rambatino/CHAID/pull/83?src=pr&el=tree#diff-Q0hBSUQvY29sdW1uLnB5) | `89.09% <96%> (-0.2%)` | :arrow_down: | ------ [Continue to review full report at Codecov](https://codecov.io/gh/Rambatino/CHAID/pull/83?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/Rambatino/CHAID/pull/83?src=pr&el=footer). Last update [6a29f62...66a2080](https://codecov.io/gh/Rambatino/CHAID/pull/83?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
diff --git a/CHAID/column.py b/CHAID/column.py index ce1a712..444c073 100644 --- a/CHAID/column.py +++ b/CHAID/column.py @@ -3,6 +3,14 @@ from math import isnan from itertools import combinations from .mapping_dict import MappingDict +def is_sorted(ndarr, nan_val=None): + store = [] + for arr in ndarr: + if arr == [] or len(arr) == 1: continue + if nan_val is not None and nan_val in arr: + arr.remove(nan_val) + store.append(arr[-1] - arr[0] == len(arr) - 1) + return all(store) class Column(object): """ @@ -49,10 +57,27 @@ class Column(object): def deep_copy(self): """ - Returns a deep copy. + Returns a deep copy """ raise NotImplementedError + def bell_set(self, collection, ordinal=False): + """ + Calculates the Bell set + """ + if len(collection) == 1: + yield [ collection ] + return + + first = collection[0] + for smaller in self.bell_set(collection[1:]): + for n, subset in enumerate(smaller): + if not ordinal or (ordinal and is_sorted(smaller[:n] + [[ first ] + subset] + smaller[n+1:], self._nan)): + yield smaller[:n] + [[ first ] + subset] + smaller[n+1:] + + if not ordinal or (ordinal and is_sorted([ [ first ] ] + smaller, self._nan)): + yield [ [ first ] ] + smaller + class NominalColumn(Column): """ @@ -124,6 +149,11 @@ class NominalColumn(Column): def possible_groupings(self): return combinations(self._groupings.keys(), 2) + def all_combinations(self): + bell_set = self.bell_set(sorted(list(self._groupings.keys()))) + next(bell_set) + return bell_set + def group(self, x, y): self._groupings[x] += self._groupings[y] del self._groupings[y] @@ -215,6 +245,12 @@ class OrdinalColumn(Column): ] return self._possible_groups.__iter__() + def all_combinations(self): + bell_set = self.bell_set(sorted(list(self._groupings.keys())), True) + next(bell_set) + return bell_set + + def group(self, x, y): self._possible_groups = None if y != self._nan: diff --git a/CHAID/stats.py b/CHAID/stats.py index 6789241..212923d 100644 --- a/CHAID/stats.py +++ b/CHAID/stats.py @@ 
-76,68 +76,37 @@ class Stats(object): if len(list(ind_var.possible_groupings())) == 0: split.invalid_reason = InvalidSplitReason.PURE_NODE - while next(ind_var.possible_groupings(), None) is not None: - choice, highest_p_join, split_chi = None, None, None - for comb in ind_var.possible_groupings(): - col1_freq = freq[comb[0]] - col2_freq = freq[comb[1]] - - keys = set(col1_freq.keys()).union(col2_freq.keys()) - - n_ij = np.array([ - [col1_freq.get(k, 0) for k in keys], - [col2_freq.get(k, 0) for k in keys] - ]) + choice, highest_p_join, split_chi, dof = None, None, None, None + for comb in ind_var.all_combinations(): + freqs = [ sum( [ cl.Counter(freq[key]) for key in c ], cl.Counter()) for c in comb ] + keys = set(sum([ list(f.keys()) for f in freqs ], [])) - chi, p_split, dof = chisquare(n_ij, dep.weights is not None) - - if choice is None or p_split > highest_p_join or (p_split == highest_p_join and chi > split_chi): - choice, highest_p_join, split_chi = comb, p_split, chi - - invalid_reason = None - sufficient_split = highest_p_join < self.alpha_merge - if not sufficient_split: invalid_reason = InvalidSplitReason.ALPHA_MERGE - - sufficient_split = sufficient_split and all( - # what if a greater p-value on a different grouping would satisfy alpha merge _and_ min_child_node_size? 
- sum(node_v.values()) >= self.min_child_node_size for node_v in freq.values() + n_ij = np.array( + [ [ col.get(k, 0) for k in keys ] for col in freqs ] ) - if not sufficient_split: invalid_reason = InvalidSplitReason.MIN_CHILD_NODE_SIZE - if sufficient_split and len(freq.values()) > 1: - n_ij = np.array([ - [f[dep_val] for dep_val in all_dep] for f in freq.values() - ]) + chi, p_split, dof = chisquare(n_ij, dep.weights is not None) - dof = (n_ij.shape[0] - 1) * (n_ij.shape[1] - 1) - chi, p_split, dof = chisquare(n_ij, dep.weights is not None) + if (choice is None or p_split < highest_p_join or (p_split == highest_p_join and chi > split_chi)) and (n_ij.sum(axis=1) >= self.min_child_node_size).all() and p_split < self.alpha_merge: + choice, highest_p_join, split_chi = comb, p_split, chi - temp_split = Split(i, ind_var.groups(), chi, p_split, dof, split_name=ind_var.name) - better_split = not split.valid() or p_split < split.p or (p_split == split.p and chi > split.score) + temp_split = Split(i, choice, split_chi, highest_p_join, dof, split_name=ind_var.name) + better_split = (not split.valid() or p_split < split.p or (p_split == split.p and chi > split.score)) and choice is not None + if better_split: split, temp_split = temp_split, split - if better_split: - split, temp_split = temp_split, split - - chi_threshold = self.split_threshold * split.score + if split.valid() and choice is not None: + chi_threshold = self.split_threshold * split.score - if temp_split.valid() and temp_split.score >= chi_threshold: - for sur in temp_split.surrogates: - if sur.column_id != i and sur.score >= chi_threshold: - split.surrogates.append(sur) + if temp_split.valid() and temp_split.score >= chi_threshold: + for sur in temp_split.surrogates: + if sur.column_id != i and sur.score >= chi_threshold: + split.surrogates.append(sur) - temp_split.surrogates = [] - split.surrogates.append(temp_split) + temp_split.surrogates = [] + split.surrogates.append(temp_split) - break - else: - 
split.invalid_reason = invalid_reason + split.sub_split_values(ind[split.column_id].metadata) - ind_var.group(choice[0], choice[1]) - for val, count in freq[choice[1]].items(): - freq[choice[0]][val] += count - del freq[choice[1]] - if split.valid(): - split.sub_split_values(ind[split.column_id].metadata) return split def best_con_split(self, ind, dep): diff --git a/README.md b/README.md index 1982bee..71a197f 100644 --- a/README.md +++ b/README.md @@ -60,7 +60,7 @@ cols = [ NominalColumn(ndarr[:,1], name='b') NominalColumn(ndarr[:,2], name='c') ] -tree = Tree.from_numpy(cols, NominalColumn(arr, name='d'), {'min_child_node_size': 5}) +tree = Tree(cols, NominalColumn(arr, name='d'), {'min_child_node_size': 5}) >>> tree.print_tree() ([], {1: 5, 2: 5}, ('a', p=0.001565402258, score=10.0, groups=[[1], [2]]), dof=1))
Valid splits discounted when most significant split is generated below base size
Rambatino/CHAID
diff --git a/tests/setup_tests.py b/tests/setup_tests.py index de72e47..c8f7eea 100644 --- a/tests/setup_tests.py +++ b/tests/setup_tests.py @@ -13,7 +13,6 @@ sys.path = [ROOT_FOLDER] + sys.path import CHAID - def islist(a): return isinstance(a, Iterable) and not isinstance(a, str) diff --git a/tests/test_nominal_column.py b/tests/test_nominal_column.py index 34e4916..fc81df9 100644 --- a/tests/test_nominal_column.py +++ b/tests/test_nominal_column.py @@ -120,6 +120,24 @@ def test_fix_metadata_if_passed_in(): nominal = CHAID.NominalColumn(arr, metadata={1.0: 'Cat', 2.0: 'Haz', 3.0: 'Cheezburger'}) assert [nominal.metadata[x] for x in nominal.arr] == ['Cat', 'Haz', 'Cheezburger'] +def test_all_combinations(): + arr = np.array([1.0, 2.0, 3.0, 4.0]) + nominal = CHAID.NominalColumn(arr) + assert [ i for i in nominal.all_combinations()] == [[[0.0], [1.0, 2.0, 3.0]], + [[0.0, 1.0], [2.0, 3.0]], + [[1.0], [0.0, 2.0, 3.0]], + [[0.0], [1.0], [2.0, 3.0]], + [[0.0, 1.0, 2.0], [3.0]], + [[1.0, 2.0], [0.0, 3.0]], + [[0.0], [1.0, 2.0], [3.0]], + [[0.0, 2.0], [1.0, 3.0]], + [[2.0], [0.0, 1.0, 3.0]], + [[0.0], [2.0], [1.0, 3.0]], + [[0.0, 1.0], [2.0], [3.0]], + [[1.0], [0.0, 2.0], [3.0]], + [[1.0], [2.0], [0.0, 3.0]], + [[0.0], [1.0], [2.0], [3.0]]] + class TestDeepCopy(TestCase): """ Test fixture class for deep copy method """ diff --git a/tests/test_ordinal_column.py b/tests/test_ordinal_column.py index 8b7f2ed..60758b9 100644 --- a/tests/test_ordinal_column.py +++ b/tests/test_ordinal_column.py @@ -6,6 +6,36 @@ import numpy as np from numpy import nan from setup_tests import list_ordered_equal, list_unordered_equal, CHAID +def test_all_ordinal_combinations(): + arr = np.array([1.0, 2.0, 3.0, 4.0]) + ordinal = CHAID.OrdinalColumn(arr) + assert [ + i for i in ordinal.all_combinations() + ] == [[[1], [2, 3, 4]], + [[1, 2], [3, 4]], + [[1], [2], [3, 4]], + [[1, 2, 3], [4]], + [[1], [2, 3], [4]], + [[1, 2], [3], [4]], + [[1], [2], [3], [4]]] + +def 
test_all_ordinal_combinations_with_nan(): + arr = np.array([1.0, 2.0, 3.0, np.nan]) + ordinal = CHAID.OrdinalColumn(arr) + nan_val = np.array([np.nan]).astype(int)[0] + assert [ + i for i in ordinal.all_combinations() + ] == [[[nan_val], [1, 2, 3]], + [[nan_val, 1], [2, 3]], + [[1], [nan_val, 2, 3]], + [[nan_val], [1], [2, 3]], + [[nan_val, 1, 2], [3]], + [[1, 2], [nan_val, 3]], + [[nan_val], [1, 2], [3]], + [[nan_val, 1], [2], [3]], + [[1], [nan_val, 2], [3]], + [[1], [2], [nan_val, 3]], + [[nan_val], [1], [2], [3]]] class TestOrdinalDeepCopy(TestCase): """ Test fixture class for deep copy method """ diff --git a/tests/test_tree.py b/tests/test_tree.py index e411020..1a2cafb 100644 --- a/tests/test_tree.py +++ b/tests/test_tree.py @@ -126,7 +126,7 @@ def test_best_split_with_combination(): assert list_ordered_equal(ndarr, orig_ndarr), 'Calling chaid should have no side affects for original numpy arrays' assert list_ordered_equal(arr, orig_arr), 'Calling chaid should have no side affects for original numpy arrays' assert split.column_id == 0, 'Identifies correct column to split on' - assert list_unordered_equal(split.split_map, [[1], [2], [3]]), 'Correctly identifies catagories' + assert list_unordered_equal(split.split_map, [[1], [2, 3]]), 'Correctly identifies categories' assert list_unordered_equal(split.surrogates, []), 'No surrogates should be generated' assert split.p < 0.015 @@ -138,13 +138,13 @@ def test_new_columns_constructor(): orientation = np.array([0,0,1,1,0,0,1,1,0,0,1,2,2,2,2,2,2,2,2,1]) age = np.array([0,1,1,0,2,2,2,2,1,1,1,0,0,0,0,0,0,0,0,0]) income = np.array([0,0,1,1,2,0,1,1,1,0,1,0,0,0,0,0,0,0,0,0]) - + metadata = {0: '0-5', 1: '6-10', 2: '11-15'} cols = [ - CHAID.OrdinalColumn(orientation, name="orientation"), - CHAID.OrdinalColumn(age, name="age", metadata={0: '0-5', 1: '6-10', 2: '11-15'}), + CHAID.OrdinalColumn(orientation, name="orientation", metadata=metadata), + CHAID.OrdinalColumn(age, name="age", metadata=metadata), ] tree = 
CHAID.Tree(cols, CHAID.NominalColumn(income), {'min_child_node_size': 1}) - assert tree.tree_store[0].split.groupings == "[['0-5'], ['6-10', '11-15']]" + assert tree.tree_store[0].split.groupings == "[['0-5'], ['6-10'], ['11-15']]" class TestSurrogate(TestCase): @@ -313,7 +313,11 @@ def test_node_predictions(): ndarr = np.transpose(np.vstack([gender])) tree = CHAID.Tree.from_numpy(ndarr, income, alpha_merge=0.9, max_depth=1, min_child_node_size=1, min_parent_node_size=1) - assert (tree.node_predictions() == np.array([1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 3, 3, 3., 3, 3, 3, 3, 3, 2])).all() == True + + assert (tree.node_predictions() == np.array([ + 2.0, 2.0, 1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 2.0, 2.0, 1.0, 2.0, 2.0, 2.0, + 2.0, 2.0, 2.0, 2.0, 2.0, 1.0 + ])).all() == True class TestTreeGenerated(TestCase): """ Test case class to check that the tree is correcly lazy loaded """ @@ -369,7 +373,7 @@ class TestBugFixes(TestCase): def test_incorrect_weighted_counts(self): """ - Fix bug wherby the weights was using the class weights + Fix bug whereby the weights was using the class weights and not the sliced weights in node() """ tree = CHAID.Tree.from_numpy(self.ndarr, self.arr, alpha_merge=0.999, weights=self.wt, max_depth=5, min_parent_node_size=2, min_child_node_size=0) @@ -389,6 +393,30 @@ class TestBugFixes(TestCase): no_exception = False assert no_exception, 'Raised error while printing the tree' + def test_splits_shouldnt_carry_on_splitting_below_min_child_node_size(self): + """ + Fix bug whereby no splits occur when valid split is segmented below + min_child_node_size threshold + """ + region = np.array([ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 2, 3, 2, 2, 2, + 3, 2, 4, 4, 2, 4, 4, 4, 2, 2, 2, 2, 3, 2, 3, 2, 3, 2, 2, 2]) + age = np.array([ + 3, 4, 4, 3, 2, 4, 2, 3, 3, 2, 2, 3, 4, 3, 4, 2, 2, 3, 2, 3, + 2, 4, 4, 3, 2, 3, 1, 2, 4, 4, 3, 4, 4, 3, 2, 4, 2, 3, 3, 2, + 2, 3, 4, 3, 4, 2, 2, 3, 2, 3, 2, 4, 4, 3, 
2, 3, 1, 2, 4, 4]) + gender = np.array([ + 1, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 2, + 2, 2, 2, 1, 2, 1, 2, 1, 2, 2, 1, 2, 1, 2, 2, 2, 2, 2, 1, 2, + 2, 2, 2, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 1, 2, 1, 2, 1, 2, 2]) + lover = np.array([1] * 25 + [0] * 35) + tree = CHAID.Tree.from_numpy( + np.vstack([region, age, gender]).transpose(), + lover, + alpha_merge=0.05 + ) + assert len(tree.tree_store) == 3 class TestStoppingRules(TestCase): """ Testing that stopping rules are being applied correctly """ @@ -403,7 +431,7 @@ class TestStoppingRules(TestCase): Check that minimum child node size causes the tree to terminate correctly """ - tree = CHAID.Tree.from_numpy(self.ndarr, self.arr, alpha_merge=0.999, max_depth=5, min_child_node_size=11) + tree = CHAID.Tree.from_numpy(self.ndarr, self.arr, alpha_merge=0.999, max_depth=5, min_child_node_size=31) assert len(tree.tree_store) == 1 def test_min_child_node_size_does_not_stop_for_unweighted_case(self): @@ -420,7 +448,8 @@ class TestStoppingRules(TestCase): terminate correctly """ tree = CHAID.Tree.from_numpy(self.ndarr, self.arr, alpha_merge=0.999, weights=self.wt, max_depth=5, min_child_node_size=10.7) - assert len(tree.tree_store) == 4 + assert len(tree.tree_store) == 3 + assert round(tree.tree_store[0].split.p, 5) == 0.00029 def test_min_child_node_size_does_not_stop_for_weighted_case(self): """
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 3 }
5.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[test]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "codecov", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 -e git+https://github.com/Rambatino/CHAID.git@6a29f62c0ca5cee05eaa9ab512071d5048a85123#egg=CHAID charset-normalizer==2.0.12 codecov==2.1.13 coverage==6.2 Cython==3.0.12 detox==0.19 distlib==0.3.9 dnspython==2.2.1 eventlet==0.33.3 filelock==3.4.1 greenlet==2.0.2 idna==3.10 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 numpy==1.19.5 packaging==21.3 pandas==1.1.5 platformdirs==2.4.0 pluggy==0.13.1 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 requests==2.27.1 savReaderWriter==3.4.2 scipy==1.5.4 six==1.17.0 toml==0.10.2 tomli==1.2.3 tox==3.6.1 tox-pyenv==1.1.0 treelib==1.7.1 typing_extensions==4.1.1 urllib3==1.26.20 virtualenv==20.17.1 zipp==3.6.0
name: CHAID channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - charset-normalizer==2.0.12 - codecov==2.1.13 - coverage==6.2 - cython==3.0.12 - detox==0.19 - distlib==0.3.9 - dnspython==2.2.1 - eventlet==0.33.3 - filelock==3.4.1 - greenlet==2.0.2 - idna==3.10 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - numpy==1.19.5 - packaging==21.3 - pandas==1.1.5 - platformdirs==2.4.0 - pluggy==0.13.1 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - requests==2.27.1 - savreaderwriter==3.4.2 - scipy==1.5.4 - six==1.17.0 - toml==0.10.2 - tomli==1.2.3 - tox==3.6.1 - tox-pyenv==1.1.0 - treelib==1.7.1 - typing-extensions==4.1.1 - urllib3==1.26.20 - virtualenv==20.17.1 - zipp==3.6.0 prefix: /opt/conda/envs/CHAID
[ "tests/test_nominal_column.py::test_all_combinations", "tests/test_ordinal_column.py::test_all_ordinal_combinations", "tests/test_ordinal_column.py::test_all_ordinal_combinations_with_nan", "tests/test_tree.py::test_best_split_with_combination", "tests/test_tree.py::test_new_columns_constructor", "tests/test_tree.py::test_node_predictions", "tests/test_tree.py::TestBugFixes::test_splits_shouldnt_carry_on_splitting_below_min_child_node_size", "tests/test_tree.py::TestStoppingRules::test_min_child_node_size_does_stop_for_weighted_case" ]
[]
[ "tests/test_nominal_column.py::test_chaid_vector_converts_strings", "tests/test_nominal_column.py::test_chaid_vector_with_ints", "tests/test_nominal_column.py::test_chaid_vector_with_ints_and_nan", "tests/test_nominal_column.py::test_chaid_vector_with_floats", "tests/test_nominal_column.py::test_chaid_vector_with_floats_and_nan", "tests/test_nominal_column.py::test_chaid_vector_with_dtype_object", "tests/test_nominal_column.py::test_chaid_vector_with_dtype_object_and_nans", "tests/test_nominal_column.py::test_column_stores_weights", "tests/test_nominal_column.py::test_fix_metadata_if_passed_in", "tests/test_nominal_column.py::TestDeepCopy::test_changing_copy", "tests/test_nominal_column.py::TestDeepCopy::test_deep_copy_does_copy", "tests/test_nominal_column.py::TestDeepCopy::test_metadata", "tests/test_nominal_column.py::TestBugFixes::test_comparison_of_different_object_types", "tests/test_ordinal_column.py::TestOrdinalDeepCopy::test_changing_copy", "tests/test_ordinal_column.py::TestOrdinalDeepCopy::test_deep_copy_does_copy", "tests/test_ordinal_column.py::TestOrdinalDeepCopy::test_metadata", "tests/test_ordinal_column.py::TestOrdinalGrouping::test_groups_after_copy", "tests/test_ordinal_column.py::TestOrdinalGrouping::test_groups_after_grouping", "tests/test_ordinal_column.py::TestOrdinalGrouping::test_possible_groups", "tests/test_ordinal_column.py::TestOrdinalWithObjects::test_groups_after_copy", "tests/test_ordinal_column.py::TestOrdinalWithObjects::test_groups_after_copy_with_nan", "tests/test_ordinal_column.py::TestOrdinalWithObjects::test_groups_after_grouping", "tests/test_ordinal_column.py::TestOrdinalWithObjects::test_groups_grouping_with_nan", "tests/test_ordinal_column.py::TestOrdinalWithObjects::test_possible_groups", "tests/test_ordinal_column.py::TestOrdinalGroupingWithnan::test_groups_after_copy", "tests/test_ordinal_column.py::TestOrdinalGroupingWithnan::test_groups_after_copy_with_nan", 
"tests/test_ordinal_column.py::TestOrdinalGroupingWithnan::test_groups_after_grouping", "tests/test_ordinal_column.py::TestOrdinalGroupingWithnan::test_groups_grouping_with_nan", "tests/test_ordinal_column.py::TestOrdinalGroupingWithnan::test_possible_groups", "tests/test_ordinal_column.py::TestOrdinalConstructor::test_correctly_subs_floated_metadata", "tests/test_ordinal_column.py::TestOrdinalConstructor::test_correctly_subs_floats_for_ints", "tests/test_ordinal_column.py::TestOrdinalConstructor::test_correctly_subs_nan_values", "tests/test_tree.py::TestClassificationRules::test_all_paths", "tests/test_tree.py::TestClassificationRules::test_single_path", "tests/test_tree.py::test_best_split_unique_values", "tests/test_tree.py::test_spliting_identical_values", "tests/test_tree.py::TestSurrogate::test_surrgate_detection", "tests/test_tree.py::TestSurrogate::test_surrogate_default_min_p", "tests/test_tree.py::test_p_and_chi_values", "tests/test_tree.py::test_p_and_chi_values_when_weighting_applied", "tests/test_tree.py::test_correct_dof", "tests/test_tree.py::test_zero_subbed_weighted_ndarry", "tests/test_tree.py::test_min_child_node_size_is_30", "tests/test_tree.py::test_to_tree_returns_a_tree", "tests/test_tree.py::test_max_depth_returns_correct_invalid_message", "tests/test_tree.py::TestTreeGenerated::test_deletion", "tests/test_tree.py::TestTreeGenerated::test_iter", "tests/test_tree.py::TestTreeGenerated::test_modification", "tests/test_tree.py::TestComplexStructures::test_p_and_chi_values_selectivity", "tests/test_tree.py::TestBugFixes::test_incorrect_weighted_counts", "tests/test_tree.py::TestBugFixes::test_unicode_printing", "tests/test_tree.py::TestStoppingRules::test_min_child_node_size_does_not_stop_for_unweighted_case", "tests/test_tree.py::TestStoppingRules::test_min_child_node_size_does_not_stop_for_weighted_case", "tests/test_tree.py::TestStoppingRules::test_min_child_node_size_does_stop_for_unweighted_case", 
"tests/test_tree.py::TestContinuousDependentVariable::test_bartlett_significance", "tests/test_tree.py::TestContinuousDependentVariable::test_continuous_dependent_variable", "tests/test_tree.py::TestContinuousDependentVariable::test_continuous_dependent_variable_with_weighting" ]
[]
Apache License 2.0
1,706
[ "CHAID/column.py", "README.md", "CHAID/stats.py" ]
[ "CHAID/column.py", "README.md", "CHAID/stats.py" ]
google__mobly-336
cde39b9a7bba85195e93a36e46676bddf396f8bd
2017-09-28 06:13:18
7e5e62af4ab4537bf619f0ee403c05f004c5baf0
diff --git a/docs/tutorial.md b/docs/tutorial.md index 035f634..59ea651 100644 --- a/docs/tutorial.md +++ b/docs/tutorial.md @@ -56,12 +56,14 @@ if __name__ == '__main__': test_runner.main() ```   -*To execute:* -  - $ python hello_world_test.py -c sample_config.yml -  -*Expect* -  +To execute: + +``` +$ python hello_world_test.py -c sample_config.yml +``` + +*Expect*: + A "Hello World!" toast notification appears on your device's screen.   Within SampleTestBed's `Controllers` section, we used `AndroidDevice: '*'` to tell @@ -106,21 +108,24 @@ if __name__ == '__main__': ```   *To execute:* + +``` +$ python hello_world_test.py -c sample_config.yml --test_case test_bye +```   - $ python hello_world_test.py -c sample_config.yml --test_case test_bye -  -  -*Expect* -  +*Expect*: + A "Goodbye!" toast notification appears on your device's screen.   You can dictate what test cases to execute within a test script and their -execution order, shown below: -  - $ python hello_world_test.py -c sample_config.yml --test_case test_bye test_hello test_bye -  -*Expect* -  +execution order, for example: + +``` +$ python hello_world_test.py -c sample_config.yml --test_case test_bye test_hello test_bye +``` + +*Expect*: + Toast notifications appear on your device's screen in the following order: "Goodbye!", "Hello World!", "Goodbye!".   @@ -184,11 +189,13 @@ TestBeds:   You can choose which one to execute on with the command line argument `--test_bed`: -  - $ python hello_world_test.py -c sample_config.yml --test_bed AbcTestBed -  -*Expect* -  + +``` +$ python hello_world_test.py -c sample_config.yml --test_bed AbcTestBed +``` + +*Expect*: + A "Hello World!" and a "Goodbye!" toast notification appear on your device's screen.   
diff --git a/mobly/controllers/android_device.py b/mobly/controllers/android_device.py index e52e5fd..f1f6e9d 100644 --- a/mobly/controllers/android_device.py +++ b/mobly/controllers/android_device.py @@ -723,17 +723,18 @@ class AndroidDevice(object): self, 'Snippet package "%s" has already been loaded under name' ' "%s".' % (package, client_name)) - client = snippet_client.SnippetClient( - package=package, adb_proxy=self.adb, log=self.log) + client = snippet_client.SnippetClient(package=package, ad=self) try: client.start_app_and_connect() except Exception as e: + # Log the stacktrace of `e` as re-raising doesn't preserve trace. + self.log.exception('Failed to start app and connect.') # If errors happen, make sure we clean up before raising. try: client.stop_app() except: self.log.exception( - 'Failed to stop app after failure to launch.') + 'Failed to stop app after failure to start app and connect.') # Raise the error from start app failure. raise e self._snippet_clients[name] = client @@ -836,8 +837,10 @@ class AndroidDevice(object): extra_params = self.adb_logcat_param except AttributeError: extra_params = '' - cmd = '"%s" -s %s logcat -v threadtime %s >> "%s"' % ( - adb.ADB, self.serial, extra_params, logcat_file_path) + cmd = '"%s" -s %s logcat -v threadtime %s >> "%s"' % (adb.ADB, + self.serial, + extra_params, + logcat_file_path) process = utils.start_standing_subprocess(cmd, shell=True) self._adb_logcat_process = process self.adb_logcat_file_path = logcat_file_path @@ -853,12 +856,14 @@ class AndroidDevice(object): utils.stop_standing_subprocess(self._adb_logcat_process) self._adb_logcat_process = None - def take_bug_report(self, test_name, begin_time): + def take_bug_report(self, test_name, begin_time, timetout=300): """Takes a bug report on the device and stores it in a file. Args: test_name: Name of the test method that triggered this bug report. begin_time: Timestamp of when the test started. 
+ timeout: float, the number of seconds to wait for bugreport to + complete, default is 5min. """ new_br = True try: @@ -881,7 +886,8 @@ class AndroidDevice(object): self.wait_for_boot_completion() self.log.info('Taking bugreport for %s.', test_name) if new_br: - out = self.adb.shell('bugreportz').decode('utf-8') + out = self.adb.shell( + 'bugreportz', timeout=timetout).decode('utf-8') if not out.startswith('OK'): raise DeviceError(self, 'Failed to take bugreport: %s' % out) br_out_path = out.split(':')[1].strip() @@ -889,7 +895,8 @@ class AndroidDevice(object): else: # shell=True as this command redirects the stdout to a local file # using shell redirection. - self.adb.bugreport(' > %s' % full_out_path, shell=True) + self.adb.bugreport( + ' > %s' % full_out_path, shell=True, timeout=timetout) self.log.info('Bugreport for %s taken at %s.', test_name, full_out_path) diff --git a/mobly/controllers/android_device_lib/callback_handler.py b/mobly/controllers/android_device_lib/callback_handler.py index 5ef611b..b36cf8e 100644 --- a/mobly/controllers/android_device_lib/callback_handler.py +++ b/mobly/controllers/android_device_lib/callback_handler.py @@ -14,6 +14,7 @@ import time +from mobly.controllers.android_device_lib import errors from mobly.controllers.android_device_lib import snippet_event # The max timeout cannot be larger than the max time the socket waits for a @@ -23,7 +24,7 @@ MAX_TIMEOUT = 60 * 10 DEFAULT_TIMEOUT = 120 # two minutes -class Error(Exception): +class Error(errors.DeviceError): pass @@ -54,11 +55,12 @@ class CallbackHandler(object): ret_value: The direct return value of the async Rpc call. 
""" - def __init__(self, callback_id, event_client, ret_value, method_name): + def __init__(self, callback_id, event_client, ret_value, method_name, ad): self._id = callback_id self._event_client = event_client self.ret_value = ret_value self._method_name = method_name + self._ad = ad @property def callback_id(self): @@ -82,7 +84,7 @@ class CallbackHandler(object): """ if timeout: if timeout > MAX_TIMEOUT: - raise Error( + raise Error(self._ad, 'Specified timeout %s is longer than max timeout %s.' % (timeout, MAX_TIMEOUT)) # Convert to milliseconds for java side. @@ -92,7 +94,7 @@ class CallbackHandler(object): self._id, event_name, timeout_ms) except Exception as e: if 'EventSnippetException: timeout.' in str(e): - raise TimeoutError( + raise TimeoutError(self._ad, 'Timed out after waiting %ss for event "%s" triggered by' ' %s (%s).' % (timeout, event_name, self._method_name, self._id)) @@ -140,7 +142,7 @@ class CallbackHandler(object): break if predicate(event): return event - raise TimeoutError( + raise TimeoutError(self._ad, 'Timed out after %ss waiting for an "%s" event that satisfies the ' 'predicate "%s".' % (timeout, event_name, predicate.__name__)) diff --git a/mobly/controllers/android_device_lib/errors.py b/mobly/controllers/android_device_lib/errors.py new file mode 100644 index 0000000..c5d889a --- /dev/null +++ b/mobly/controllers/android_device_lib/errors.py @@ -0,0 +1,28 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Module for errors thrown from AndroidDevice object. + +from mobly import signals + + +class Error(signals.ControllerError): + pass + + +class DeviceError(Error): + """Raised for errors specific to an AndroidDevice object.""" + def __init__(self, ad, msg): + new_msg = '%s %s' % (repr(ad), msg) + super(DeviceError, self).__init__(new_msg) diff --git a/mobly/controllers/android_device_lib/jsonrpc_client_base.py b/mobly/controllers/android_device_lib/jsonrpc_client_base.py index efbcdcc..db19f36 100644 --- a/mobly/controllers/android_device_lib/jsonrpc_client_base.py +++ b/mobly/controllers/android_device_lib/jsonrpc_client_base.py @@ -42,6 +42,7 @@ import socket import threading from mobly.controllers.android_device_lib import callback_handler +from mobly.controllers.android_device_lib import errors # UID of the 'unknown' jsonrpc session. Will cause creation of a new session. UNKNOWN_UID = -1 @@ -53,7 +54,7 @@ _SOCKET_CONNECTION_TIMEOUT = 60 _SOCKET_READ_TIMEOUT = callback_handler.MAX_TIMEOUT -class Error(Exception): +class Error(errors.DeviceError): pass @@ -102,17 +103,18 @@ class JsonRpcClientBase(object): uid: (int) The uid of this session. """ - def __init__(self, app_name, log=logging.getLogger()): + def __init__(self, app_name, ad): """ Args: app_name: (str) The user-visible name of the app being communicated with. - log: (logging.Logger) logger to which to send log messages. + ad: (AndroidDevice) The device object associated with a client. 
""" self.host_port = None self.device_port = None self.app_name = app_name - self.log = log + self._ad = ad + self.log = self._ad.log self.uid = None self._client = None # prevent close errors on connect failure self._conn = None @@ -207,7 +209,8 @@ class JsonRpcClientBase(object): resp = self._cmd(cmd, uid) if not resp: - raise ProtocolError(ProtocolError.NO_RESPONSE_FROM_HANDSHAKE) + raise ProtocolError( + self._ad, ProtocolError.NO_RESPONSE_FROM_HANDSHAKE) result = json.loads(str(resp, encoding='utf8')) if result['status']: self.uid = result['uid'] @@ -262,12 +265,13 @@ class JsonRpcClientBase(object): self._client.flush() response = self._client.readline() if not response: - raise ProtocolError(ProtocolError.NO_RESPONSE_FROM_SERVER) + raise ProtocolError(self._ad, + ProtocolError.NO_RESPONSE_FROM_SERVER) result = json.loads(str(response, encoding="utf8")) if result['error']: - raise ApiError(result['error']) + raise ApiError(self._ad, result['error']) if result['id'] != apiid: - raise ProtocolError(ProtocolError.MISMATCHED_API_ID) + raise ProtocolError(self._ad, ProtocolError.MISMATCHED_API_ID) if result.get('callback') is not None: if self._event_client is None: self._event_client = self._start_event_client() @@ -275,7 +279,8 @@ class JsonRpcClientBase(object): callback_id=result['callback'], event_client=self._event_client, ret_value=result['result'], - method_name=method) + method_name=method, + ad=self._ad) return result['result'] def __getattr__(self, name): diff --git a/mobly/controllers/android_device_lib/snippet_client.py b/mobly/controllers/android_device_lib/snippet_client.py index 46477f4..6395b66 100644 --- a/mobly/controllers/android_device_lib/snippet_client.py +++ b/mobly/controllers/android_device_lib/snippet_client.py @@ -18,6 +18,7 @@ import time from mobly import utils from mobly.controllers.android_device_lib import adb +from mobly.controllers.android_device_lib import errors from mobly.controllers.android_device_lib import 
jsonrpc_client_base _INSTRUMENTATION_RUNNER_PACKAGE = ( @@ -55,10 +56,6 @@ _SETSID_COMMAND = 'setsid' _NOHUP_COMMAND = 'nohup' -class Error(Exception): - pass - - class ProtocolVersionError(jsonrpc_client_base.AppStartError): """Raised when the protocol reported by the snippet is unknown.""" @@ -72,18 +69,18 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase): mobly-snippet-lib, SnippetRunner.java. """ - def __init__(self, package, adb_proxy, log=logging.getLogger()): + def __init__(self, package, ad): """Initializes a SnippetClient. Args: package: (str) The package name of the apk where the snippets are defined. - adb_proxy: (adb.AdbProxy) Adb proxy for running adb commands. - log: (logging.Logger) logger to which to send log messages. + ad: (AndroidDevice) the device object associated with this client. """ - super(SnippetClient, self).__init__(app_name=package, log=log) + super(SnippetClient, self).__init__(app_name=package, ad=ad) self.package = package - self._adb = adb_proxy + self._ad = ad + self._adb = ad.adb self._proc = None def start_app_and_connect(self): @@ -106,12 +103,12 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase): line = self._read_protocol_line() match = re.match('^SNIPPET START, PROTOCOL ([0-9]+) ([0-9]+)$', line) if not match or match.group(1) != '1': - raise ProtocolVersionError(line) + raise ProtocolVersionError(self._ad, line) line = self._read_protocol_line() match = re.match('^SNIPPET SERVING, PORT ([0-9]+)$', line) if not match: - raise ProtocolVersionError(line) + raise ProtocolVersionError(self._ad, line) self.device_port = int(match.group(1)) # Forward the device port to a new host port, and connect to that port @@ -147,7 +144,7 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase): self.connect() except: # Failed to connect to app, something went wrong. 
- raise jsonrpc_client_base.AppRestoreConnectionError( + raise jsonrpc_client_base.AppRestoreConnectionError(self._ad ('Failed to restore app connection for %s at host port %s, ' 'device port %s'), self.package, self.host_port, self.device_port) @@ -170,8 +167,8 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase): utils.stop_standing_subprocess(self._proc) out = self._adb.shell(_STOP_CMD % self.package).decode('utf-8') if 'OK (0 tests)' not in out: - raise Error('Failed to stop existing apk. Unexpected ' - 'output: %s' % out) + raise errors.DeviceError(self._ad, + 'Failed to stop existing apk. Unexpected output: %s' % out) finally: # Always clean up the adb port if self.host_port: @@ -179,8 +176,7 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase): def _start_event_client(self): """Overrides superclass.""" - event_client = SnippetClient( - package=self.package, adb_proxy=self._adb, log=self.log) + event_client = SnippetClient(package=self.package, ad=self) event_client.host_port = self.host_port event_client.device_port = self.device_port event_client.connect(self.uid, @@ -201,16 +197,15 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase): out = self._adb.shell('pm list package') if not utils.grep('^package:%s$' % self.package, out): raise jsonrpc_client_base.AppStartError( - '%s is not installed on %s' % (self.package, self._adb.serial)) + self._ad, '%s is not installed.' % self.package) # Check that the app is instrumented. out = self._adb.shell('pm list instrumentation') matched_out = utils.grep('^instrumentation:%s/%s' % (self.package, _INSTRUMENTATION_RUNNER_PACKAGE), out) if not matched_out: - raise jsonrpc_client_base.AppStartError( - '%s is installed on %s, but it is not instrumented.' % - (self.package, self._adb.serial)) + raise jsonrpc_client_base.AppStartError(self._ad, + '%s is installed, but it is not instrumented.' 
% self.package) match = re.search('^instrumentation:(.*)\/(.*) \(target=(.*)\)$', matched_out[0]) target_name = match.group(3) @@ -219,9 +214,9 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase): if target_name != self.package: out = self._adb.shell('pm list package') if not utils.grep('^package:%s$' % target_name, out): - raise jsonrpc_client_base.AppStartError( - 'Instrumentation target %s is not installed on %s' % - (target_name, self._adb.serial)) + raise jsonrpc_client_base.AppStartError(self._ad, + 'Instrumentation target %s is not installed.' % + target_name) def _do_start_app(self, launch_cmd): adb_cmd = [adb.ADB] @@ -246,7 +241,7 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase): while True: line = self._proc.stdout.readline().decode('utf-8') if not line: - raise jsonrpc_client_base.AppStartError( + raise jsonrpc_client_base.AppStartError(self._ad, 'Unexpected EOF waiting for app to start') # readline() uses an empty string to mark EOF, and a single newline # to mark regular empty lines in the output. Don't move the strip() diff --git a/mobly/utils.py b/mobly/utils.py index 79bf9e2..ac3b1aa 100644 --- a/mobly/utils.py +++ b/mobly/utils.py @@ -357,6 +357,11 @@ def stop_standing_subprocess(proc, kill_signal=signal.SIGTERM): logging.exception('Failed to kill standing subprocess %d', pid) if failed: raise Error('Failed to kill standing subprocesses: %s' % failed) + # Call wait and close pipes on the original Python object so we don't get + # runtime warnings. + proc.stdout.close() + proc.stderr.close() + proc.wait() logging.debug('Stopped standing subprocess %d', pid)
Properly identify exceptions thrown from service modules within AndroidDevice Similar to #105, it'll be good if all the exceptions from service modules like `SnippetClient` is also tagged with the device identity prefix.
google/mobly
diff --git a/tests/lib/mock_android_device.py b/tests/lib/mock_android_device.py index caa587e..567160e 100755 --- a/tests/lib/mock_android_device.py +++ b/tests/lib/mock_android_device.py @@ -68,7 +68,7 @@ class MockAdbProxy(object): self.fail_br = fail_br self.fail_br_before_N = fail_br_before_N - def shell(self, params): + def shell(self, params, timeout=None): if params == "id -u": return b"root" elif params == "bugreportz": @@ -90,7 +90,7 @@ class MockAdbProxy(object): elif params == "sys.boot_completed": return "1" - def bugreport(self, args, shell=False): + def bugreport(self, args, shell=False, timeout=None): expected = os.path.join(logging.log_path, 'AndroidDevice%s' % self.serial, 'BugReports', 'test_something,sometime,%s' % self.serial) diff --git a/tests/mobly/controllers/android_device_lib/callback_handler_test.py b/tests/mobly/controllers/android_device_lib/callback_handler_test.py index 6a3e6d1..5aeacae 100755 --- a/tests/mobly/controllers/android_device_lib/callback_handler_test.py +++ b/tests/mobly/controllers/android_device_lib/callback_handler_test.py @@ -45,7 +45,8 @@ class CallbackHandlerTest(unittest.TestCase): callback_id=MOCK_CALLBACK_ID, event_client=mock_event_client, ret_value=None, - method_name=None) + method_name=None, + ad=mock.Mock()) self.assertEqual(handler.callback_id, MOCK_CALLBACK_ID) with self.assertRaisesRegex(AttributeError, "can't set attribute"): handler.callback_id = 'ha' @@ -58,7 +59,8 @@ class CallbackHandlerTest(unittest.TestCase): callback_id=MOCK_CALLBACK_ID, event_client=mock_event_client, ret_value=None, - method_name=None) + method_name=None, + ad=mock.Mock()) event = handler.waitAndGet('ha') self.assertEqual(event.name, MOCK_RAW_EVENT['name']) self.assertEqual(event.creation_time, MOCK_RAW_EVENT['time']) @@ -70,12 +72,14 @@ class CallbackHandlerTest(unittest.TestCase): java_timeout_msg = ('com.google.android.mobly.snippet.event.' 
'EventSnippet$EventSnippetException: timeout.') mock_event_client.eventWaitAndGet = mock.Mock( - side_effect=jsonrpc_client_base.ApiError(java_timeout_msg)) + side_effect=jsonrpc_client_base.ApiError(mock.Mock(), + java_timeout_msg)) handler = callback_handler.CallbackHandler( callback_id=MOCK_CALLBACK_ID, event_client=mock_event_client, ret_value=None, - method_name=None) + method_name=None, + ad=mock.Mock()) expected_msg = 'Timed out after waiting .*s for event "ha" .*' with self.assertRaisesRegex(callback_handler.TimeoutError, expected_msg): @@ -89,7 +93,8 @@ class CallbackHandlerTest(unittest.TestCase): callback_id=MOCK_CALLBACK_ID, event_client=mock_event_client, ret_value=None, - method_name=None) + method_name=None, + ad=mock.Mock()) def some_condition(event): return event.data['successful'] @@ -104,7 +109,8 @@ class CallbackHandlerTest(unittest.TestCase): callback_id=MOCK_CALLBACK_ID, event_client=mock_event_client, ret_value=None, - method_name=None) + method_name=None, + ad=mock.Mock()) expected_msg = ( 'Timed out after 0.01s waiting for an "AsyncTaskResult" event that' ' satisfies the predicate "some_condition".') diff --git a/tests/mobly/controllers/android_device_lib/jsonrpc_client_base_test.py b/tests/mobly/controllers/android_device_lib/jsonrpc_client_base_test.py index aea7771..a696d88 100755 --- a/tests/mobly/controllers/android_device_lib/jsonrpc_client_base_test.py +++ b/tests/mobly/controllers/android_device_lib/jsonrpc_client_base_test.py @@ -25,7 +25,8 @@ from tests.lib import jsonrpc_client_test_base class FakeRpcClient(jsonrpc_client_base.JsonRpcClientBase): def __init__(self): - super(FakeRpcClient, self).__init__(app_name='FakeRpcClient') + super(FakeRpcClient, self).__init__(app_name='FakeRpcClient', + ad=mock.Mock()) class JsonRpcClientBaseTest(jsonrpc_client_test_base.JsonRpcClientTestBase): diff --git a/tests/mobly/controllers/android_device_lib/snippet_client_test.py b/tests/mobly/controllers/android_device_lib/snippet_client_test.py 
index 589bc7f..47a25c6 100755 --- a/tests/mobly/controllers/android_device_lib/snippet_client_test.py +++ b/tests/mobly/controllers/android_device_lib/snippet_client_test.py @@ -77,14 +77,14 @@ class SnippetClientTest(jsonrpc_client_test_base.JsonRpcClientTestBase): def test_check_app_installed_fail_app_not_installed(self): sc = self._make_client(MockAdbProxy(apk_not_installed=True)) - expected_msg = '%s is not installed on .*' % MOCK_PACKAGE_NAME + expected_msg = '.* %s is not installed.' % MOCK_PACKAGE_NAME with self.assertRaisesRegex(jsonrpc_client_base.AppStartError, expected_msg): sc._check_app_installed() def test_check_app_installed_fail_not_instrumented(self): sc = self._make_client(MockAdbProxy(apk_not_instrumented=True)) - expected_msg = ('%s is installed on .*, but it is not instrumented.' % + expected_msg = ('.* %s is installed, but it is not instrumented.' % MOCK_PACKAGE_NAME) with self.assertRaisesRegex(jsonrpc_client_base.AppStartError, expected_msg): @@ -92,7 +92,7 @@ class SnippetClientTest(jsonrpc_client_test_base.JsonRpcClientTestBase): def test_check_app_installed_fail_target_not_installed(self): sc = self._make_client(MockAdbProxy(target_not_installed=True)) - expected_msg = ('Instrumentation target %s is not installed on .*' % + expected_msg = ('.* Instrumentation target %s is not installed.' 
% MOCK_MISSING_PACKAGE_NAME) with self.assertRaisesRegex(jsonrpc_client_base.AppStartError, expected_msg): @@ -321,8 +321,10 @@ class SnippetClientTest(jsonrpc_client_test_base.JsonRpcClientTestBase): def _make_client(self, adb_proxy=None): adb_proxy = adb_proxy or MockAdbProxy() + ad = mock.Mock() + ad.adb = adb_proxy return snippet_client.SnippetClient( - package=MOCK_PACKAGE_NAME, adb_proxy=adb_proxy) + package=MOCK_PACKAGE_NAME, ad=ad) def _setup_mock_instrumentation_cmd(self, mock_start_standing_subprocess, resp_lines): diff --git a/tests/mobly/utils_test.py b/tests/mobly/utils_test.py index cc24cbf..ce36c96 100755 --- a/tests/mobly/utils_test.py +++ b/tests/mobly/utils_test.py @@ -13,6 +13,7 @@ # limitations under the License. import mock +import platform import socket import time from future.tests.base import unittest @@ -29,13 +30,22 @@ class UtilsTest(unittest.TestCase): under mobly.utils. """ + def setUp(self): + system = platform.system() + self.sleep_cmd = 'timeout' if system == 'Windows' else 'sleep' + def test_start_standing_subproc(self): - p = utils.start_standing_subprocess(['sleep', '1']) - p1 = psutil.Process(p.pid) - self.assertTrue(p1.is_running()) + try: + p = utils.start_standing_subprocess([self.sleep_cmd, '0.1']) + p1 = psutil.Process(p.pid) + self.assertTrue(p1.is_running()) + finally: + p.stdout.close() + p.stderr.close() + p.wait() def test_stop_standing_subproc(self): - p = utils.start_standing_subprocess(['sleep', '4']) + p = utils.start_standing_subprocess([self.sleep_cmd, '4']) p1 = psutil.Process(p.pid) utils.stop_standing_subprocess(p) self.assertFalse(p1.is_running())
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_issue_reference", "has_added_files", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 6 }
1.6
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work future==1.0.0 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work -e git+https://github.com/google/mobly.git@cde39b9a7bba85195e93a36e46676bddf396f8bd#egg=mobly mock==1.0.1 packaging @ file:///croot/packaging_1734472117206/work pluggy @ file:///croot/pluggy_1733169602837/work portpicker==1.6.0 psutil==7.0.0 pytest @ file:///croot/pytest_1738938843180/work pytz==2025.2 PyYAML==6.0.2 timeout-decorator==0.5.0 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
name: mobly channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - future==1.0.0 - mock==1.0.1 - portpicker==1.6.0 - psutil==7.0.0 - pytz==2025.2 - pyyaml==6.0.2 - timeout-decorator==0.5.0 prefix: /opt/conda/envs/mobly
[ "tests/mobly/controllers/android_device_lib/callback_handler_test.py::CallbackHandlerTest::test_callback_id_property", "tests/mobly/controllers/android_device_lib/callback_handler_test.py::CallbackHandlerTest::test_event_dict_to_snippet_event", "tests/mobly/controllers/android_device_lib/callback_handler_test.py::CallbackHandlerTest::test_wait_and_get_timeout", "tests/mobly/controllers/android_device_lib/callback_handler_test.py::CallbackHandlerTest::test_wait_for_event", "tests/mobly/controllers/android_device_lib/callback_handler_test.py::CallbackHandlerTest::test_wait_for_event_negative", "tests/mobly/controllers/android_device_lib/jsonrpc_client_base_test.py::JsonRpcClientBaseTest::test_connect_handshake", "tests/mobly/controllers/android_device_lib/jsonrpc_client_base_test.py::JsonRpcClientBaseTest::test_connect_handshake_unknown_status", "tests/mobly/controllers/android_device_lib/jsonrpc_client_base_test.py::JsonRpcClientBaseTest::test_connect_timeout", "tests/mobly/controllers/android_device_lib/jsonrpc_client_base_test.py::JsonRpcClientBaseTest::test_handshake_error", "tests/mobly/controllers/android_device_lib/jsonrpc_client_base_test.py::JsonRpcClientBaseTest::test_open_timeout_io_error", "tests/mobly/controllers/android_device_lib/jsonrpc_client_base_test.py::JsonRpcClientBaseTest::test_rpc_call_increment_counter", "tests/mobly/controllers/android_device_lib/jsonrpc_client_base_test.py::JsonRpcClientBaseTest::test_rpc_callback_response", "tests/mobly/controllers/android_device_lib/jsonrpc_client_base_test.py::JsonRpcClientBaseTest::test_rpc_error_response", "tests/mobly/controllers/android_device_lib/jsonrpc_client_base_test.py::JsonRpcClientBaseTest::test_rpc_id_mismatch", "tests/mobly/controllers/android_device_lib/jsonrpc_client_base_test.py::JsonRpcClientBaseTest::test_rpc_no_response", "tests/mobly/controllers/android_device_lib/jsonrpc_client_base_test.py::JsonRpcClientBaseTest::test_rpc_send_to_socket", 
"tests/mobly/controllers/android_device_lib/jsonrpc_client_base_test.py::JsonRpcClientBaseTest::test_rpc_send_to_socket_without_callback", "tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_fail_app_not_installed", "tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_fail_not_instrumented", "tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_fail_target_not_installed", "tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_normal", "tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_restore_event_client", "tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start", "tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect", "tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_header_junk", "tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_no_valid_line", "tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_persistent_session", "tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_unknown_protocol", "tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_crash", "tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_event_client" ]
[]
[ "tests/mobly/controllers/android_device_lib/callback_handler_test.py::CallbackHandlerTest::test_timeout_value", "tests/mobly/utils_test.py::UtilsTest::test_get_available_port_negative", "tests/mobly/utils_test.py::UtilsTest::test_get_available_port_positive", "tests/mobly/utils_test.py::UtilsTest::test_get_available_port_returns_free_port", "tests/mobly/utils_test.py::UtilsTest::test_start_standing_subproc", "tests/mobly/utils_test.py::UtilsTest::test_stop_standing_subproc" ]
[]
Apache License 2.0
1,707
[ "mobly/controllers/android_device_lib/snippet_client.py", "mobly/controllers/android_device.py", "docs/tutorial.md", "mobly/controllers/android_device_lib/jsonrpc_client_base.py", "mobly/controllers/android_device_lib/callback_handler.py", "mobly/utils.py", "mobly/controllers/android_device_lib/errors.py" ]
[ "mobly/controllers/android_device_lib/snippet_client.py", "mobly/controllers/android_device.py", "docs/tutorial.md", "mobly/controllers/android_device_lib/jsonrpc_client_base.py", "mobly/controllers/android_device_lib/callback_handler.py", "mobly/utils.py", "mobly/controllers/android_device_lib/errors.py" ]
OpenMined__PySyft-257
b0d646922b7529f5198dcc21fc856c7e9e598976
2017-09-28 06:36:51
06ce023225dd613d8fb14ab2046135b93ab22376
diff --git a/syft/__init__.py b/syft/__init__.py index a172d04425..39d8074ab4 100644 --- a/syft/__init__.py +++ b/syft/__init__.py @@ -6,7 +6,7 @@ from syft import test from syft.tensor import equal, TensorBase from syft.math import cumprod, cumsum, ceil, dot, matmul, addmm, addcmul from syft.math import addcdiv, addmv, addbmm, baddbmm, transpose -from syft.math import unsqueeze, zeros, ones, rand, randn +from syft.math import unsqueeze, zeros, ones, rand, randn, mm s = str(he) s += str(nn) @@ -17,3 +17,4 @@ s += str(dot) + str(matmul) + str(addmm) + str(addcmul) + str(addcdiv) s += str(addmv) + str(addbmm) + str(baddbmm) s += str(transpose) + str(rand) + str(randn) + str(ones) + str(zeros) s += str(unsqueeze) +s += str(mm) diff --git a/syft/math.py b/syft/math.py index 4969a9a256..99e36958f3 100644 --- a/syft/math.py +++ b/syft/math.py @@ -9,7 +9,7 @@ from .tensor import _ensure_tensorbase __all__ = [ 'cumprod', 'cumsum', 'ceil', 'dot', 'floor', 'matmul', 'addmm', 'addcmul', 'addcdiv', 'addmv', 'addbmm', 'baddbmm', 'sigmoid', 'unsqueeze', 'tanh', 'relu', - 'zeros', 'ones', 'rand', 'randn' + 'zeros', 'ones', 'rand', 'randn', 'mm' ] @@ -341,3 +341,23 @@ def unsqueeze(tensor1, dim): raise NotImplemented else: return TensorBase(np.expand_dims(tensor1.data, dim)) + + +def mm(tensor1, tensor2): + """ + Performs a matrix multiplication of :attr:`tensor1` and :attr:`tensor2`. + + If :attr:`tensor1` is a `n x m` Tensor, :attr:`tensor2` is a `m x p` Tensor, + output will be a `n x p` Tensor. 
+ + Args: + tensor1 (Tensor): First Tensor to be multiplied + tensor2 (Tensor): Second Tensor to be multiplied""" + + _ensure_tensorbase(tensor1) + _ensure_tensorbase(tensor2) + + if tensor1.encrypted or tensor2.encrypted: + return NotImplemented + else: + return TensorBase(np.array(np.matmul(tensor1.data, tensor2.data))) diff --git a/syft/tensor.py b/syft/tensor.py index 9117e74a13..7e4560fcf4 100644 --- a/syft/tensor.py +++ b/syft/tensor.py @@ -1067,11 +1067,13 @@ class TensorBase(object): else: if tensor.shape() == self.shape(): - tensor2 = np.array([1 if x else 0 for x in np.equal(tensor.data.flatten(), self.data.flatten()).tolist()]) + tensor2 = np.array([1 if x else 0 for x in np.equal( + tensor.data.flatten(), self.data.flatten()).tolist()]) result = tensor2.reshape(self.data.shape) return TensorBase(result) else: - raise ValueError('inconsistent dimensions {} and {}'.format(self.shape(), tensor.shape())) + raise ValueError('inconsistent dimensions {} and {}'.format( + self.shape(), tensor.shape())) def ne_(self, tensor): """ @@ -1118,7 +1120,8 @@ class TensorBase(object): """Computes the histogram of a tensor and Returns it""" if self.encrypted: return NotImplemented - hist, edges = np.histogram(np.array(self.data), bins=bins, range=(min, max)) + hist, edges = np.histogram( + np.array(self.data), bins=bins, range=(min, max)) return TensorBase(hist) def scatter_(self, dim, index, src): @@ -1136,19 +1139,22 @@ class TensorBase(object): if index.data.dtype != np.dtype('int_'): raise TypeError("The values of index must be integers") if self.data.ndim != index.data.ndim: - raise ValueError("Index should have the same number of dimensions as output") + raise ValueError( + "Index should have the same number of dimensions as output") if dim >= self.data.ndim or dim < -self.data.ndim: raise IndexError("dim is out of range") if dim < 0: # Not sure why scatter should accept dim < 0, but that is the behavior in PyTorch's scatter dim = self.data.ndim + dim - 
idx_xsection_shape = index.data.shape[:dim] + index.data.shape[dim + 1:] + idx_xsection_shape = index.data.shape[:dim] + \ + index.data.shape[dim + 1:] self_xsection_shape = self.data.shape[:dim] + self.data.shape[dim + 1:] if idx_xsection_shape != self_xsection_shape: raise ValueError("Except for dimension " + str(dim) + ", all dimensions of index and output should be the same size") if (index.data >= self.data.shape[dim]).any() or (index.data < 0).any(): - raise IndexError("The values of index must be between 0 and (self.data.shape[dim] -1)") + raise IndexError( + "The values of index must be between 0 and (self.data.shape[dim] -1)") def make_slice(arr, dim, i): slc = [slice(None)] * arr.ndim @@ -1165,7 +1171,8 @@ class TensorBase(object): if not np.isscalar(src): src = _ensure_tensorbase(src) if index.data.shape[dim] > src.data.shape[dim]: - raise IndexError("Dimension " + str(dim) + "of index can not be bigger than that of src ") + raise IndexError("Dimension " + str(dim) + + "of index can not be bigger than that of src ") src_shape = src.data.shape[:dim] + src.data.shape[dim + 1:] if idx_xsection_shape != src_shape: raise ValueError("Except for dimension " + @@ -1173,7 +1180,8 @@ class TensorBase(object): # src_idx is a NumPy advanced index for indexing of elements in the src src_idx = list(idx) src_idx.pop(dim) - src_idx.insert(dim, np.repeat(np.arange(index.data.shape[dim]), np.prod(idx_xsection_shape))) + src_idx.insert(dim, np.repeat( + np.arange(index.data.shape[dim]), np.prod(idx_xsection_shape))) self.data[idx] = src.data[src_idx] else: @@ -1195,7 +1203,8 @@ class TensorBase(object): index = _ensure_tensorbase(index) if self.encrypted or index.encrypted: return NotImplemented - idx_xsection_shape = index.data.shape[:dim] + index.data.shape[dim + 1:] + idx_xsection_shape = index.data.shape[:dim] + \ + index.data.shape[dim + 1:] self_xsection_shape = self.data.shape[:dim] + self.data.shape[dim + 1:] if idx_xsection_shape != self_xsection_shape: raise 
ValueError("Except for dimension " + str(dim) + @@ -1281,7 +1290,8 @@ class TensorBase(object): return NotImplemented mask_self_iter = np.nditer([mask.data, self.data]) source_iter = np.nditer(source.data) - out_flat = [s if m == 0 else source_iter.__next__().item() for m, s in mask_self_iter] + out_flat = [s if m == 0 else source_iter.__next__().item() + for m, s in mask_self_iter] self.data = np.reshape(out_flat, self.data.shape) return self @@ -1325,13 +1335,26 @@ class TensorBase(object): self.data = np.equal(self.data, _ensure_tensorbase(t).data) return self + def mm(self, tensor2): + """Performs a matrix multiplication of :attr:`tensor1` and :attr:`tensor2`. + + If :attr:`tensor1` is a `n x m` Tensor, :attr:`tensor2` is a `m x p` Tensor, + output will be a `n x p` Tensor. + + Args: + tensor1 (Tensor): First Tensor to be multiplied + tensor2 (Tensor): Second Tensor to be multiplied""" + + return syft.mm(self, tensor2) + def mv(tensormat, tensorvector): """ matrix and vector multiplication """ if tensormat.encrypted or tensorvector.encrypted: raise NotImplemented elif not len(tensorvector.data.shape) == 1: - raise ValueError('Vector dimensions not correct {}'.format(tensorvector.data.shape)) + raise ValueError('Vector dimensions not correct {}'.format( + tensorvector.data.shape)) elif tensorvector.data.shape[0] != tensormat.data.shape[1]: raise ValueError('vector dimensions {} not \ compatible with matrix {} '.format(tensorvector.data.shape, tensormat.data.shape)) @@ -1352,6 +1375,7 @@ def masked_select(tensor, mask): tensor = _ensure_tensorbase(tensor) if tensor.encrypted or mask.encrypted: raise NotImplemented - mask_broadcasted, data_broadcasted = np.broadcast_arrays(mask.data, tensor.data) + mask_broadcasted, data_broadcasted = np.broadcast_arrays( + mask.data, tensor.data) indices = np.where(mask_broadcasted) return TensorBase(data_broadcasted[indices])
Implement Default mm Functionality for Base Tensor Type **User Story A:** As a Data Scientist using Syft's Base Tensor type, we want to implement a default method for computing operations on a Tensor of arbitrary type. For this ticket to be complete, mm() should return a new tensor. For a reference on the operation this performs check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation. **Acceptance Criteria:** - If the Base Tensor type's attribute "encrypted" is set to True, it should return a NotImplemented error. - a unit test demonstrating the correct operation on the Base Tensor type implemented over int and float Tensors. - inline documentation in the python code. For inspiration on inline documentation, please check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation for this operator.
OpenMined/PySyft
diff --git a/tests/test_math.py b/tests/test_math.py index 1d66568765..56fc48a1d3 100644 --- a/tests/test_math.py +++ b/tests/test_math.py @@ -207,3 +207,23 @@ class unsqueezeTests(unittest.TestCase): expected_shape.insert(i, 1) self.assertTrue(np.array_equal(out.data.shape, expected_shape)) + + +class mmtest(unittest.TestCase): + def testmm1d(self): + t1 = TensorBase(np.array([2, 3, 4])) + t2 = TensorBase(np.array([3, 4, 5])) + out = syft.mm(t1, t2) + self.assertTrue(np.alltrue(out.data == [38])) + + def testmm2d(self): + t1 = TensorBase(np.array([[1, 2], [1, 2]])) + t2 = TensorBase(np.array([[2, 3], [2, 3]])) + out = syft.mm(t1, t2) + self.assertTrue(np.alltrue(out.data == [[6, 9], [6, 9]])) + + def testmm3d(self): + t1 = TensorBase(np.array([[1, 2], [2, 3], [3, 4]])) + t2 = TensorBase(np.array([[1, 2, 3], [2, 3, 4]])) + out = syft.mm(t1, t2) + self.assertTrue(np.alltrue(out.data == [[5, 8, 11], [8, 13, 18], [11, 18, 25]])) diff --git a/tests/test_tensor.py b/tests/test_tensor.py index fe66a1ff28..363f04380d 100644 --- a/tests/test_tensor.py +++ b/tests/test_tensor.py @@ -1066,5 +1066,25 @@ class eqTests(unittest.TestCase): self.assertEqual(t1, [False, True, False, False, False]) +class mm_test(unittest.TestCase): + def testmm1d(self): + t1 = TensorBase(np.array([2, 3, 4])) + t2 = TensorBase(np.array([3, 4, 5])) + out = t1.mm(t2) + self.assertTrue(np.alltrue(out.data == [38])) + + def testmm2d(self): + t1 = TensorBase(np.array([[1, 2], [1, 2]])) + t2 = TensorBase(np.array([[2, 3], [2, 3]])) + out = t1.mm(t2) + self.assertTrue(np.alltrue(out.data == [[6, 9], [6, 9]])) + + def testmm3d(self): + t1 = TensorBase(np.array([[1, 2], [2, 3], [3, 4]])) + t2 = TensorBase(np.array([[1, 2, 3], [2, 3, 4]])) + out = t1.mm(t2) + self.assertTrue(np.alltrue(out.data == [[5, 8, 11], [8, 13, 18], [11, 18, 25]])) + + if __name__ == "__main__": unittest.main()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 3 }
PySyft/hydrogen
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "line_profiler", "pytest", "pytest-flake8" ], "pre_install": [ "apt-get update", "apt-get install -y musl-dev g++ libgmp3-dev libmpfr-dev ca-certificates libmpc-dev" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
args==0.1.0 attrs==22.2.0 certifi==2021.5.30 clint==0.5.1 flake8==5.0.4 importlib-metadata==4.2.0 iniconfig==1.1.1 joblib==1.1.1 line-profiler==4.1.3 mccabe==0.7.0 numpy==1.19.5 packaging==21.3 phe==1.5.0 pluggy==1.0.0 py==1.11.0 pycodestyle==2.9.1 pyflakes==2.5.0 pyparsing==3.1.4 pyRserve==1.0.4 pytest==7.0.1 pytest-flake8==1.1.1 scikit-learn==0.24.2 scipy==1.5.4 sklearn==0.0 -e git+https://github.com/OpenMined/PySyft.git@b0d646922b7529f5198dcc21fc856c7e9e598976#egg=syft threadpoolctl==3.1.0 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: PySyft channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - args==0.1.0 - attrs==22.2.0 - clint==0.5.1 - flake8==5.0.4 - importlib-metadata==4.2.0 - iniconfig==1.1.1 - joblib==1.1.1 - line-profiler==4.1.3 - mccabe==0.7.0 - numpy==1.19.5 - packaging==21.3 - phe==1.5.0 - pluggy==1.0.0 - py==1.11.0 - pycodestyle==2.9.1 - pyflakes==2.5.0 - pyparsing==3.1.4 - pyrserve==1.0.4 - pytest==7.0.1 - pytest-flake8==1.1.1 - scikit-learn==0.24.2 - scipy==1.5.4 - sklearn==0.0 - threadpoolctl==3.1.0 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/PySyft
[ "tests/test_math.py::mmtest::testmm1d", "tests/test_math.py::mmtest::testmm2d", "tests/test_math.py::mmtest::testmm3d", "tests/test_tensor.py::mm_test::testmm1d", "tests/test_tensor.py::mm_test::testmm2d", "tests/test_tensor.py::mm_test::testmm3d" ]
[]
[ "tests/test_math.py::ConvenienceTests::testOnes", "tests/test_math.py::ConvenienceTests::testRand", "tests/test_math.py::ConvenienceTests::testZeros", "tests/test_math.py::DotTests::testDotFloat", "tests/test_math.py::DotTests::testDotInt", "tests/test_math.py::CeilTests::testCeil", "tests/test_math.py::FloorTests::testFloor", "tests/test_math.py::CumsumTests::testCumsum", "tests/test_math.py::CumprodTests::testCumprod", "tests/test_math.py::SigmoidTests::testSigmoid", "tests/test_math.py::MatmulTests::testMatmul1DFloat", "tests/test_math.py::MatmulTests::testMatmul1DInt", "tests/test_math.py::MatmulTests::testMatmul2DFloat", "tests/test_math.py::MatmulTests::testMatmul2DIdentity", "tests/test_math.py::MatmulTests::testMatmul2DInt", "tests/test_math.py::admmTests::testaddmm1d", "tests/test_math.py::admmTests::testaddmm2d", "tests/test_math.py::addcmulTests::testaddcmul1d", "tests/test_math.py::addcmulTests::testaddcmul2d", "tests/test_math.py::addcdivTests::testaddcdiv1d", "tests/test_math.py::addcdivTests::testaddcdiv2d", "tests/test_math.py::addmv::testaddmv", "tests/test_math.py::addbmmTests::testaddbmm", "tests/test_math.py::baddbmmTests::testbaddbmm", "tests/test_math.py::transposeTests::testTranspose", "tests/test_math.py::unsqueezeTests::testUnsqueeze", "tests/test_tensor.py::DimTests::testAsView", "tests/test_tensor.py::DimTests::testDimOne", "tests/test_tensor.py::DimTests::testResize", "tests/test_tensor.py::DimTests::testResizeAs", "tests/test_tensor.py::DimTests::testSize", "tests/test_tensor.py::DimTests::testView", "tests/test_tensor.py::AddTests::testInplace", "tests/test_tensor.py::AddTests::testScalar", "tests/test_tensor.py::AddTests::testSimple", "tests/test_tensor.py::CeilTests::testCeil", "tests/test_tensor.py::CeilTests::testCeil_", "tests/test_tensor.py::ZeroTests::testZero", "tests/test_tensor.py::FloorTests::testFloor_", "tests/test_tensor.py::SubTests::testInplace", "tests/test_tensor.py::SubTests::testScalar", 
"tests/test_tensor.py::SubTests::testSimple", "tests/test_tensor.py::MaxTests::testAxis", "tests/test_tensor.py::MaxTests::testNoDim", "tests/test_tensor.py::MultTests::testInplace", "tests/test_tensor.py::MultTests::testScalar", "tests/test_tensor.py::MultTests::testSimple", "tests/test_tensor.py::DivTests::testInplace", "tests/test_tensor.py::DivTests::testScalar", "tests/test_tensor.py::DivTests::testSimple", "tests/test_tensor.py::AbsTests::testabs", "tests/test_tensor.py::AbsTests::testabs_", "tests/test_tensor.py::ShapeTests::testShape", "tests/test_tensor.py::SqrtTests::testSqrt", "tests/test_tensor.py::SqrtTests::testSqrt_", "tests/test_tensor.py::SumTests::testDimIsNotNoneInt", "tests/test_tensor.py::SumTests::testDimNoneInt", "tests/test_tensor.py::EqualTests::testEqOp", "tests/test_tensor.py::EqualTests::testEqual", "tests/test_tensor.py::EqualTests::testIneqOp", "tests/test_tensor.py::EqualTests::testNotEqual", "tests/test_tensor.py::IndexTests::testIndexing", "tests/test_tensor.py::sigmoidTests::testSigmoid", "tests/test_tensor.py::addmm::testaddmm1d", "tests/test_tensor.py::addmm::testaddmm2d", "tests/test_tensor.py::addmm::testaddmm_1d", "tests/test_tensor.py::addmm::testaddmm_2d", "tests/test_tensor.py::addcmulTests::testaddcmul1d", "tests/test_tensor.py::addcmulTests::testaddcmul2d", "tests/test_tensor.py::addcmulTests::testaddcmul_1d", "tests/test_tensor.py::addcmulTests::testaddcmul_2d", "tests/test_tensor.py::addcdivTests::testaddcdiv1d", "tests/test_tensor.py::addcdivTests::testaddcdiv2d", "tests/test_tensor.py::addcdivTests::testaddcdiv_1d", "tests/test_tensor.py::addcdivTests::testaddcdiv_2d", "tests/test_tensor.py::addmvTests::testaddmv", "tests/test_tensor.py::addmvTests::testaddmv_", "tests/test_tensor.py::addbmmTests::testaddbmm", "tests/test_tensor.py::addbmmTests::testaddbmm_", "tests/test_tensor.py::baddbmmTests::testbaddbmm", "tests/test_tensor.py::baddbmmTests::testbaddbmm_", "tests/test_tensor.py::transposeTests::testT", 
"tests/test_tensor.py::transposeTests::testT_", "tests/test_tensor.py::transposeTests::testTranspose", "tests/test_tensor.py::transposeTests::testTranspose_", "tests/test_tensor.py::unsqueezeTests::testUnsqueeze", "tests/test_tensor.py::unsqueezeTests::testUnsqueeze_", "tests/test_tensor.py::expTests::testexp", "tests/test_tensor.py::expTests::testexp_", "tests/test_tensor.py::fracTests::testfrac", "tests/test_tensor.py::fracTests::testfrac_", "tests/test_tensor.py::rsqrtTests::testrsqrt", "tests/test_tensor.py::rsqrtTests::testrsqrt_", "tests/test_tensor.py::signTests::testsign", "tests/test_tensor.py::signTests::testsign_", "tests/test_tensor.py::numpyTests::testnumpy", "tests/test_tensor.py::reciprocalTests::testreciprocal", "tests/test_tensor.py::reciprocalTests::testrsqrt_", "tests/test_tensor.py::logTests::testLog", "tests/test_tensor.py::logTests::testLog1p", "tests/test_tensor.py::logTests::testLog1p_", "tests/test_tensor.py::logTests::testLog_", "tests/test_tensor.py::clampTests::testClampFloat", "tests/test_tensor.py::clampTests::testClampFloatInPlace", "tests/test_tensor.py::clampTests::testClampInt", "tests/test_tensor.py::clampTests::testClampIntInPlace", "tests/test_tensor.py::cloneTests::testClone", "tests/test_tensor.py::chunkTests::testChunk", "tests/test_tensor.py::chunkTests::testChunkSameSize", "tests/test_tensor.py::gtTests::testGtInPlaceWithNumber", "tests/test_tensor.py::gtTests::testGtInPlaceWithTensor", "tests/test_tensor.py::gtTests::testGtWithNumber", "tests/test_tensor.py::gtTests::testGtWithTensor", "tests/test_tensor.py::gtTests::testWithEncrypted", "tests/test_tensor.py::geTests::testGeInPlaceWithNumber", "tests/test_tensor.py::geTests::testGeInPlaceWithTensor", "tests/test_tensor.py::geTests::testGeWithNumber", "tests/test_tensor.py::geTests::testGeWithTensor", "tests/test_tensor.py::geTests::testWithEncrypted", "tests/test_tensor.py::ltTests::testLtInPlaceWithNumber", "tests/test_tensor.py::ltTests::testLtInPlaceWithTensor", 
"tests/test_tensor.py::ltTests::testLtWithNumber", "tests/test_tensor.py::ltTests::testLtWithTensor", "tests/test_tensor.py::ltTests::testWithEncrypted", "tests/test_tensor.py::leTests::testLeInPlaceWithNumber", "tests/test_tensor.py::leTests::testLeInPlaceWithTensor", "tests/test_tensor.py::leTests::testLeWithNumber", "tests/test_tensor.py::leTests::testLeWithTensor", "tests/test_tensor.py::leTests::testWithEncrypted", "tests/test_tensor.py::bernoulliTests::testBernoulli", "tests/test_tensor.py::bernoulliTests::testBernoulli_", "tests/test_tensor.py::uniformTests::testUniform", "tests/test_tensor.py::uniformTests::testUniform_", "tests/test_tensor.py::fillTests::testFill_", "tests/test_tensor.py::topkTests::testTopK", "tests/test_tensor.py::tolistTests::testToList", "tests/test_tensor.py::traceTests::testTrace", "tests/test_tensor.py::roundTests::testRound", "tests/test_tensor.py::roundTests::testRound_", "tests/test_tensor.py::repeatTests::testRepeat", "tests/test_tensor.py::powTests::testPow", "tests/test_tensor.py::powTests::testPow_", "tests/test_tensor.py::prodTests::testProd", "tests/test_tensor.py::randomTests::testRandom_", "tests/test_tensor.py::nonzeroTests::testNonZero", "tests/test_tensor.py::cumprodTest::testCumprod", "tests/test_tensor.py::cumprodTest::testCumprod_", "tests/test_tensor.py::splitTests::testSplit", "tests/test_tensor.py::squeezeTests::testSqueeze", "tests/test_tensor.py::expandAsTests::testExpandAs", "tests/test_tensor.py::meanTests::testMean", "tests/test_tensor.py::notEqualTests::testNe", "tests/test_tensor.py::notEqualTests::testNe_", "tests/test_tensor.py::index_selectTests::testIndex_select", "tests/test_tensor.py::gatherTests::testGatherNumerical1", "tests/test_tensor.py::gatherTests::testGatherNumerical2", "tests/test_tensor.py::scatterTests::testScatter_DimOutOfRange", "tests/test_tensor.py::scatterTests::testScatter_IndexOutOfRange", "tests/test_tensor.py::scatterTests::testScatter_IndexType", 
"tests/test_tensor.py::scatterTests::testScatter_Numerical0", "tests/test_tensor.py::scatterTests::testScatter_Numerical1", "tests/test_tensor.py::scatterTests::testScatter_Numerical2", "tests/test_tensor.py::scatterTests::testScatter_Numerical3", "tests/test_tensor.py::scatterTests::testScatter_Numerical4", "tests/test_tensor.py::scatterTests::testScatter_Numerical5", "tests/test_tensor.py::scatterTests::testScatter_Numerical6", "tests/test_tensor.py::scatterTests::testScatter_index_src_dimension_mismatch", "tests/test_tensor.py::remainderTests::testRemainder", "tests/test_tensor.py::remainderTests::testRemainder_", "tests/test_tensor.py::remainderTests::testRemainder_broadcasting", "tests/test_tensor.py::masked_scatter_Tests::testMasked_scatter_1", "tests/test_tensor.py::masked_scatter_Tests::testMasked_scatter_braodcasting1", "tests/test_tensor.py::masked_scatter_Tests::testMasked_scatter_braodcasting2", "tests/test_tensor.py::masked_fill_Tests::testMasked_fill_", "tests/test_tensor.py::masked_fill_Tests::testMasked_fill_broadcasting", "tests/test_tensor.py::masked_select_Tests::testMasked_select", "tests/test_tensor.py::masked_select_Tests::testMasked_select_broadcasting1", "tests/test_tensor.py::masked_select_Tests::testMasked_select_broadcasting2", "tests/test_tensor.py::masked_select_Tests::testTensorBase_Masked_select", "tests/test_tensor.py::eqTests::testEqInPlaceWithNumber", "tests/test_tensor.py::eqTests::testEqInPlaceWithTensor", "tests/test_tensor.py::eqTests::testEqWithNumber", "tests/test_tensor.py::eqTests::testEqWithTensor" ]
[]
Apache License 2.0
1,708
[ "syft/tensor.py", "syft/__init__.py", "syft/math.py" ]
[ "syft/tensor.py", "syft/__init__.py", "syft/math.py" ]
box__box-python-sdk-245
ded623f4b6de0530d8f983d3c3d2cafe646c126b
2017-09-28 16:04:07
ded623f4b6de0530d8f983d3c3d2cafe646c126b
boxcla: Verified that @Jeff-Meadows has signed the CLA. Thanks for the pull request!
diff --git a/HISTORY.rst b/HISTORY.rst index 5cf6878..c75fe80 100644 --- a/HISTORY.rst +++ b/HISTORY.rst @@ -105,6 +105,8 @@ Release History - Added a ``downscope_token()`` method to the ``OAuth2`` class. This generates a token that has its permissions reduced to the provided scopes and for the optionally provided ``File`` or ``Folder``. +- Added methods for configuring ``JWTAuth`` from config file: ``JWTAuth.from_settings_file`` and + ``JWTAuth.from_settings_dictionary``. **Other** diff --git a/boxsdk/auth/jwt_auth.py b/boxsdk/auth/jwt_auth.py index 6b27618..3a85130 100644 --- a/boxsdk/auth/jwt_auth.py +++ b/boxsdk/auth/jwt_auth.py @@ -3,6 +3,7 @@ from __future__ import absolute_import, unicode_literals from datetime import datetime, timedelta +import json import random import string @@ -360,3 +361,42 @@ def _normalize_rsa_private_key_passphrase(passphrase): .format(passphrase.__class__.__name__) ) return passphrase + + @classmethod + def from_settings_dictionary(cls, settings_dictionary, **kwargs): + """ + Create an auth instance as defined by the given settings dictionary. + + The dictionary should have the structure of the JSON file downloaded from the Box Developer Console. + + :param settings_dictionary: Dictionary containing settings for configuring app auth. + :type settings_dictionary: `dict` + :return: Auth instance configured as specified by the config dictionary. 
+ :rtype: :class:`JWTAuth` + """ + if 'boxAppSettings' not in settings_dictionary: + raise ValueError('boxAppSettings not present in configuration') + return cls( + client_id=settings_dictionary['boxAppSettings']['clientID'], + client_secret=settings_dictionary['boxAppSettings']['clientSecret'], + enterprise_id=settings_dictionary.get('enterpriseID', None), + jwt_key_id=settings_dictionary['boxAppSettings']['appAuth'].get('publicKeyID', None), + rsa_private_key_data=settings_dictionary['boxAppSettings']['appAuth'].get('privateKey', None), + rsa_private_key_passphrase=settings_dictionary['boxAppSettings']['appAuth'].get('passphrase', None), + **kwargs + ) + + @classmethod + def from_settings_file(cls, settings_file_sys_path, **kwargs): + """ + Create an auth instance as defined by a JSON file downloaded from the Box Developer Console. + See https://developer.box.com/v2.0/docs/authentication-with-jwt for more information. + + :param settings_file_sys_path: Path to the JSON file containing the configuration. + :type settings_file_sys_path: `unicode` + :return: Auth instance configured as specified by the JSON file. + :rtype: :class:`JWTAuth` + """ + with open(settings_file_sys_path) as config_file: + config_dictionary = json.load(config_file) + return cls.from_settings_dictionary(config_dictionary, **kwargs)
Add support to load app config file downloaded from dev console Add support to jwt_auth.py to load a box_config.json file downloaded from the developer console. This would make the python SDK consistent with the node an java sdk's. node: https://github.com/box/box-node-sdk/blob/3dcfe69ed1354ed85b54eb4b1d4e179c999f4f99/lib/box-node-sdk.js#L126 java: https://github.com/box/box-java-sdk/blob/master/src/main/java/com/box/sdk/BoxConfig.java https://github.com/box/box-java-sdk/blob/da3332d77c09f1c75afe93184b2a02bb00e84dd8/src/main/java/com/box/sdk/BoxDeveloperEditionAPIConnection.java#L189
box/box-python-sdk
diff --git a/test/unit/auth/test_jwt_auth.py b/test/unit/auth/test_jwt_auth.py index 830dfe0..4512397 100644 --- a/test/unit/auth/test_jwt_auth.py +++ b/test/unit/auth/test_jwt_auth.py @@ -364,3 +364,100 @@ def test_refresh_instance_sends_post_request_with_correct_params(jwt_auth_init_a enterprise_id = 'fake_enterprise_id' with jwt_auth_init_and_auth_mocks(enterprise_id, 'enterprise', enterprise_id=enterprise_id) as oauth: oauth.refresh(None) + + [email protected]() +def jwt_subclass_that_just_stores_params(): + class StoreParamJWTAuth(JWTAuth): + def __init__(self, **kwargs): + self.kwargs = kwargs + super(StoreParamJWTAuth, self).__init__(**kwargs) + + return StoreParamJWTAuth + + [email protected] +def fake_client_id(): + return 'fake_client_id' + + [email protected] +def fake_client_secret(): + return 'fake_client_secret' + + [email protected] +def fake_enterprise_id(): + return 'fake_enterprise_id' + + [email protected] +def app_config_json_content( + fake_client_id, + fake_client_secret, + fake_enterprise_id, + jwt_key_id, + rsa_private_key_bytes, + rsa_passphrase, +): + template = r""" +{{ + "boxAppSettings": {{ + "clientID": "{client_id}", + "clientSecret": "{client_secret}", + "appAuth": {{ + "publicKeyID": "{jwt_key_id}", + "privateKey": "{private_key}", + "passphrase": {passphrase} + }} + }}, + "enterpriseID": {enterprise_id} +}}""" + return template.format( + client_id=fake_client_id, + client_secret=fake_client_secret, + jwt_key_id=jwt_key_id, + private_key=rsa_private_key_bytes.replace(b"\n", b"\\n").decode(), + passphrase=json.dumps(rsa_passphrase and rsa_passphrase.decode()), + enterprise_id=json.dumps(fake_enterprise_id), + ) + + [email protected]() +def assert_jwt_kwargs_expected( + fake_client_id, + fake_client_secret, + fake_enterprise_id, + jwt_key_id, + rsa_private_key_bytes, + rsa_passphrase, +): + def _assert_jwt_kwargs_expected(jwt_auth): + assert jwt_auth.kwargs['client_id'] == fake_client_id + assert jwt_auth.kwargs['client_secret'] == 
fake_client_secret + assert jwt_auth.kwargs['enterprise_id'] == fake_enterprise_id + assert jwt_auth.kwargs['jwt_key_id'] == jwt_key_id + assert jwt_auth.kwargs['rsa_private_key_data'] == rsa_private_key_bytes.decode() + assert jwt_auth.kwargs['rsa_private_key_passphrase'] == (rsa_passphrase and rsa_passphrase.decode()) + + return _assert_jwt_kwargs_expected + + +def test_from_config_file( + jwt_subclass_that_just_stores_params, + app_config_json_content, + assert_jwt_kwargs_expected, +): + # pylint:disable=redefined-outer-name + with patch('boxsdk.auth.jwt_auth.open', mock_open(read_data=app_config_json_content), create=True): + jwt_auth_from_config_file = jwt_subclass_that_just_stores_params.from_settings_file('fake_config_file_sys_path') + assert_jwt_kwargs_expected(jwt_auth_from_config_file) + + +def test_from_settings_dictionary( + jwt_subclass_that_just_stores_params, + app_config_json_content, + assert_jwt_kwargs_expected, +): + jwt_auth_from_dictionary = jwt_subclass_that_just_stores_params.from_settings_dictionary(json.loads(app_config_json_content)) + assert_jwt_kwargs_expected(jwt_auth_from_dictionary)
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 2 }
1.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[all]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-xdist", "mock", "sqlalchemy", "bottle" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
async-timeout==4.0.2 attrs==22.2.0 bottle==0.13.2 -e git+https://github.com/box/box-python-sdk.git@ded623f4b6de0530d8f983d3c3d2cafe646c126b#egg=boxsdk certifi==2021.5.30 cffi==1.15.1 charset-normalizer==2.0.12 cryptography==40.0.2 execnet==1.9.0 greenlet==2.0.2 idna==3.10 importlib-metadata==4.8.3 iniconfig==1.1.1 mock==5.2.0 packaging==21.3 pluggy==1.0.0 py==1.11.0 pycparser==2.21 PyJWT==2.4.0 pyparsing==3.1.4 pytest==7.0.1 pytest-xdist==3.0.2 redis==4.3.6 requests==2.27.1 requests-toolbelt==1.0.0 six==1.17.0 SQLAlchemy==1.4.54 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 zipp==3.6.0
name: box-python-sdk channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - async-timeout==4.0.2 - attrs==22.2.0 - bottle==0.13.2 - cffi==1.15.1 - charset-normalizer==2.0.12 - cryptography==40.0.2 - execnet==1.9.0 - greenlet==2.0.2 - idna==3.10 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - mock==5.2.0 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pycparser==2.21 - pyjwt==2.4.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-xdist==3.0.2 - redis==4.3.6 - requests==2.27.1 - requests-toolbelt==1.0.0 - six==1.17.0 - sqlalchemy==1.4.54 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - zipp==3.6.0 prefix: /opt/conda/envs/box-python-sdk
[ "test/unit/auth/test_jwt_auth.py::test_from_config_file[None]", "test/unit/auth/test_jwt_auth.py::test_from_config_file[strong_password]", "test/unit/auth/test_jwt_auth.py::test_from_settings_dictionary[None]", "test/unit/auth/test_jwt_auth.py::test_from_settings_dictionary[strong_password]" ]
[]
[ "test/unit/auth/test_jwt_auth.py::test_jwt_auth_init_raises_type_error_unless_exactly_one_of_rsa_private_key_file_or_data_is_given[None-None-None]", "test/unit/auth/test_jwt_auth.py::test_jwt_auth_init_raises_type_error_unless_exactly_one_of_rsa_private_key_file_or_data_is_given[None-fake", "test/unit/auth/test_jwt_auth.py::test_jwt_auth_init_raises_type_error_if_rsa_private_key_data_has_unexpected_type[None-key_data0]", "test/unit/auth/test_jwt_auth.py::test_jwt_auth_init_raises_type_error_if_rsa_private_key_data_has_unexpected_type[None-\\u0192\\xf8\\xf8]", "test/unit/auth/test_jwt_auth.py::test_jwt_auth_init_accepts_rsa_private_key_data[None-BytesIO]", "test/unit/auth/test_jwt_auth.py::test_jwt_auth_init_accepts_rsa_private_key_data[None-str]", "test/unit/auth/test_jwt_auth.py::test_jwt_auth_init_accepts_rsa_private_key_data[None-bytes]", "test/unit/auth/test_jwt_auth.py::test_jwt_auth_init_accepts_rsa_private_key_data[None-RSAPrivateKey]", "test/unit/auth/test_jwt_auth.py::test_jwt_auth_init_accepts_rsa_private_key_data[strong_password-BytesIO]", "test/unit/auth/test_jwt_auth.py::test_jwt_auth_init_accepts_rsa_private_key_data[strong_password-str]", "test/unit/auth/test_jwt_auth.py::test_jwt_auth_init_accepts_rsa_private_key_data[strong_password-bytes]", "test/unit/auth/test_jwt_auth.py::test_jwt_auth_init_accepts_rsa_private_key_data[strong_password-RSAPrivateKey]", "test/unit/auth/test_jwt_auth.py::test_refresh_authenticates_with_user_if_enterprise_id_and_user_both_passed_to_constructor[RS256-None-False-16]", "test/unit/auth/test_jwt_auth.py::test_refresh_authenticates_with_user_if_enterprise_id_and_user_both_passed_to_constructor[RS256-None-False-32]", "test/unit/auth/test_jwt_auth.py::test_refresh_authenticates_with_user_if_enterprise_id_and_user_both_passed_to_constructor[RS256-None-False-128]", "test/unit/auth/test_jwt_auth.py::test_refresh_authenticates_with_user_if_enterprise_id_and_user_both_passed_to_constructor[RS256-None-True-16]", 
"test/unit/auth/test_jwt_auth.py::test_refresh_authenticates_with_user_if_enterprise_id_and_user_both_passed_to_constructor[RS256-None-True-32]", "test/unit/auth/test_jwt_auth.py::test_refresh_authenticates_with_user_if_enterprise_id_and_user_both_passed_to_constructor[RS256-None-True-128]", "test/unit/auth/test_jwt_auth.py::test_refresh_authenticates_with_user_if_enterprise_id_and_user_both_passed_to_constructor[RS256-strong_password-False-16]", "test/unit/auth/test_jwt_auth.py::test_refresh_authenticates_with_user_if_enterprise_id_and_user_both_passed_to_constructor[RS256-strong_password-False-32]", "test/unit/auth/test_jwt_auth.py::test_refresh_authenticates_with_user_if_enterprise_id_and_user_both_passed_to_constructor[RS256-strong_password-False-128]", "test/unit/auth/test_jwt_auth.py::test_refresh_authenticates_with_user_if_enterprise_id_and_user_both_passed_to_constructor[RS256-strong_password-True-16]", "test/unit/auth/test_jwt_auth.py::test_refresh_authenticates_with_user_if_enterprise_id_and_user_both_passed_to_constructor[RS256-strong_password-True-32]", "test/unit/auth/test_jwt_auth.py::test_refresh_authenticates_with_user_if_enterprise_id_and_user_both_passed_to_constructor[RS256-strong_password-True-128]", "test/unit/auth/test_jwt_auth.py::test_refresh_authenticates_with_user_if_enterprise_id_and_user_both_passed_to_constructor[RS512-None-False-16]", "test/unit/auth/test_jwt_auth.py::test_refresh_authenticates_with_user_if_enterprise_id_and_user_both_passed_to_constructor[RS512-None-False-32]", "test/unit/auth/test_jwt_auth.py::test_refresh_authenticates_with_user_if_enterprise_id_and_user_both_passed_to_constructor[RS512-None-False-128]", "test/unit/auth/test_jwt_auth.py::test_refresh_authenticates_with_user_if_enterprise_id_and_user_both_passed_to_constructor[RS512-None-True-16]", "test/unit/auth/test_jwt_auth.py::test_refresh_authenticates_with_user_if_enterprise_id_and_user_both_passed_to_constructor[RS512-None-True-32]", 
"test/unit/auth/test_jwt_auth.py::test_refresh_authenticates_with_user_if_enterprise_id_and_user_both_passed_to_constructor[RS512-None-True-128]", "test/unit/auth/test_jwt_auth.py::test_refresh_authenticates_with_user_if_enterprise_id_and_user_both_passed_to_constructor[RS512-strong_password-False-16]", "test/unit/auth/test_jwt_auth.py::test_refresh_authenticates_with_user_if_enterprise_id_and_user_both_passed_to_constructor[RS512-strong_password-False-32]", "test/unit/auth/test_jwt_auth.py::test_refresh_authenticates_with_user_if_enterprise_id_and_user_both_passed_to_constructor[RS512-strong_password-False-128]", "test/unit/auth/test_jwt_auth.py::test_refresh_authenticates_with_user_if_enterprise_id_and_user_both_passed_to_constructor[RS512-strong_password-True-16]", "test/unit/auth/test_jwt_auth.py::test_refresh_authenticates_with_user_if_enterprise_id_and_user_both_passed_to_constructor[RS512-strong_password-True-32]", "test/unit/auth/test_jwt_auth.py::test_refresh_authenticates_with_user_if_enterprise_id_and_user_both_passed_to_constructor[RS512-strong_password-True-128]", "test/unit/auth/test_jwt_auth.py::test_authenticate_raises_value_error_if_sub_was_never_given[RS256-None-False-authenticate_user]", "test/unit/auth/test_jwt_auth.py::test_authenticate_raises_value_error_if_sub_was_never_given[RS256-None-False-authenticate_instance]", "test/unit/auth/test_jwt_auth.py::test_authenticate_raises_value_error_if_sub_was_never_given[RS256-None-True-authenticate_user]", "test/unit/auth/test_jwt_auth.py::test_authenticate_raises_value_error_if_sub_was_never_given[RS256-None-True-authenticate_instance]", "test/unit/auth/test_jwt_auth.py::test_authenticate_raises_value_error_if_sub_was_never_given[RS256-strong_password-False-authenticate_user]", "test/unit/auth/test_jwt_auth.py::test_authenticate_raises_value_error_if_sub_was_never_given[RS256-strong_password-False-authenticate_instance]", 
"test/unit/auth/test_jwt_auth.py::test_authenticate_raises_value_error_if_sub_was_never_given[RS256-strong_password-True-authenticate_user]", "test/unit/auth/test_jwt_auth.py::test_authenticate_raises_value_error_if_sub_was_never_given[RS256-strong_password-True-authenticate_instance]", "test/unit/auth/test_jwt_auth.py::test_authenticate_raises_value_error_if_sub_was_never_given[RS512-None-False-authenticate_user]", "test/unit/auth/test_jwt_auth.py::test_authenticate_raises_value_error_if_sub_was_never_given[RS512-None-False-authenticate_instance]", "test/unit/auth/test_jwt_auth.py::test_authenticate_raises_value_error_if_sub_was_never_given[RS512-None-True-authenticate_user]", "test/unit/auth/test_jwt_auth.py::test_authenticate_raises_value_error_if_sub_was_never_given[RS512-None-True-authenticate_instance]", "test/unit/auth/test_jwt_auth.py::test_authenticate_raises_value_error_if_sub_was_never_given[RS512-strong_password-False-authenticate_user]", "test/unit/auth/test_jwt_auth.py::test_authenticate_raises_value_error_if_sub_was_never_given[RS512-strong_password-False-authenticate_instance]", "test/unit/auth/test_jwt_auth.py::test_authenticate_raises_value_error_if_sub_was_never_given[RS512-strong_password-True-authenticate_user]", "test/unit/auth/test_jwt_auth.py::test_authenticate_raises_value_error_if_sub_was_never_given[RS512-strong_password-True-authenticate_instance]", "test/unit/auth/test_jwt_auth.py::test_jwt_auth_constructor_raises_type_error_if_user_is_unsupported_type[RS256-None-False]", "test/unit/auth/test_jwt_auth.py::test_jwt_auth_constructor_raises_type_error_if_user_is_unsupported_type[RS256-None-True]", "test/unit/auth/test_jwt_auth.py::test_jwt_auth_constructor_raises_type_error_if_user_is_unsupported_type[RS256-strong_password-False]", "test/unit/auth/test_jwt_auth.py::test_jwt_auth_constructor_raises_type_error_if_user_is_unsupported_type[RS256-strong_password-True]", 
"test/unit/auth/test_jwt_auth.py::test_jwt_auth_constructor_raises_type_error_if_user_is_unsupported_type[RS512-None-False]", "test/unit/auth/test_jwt_auth.py::test_jwt_auth_constructor_raises_type_error_if_user_is_unsupported_type[RS512-None-True]", "test/unit/auth/test_jwt_auth.py::test_jwt_auth_constructor_raises_type_error_if_user_is_unsupported_type[RS512-strong_password-False]", "test/unit/auth/test_jwt_auth.py::test_jwt_auth_constructor_raises_type_error_if_user_is_unsupported_type[RS512-strong_password-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_raises_type_error_if_user_is_unsupported_type[RS256-None-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_raises_type_error_if_user_is_unsupported_type[RS256-None-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_raises_type_error_if_user_is_unsupported_type[RS256-strong_password-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_raises_type_error_if_user_is_unsupported_type[RS256-strong_password-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_raises_type_error_if_user_is_unsupported_type[RS512-None-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_raises_type_error_if_user_is_unsupported_type[RS512-None-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_raises_type_error_if_user_is_unsupported_type[RS512-strong_password-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_raises_type_error_if_user_is_unsupported_type[RS512-strong_password-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS256-None-False-16-None]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS256-None-False-16-fake_user_id_1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS256-None-False-32-None]", 
"test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS256-None-False-32-fake_user_id_1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS256-None-False-128-None]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS256-None-False-128-fake_user_id_1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS256-None-True-16-None]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS256-None-True-16-fake_user_id_1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS256-None-True-32-None]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS256-None-True-32-fake_user_id_1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS256-None-True-128-None]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS256-None-True-128-fake_user_id_1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS256-strong_password-False-16-None]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS256-strong_password-False-16-fake_user_id_1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS256-strong_password-False-32-None]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS256-strong_password-False-32-fake_user_id_1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS256-strong_password-False-128-None]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS256-strong_password-False-128-fake_user_id_1]", 
"test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS256-strong_password-True-16-None]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS256-strong_password-True-16-fake_user_id_1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS256-strong_password-True-32-None]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS256-strong_password-True-32-fake_user_id_1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS256-strong_password-True-128-None]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS256-strong_password-True-128-fake_user_id_1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS512-None-False-16-None]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS512-None-False-16-fake_user_id_1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS512-None-False-32-None]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS512-None-False-32-fake_user_id_1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS512-None-False-128-None]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS512-None-False-128-fake_user_id_1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS512-None-True-16-None]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS512-None-True-16-fake_user_id_1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS512-None-True-32-None]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS512-None-True-32-fake_user_id_1]", 
"test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS512-None-True-128-None]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS512-None-True-128-fake_user_id_1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS512-strong_password-False-16-None]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS512-strong_password-False-16-fake_user_id_1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS512-strong_password-False-32-None]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS512-strong_password-False-32-fake_user_id_1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS512-strong_password-False-128-None]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS512-strong_password-False-128-fake_user_id_1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS512-strong_password-True-16-None]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS512-strong_password-True-16-fake_user_id_1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS512-strong_password-True-32-None]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS512-strong_password-True-32-fake_user_id_1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS512-strong_password-True-128-None]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_saves_user_id_for_future_calls[RS512-strong_password-True-128-fake_user_id_1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_raises_value_error_if_different_enterprise_id_is_given[RS256-None-False]", 
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_raises_value_error_if_different_enterprise_id_is_given[RS256-None-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_raises_value_error_if_different_enterprise_id_is_given[RS256-strong_password-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_raises_value_error_if_different_enterprise_id_is_given[RS256-strong_password-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_raises_value_error_if_different_enterprise_id_is_given[RS512-None-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_raises_value_error_if_different_enterprise_id_is_given[RS512-None-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_raises_value_error_if_different_enterprise_id_is_given[RS512-strong_password-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_raises_value_error_if_different_enterprise_id_is_given[RS512-strong_password-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_saves_enterprise_id_for_future_calls[RS256-None-False-16]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_saves_enterprise_id_for_future_calls[RS256-None-False-32]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_saves_enterprise_id_for_future_calls[RS256-None-False-128]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_saves_enterprise_id_for_future_calls[RS256-None-True-16]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_saves_enterprise_id_for_future_calls[RS256-None-True-32]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_saves_enterprise_id_for_future_calls[RS256-None-True-128]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_saves_enterprise_id_for_future_calls[RS256-strong_password-False-16]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_saves_enterprise_id_for_future_calls[RS256-strong_password-False-32]", 
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_saves_enterprise_id_for_future_calls[RS256-strong_password-False-128]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_saves_enterprise_id_for_future_calls[RS256-strong_password-True-16]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_saves_enterprise_id_for_future_calls[RS256-strong_password-True-32]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_saves_enterprise_id_for_future_calls[RS256-strong_password-True-128]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_saves_enterprise_id_for_future_calls[RS512-None-False-16]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_saves_enterprise_id_for_future_calls[RS512-None-False-32]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_saves_enterprise_id_for_future_calls[RS512-None-False-128]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_saves_enterprise_id_for_future_calls[RS512-None-True-16]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_saves_enterprise_id_for_future_calls[RS512-None-True-32]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_saves_enterprise_id_for_future_calls[RS512-None-True-128]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_saves_enterprise_id_for_future_calls[RS512-strong_password-False-16]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_saves_enterprise_id_for_future_calls[RS512-strong_password-False-32]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_saves_enterprise_id_for_future_calls[RS512-strong_password-False-128]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_saves_enterprise_id_for_future_calls[RS512-strong_password-True-16]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_saves_enterprise_id_for_future_calls[RS512-strong_password-True-32]", 
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_saves_enterprise_id_for_future_calls[RS512-strong_password-True-128]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-False-16-fake_user_id-False0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-False-16-fake_user_id-True0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-False-16-fake_user_id-False1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-False-16-fake_user_id-True1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-False-16-user4-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-False-16-user5-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-False-32-fake_user_id-False0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-False-32-fake_user_id-True0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-False-32-fake_user_id-False1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-False-32-fake_user_id-True1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-False-32-user4-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-False-32-user5-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-False-128-fake_user_id-False0]", 
"test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-False-128-fake_user_id-True0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-False-128-fake_user_id-False1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-False-128-fake_user_id-True1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-False-128-user4-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-False-128-user5-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-True-16-fake_user_id-False0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-True-16-fake_user_id-True0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-True-16-fake_user_id-False1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-True-16-fake_user_id-True1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-True-16-user4-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-True-16-user5-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-True-32-fake_user_id-False0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-True-32-fake_user_id-True0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-True-32-fake_user_id-False1]", 
"test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-True-32-fake_user_id-True1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-True-32-user4-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-True-32-user5-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-True-128-fake_user_id-False0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-True-128-fake_user_id-True0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-True-128-fake_user_id-False1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-True-128-fake_user_id-True1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-True-128-user4-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-None-True-128-user5-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-False-16-fake_user_id-False0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-False-16-fake_user_id-True0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-False-16-fake_user_id-False1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-False-16-fake_user_id-True1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-False-16-user4-False]", 
"test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-False-16-user5-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-False-32-fake_user_id-False0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-False-32-fake_user_id-True0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-False-32-fake_user_id-False1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-False-32-fake_user_id-True1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-False-32-user4-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-False-32-user5-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-False-128-fake_user_id-False0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-False-128-fake_user_id-True0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-False-128-fake_user_id-False1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-False-128-fake_user_id-True1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-False-128-user4-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-False-128-user5-True]", 
"test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-True-16-fake_user_id-False0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-True-16-fake_user_id-True0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-True-16-fake_user_id-False1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-True-16-fake_user_id-True1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-True-16-user4-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-True-16-user5-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-True-32-fake_user_id-False0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-True-32-fake_user_id-True0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-True-32-fake_user_id-False1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-True-32-fake_user_id-True1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-True-32-user4-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-True-32-user5-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-True-128-fake_user_id-False0]", 
"test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-True-128-fake_user_id-True0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-True-128-fake_user_id-False1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-True-128-fake_user_id-True1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-True-128-user4-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS256-strong_password-True-128-user5-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-False-16-fake_user_id-False0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-False-16-fake_user_id-True0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-False-16-fake_user_id-False1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-False-16-fake_user_id-True1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-False-16-user4-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-False-16-user5-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-False-32-fake_user_id-False0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-False-32-fake_user_id-True0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-False-32-fake_user_id-False1]", 
"test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-False-32-fake_user_id-True1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-False-32-user4-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-False-32-user5-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-False-128-fake_user_id-False0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-False-128-fake_user_id-True0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-False-128-fake_user_id-False1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-False-128-fake_user_id-True1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-False-128-user4-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-False-128-user5-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-True-16-fake_user_id-False0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-True-16-fake_user_id-True0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-True-16-fake_user_id-False1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-True-16-fake_user_id-True1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-True-16-user4-False]", 
"test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-True-16-user5-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-True-32-fake_user_id-False0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-True-32-fake_user_id-True0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-True-32-fake_user_id-False1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-True-32-fake_user_id-True1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-True-32-user4-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-True-32-user5-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-True-128-fake_user_id-False0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-True-128-fake_user_id-True0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-True-128-fake_user_id-False1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-True-128-fake_user_id-True1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-True-128-user4-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-None-True-128-user5-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-False-16-fake_user_id-False0]", 
"test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-False-16-fake_user_id-True0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-False-16-fake_user_id-False1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-False-16-fake_user_id-True1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-False-16-user4-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-False-16-user5-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-False-32-fake_user_id-False0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-False-32-fake_user_id-True0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-False-32-fake_user_id-False1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-False-32-fake_user_id-True1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-False-32-user4-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-False-32-user5-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-False-128-fake_user_id-False0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-False-128-fake_user_id-True0]", 
"test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-False-128-fake_user_id-False1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-False-128-fake_user_id-True1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-False-128-user4-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-False-128-user5-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-True-16-fake_user_id-False0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-True-16-fake_user_id-True0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-True-16-fake_user_id-False1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-True-16-fake_user_id-True1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-True-16-user4-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-True-16-user5-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-True-32-fake_user_id-False0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-True-32-fake_user_id-True0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-True-32-fake_user_id-False1]", 
"test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-True-32-fake_user_id-True1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-True-32-user4-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-True-32-user5-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-True-128-fake_user_id-False0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-True-128-fake_user_id-True0]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-True-128-fake_user_id-False1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-True-128-fake_user_id-True1]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-True-128-user4-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_user_sends_post_request_with_correct_params[RS512-strong_password-True-128-user5-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-None-False-16-True-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-None-False-16-False-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-None-False-16-True-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-None-False-32-True-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-None-False-32-False-True]", 
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-None-False-32-True-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-None-False-128-True-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-None-False-128-False-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-None-False-128-True-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-None-True-16-True-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-None-True-16-False-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-None-True-16-True-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-None-True-32-True-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-None-True-32-False-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-None-True-32-True-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-None-True-128-True-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-None-True-128-False-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-None-True-128-True-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-strong_password-False-16-True-False]", 
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-strong_password-False-16-False-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-strong_password-False-16-True-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-strong_password-False-32-True-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-strong_password-False-32-False-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-strong_password-False-32-True-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-strong_password-False-128-True-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-strong_password-False-128-False-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-strong_password-False-128-True-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-strong_password-True-16-True-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-strong_password-True-16-False-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-strong_password-True-16-True-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-strong_password-True-32-True-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-strong_password-True-32-False-True]", 
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-strong_password-True-32-True-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-strong_password-True-128-True-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-strong_password-True-128-False-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS256-strong_password-True-128-True-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-None-False-16-True-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-None-False-16-False-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-None-False-16-True-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-None-False-32-True-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-None-False-32-False-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-None-False-32-True-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-None-False-128-True-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-None-False-128-False-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-None-False-128-True-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-None-True-16-True-False]", 
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-None-True-16-False-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-None-True-16-True-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-None-True-32-True-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-None-True-32-False-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-None-True-32-True-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-None-True-128-True-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-None-True-128-False-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-None-True-128-True-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-strong_password-False-16-True-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-strong_password-False-16-False-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-strong_password-False-16-True-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-strong_password-False-32-True-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-strong_password-False-32-False-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-strong_password-False-32-True-True]", 
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-strong_password-False-128-True-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-strong_password-False-128-False-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-strong_password-False-128-True-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-strong_password-True-16-True-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-strong_password-True-16-False-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-strong_password-True-16-True-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-strong_password-True-32-True-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-strong_password-True-32-False-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-strong_password-True-32-True-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-strong_password-True-128-True-False]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-strong_password-True-128-False-True]", "test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[RS512-strong_password-True-128-True-True]", "test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[RS256-None-False-16]", "test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[RS256-None-False-32]", 
"test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[RS256-None-False-128]", "test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[RS256-None-True-16]", "test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[RS256-None-True-32]", "test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[RS256-None-True-128]", "test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[RS256-strong_password-False-16]", "test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[RS256-strong_password-False-32]", "test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[RS256-strong_password-False-128]", "test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[RS256-strong_password-True-16]", "test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[RS256-strong_password-True-32]", "test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[RS256-strong_password-True-128]", "test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[RS512-None-False-16]", "test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[RS512-None-False-32]", "test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[RS512-None-False-128]", "test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[RS512-None-True-16]", "test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[RS512-None-True-32]", "test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[RS512-None-True-128]", 
"test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[RS512-strong_password-False-16]", "test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[RS512-strong_password-False-32]", "test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[RS512-strong_password-False-128]", "test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[RS512-strong_password-True-16]", "test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[RS512-strong_password-True-32]", "test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[RS512-strong_password-True-128]", "test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[RS256-None-False-16]", "test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[RS256-None-False-32]", "test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[RS256-None-False-128]", "test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[RS256-None-True-16]", "test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[RS256-None-True-32]", "test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[RS256-None-True-128]", "test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[RS256-strong_password-False-16]", "test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[RS256-strong_password-False-32]", "test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[RS256-strong_password-False-128]", "test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[RS256-strong_password-True-16]", 
"test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[RS256-strong_password-True-32]", "test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[RS256-strong_password-True-128]", "test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[RS512-None-False-16]", "test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[RS512-None-False-32]", "test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[RS512-None-False-128]", "test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[RS512-None-True-16]", "test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[RS512-None-True-32]", "test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[RS512-None-True-128]", "test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[RS512-strong_password-False-16]", "test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[RS512-strong_password-False-32]", "test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[RS512-strong_password-False-128]", "test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[RS512-strong_password-True-16]", "test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[RS512-strong_password-True-32]", "test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[RS512-strong_password-True-128]" ]
[]
Apache License 2.0
1,709
[ "HISTORY.rst", "boxsdk/auth/jwt_auth.py" ]
[ "HISTORY.rst", "boxsdk/auth/jwt_auth.py" ]
Azure__msrest-for-python-53
bad8585bcbe5f92f3b2c892c8b373ee367dff70f
2017-09-28 17:58:23
24deba7a7a9e335314058ec2d0b39a710f61be60
codecov-io: # [Codecov](https://codecov.io/gh/Azure/msrest-for-python/pull/53?src=pr&el=h1) Report > Merging [#53](https://codecov.io/gh/Azure/msrest-for-python/pull/53?src=pr&el=desc) into [master](https://codecov.io/gh/Azure/msrest-for-python/commit/bad8585bcbe5f92f3b2c892c8b373ee367dff70f?src=pr&el=desc) will **not change** coverage. > The diff coverage is `n/a`. [![Impacted file tree graph](https://codecov.io/gh/Azure/msrest-for-python/pull/53/graphs/tree.svg?token=Lny6ZO1MUF&src=pr&width=650&height=150)](https://codecov.io/gh/Azure/msrest-for-python/pull/53?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #53 +/- ## ======================================= Coverage 77.77% 77.77% ======================================= Files 260 260 Lines 12696 12696 ======================================= Hits 9874 9874 Misses 2822 2822 ``` | [Impacted Files](https://codecov.io/gh/Azure/msrest-for-python/pull/53?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [...rmat\_test\_service/operations/queries\_operations.py](https://codecov.io/gh/Azure/msrest-for-python/pull/53?src=pr&el=tree#diff-dGVzdC92YW5pbGxhL0V4cGVjdGVkL0FjY2VwdGFuY2VUZXN0cy9VcmxNdWx0aUNvbGxlY3Rpb25Gb3JtYXQvYXV0b19yZXN0X3VybF9tdXRsaV9jb2xsZWN0aW9uX2Zvcm1hdF90ZXN0X3NlcnZpY2Uvb3BlcmF0aW9ucy9xdWVyaWVzX29wZXJhdGlvbnMucHk=) | `76.78% <0%> (ø)` | :arrow_up: | | [...ion\_test\_service/operations/duration\_operations.py](https://codecov.io/gh/Azure/msrest-for-python/pull/53?src=pr&el=tree#diff-dGVzdC92YW5pbGxhL0V4cGVjdGVkL0FjY2VwdGFuY2VUZXN0cy9Cb2R5RHVyYXRpb24vYXV0b19yZXN0X2R1cmF0aW9uX3Rlc3Rfc2VydmljZS9vcGVyYXRpb25zL2R1cmF0aW9uX29wZXJhdGlvbnMucHk=) | `76.62% <0%> (ø)` | :arrow_up: | | [...ger\_bat\_byte\_service/operations/byte\_operations.py](https://codecov.io/gh/Azure/msrest-for-python/pull/53?src=pr&el=tree#diff-dGVzdC92YW5pbGxhL0V4cGVjdGVkL0FjY2VwdGFuY2VUZXN0cy9Cb2R5Qnl0ZS9hdXRvX3Jlc3Rfc3dhZ2dlcl9iYXRfYnl0ZV9zZXJ2aWNlL29wZXJhdGlvbnMvYnl0ZV9vcGVyYXRpb25zLnB5) | `76.84% <0%> (ø)` | :arrow_up: | | 
[...rvice/operations/http\_client\_failure\_operations.py](https://codecov.io/gh/Azure/msrest-for-python/pull/53?src=pr&el=tree#diff-dGVzdC92YW5pbGxhL0V4cGVjdGVkL0FjY2VwdGFuY2VUZXN0cy9IdHRwL2F1dG9fcmVzdF9odHRwX2luZnJhc3RydWN0dXJlX3Rlc3Rfc2VydmljZS9vcGVyYXRpb25zL2h0dHBfY2xpZW50X2ZhaWx1cmVfb3BlcmF0aW9ucy5weQ==) | `71.54% <0%> (ø)` | :arrow_up: | | [...ed\_host\_test\_client/operations/paths\_operations.py](https://codecov.io/gh/Azure/msrest-for-python/pull/53?src=pr&el=tree#diff-dGVzdC92YW5pbGxhL0V4cGVjdGVkL0FjY2VwdGFuY2VUZXN0cy9DdXN0b21CYXNlVXJpL2F1dG9fcmVzdF9wYXJhbWV0ZXJpemVkX2hvc3RfdGVzdF9jbGllbnQvb3BlcmF0aW9ucy9wYXRoc19vcGVyYXRpb25zLnB5) | `83.33% <0%> (ø)` | :arrow_up: | | [...nal\_test\_service/operations/implicit\_operations.py](https://codecov.io/gh/Azure/msrest-for-python/pull/53?src=pr&el=tree#diff-dGVzdC92YW5pbGxhL0V4cGVjdGVkL0FjY2VwdGFuY2VUZXN0cy9SZXF1aXJlZE9wdGlvbmFsL2F1dG9fcmVzdF9yZXF1aXJlZF9vcHRpb25hbF90ZXN0X3NlcnZpY2Uvb3BlcmF0aW9ucy9pbXBsaWNpdF9vcGVyYXRpb25zLnB5) | `52.5% <0%> (ø)` | :arrow_up: | | [...ionary\_service/operations/dictionary\_operations.py](https://codecov.io/gh/Azure/msrest-for-python/pull/53?src=pr&el=tree#diff-dGVzdC92YW5pbGxhL0V4cGVjdGVkL0FjY2VwdGFuY2VUZXN0cy9Cb2R5RGljdGlvbmFyeS9hdXRvX3Jlc3Rfc3dhZ2dlcl9iYV90ZGljdGlvbmFyeV9zZXJ2aWNlL29wZXJhdGlvbnMvZGljdGlvbmFyeV9vcGVyYXRpb25zLnB5) | `74.49% <0%> (ø)` | :arrow_up: | | [...e\_test\_service/operations/http\_retry\_operations.py](https://codecov.io/gh/Azure/msrest-for-python/pull/53?src=pr&el=tree#diff-dGVzdC92YW5pbGxhL0V4cGVjdGVkL0FjY2VwdGFuY2VUZXN0cy9IdHRwL2F1dG9fcmVzdF9odHRwX2luZnJhc3RydWN0dXJlX3Rlc3Rfc2VydmljZS9vcGVyYXRpb25zL2h0dHBfcmV0cnlfb3BlcmF0aW9ucy5weQ==) | `72.46% <0%> (ø)` | :arrow_up: | | 
[...t\_service/operations/datetimerfc1123\_operations.py](https://codecov.io/gh/Azure/msrest-for-python/pull/53?src=pr&el=tree#diff-dGVzdC92YW5pbGxhL0V4cGVjdGVkL0FjY2VwdGFuY2VUZXN0cy9Cb2R5RGF0ZVRpbWVSZmMxMTIzL2F1dG9fcmVzdF9yZmMxMTIzX2RhdGVfdGltZV90ZXN0X3NlcnZpY2Uvb3BlcmF0aW9ucy9kYXRldGltZXJmYzExMjNfb3BlcmF0aW9ucy5weQ==) | `74.39% <0%> (ø)` | :arrow_up: | | [...test\_service/operations/polymorphism\_operations.py](https://codecov.io/gh/Azure/msrest-for-python/pull/53?src=pr&el=tree#diff-dGVzdC92YW5pbGxhL0V4cGVjdGVkL0FjY2VwdGFuY2VUZXN0cy9Cb2R5Q29tcGxleC9hdXRvX3Jlc3RfY29tcGxleF90ZXN0X3NlcnZpY2Uvb3BlcmF0aW9ucy9wb2x5bW9ycGhpc21fb3BlcmF0aW9ucy5weQ==) | `71.42% <0%> (ø)` | :arrow_up: | | ... and [45 more](https://codecov.io/gh/Azure/msrest-for-python/pull/53?src=pr&el=tree-more) | | ------ [Continue to review full report at Codecov](https://codecov.io/gh/Azure/msrest-for-python/pull/53?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/Azure/msrest-for-python/pull/53?src=pr&el=footer). Last update [bad8585...42a6a8b](https://codecov.io/gh/Azure/msrest-for-python/pull/53?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
diff --git a/msrest/configuration.py b/msrest/configuration.py index 7b47b39..4b46b56 100644 --- a/msrest/configuration.py +++ b/msrest/configuration.py @@ -44,6 +44,19 @@ from .pipeline import ( from .version import msrest_version +def default_session_configuration_callback(session, global_config, local_config, **kwargs): + """Configuration callback if you need to change default session configuration. + + :param requests.Session session: The session. + :param Configuration global_config: The global configuration. + :param dict local_config: The on-the-fly configuration passed on the call. + :param dict kwargs: The current computed values for session.request method. + :return: Must return kwargs, to be passed to session.request. If None is return, initial kwargs will be used. + :rtype: dict + """ + return kwargs + + class Configuration(object): """Client configuration. @@ -79,6 +92,8 @@ class Configuration(object): # - kwargs['msrest']['session'] with the current session self.hooks = [] + self.session_configuration_callback = default_session_configuration_callback + self._config = configparser.ConfigParser() self._config.optionxform = str diff --git a/msrest/serialization.py b/msrest/serialization.py index 1902ca2..cc03063 100644 --- a/msrest/serialization.py +++ b/msrest/serialization.py @@ -350,6 +350,7 @@ class Serializer(object): } self.dependencies = dict(classes) if classes else {} self.key_transformer = full_restapi_key_transformer + self.client_side_validation = True def _serialize(self, target_obj, data_type=None, **kwargs): """Serialize data into a string according to type. 
@@ -440,9 +441,10 @@ class Serializer(object): raise_with_traceback( SerializationError, "Unable to build a model: "+str(err), err) - errors = _recursive_validate(data_type, data) - if errors: - raise errors[0] + if self.client_side_validation: + errors = _recursive_validate(data_type, data) + if errors: + raise errors[0] return self._serialize(data, data_type, **kwargs) def url(self, name, data, data_type, **kwargs): @@ -454,7 +456,8 @@ class Serializer(object): :raises: TypeError if serialization fails. :raises: ValueError if data is None """ - data = self.validate(data, name, required=True, **kwargs) + if self.client_side_validation: + data = self.validate(data, name, required=True, **kwargs) try: output = self.serialize_data(data, data_type, **kwargs) if data_type == 'bool': @@ -478,7 +481,8 @@ class Serializer(object): :raises: TypeError if serialization fails. :raises: ValueError if data is None """ - data = self.validate(data, name, required=True, **kwargs) + if self.client_side_validation: + data = self.validate(data, name, required=True, **kwargs) try: if data_type in ['[str]']: data = ["" if d is None else d for d in data] @@ -504,7 +508,8 @@ class Serializer(object): :raises: TypeError if serialization fails. 
:raises: ValueError if data is None """ - data = self.validate(data, name, required=True, **kwargs) + if self.client_side_validation: + data = self.validate(data, name, required=True, **kwargs) try: if data_type in ['[str]']: data = ["" if d is None else d for d in data] diff --git a/msrest/service_client.py b/msrest/service_client.py index d0e6fdb..eed50c5 100644 --- a/msrest/service_client.py +++ b/msrest/service_client.py @@ -145,6 +145,11 @@ class ServiceClient(object): for protocol in self._protocols: session.mount(protocol, requests.adapters.HTTPAdapter(max_retries=max_retries)) + + output_kwargs = self.config.session_configuration_callback(session, self.config, config, **kwargs) + if output_kwargs is not None: + kwargs = output_kwargs + return kwargs def send_formdata(self, request, headers=None, content=None, **config):
Add support to disable validation on the runtime side Should be a flag in configuration to disable runtime validation. This flag should be default to `client-side-validation` value from Autorest: https://github.com/Azure/autorest/issues/1583#issuecomment-311142690 FYI @matthchr
Azure/msrest-for-python
diff --git a/tests/test_client.py b/tests/test_client.py index 8e73444..ee10d48 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -50,6 +50,22 @@ class TestServiceClient(unittest.TestCase): self.creds = mock.create_autospec(OAuthTokenAuthentication) return super(TestServiceClient, self).setUp() + def test_session_callback(self): + + client = ServiceClient(self.creds, self.cfg) + local_session = requests.Session() + + def callback(session, global_config, local_config, **kwargs): + self.assertIs(session, local_session) + self.assertIs(global_config, self.cfg) + self.assertTrue(local_config["test"]) + return {'used_callback': True} + + self.cfg.session_configuration_callback = callback + + output_kwargs = client._configure_session(local_session, **{"test": True}) + self.assertTrue(output_kwargs['used_callback']) + def test_client_request(self): client = ServiceClient(self.creds, self.cfg) diff --git a/tests/test_serialization.py b/tests/test_serialization.py index c6cf07d..4d6a80a 100644 --- a/tests/test_serialization.py +++ b/tests/test_serialization.py @@ -172,6 +172,32 @@ class TestRuntimeSerialized(unittest.TestCase): self.s = Serializer({'TestObj': self.TestObj}) return super(TestRuntimeSerialized, self).setUp() + def test_validation_flag(self): + s = Serializer() + s.client_side_validation = True + + with self.assertRaises(ValidationError): + s.query("filter", "", "str", min_length=666) + with self.assertRaises(ValidationError): + s.url("filter", "", "str", min_length=666) + with self.assertRaises(ValidationError): + s.header("filter", "", "str", min_length=666) + + test_obj = self.TestObj() + self.TestObj._validation = { + 'attr_b': {'required': True}, + } + test_obj.attr_b = None + + with self.assertRaises(ValidationError): + self.s.body(test_obj, 'TestObj') + + s.client_side_validation = False + s.query("filter", "", "str", min_length=666) + s.url("filter", "", "str", min_length=666) + s.header("filter", "", "str", min_length=666) + 
s.body(test_obj, 'TestObj') + def test_serialize_direct_model(self): testobj = self.TestObj() testobj.attr_a = "myid"
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 3 }
0.4
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": [ "dev_requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 charset-normalizer==2.0.12 idna==3.10 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work isodate==0.6.1 more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work -e git+https://github.com/Azure/msrest-for-python.git@bad8585bcbe5f92f3b2c892c8b373ee367dff70f#egg=msrest oauthlib==3.2.2 packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 requests==2.27.1 requests-oauthlib==2.0.0 six==1.17.0 toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work urllib3==1.26.20 zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: msrest-for-python channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - charset-normalizer==2.0.12 - idna==3.10 - isodate==0.6.1 - oauthlib==3.2.2 - requests==2.27.1 - requests-oauthlib==2.0.0 - six==1.17.0 - urllib3==1.26.20 prefix: /opt/conda/envs/msrest-for-python
[ "tests/test_client.py::TestServiceClient::test_session_callback", "tests/test_serialization.py::TestRuntimeSerialized::test_validation_flag" ]
[]
[ "tests/test_client.py::TestServiceClient::test_client_formdata_send", "tests/test_client.py::TestServiceClient::test_client_header", "tests/test_client.py::TestServiceClient::test_client_request", "tests/test_client.py::TestServiceClient::test_client_send", "tests/test_client.py::TestServiceClient::test_format_data", "tests/test_client.py::TestServiceClient::test_format_url", "tests/test_serialization.py::TestModelDeserialization::test_response", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_bool", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_dict_simple", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_duration", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_enum", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_int", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_complex", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_simple", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_none", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_sequence", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_str", "tests/test_serialization.py::TestRuntimeSerialized::test_empty_list", "tests/test_serialization.py::TestRuntimeSerialized::test_key_type", "tests/test_serialization.py::TestRuntimeSerialized::test_model_validate", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_serialize_none", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_malformed_map", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_mismatched_map", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_without_attr_map", "tests/test_serialization.py::TestRuntimeSerialized::test_polymorphic_serialization", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_datetime", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_direct_model", 
"tests/test_serialization.py::TestRuntimeSerialized::test_serialize_empty_iter", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_json_obj", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_object", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_primitive_types", "tests/test_serialization.py::TestRuntimeSerialized::test_validate", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_bool", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_int", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_complex", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_in_list", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_simple", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_none", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_str", "tests/test_serialization.py::TestRuntimeDeserialized::test_basic_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_cls_method_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_datetime", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_object", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_storage", "tests/test_serialization.py::TestRuntimeDeserialized::test_invalid_json", "tests/test_serialization.py::TestRuntimeDeserialized::test_non_obj_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_malformed_map", "tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_no_attr", "tests/test_serialization.py::TestRuntimeDeserialized::test_personalize_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization_with_escape", 
"tests/test_serialization.py::TestRuntimeDeserialized::test_robust_deserialization", "tests/test_serialization.py::TestModelInstanceEquality::test_model_instance_equality" ]
[]
MIT License
1,710
[ "msrest/service_client.py", "msrest/configuration.py", "msrest/serialization.py" ]
[ "msrest/service_client.py", "msrest/configuration.py", "msrest/serialization.py" ]
Azure__msrest-for-python-54
36172c1011c1a6b62eb57f7608ef571b71747a1a
2017-09-28 18:30:24
24deba7a7a9e335314058ec2d0b39a710f61be60
diff --git a/msrest/serialization.py b/msrest/serialization.py index cc03063..61e811f 100644 --- a/msrest/serialization.py +++ b/msrest/serialization.py @@ -623,6 +623,8 @@ class Serializer(object): in the iterable into a combined string. Default is 'None'. :rtype: list, str """ + if isinstance(data, str): + raise SerializationError("Refuse str type as a valid iter type.") serialized = [] for d in data: try:
Should refuse a string as a valid list of string Ends up in the portal as ['a','b','c'] if we use `Model('abc')` instead of `Model(['abc'])`. Should fail, accepting a string for a list of string is likely an error and not a feature. See https://github.com/Azure/azure-sdk-for-python/issues/1376#issuecomment-323409463
Azure/msrest-for-python
diff --git a/tests/test_serialization.py b/tests/test_serialization.py index 4d6a80a..39f2878 100644 --- a/tests/test_serialization.py +++ b/tests/test_serialization.py @@ -736,6 +736,10 @@ class TestRuntimeSerialized(unittest.TestCase): b = self.s.serialize_iter([], 'int') self.assertEqual(b, []) + def test_serialize_str_as_iter(self): + with self.assertRaises(SerializationError): + self.s.serialize_iter("I am a string", 'str') + def test_serialize_json_obj(self): class ComplexId(Model):
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 1 }
0.4
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest_v2", "no_use_env": null, "packages": "requirements.txt", "pip_packages": null, "pre_install": null, "python": "3.6", "reqs_path": [ "dev_requirements.txt" ], "test_cmd": "pytest -xvs" }
attrs==22.2.0 certifi==2021.5.30 charset-normalizer==2.0.12 coverage==6.2 httpretty==1.1.4 idna==3.10 importlib-metadata==4.8.3 iniconfig==1.1.1 isodate==0.6.1 -e git+https://github.com/Azure/msrest-for-python.git@36172c1011c1a6b62eb57f7608ef571b71747a1a#egg=msrest oauthlib==3.2.2 packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 requests==2.27.1 requests-oauthlib==2.0.0 six==1.17.0 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 zipp==3.6.0
name: msrest-for-python channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - charset-normalizer==2.0.12 - coverage==6.2 - httpretty==1.1.4 - idna==3.10 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - isodate==0.6.1 - oauthlib==3.2.2 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - requests==2.27.1 - requests-oauthlib==2.0.0 - six==1.17.0 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - zipp==3.6.0 prefix: /opt/conda/envs/msrest-for-python
[ "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_str_as_iter", "tests/test_serialization.py::TestRuntimeSerialized::test_validate", "tests/test_serialization.py::TestRuntimeSerialized::test_validation_flag", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_bool", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_int", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_complex", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_in_list", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_simple", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_none", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_str", "tests/test_serialization.py::TestRuntimeDeserialized::test_basic_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_cls_method_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_datetime", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_object", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_storage", "tests/test_serialization.py::TestRuntimeDeserialized::test_invalid_json", "tests/test_serialization.py::TestRuntimeDeserialized::test_non_obj_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_malformed_map", "tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_no_attr", "tests/test_serialization.py::TestRuntimeDeserialized::test_personalize_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization_with_escape", "tests/test_serialization.py::TestRuntimeDeserialized::test_robust_deserialization", "tests/test_serialization.py::TestModelInstanceEquality::test_model_instance_equality" ]
[]
[ "tests/test_serialization.py::TestModelDeserialization::test_response", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_bool", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_dict_simple", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_duration", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_enum", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_int", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_complex", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_simple", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_none", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_sequence", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_str", "tests/test_serialization.py::TestRuntimeSerialized::test_empty_list", "tests/test_serialization.py::TestRuntimeSerialized::test_key_type", "tests/test_serialization.py::TestRuntimeSerialized::test_model_validate", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_serialize_none", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_malformed_map", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_mismatched_map", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_without_attr_map", "tests/test_serialization.py::TestRuntimeSerialized::test_polymorphic_serialization", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_datetime", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_direct_model", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_empty_iter", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_json_obj", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_object", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_primitive_types" ]
[]
MIT License
1,711
[ "msrest/serialization.py" ]
[ "msrest/serialization.py" ]
Azure__msrest-for-python-55
bb876c23427448d293c793e16d415971eb1753bc
2017-09-28 19:26:51
24deba7a7a9e335314058ec2d0b39a710f61be60
diff --git a/msrest/exceptions.py b/msrest/exceptions.py index 6ac7dcc..3b8788e 100644 --- a/msrest/exceptions.py +++ b/msrest/exceptions.py @@ -136,7 +136,13 @@ class HttpOperationError(ClientException): self.error = deserialize(resp_type, response) if self.error is None: self.error = deserialize.dependencies[resp_type]() - self.message = self.error.message + # ARM uses OData v4 + # http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091 + # Code and Message are REQUIRED + self.message = "({}) {}".format( + self.error.error.code, + self.error.error.message + ) except (DeserializationError, AttributeError, KeyError): pass
Improve exception string if custom exception Example, with this: https://github.com/Azure/azure-sdk-for-python/blob/master/azure-mgmt-billing/azure/mgmt/billing/models/error_response.py And this answer: ```python { "error": { "code": "NotOptedIn", "message": "You are not allowed to download invoices. Please contact your account administrator ([email protected]) to turn on access in the management portal for allowing to download invoices through the API." } } ``` We should have something better than: ```python D:\VEnvs\AzureCli\Lib\site-packages\azure\mgmt\billing\operations\invoices_operations.py in internal_paging(next_link, raw) 110 111 if response.status_code not in [200]: --> 112 raise models.ErrorResponseException(self._deserialize, response) 113 114 return response ErrorResponseException: Operation returned an invalid status code 'Unauthorized' ``` We loose the message
Azure/msrest-for-python
diff --git a/tests/test_exceptions.py b/tests/test_exceptions.py new file mode 100644 index 0000000..2995593 --- /dev/null +++ b/tests/test_exceptions.py @@ -0,0 +1,92 @@ +#-------------------------------------------------------------------------- +# +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the ""Software""), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. 
+# +#-------------------------------------------------------------------------- +import json +import unittest +try: + from unittest import mock +except ImportError: + import mock + +import requests + +from msrest.serialization import Model, Deserializer +from msrest.exceptions import HttpOperationError + + +class TestExceptions(unittest.TestCase): + + def test_custom_exception(self): + + class ErrorResponse(Model): + _attribute_map = { + 'error': {'key': 'error', 'type': 'ErrorDetails'}, + } + def __init__(self, error=None): + self.error = error + + + class ErrorResponseException(HttpOperationError): + def __init__(self, deserialize, response, *args): + super(ErrorResponseException, self).__init__(deserialize, response, 'ErrorResponse', *args) + + class ErrorDetails(Model): + _validation = { + 'code': {'readonly': True}, + 'message': {'readonly': True}, + 'target': {'readonly': True}, + } + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'target': {'key': 'target', 'type': 'str'}, + } + + def __init__(self): + self.code = None + self.message = None + self.target = None + + deserializer = Deserializer({ + 'ErrorResponse': ErrorResponse, + 'ErrorDetails': ErrorDetails + }) + + response = mock.create_autospec(requests.Response) + response.text = json.dumps( + { + "error": { + "code": "NotOptedIn", + "message": "You are not allowed to download invoices. Please contact your account administrator ([email protected]) to turn on access in the management portal for allowing to download invoices through the API." + } + } + ) + response.headers = {"content-type": "application/json; charset=utf8"} + + excep = ErrorResponseException(deserializer, response) + + self.assertIn("NotOptedIn", str(excep)) + self.assertIn("You are not allowed to download invoices", str(excep))
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 1 }
0.4
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
certifi==2025.1.31 charset-normalizer==3.4.1 coverage==7.8.0 exceptiongroup==1.2.2 idna==3.10 iniconfig==2.1.0 isodate==0.7.2 -e git+https://github.com/Azure/msrest-for-python.git@bb876c23427448d293c793e16d415971eb1753bc#egg=msrest oauthlib==3.2.2 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 pytest-cov==6.0.0 requests==2.32.3 requests-oauthlib==2.0.0 tomli==2.2.1 urllib3==2.3.0
name: msrest-for-python channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - certifi==2025.1.31 - charset-normalizer==3.4.1 - coverage==7.8.0 - exceptiongroup==1.2.2 - idna==3.10 - iniconfig==2.1.0 - isodate==0.7.2 - oauthlib==3.2.2 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - pytest-cov==6.0.0 - requests==2.32.3 - requests-oauthlib==2.0.0 - tomli==2.2.1 - urllib3==2.3.0 prefix: /opt/conda/envs/msrest-for-python
[ "tests/test_exceptions.py::TestExceptions::test_custom_exception" ]
[]
[]
[]
MIT License
1,712
[ "msrest/exceptions.py" ]
[ "msrest/exceptions.py" ]
unt-libraries__py-wasapi-client-18
509c7dcac70c7e9ef03a2fac10dc2c5d6479cbb8
2017-09-28 20:42:05
509c7dcac70c7e9ef03a2fac10dc2c5d6479cbb8
diff --git a/wasapi_client.py b/wasapi_client.py index 5b1ea4e..0336263 100755 --- a/wasapi_client.py +++ b/wasapi_client.py @@ -19,16 +19,15 @@ except: from queue import Empty from urllib.parse import urlencode +NAME = 'wasapi_client' if __name__ == '__main__' else __name__ -NAME = 'wasapi-client' if __name__ == '__main__' else __name__ - -MAIN_LOGGER = logging.getLogger('main') +LOGGER = logging.getLogger(NAME) READ_LIMIT = 1024 * 512 -def do_listener_logging(log_q, path=''): - formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') +def start_listener_logging(log_q, path=''): + formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s') if path: handler = logging.FileHandler(filename=path) else: @@ -39,16 +38,27 @@ def do_listener_logging(log_q, path=''): listener = logging.handlers.QueueListener(log_q, handler) listener.start() - # Add the handler to the logger, so records from this process are written. - logger = logging.getLogger(NAME) - logger.addHandler(handler) return listener -def configure_worker_logging(log_q, log_level=logging.ERROR, logger_name=None): - logger = logging.getLogger(logger_name) - logger.setLevel(log_level) - logger.addHandler(logging.handlers.QueueHandler(log_q)) +def configure_main_logging(log_q, log_level=logging.ERROR): + """Put a handler on the root logger. + + This allows handling log records from imported modules. + """ + root = logging.getLogger() + root.addHandler(logging.handlers.QueueHandler(log_q)) + root.setLevel(log_level) + + +def configure_worker_logging(log_q, log_level=logging.ERROR): + """Configure logging for worker processes.""" + # Remove any existing handlers. + LOGGER.handlers = [] + # Prevent root logger duplicating messages. 
+ LOGGER.propagate = False + LOGGER.addHandler(logging.handlers.QueueHandler(log_q)) + LOGGER.setLevel(log_level) class WASAPIDownloadError(Exception): @@ -75,7 +85,7 @@ def get_webdata(webdata_uri, session): response = session.get(webdata_uri) except requests.exceptions.ConnectionError as err: sys.exit('Could not connect at {}:\n{}'.format(webdata_uri, err)) - MAIN_LOGGER.info('requesting {}'.format(webdata_uri)) + LOGGER.info('requesting {}'.format(webdata_uri)) if response.status_code == 403: sys.exit('Verify user/password for {}:\n{} {}'.format(webdata_uri, response.status_code, @@ -188,13 +198,13 @@ def download_file(file_data, session, output_path): try: write_file(response, output_path) except OSError as err: - logging.error('{}: {}'.format(location, str(err))) + LOGGER.error('{}: {}'.format(location, str(err))) break # Successful download; don't try alternate locations. - logging.info(msg) + LOGGER.info(msg) return None else: - logging.error(msg) + LOGGER.error(msg) # We didn't download successfully; raise error. msg = 'FAILED to download {} from {}'.format(file_data['filename'], file_data['locations']) @@ -219,17 +229,17 @@ def verify_file(checksums, file_path): hash_function = getattr(hashlib, algorithm, None) if not hash_function: # The hash algorithm provided is not supported by hashlib. - logging.debug('{} is unsupported'.format(algorithm)) + LOGGER.debug('{} is unsupported'.format(algorithm)) continue digest = calculate_sum(hash_function, file_path) if digest == value: - logging.info('Checksum success at: {}'.format(file_path)) + LOGGER.info('Checksum success at: {}'.format(file_path)) return True else: - logging.error('Checksum {} mismatch for {}: expected {}, got {}'.format(algorithm, - file_path, - value, - digest)) + LOGGER.error('Checksum {} mismatch for {}: expected {}, got {}'.format(algorithm, + file_path, + value, + digest)) return False # We didn't find a compatible algorithm. 
return False @@ -312,7 +322,7 @@ class Downloader(multiprocessing.Process): try: download_file(file_data, self.session, output_path) except WASAPIDownloadError as err: - logging.error(str(err)) + LOGGER.error(str(err)) else: # If we download the file without error, verify the checksum. if verify_file(file_data['checksums'], output_path): @@ -365,7 +375,7 @@ def _parse_args(args=sys.argv[1:]): action='store_true', dest='skip_manifest', help='do not generate checksum files (ignored' - ' when used in combination with --manifest') + ' when used in combination with --manifest)') parser.add_argument('-u', '--user', dest='user', @@ -443,7 +453,7 @@ def main(): manager = multiprocessing.Manager() log_q = manager.Queue() try: - listener = do_listener_logging(log_q, args.log) + listener = start_listener_logging(log_q, args.log) except OSError as err: print('Could not open file for logging:', err) sys.exit(1) @@ -453,7 +463,7 @@ def main(): log_level = [logging.ERROR, logging.INFO, logging.DEBUG][args.verbose] except IndexError: log_level = logging.DEBUG - configure_worker_logging(log_q, log_level, 'main') + configure_main_logging(log_q, log_level) # Generate query string for the webdata request. try: @@ -499,8 +509,15 @@ def main(): destination=args.destination) get_q = downloads.get_q result_q = manager.Queue() - for _ in range(args.processes): - Downloader(get_q, result_q, log_q, log_level, auth, args.destination).start() + + download_processes = [] + num_processes = min(args.processes, get_q.qsize()) + for _ in range(num_processes): + dp = Downloader(get_q, result_q, log_q, log_level, auth, args.destination) + dp.start() + download_processes.append(dp) + for dp in download_processes: + dp.join() get_q.join() listener.stop()
Duplicate logging messages The same messages are being logged multiple times--at least with more than one download process running.
unt-libraries/py-wasapi-client
diff --git a/tests/test_wasapi_client.py b/tests/test_wasapi_client.py index 2424886..9ff5c7f 100644 --- a/tests/test_wasapi_client.py +++ b/tests/test_wasapi_client.py @@ -385,13 +385,13 @@ class Test_verify_file: path = 'dummy/path' checksums = {algorithm: checksum} mock_calc_sum.return_value = checksum + 'notmatching' - with patch('wasapi_client.logging', autospec=True) as mock_logging: + with patch('wasapi_client.LOGGER', autospec=True) as mock_logger: assert not wc.verify_file(checksums, path) msg = 'Checksum {} mismatch for {}: expected {}, got {}notmatching'.format(algorithm, path, checksum, checksum) - mock_logging.error.assert_called_once_with(msg) + mock_logger.error.assert_called_once_with(msg) @patch('wasapi_client.calculate_sum') def test_verify_file_one_supported_algorithm(self, mock_calc_sum): @@ -400,11 +400,11 @@ class Test_verify_file: checksums = OrderedDict([('abc', 'algorithm_unsupported'), ('sha1', checksum)]) mock_calc_sum.return_value = checksum - with patch('wasapi_client.logging', autospec=True) as mock_logging: + with patch('wasapi_client.LOGGER', autospec=True) as mock_logger: assert wc.verify_file(checksums, 'dummy/path') # Check that unsupported algorithm was tried. - mock_logging.debug.assert_called_once_with('abc is unsupported') - mock_logging.info.assert_called_once_with('Checksum success at: dummy/path') + mock_logger.debug.assert_called_once_with('abc is unsupported') + mock_logger.info.assert_called_once_with('Checksum success at: dummy/path') class Test_calculate_sum:
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 1 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 charset-normalizer==2.0.12 idna==3.10 importlib-metadata==4.8.3 iniconfig==1.1.1 packaging==21.3 pluggy==1.0.0 py==1.11.0 -e git+https://github.com/unt-libraries/py-wasapi-client.git@509c7dcac70c7e9ef03a2fac10dc2c5d6479cbb8#egg=py_wasapi_client pyparsing==3.1.4 pytest==7.0.1 requests==2.27.1 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 zipp==3.6.0
name: py-wasapi-client channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - charset-normalizer==2.0.12 - idna==3.10 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - requests==2.27.1 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - zipp==3.6.0 prefix: /opt/conda/envs/py-wasapi-client
[ "tests/test_wasapi_client.py::Test_verify_file::test_verify_file_checksum_mismatch", "tests/test_wasapi_client.py::Test_verify_file::test_verify_file_one_supported_algorithm" ]
[]
[ "tests/test_wasapi_client.py::Test_make_session::test_make_session_auth", "tests/test_wasapi_client.py::Test_make_session::test_make_session_no_auth", "tests/test_wasapi_client.py::Test_get_webdata::test_get_webdata", "tests/test_wasapi_client.py::Test_get_webdata::test_get_webdata_403_forbidden", "tests/test_wasapi_client.py::Test_get_webdata::test_get_webdata_ConnectionError", "tests/test_wasapi_client.py::Test_get_webdata::test_get_webdata_json_error", "tests/test_wasapi_client.py::Test_Downloads::test_populate_downloads", "tests/test_wasapi_client.py::Test_Downloads::test_populate_downloads_multi_page", "tests/test_wasapi_client.py::Test_Downloads::test_populate_downloads_no_get_q", "tests/test_wasapi_client.py::Test_Downloads::test_populate_downloads_urls", "tests/test_wasapi_client.py::Test_Downloads::test_populate_downloads_manifest", "tests/test_wasapi_client.py::Test_Downloads::test_populate_downloads_manifest_destination", "tests/test_wasapi_client.py::Test_Downloads::test_populate_downloads_generate_manifest", "tests/test_wasapi_client.py::Test_Downloads::test_write_manifest_file", "tests/test_wasapi_client.py::Test_Downloads::test_write_manifest_file_wrong_algorithm", "tests/test_wasapi_client.py::Test_get_files_count::test_get_files_count", "tests/test_wasapi_client.py::Test_get_files_size::test_get_files_size", "tests/test_wasapi_client.py::Test_get_files_size::test_get_files_size_multi_page", "tests/test_wasapi_client.py::Test_get_files_size::test_get_files_size_no_files", "tests/test_wasapi_client.py::Test_convert_bytes::test_convert_bytes[0-0.0B]", "tests/test_wasapi_client.py::Test_convert_bytes::test_convert_bytes[1023-1023.0B]", "tests/test_wasapi_client.py::Test_convert_bytes::test_convert_bytes[1024-1.0KB]", "tests/test_wasapi_client.py::Test_convert_bytes::test_convert_bytes[1024000-1000.0KB]", "tests/test_wasapi_client.py::Test_convert_bytes::test_convert_bytes[1048576-1.0MB]", 
"tests/test_wasapi_client.py::Test_convert_bytes::test_convert_bytes[1073741824-1.0GB]", "tests/test_wasapi_client.py::Test_convert_bytes::test_convert_bytes[1099511628000-1.0TB]", "tests/test_wasapi_client.py::Test_download_file::test_download_file_200", "tests/test_wasapi_client.py::Test_download_file::test_download_file_not_200", "tests/test_wasapi_client.py::Test_download_file::test_download_file_OSError", "tests/test_wasapi_client.py::Test_verify_file::test_verify_file", "tests/test_wasapi_client.py::Test_verify_file::test_verify_file_unsupported_algorithm", "tests/test_wasapi_client.py::Test_calculate_sum::test_calculate_sum", "tests/test_wasapi_client.py::Test_convert_queue::test_convert_queue", "tests/test_wasapi_client.py::Test_generate_report::test_generate_report_all_success", "tests/test_wasapi_client.py::Test_generate_report::test_generate_report_one_failure", "tests/test_wasapi_client.py::Test_generate_report::test_generate_report_all_failure", "tests/test_wasapi_client.py::TestDownloader::test_run", "tests/test_wasapi_client.py::TestDownloader::test_run_WASAPIDownloadError", "tests/test_wasapi_client.py::Test_parse_args::test_SetQueryParametersAction", "tests/test_wasapi_client.py::Test_parse_args::test_SetQueryParametersAction_multiple_collections" ]
[]
BSD 3-Clause "New" or "Revised" License
1,713
[ "wasapi_client.py" ]
[ "wasapi_client.py" ]
vertexproject__synapse-445
ca3e448523e3e09729f884f54d2135ebf9ff3c08
2017-09-29 13:01:03
6f5fc661a88b8cc3f4befb2c9c7ddcebf0b89ba0
diff --git a/synapse/models/inet.py b/synapse/models/inet.py index 97d9311f8..6df0a55c5 100644 --- a/synapse/models/inet.py +++ b/synapse/models/inet.py @@ -144,18 +144,31 @@ class Srv4Type(DataType): if s_compat.isstr(valu): return self._norm_str(valu, oldval=oldval) + if valu < 0 or valu > 281474976710655: + self._raiseBadValu(valu, mesg='Srv4Type integer is out of bounds') + addr = valu >> 16 port = valu & 0xffff return valu, {'port': port, 'ipv4': addr} def _norm_str(self, text, oldval=None): + if ':' not in text: + try: + valu = int(text) + except ValueError: + self._raiseBadValu(text, mesg='Srv4Type string is not a integer or a colon delimited string.') + return self.norm(valu) + try: astr, pstr = text.split(':') except ValueError as e: - self._raiseBadValu(text) + self._raiseBadValu(text, mesg='Unable to split Srv4Type into two parts') addr = ipv4int(astr) port = int(pstr, 0) + if port < 0 or port > 65535: + self._raiseBadValu(text, port=port, + mesg='Srv4 Port number is out of bounds') return (addr << 16) | port, {'port': port, 'ipv4': addr} srv6re = re.compile('^\[([a-f0-9:]+)\]:(\d+)$')
inet:srv4 type doesn't handle integer as string input inet:tcp4="123456789012345" doesn't norm correctly
vertexproject/synapse
diff --git a/synapse/tests/test_model_inet.py b/synapse/tests/test_model_inet.py index d36f28b0a..16418b499 100644 --- a/synapse/tests/test_model_inet.py +++ b/synapse/tests/test_model_inet.py @@ -123,6 +123,39 @@ class InetModelTest(SynTest): self.eq(t3[1].get('inet:udp4:port'), 8443) self.eq(t3[1].get('inet:udp4:ipv4'), core.getTypeNorm('inet:ipv4', '1.2.3.4')[0]) + # 1.2.3.4:8443 + t4 = core.formTufoByProp('inet:udp4', '1108152164603') + self.eq(t4[1].get('inet:udp4:port'), 8443) + self.eq(t4[1].get('inet:udp4:ipv4'), core.getTypeNorm('inet:ipv4', '1.2.3.4')[0]) + + # Ensure boundaries are observed + for i in ['0', 0, '0.0.0.0:0']: + valu, subs = core.getTypeNorm('inet:srv4', i) + self.eq(valu, 0) + self.eq(subs.get('port'), 0) + self.eq(subs.get('ipv4'), 0) + + for i in ['281474976710655', 281474976710655, '255.255.255.255:65535']: + valu, subs = core.getTypeNorm('inet:srv4', i) + self.eq(valu, 281474976710655) + self.eq(subs.get('port'), 0xFFFF) + self.eq(subs.get('ipv4'), 0xFFFFFFFF) + + # Repr works as expected + self.eq(core.getTypeRepr('inet:srv4', 0), '0.0.0.0:0') + self.eq(core.getTypeRepr('inet:srv4', 1108152164603), '1.2.3.4:8443') + self.eq(core.getTypeRepr('inet:srv4', 281474976710655), '255.255.255.255:65535') + + # Ensure bad input fails + self.raises(BadTypeValu, core.getTypeNorm, 'inet:srv4', '281474976710656') + self.raises(BadTypeValu, core.getTypeNorm, 'inet:srv4', 281474976710656) + self.raises(BadTypeValu, core.getTypeNorm, 'inet:srv4', '255.255.255.255:65536') + self.raises(BadTypeValu, core.getTypeNorm, 'inet:srv4', '255.255.255.255:-1') + self.raises(BadTypeValu, core.getTypeNorm, 'inet:srv4', -1) + self.raises(BadTypeValu, core.getTypeNorm, 'inet:srv4', '-1') + self.raises(BadTypeValu, core.getTypeNorm, 'inet:srv4', 'ruh roh') + self.raises(BadTypeValu, core.getTypeNorm, 'inet:srv4', '1.2.3.4:8080:9090') + def test_model_inet_srv6_types(self): with self.getRamCore() as core: t0 = core.formTufoByProp('inet:tcp6', '[0:0:0:0:0:0:0:1]:80')
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 1 }
0.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y build-essential libffi-dev libssl-dev python3 python3-dev python3-pip python3-setuptools" ], "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 cffi==1.15.1 coverage==6.2 cryptography==40.0.2 importlib-metadata==4.8.3 iniconfig==1.1.1 lmdb==1.6.2 msgpack-python==0.5.6 packaging==21.3 pluggy==1.0.0 py==1.11.0 pycparser==2.21 pyOpenSSL==23.2.0 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 -e git+https://github.com/vertexproject/synapse.git@ca3e448523e3e09729f884f54d2135ebf9ff3c08#egg=synapse tomli==1.2.3 tornado==6.1 typing_extensions==4.1.1 xxhash==3.2.0 zipp==3.6.0
name: synapse channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - cffi==1.15.1 - coverage==6.2 - cryptography==40.0.2 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - lmdb==1.6.2 - msgpack-python==0.5.6 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pycparser==2.21 - pyopenssl==23.2.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - tomli==1.2.3 - tornado==6.1 - typing-extensions==4.1.1 - xxhash==3.2.0 - zipp==3.6.0 prefix: /opt/conda/envs/synapse
[ "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_srv4_types" ]
[]
[ "synapse/tests/test_model_inet.py::InetModelTest::test_model_fqdn_punycode", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_201706121318", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_201706201837", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_201709181501", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_asnet4", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_cast_defang", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_cidr4", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_email", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_follows", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_fqdn", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_fqdn_set_sfx", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_fqdn_unicode", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_ipv4", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_ipv4_raise", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_ipv6", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_mac", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_netmemb", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_netmesg", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_netpost", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_passwd", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_srv6_types", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_url_fields", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_urlfile", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_weblogon", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_whois_recns", 
"synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_whoisemail", "synapse/tests/test_model_inet.py::InetModelTest::test_model_whois_contact" ]
[]
Apache License 2.0
1,714
[ "synapse/models/inet.py" ]
[ "synapse/models/inet.py" ]
CORE-GATECH-GROUP__serpent-tools-21
b67f52bfd0b23baa3eae9f11bab1af14bd8b2798
2017-09-29 14:33:04
b67f52bfd0b23baa3eae9f11bab1af14bd8b2798
diff --git a/serpentTools/__init__.py b/serpentTools/__init__.py index 8e3f32c..3e8e8de 100644 --- a/serpentTools/__init__.py +++ b/serpentTools/__init__.py @@ -1,7 +1,7 @@ from serpentTools import settings from serpentTools import parsers -__version__ = '0.1.3' +__version__ = '0.1.4' # List TODOS/feature requests here for now # Messages/Errors diff --git a/serpentTools/objects/__init__.py b/serpentTools/objects/__init__.py index 8f2e2cd..2ef74d4 100644 --- a/serpentTools/objects/__init__.py +++ b/serpentTools/objects/__init__.py @@ -1,8 +1,5 @@ """Objects used to support the parsing.""" -import numpy -from matplotlib import pyplot - class _SupportingObject(object): """ @@ -17,14 +14,12 @@ class _SupportingObject(object): """ - def __init__(self, container, name): + def __init__(self, container): self._container = container - self.name = name self._filePath = container.filePath def __repr__(self): - return '<{} {} from {}>'.format(self.whatAmI(), - self.name, self._filePath) + return '<{} from {}>'.format(self.whatAmI(), self._filePath) def whatAmI(self): return type(self).__name__ @@ -47,194 +42,13 @@ class _SupportingObject(object): for item in lowerSplits[1:]]) -class DepletedMaterial(_SupportingObject): - """Class for storing material data from ``_dep.m`` files. - - Parameters - ---------- - parser: :py:class:`~serpentTools.parsers.depletion.DepletionReader` - Parser that found this material. - Used to obtain file metadata like isotope names and burnup - name: str - Name of this material - - Attributes - ---------- - zai: numpy.array - Isotope id's - names: numpy.array - Names of isotopes - days: numpy.array - Days overwhich the material was depleted - adens: numpy.array - Atomic density over time for each nuclide - - :note: - - These attributes only exist if the pasers was instructed to - read in this data. I.e. if ``readers.depletion.metadataKeys`` - does not contain ``ZAI``, then this object will not have - the ``zai`` data. 
- - """ - - def __init__(self, parser, name): - _SupportingObject.__init__(self, parser, name) - self._varData = {} - - def __getattr__(self, item): - """ - Allows the user to get items like ``zai`` and ``adens`` - with ``self.zai`` and ``self.adens``, respectively. - """ - if item in self._varData: - return self._varData[item] - return _SupportingObject.__getattr__(self, item) - - def __getitem__(self, item): - if item not in self._varData: - if item not in self._container.metadata: - raise KeyError('{} has no item {}'.format(self, item)) - return self._container.metadata[item] - return self._varData[item] - - def addData(self, variable, rawData): - """Add data straight from the file onto a variable. - - Parameters - ---------- - variable: str - Name of the variable directly from ``SERPENT`` - rawData: list - List of strings corresponding to the raw data from the file - """ - newName = self._convertVariableName(variable) - if isinstance(rawData, str): - scratch = [float(item) for item in rawData.split()] - else: - scratch = [] - for line in rawData: - if line: - scratch.append([float(item) for item in line.split()]) - self._varData[newName] = numpy.array(scratch) - - def getXY(self, xUnits, yUnits, timePoints=None, names=None): - """Return x values for given time, and corresponding isotope values. - - Parameters - ---------- - xUnits: str - name of x value to obtain, e.g. ``'days'``, ``'burnup'`` - yUnits: str - name of y value to return, e.g. ``'adens'``, ``'burnup'`` - timePoints: list or None - If given, select the time points according to those specified here. - Otherwise, select all points - names: list or None - If given, return y values corresponding to these isotope names. - Otherwise, return values for all isotopes. - - Returns - ------- - numpy.array - Array of values. 
- numpy.array - Vector of time points only if ``timePoints`` is ``None`` - - Raises - ------ - AttributeError - If the names of the isotopes have not been obtained and specific - isotopes have been requested - KeyError - If at least one of the days requested is not present - """ - if timePoints is not None: - returnX = False - timeCheck = self._checkTimePoints(xUnits, timePoints) - if any(timeCheck): - raise KeyError('The following times were not present in file {}' - '\n{}'.format(self._container.filePath, - ', '.join(timeCheck))) - else: - returnX = True - if names and 'names' not in self._container.metadata: - raise AttributeError('Parser {} has not stored the isotope names.' - .format(self._container)) - xVals, colIndices = self._getXSlice(xUnits, timePoints) - rowIndices = self._getIsoID(names) - allY = self[yUnits] - if allY.shape[0] == 1 or len(allY.shape) == 1: # vector - return xVals, allY[colIndices] if colIndices else allY - yVals = numpy.empty((len(rowIndices), len(xVals)), dtype=float) - for isoID, rowId in enumerate(rowIndices): - yVals[isoID, :] = (allY[rowId][colIndices] if colIndices - else allY[rowId][:]) - if returnX: - return yVals, xVals - return yVals - - def _checkTimePoints(self, xUnits, timePoints): - valid = self[xUnits] - badPoints = [str(time) for time in timePoints if time not in valid] - return badPoints - - - def _getXSlice(self, xUnits, timePoints): - allX = self[xUnits] - if timePoints is not None: - colIndices = [indx for indx, xx in enumerate(allX) - if xx in timePoints] - xVals = allX[colIndices] - else: - colIndices = None - xVals = allX - return xVals, colIndices - - def _getIsoID(self, isotopes): - """Return the row indices that correspond to specfic isotopes.""" - # TODO: List comprehension to make rowIDs then return array - if not isotopes: - return numpy.array(list(range(len(self.names))), dtype=int) - isoList = [isotopes] if isinstance(isotopes, (str, int)) else isotopes - rowIDs = numpy.empty_like(isoList, dtype=int) - 
for indx, isotope in enumerate(isoList): - rowIDs[indx] = self.names.index(isotope) - return rowIDs - - def plot(self, xUnits, yUnits, timePoints=None, names=None, ax=None): - """Plot some data as a function of time for some or all isotopes. - - Parameters - ---------- - xUnits: str - name of x value to obtain, e.g. ``'days'``, ``'burnup'`` - yUnits: str - name of y value to return, e.g. ``'adens'``, ``'burnup'`` - timePoints: list or None - If given, select the time points according to those - specified here. Otherwise, select all points - names: list or None - If given, return y values corresponding to these isotope - names. Otherwise, return values for all isotopes. - ax: None or ``matplotlib axes`` - If given, add the data to this plot. - Otherwise, create a new plot - - Returns - ------- - ``matplotlib axes`` - Axes corresponding to the figure that was plotted +class _NamedObject(_SupportingObject): + """Class for named objects like materials and detectors.""" - See Also - -------- - getXY - - """ - xVals, yVals = self.getXY(xUnits, yUnits, timePoints, names) - ax = ax or pyplot.subplots(1, 1)[1] - labels = names or [None] - for row in range(yVals.shape[0]): - ax.plot(xVals, yVals[row], label=labels[row]) + def __init__(self, container, name): + _SupportingObject.__init__(self, container) + self.name = name - return ax + def __repr__(self): + return '<{} {} from {}>'.format(self.whatAmI(), + self.name, self._filePath) \ No newline at end of file diff --git a/serpentTools/objects/materials.py b/serpentTools/objects/materials.py new file mode 100644 index 0000000..be2fe02 --- /dev/null +++ b/serpentTools/objects/materials.py @@ -0,0 +1,199 @@ +"""Classes for storing material data.""" + +import numpy +from matplotlib import pyplot + + +from serpentTools.objects import _NamedObject + + +class DepletedMaterial(_NamedObject): + """Class for storing material data from ``_dep.m`` files. 
+ + Parameters + ---------- + parser: :py:class:`~serpentTools.parsers.depletion.DepletionReader` + Parser that found this material. + Used to obtain file metadata like isotope names and burnup + name: str + Name of this material + + Attributes + ---------- + zai: numpy.array + Isotope id's + names: numpy.array + Names of isotopes + days: numpy.array + Days overwhich the material was depleted + adens: numpy.array + Atomic density over time for each nuclide + + :note: + + These attributes only exist if the pasers was instructed to + read in this data. I.e. if ``readers.depletion.metadataKeys`` + does not contain ``ZAI``, then this object will not have + the ``zai`` data. + + """ + + def __init__(self, parser, name): + _NamedObject.__init__(self, parser, name) + self._varData = {} + + def __getattr__(self, item): + """ + Allows the user to get items like ``zai`` and ``adens`` + with ``self.zai`` and ``self.adens``, respectively. + """ + if item in self._varData: + return self._varData[item] + return _NamedObject.__getattr__(self, item) + + def __getitem__(self, item): + if item not in self._varData: + if item not in self._container.metadata: + raise KeyError('{} has no item {}'.format(self, item)) + return self._container.metadata[item] + return self._varData[item] + + def addData(self, variable, rawData): + """Add data straight from the file onto a variable. + + Parameters + ---------- + variable: str + Name of the variable directly from ``SERPENT`` + rawData: list + List of strings corresponding to the raw data from the file + """ + newName = self._convertVariableName(variable) + if isinstance(rawData, str): + scratch = [float(item) for item in rawData.split()] + else: + scratch = [] + for line in rawData: + if line: + scratch.append([float(item) for item in line.split()]) + self._varData[newName] = numpy.array(scratch) + + def getXY(self, xUnits, yUnits, timePoints=None, names=None): + """Return x values for given time, and corresponding isotope values. 
+ + Parameters + ---------- + xUnits: str + name of x value to obtain, e.g. ``'days'``, ``'burnup'`` + yUnits: str + name of y value to return, e.g. ``'adens'``, ``'burnup'`` + timePoints: list or None + If given, select the time points according to those specified here. + Otherwise, select all points + names: list or None + If given, return y values corresponding to these isotope names. + Otherwise, return values for all isotopes. + + Returns + ------- + numpy.array + Array of values. + numpy.array + Vector of time points only if ``timePoints`` is ``None`` + + Raises + ------ + AttributeError + If the names of the isotopes have not been obtained and specific + isotopes have been requested + KeyError + If at least one of the days requested is not present + """ + if timePoints is not None: + returnX = False + timeCheck = self._checkTimePoints(xUnits, timePoints) + if any(timeCheck): + raise KeyError('The following times were not present in file {}' + '\n{}'.format(self._container.filePath, + ', '.join(timeCheck))) + else: + returnX = True + if names and 'names' not in self._container.metadata: + raise AttributeError('Parser {} has not stored the isotope names.' 
+ .format(self._container)) + xVals, colIndices = self._getXSlice(xUnits, timePoints) + rowIndices = self._getIsoID(names) + allY = self[yUnits] + if allY.shape[0] == 1 or len(allY.shape) == 1: # vector + yVals = allY[colIndices] if colIndices else allY + else: + yVals = numpy.empty((len(rowIndices), len(xVals)), dtype=float) + for isoID, rowId in enumerate(rowIndices): + yVals[isoID, :] = (allY[rowId][colIndices] if colIndices + else allY[rowId][:]) + if returnX: + return yVals, xVals + return yVals + + def _checkTimePoints(self, xUnits, timePoints): + valid = self[xUnits] + badPoints = [str(time) for time in timePoints if time not in valid] + return badPoints + + def _getXSlice(self, xUnits, timePoints): + allX = self[xUnits] + if timePoints is not None: + colIndices = [indx for indx, xx in enumerate(allX) + if xx in timePoints] + xVals = allX[colIndices] + else: + colIndices = None + xVals = allX + return xVals, colIndices + + def _getIsoID(self, isotopes): + """Return the row indices that correspond to specfic isotopes.""" + # TODO: List comprehension to make rowIDs then return array + if not isotopes: + return numpy.array(list(range(len(self.names))), dtype=int) + isoList = [isotopes] if isinstance(isotopes, (str, int)) else isotopes + rowIDs = numpy.empty_like(isoList, dtype=int) + for indx, isotope in enumerate(isoList): + rowIDs[indx] = self.names.index(isotope) + return rowIDs + + def plot(self, xUnits, yUnits, timePoints=None, names=None, ax=None): + """Plot some data as a function of time for some or all isotopes. + + Parameters + ---------- + xUnits: str + name of x value to obtain, e.g. ``'days'``, ``'burnup'`` + yUnits: str + name of y value to return, e.g. ``'adens'``, ``'burnup'`` + timePoints: list or None + If given, select the time points according to those + specified here. Otherwise, select all points + names: list or None + If given, return y values corresponding to these isotope + names. Otherwise, return values for all isotopes. 
+ ax: None or ``matplotlib axes`` + If given, add the data to this plot. + Otherwise, create a new plot + + Returns + ------- + ``matplotlib axes`` + Axes corresponding to the figure that was plotted + + See Also + -------- + getXY + + """ + xVals, yVals = self.getXY(xUnits, yUnits, timePoints, names) + ax = ax or pyplot.subplots(1, 1)[1] + labels = names or [None] + for row in range(yVals.shape[0]): + ax.plot(xVals, yVals[row], label=labels[row]) + return ax diff --git a/serpentTools/parsers/depletion.py b/serpentTools/parsers/depletion.py index 8a3b695..002f516 100644 --- a/serpentTools/parsers/depletion.py +++ b/serpentTools/parsers/depletion.py @@ -6,7 +6,7 @@ import numpy from drewtils.parsers import KeywordParser from serpentTools.objects.readers import MaterialReader -from serpentTools.objects import DepletedMaterial +from serpentTools.objects.materials import DepletedMaterial class DepletionReader(MaterialReader):
Days are still returned in time points are given for a vector quantity on materials, e.g. burnup Fix in #2 did not take in to account quantities like `burnup` and `volume` that do not return arrays for isotope quantities. If the user specifies one of these quantities from a depleted material, the time points are still returned. ``` if allY.shape[0] == 1 or len(allY.shape) == 1: # vector return xVals, allY[colIndices] if colIndices else allY ``` change to ``` if allY.shape[0] == 1 or len(allY.shape) == 1: # vector yVals = allY[colIndices] if colIndices else allY else: yVals = numpy.empty((len(rowIndices), len(xVals)), dtype=float) for isoID, rowId in enumerate(rowIndices): yVals[isoID, :] = (allY[rowId][colIndices] if colIndices else allY[rowId][:])` ``` and fix unit tests
CORE-GATECH-GROUP/serpent-tools
diff --git a/serpentTools/tests/test_depletion.py b/serpentTools/tests/test_depletion.py index d7f6d52..6cb911d 100644 --- a/serpentTools/tests/test_depletion.py +++ b/serpentTools/tests/test_depletion.py @@ -124,13 +124,12 @@ class DepletedMaterialTester(_DepletionTestHelper): """ Verify the material can produce the full burnup vector through getXY. """ - _days, actual = self.material.getXY('days', 'burnup', ) + actual, _days = self.material.getXY('days', 'burnup', ) numpy.testing.assert_equal(actual, self.fuelBU) def test_getXY_burnup_slice(self): """Verify depletedMaterial getXY correctly slices a vector.""" - _days, actual = self.material.getXY('days', 'burnup', - self.requestedDays) + actual = self.material.getXY('days', 'burnup', self.requestedDays) expected = [0.0E0, 1.90317E-2, 3.60163E-2, 1.74880E-1, 3.45353E-01, 8.49693E-01, 1.66071E0] numpy.testing.assert_equal(actual, expected)
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 1 }, "num_modified_files": 3 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
contourpy==1.3.0 cycler==0.12.1 drewtils==0.1.9 exceptiongroup==1.2.2 fonttools==4.56.0 importlib_resources==6.5.2 iniconfig==2.1.0 kiwisolver==1.4.7 matplotlib==3.9.4 numpy==2.0.2 packaging==24.2 pillow==11.1.0 pluggy==1.5.0 pyparsing==3.2.3 pytest==8.3.5 python-dateutil==2.9.0.post0 -e git+https://github.com/CORE-GATECH-GROUP/serpent-tools.git@b67f52bfd0b23baa3eae9f11bab1af14bd8b2798#egg=serpentTools six==1.17.0 tomli==2.2.1 zipp==3.21.0
name: serpent-tools channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - contourpy==1.3.0 - cycler==0.12.1 - drewtils==0.1.9 - exceptiongroup==1.2.2 - fonttools==4.56.0 - importlib-resources==6.5.2 - iniconfig==2.1.0 - kiwisolver==1.4.7 - matplotlib==3.9.4 - numpy==2.0.2 - packaging==24.2 - pillow==11.1.0 - pluggy==1.5.0 - pyparsing==3.2.3 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - six==1.17.0 - tomli==2.2.1 - zipp==3.21.0 prefix: /opt/conda/envs/serpent-tools
[ "serpentTools/tests/test_depletion.py::DepletedMaterialTester::test_getXY_burnup_full", "serpentTools/tests/test_depletion.py::DepletedMaterialTester::test_getXY_burnup_slice" ]
[]
[ "serpentTools/tests/test_depletion.py::DepletionTester::test_ReadMaterials", "serpentTools/tests/test_depletion.py::DepletionTester::test_metadata", "serpentTools/tests/test_depletion.py::DepletedMaterialTester::test_fetchData", "serpentTools/tests/test_depletion.py::DepletedMaterialTester::test_getXY_adens", "serpentTools/tests/test_depletion.py::DepletedMaterialTester::test_getXY_adensAndTime", "serpentTools/tests/test_depletion.py::DepletedMaterialTester::test_getXY_raisesError_badTime", "serpentTools/tests/test_depletion.py::DepletedMaterialTester::test_materials" ]
[]
MIT License
1,715
[ "serpentTools/parsers/depletion.py", "serpentTools/objects/__init__.py", "serpentTools/__init__.py", "serpentTools/objects/materials.py" ]
[ "serpentTools/parsers/depletion.py", "serpentTools/objects/__init__.py", "serpentTools/__init__.py", "serpentTools/objects/materials.py" ]
ARMmbed__greentea-243
8f7b28f8ec739156d238304fa4f5f2e5156536f5
2017-09-29 17:09:53
68508c5f4d7cf0635c75399d0ff7cfa896fdf2cc
diff --git a/mbed_greentea/mbed_report_api.py b/mbed_greentea/mbed_report_api.py index 166bc29..22a3778 100644 --- a/mbed_greentea/mbed_report_api.py +++ b/mbed_greentea/mbed_report_api.py @@ -42,7 +42,7 @@ def exporter_json(test_result_ext, test_suite_properties=None): for suite in target.values(): try: suite["single_test_output"] = suite["single_test_output"]\ - .decode("unicode_escape") + .decode("utf-8", "replace") except KeyError: pass return json.dumps(test_result_ext, indent=4) @@ -603,7 +603,7 @@ def get_result_overlay_dropdowns(result_div_id, test_results): result_output_dropdown = get_dropdown_html(result_output_div_id, "Test Output", test_results['single_test_output'] - .decode("unicode-escape") + .decode("utf-8", "replace") .rstrip("\n"), output_text=True)
mbedgt crash with UnicodeDecodeError Hi I am sorry, but I still get some crash with the new green tea version ... mbedgt: exporting to HTML file 'C:/mcu/reports/report__mbed_os5_release_non_regression_F756ZG_mbed-os-5.5.7__2017_09_28_00_06.html'... mbedgt: unexpected error: 'unicodeescape' codec can't decode bytes in position 6308-6310: truncated \uXXXX escape Traceback (most recent call last): File "C:\Python27\Scripts\mbedgt-script.py", line 11, in <module> load_entry_point('mbed-greentea==1.3.0', 'console_scripts', 'mbedgt')() File "c:\python27\lib\site-packages\mbed_greentea\mbed_greentea_cli.py", line 416, in main cli_ret = main_cli(opts, args) File "c:\python27\lib\site-packages\mbed_greentea\mbed_greentea_cli.py", line 1067, in main_cli html_report = exporter_html(test_report) File "c:\python27\lib\site-packages\mbed_greentea\mbed_report_api.py", line 747, in exporter_html test_results) File "c:\python27\lib\site-packages\mbed_greentea\mbed_report_api.py", line 636, in get_result_overlay overlay_dropdowns = get_result_overlay_dropdowns(result_div_id, test_results) File "c:\python27\lib\site-packages\mbed_greentea\mbed_report_api.py", line 598, in get_result_overlay_dropdowns .decode("unicode-escape") UnicodeDecodeError: 'unicodeescape' codec can't decode bytes in position 6308-6310: truncated \uXXXX escape @theotherjimmy
ARMmbed/greentea
diff --git a/test/report_api.py b/test/report_api.py index 122e26e..2a4275f 100644 --- a/test/report_api.py +++ b/test/report_api.py @@ -45,7 +45,7 @@ class ReportEmitting(unittest.TestCase): u'build_path_abs': u'N/A', u'copy_method': u'N/A', u'image_path': u'N/A', - u'single_test_output': b'N/A', + u'single_test_output': b'\x80abc\uXXXX' , u'platform_name': u'k64f', u'test_bin_name': u'N/A', u'testcase_result': {},
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 1 }, "num_modified_files": 1 }
1.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
appdirs==1.4.4 attrs==22.2.0 beautifulsoup4==4.12.3 certifi==2021.5.30 charset-normalizer==2.0.12 colorama==0.3.9 coverage==6.2 execnet==1.9.0 fasteners==0.19 future==1.0.0 idna==3.10 importlib-metadata==4.8.3 iniconfig==1.1.1 intelhex==2.3.0 junit-xml==1.9 lockfile==0.12.2 -e git+https://github.com/ARMmbed/greentea.git@8f7b28f8ec739156d238304fa4f5f2e5156536f5#egg=mbed_greentea mbed-host-tests==1.8.15 mbed-ls==1.8.15 mbed-os-tools==1.8.15 mock==5.2.0 packaging==21.3 pluggy==1.0.0 prettytable==2.5.0 py==1.11.0 pyparsing==3.1.4 pyserial==3.5 pytest==7.0.1 pytest-asyncio==0.16.0 pytest-cov==4.0.0 pytest-mock==3.6.1 pytest-xdist==3.0.2 requests==2.27.1 six==1.17.0 soupsieve==2.3.2.post1 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 wcwidth==0.2.13 zipp==3.6.0
name: greentea channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - appdirs==1.4.4 - attrs==22.2.0 - beautifulsoup4==4.12.3 - charset-normalizer==2.0.12 - colorama==0.3.9 - coverage==6.2 - execnet==1.9.0 - fasteners==0.19 - future==1.0.0 - idna==3.10 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - intelhex==2.3.0 - junit-xml==1.9 - lockfile==0.12.2 - mbed-host-tests==1.8.15 - mbed-ls==1.8.15 - mbed-os-tools==1.8.15 - mock==5.2.0 - packaging==21.3 - pluggy==1.0.0 - prettytable==2.5.0 - py==1.11.0 - pyparsing==3.1.4 - pyserial==3.5 - pytest==7.0.1 - pytest-asyncio==0.16.0 - pytest-cov==4.0.0 - pytest-mock==3.6.1 - pytest-xdist==3.0.2 - requests==2.27.1 - six==1.17.0 - soupsieve==2.3.2.post1 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - wcwidth==0.2.13 - zipp==3.6.0 prefix: /opt/conda/envs/greentea
[ "test/report_api.py::ReportEmitting::test_report_zero_testcases" ]
[]
[ "test/report_api.py::ReportEmitting::test_report_zero_tests" ]
[]
Apache License 2.0
1,716
[ "mbed_greentea/mbed_report_api.py" ]
[ "mbed_greentea/mbed_report_api.py" ]
palantir__python-language-server-146
a1bbd401621f8fc3083f4341b7912b491e82bf68
2017-09-29 21:10:31
a1bbd401621f8fc3083f4341b7912b491e82bf68
diff --git a/pyls/plugins/pycodestyle_lint.py b/pyls/plugins/pycodestyle_lint.py index 668096d..2a5baab 100644 --- a/pyls/plugins/pycodestyle_lint.py +++ b/pyls/plugins/pycodestyle_lint.py @@ -21,12 +21,29 @@ def pyls_lint(config, document): conf_to_use = pycodestyle_conf if pycodestyle_conf else pep8_conf conf = {k.replace("-", "_"): v for k, v in conf_to_use.items()} - log.debug("Got pycodestyle config: %s", conf) # Grab the pycodestyle parser and set the defaults based on the config we found parser = pycodestyle.get_parser() parser.set_defaults(**conf) - opts, _args = parser.parse_args([]) + + # Override with any options set in the language server config + argv = [] + ls_conf = config.plugin_settings('pycodestyle') + if ls_conf.get('exclude') is not None: + argv.extend(['--exclude', ','.join(ls_conf['exclude'])]) + if ls_conf.get('filename') is not None: + argv.extend(['--filename', ','.join(ls_conf['filename'])]) + if ls_conf.get('select') is not None: + argv.extend(['--select', ','.join(ls_conf['select'])]) + if ls_conf.get('ignore') is not None: + argv.extend(['--ignore', ','.join(ls_conf['ignore'])]) + if ls_conf.get('maxLineLength') is not None: + argv.extend(['--max-line-length', str(ls_conf['maxLineLength'])]) + if ls_conf.get('hangClosing'): + argv.extend(['--hang-closing']) + + opts, _args = parser.parse_args(argv) + log.debug("Got pycodestyle config: %s", opts) styleguide = pycodestyle.StyleGuide(vars(opts)) c = pycodestyle.Checker( diff --git a/vscode-client/package.json b/vscode-client/package.json index b75ce25..5c8b285 100644 --- a/vscode-client/package.json +++ b/vscode-client/package.json @@ -70,6 +70,52 @@ "default": true, "description": "Enable or disable the plugin." }, + "pyls.plugins.pycodestyle.exclude": { + "type": "array", + "default": null, + "items": { + "type": "string" + }, + "uniqueItems": true, + "description": "Exclude files or directories which match these patterns." 
+ }, + "pyls.plugins.pycodestyle.filename": { + "type": "array", + "default": null, + "items": { + "type": "string" + }, + "uniqueItems": true, + "description": "When parsing directories, only check filenames matching these patterns." + }, + "pyls.plugins.pycodestyle.select": { + "type": "array", + "default": null, + "items": { + "type": "string" + }, + "uniqueItems": true, + "description": "Select errors and warnings" + }, + "pyls.plugins.pycodestyle.ignore": { + "type": "array", + "default": null, + "items": { + "type": "string" + }, + "uniqueItems": true, + "description": "Ignore errors and warnings" + }, + "pyls.plugins.pycodestyle.hangClosing": { + "type": "boolean", + "default": null, + "description": "Hang closing bracket instead of matching indentation of opening bracket's line." + }, + "pyls.plugins.pycodestyle.maxLineLength": { + "type": "number", + "default": null, + "description": "Set maximum allowed line length." + }, "pyls.plugins.pydocstyle.enabled": { "type": "boolean", "default": false,
Custom pycodestyle configuration via xcode/Atom settings It would be really great to configure the ignored error codes and the maximum line length of `pycodestyle` via the settings if no configuration file is found on the system. This is the most requested issue on the Atom package for `pyls`: https://github.com/lgeiger/ide-python/issues/9
palantir/python-language-server
diff --git a/test/plugins/test_lint.py b/test/plugins/test_lint.py index a760c68..a02d992 100644 --- a/test/plugins/test_lint.py +++ b/test/plugins/test_lint.py @@ -96,6 +96,12 @@ def test_pycodestyle_config(workspace): os.unlink(os.path.join(workspace.root_path, conf_file)) + # Make sure we can ignore via the PYLS config as well + config.update({'plugins': {'pycodestyle': {'ignore': ['W191']}}}) + # And make sure we don't get any warnings + diags = pycodestyle_lint.pyls_lint(config, doc) + assert not [d for d in diags if d['code'] == 'W191'] + def test_pydocstyle(): doc = Document(DOC_URI, DOC)
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 2 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[test]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "coverage" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
cachetools==5.5.2 chardet==5.2.0 colorama==0.4.6 configparser==7.2.0 coverage==7.8.0 distlib==0.3.9 exceptiongroup==1.2.2 filelock==3.18.0 future==1.0.0 iniconfig==2.1.0 jedi==0.19.2 json-rpc==1.15.0 mccabe==0.7.0 packaging==24.2 parso==0.8.4 platformdirs==4.3.7 pluggy==1.5.0 pycodestyle==2.13.0 pydocstyle==6.3.0 pyflakes==3.3.1 pyproject-api==1.9.0 pytest==8.3.5 pytest-cov==6.0.0 -e git+https://github.com/palantir/python-language-server.git@a1bbd401621f8fc3083f4341b7912b491e82bf68#egg=python_language_server snowballstemmer==2.2.0 tomli==2.2.1 tox==4.25.0 typing_extensions==4.13.0 versioneer==0.29 virtualenv==20.29.3 yapf==0.43.0
name: python-language-server channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - cachetools==5.5.2 - chardet==5.2.0 - colorama==0.4.6 - configparser==7.2.0 - coverage==7.8.0 - distlib==0.3.9 - exceptiongroup==1.2.2 - filelock==3.18.0 - future==1.0.0 - iniconfig==2.1.0 - jedi==0.19.2 - json-rpc==1.15.0 - mccabe==0.7.0 - packaging==24.2 - parso==0.8.4 - platformdirs==4.3.7 - pluggy==1.5.0 - pycodestyle==2.13.0 - pydocstyle==6.3.0 - pyflakes==3.3.1 - pyproject-api==1.9.0 - pytest==8.3.5 - pytest-cov==6.0.0 - snowballstemmer==2.2.0 - tomli==2.2.1 - tox==4.25.0 - typing-extensions==4.13.0 - versioneer==0.29 - virtualenv==20.29.3 - yapf==0.43.0 prefix: /opt/conda/envs/python-language-server
[ "test/plugins/test_lint.py::test_pycodestyle_config" ]
[ "test/plugins/test_lint.py::test_pydocstyle" ]
[ "test/plugins/test_lint.py::test_mccabe", "test/plugins/test_lint.py::test_pycodestyle", "test/plugins/test_lint.py::test_pyflakes", "test/plugins/test_lint.py::test_syntax_error_pyflakes" ]
[]
MIT License
1,717
[ "pyls/plugins/pycodestyle_lint.py", "vscode-client/package.json" ]
[ "pyls/plugins/pycodestyle_lint.py", "vscode-client/package.json" ]
sendgrid__sendgrid-python-348
08845196565bee2cdc1efafdd9d27018c2530eea
2017-10-01 18:30:18
172bf1bbdbcd1259566b72acc456de65a63ffa3f
diff --git a/sendgrid/helpers/mail/mail.py b/sendgrid/helpers/mail/mail.py index de41bad..a2159b2 100644 --- a/sendgrid/helpers/mail/mail.py +++ b/sendgrid/helpers/mail/mail.py @@ -262,11 +262,15 @@ class Email(object): def __init__(self, email=None, name=None): self._name = None self._email = None - - if email is not None: - self.email = email - if name is not None: - self.name = name + if name or email: + if not name: + # allows passing emails as "dude Fella <[email protected]>" + self.parse_email(email) + else: + #allows backwards compatibility for Email(email, name) + if email is not None: + self.email = email + self.name = name @property def name(self): @@ -293,6 +297,28 @@ class Email(object): email["email"] = self.email return email + def parse_email(self, email_info): + try: + import rfc822 + except ImportError: + import email.utils as rfc822 + + name, email = rfc822.parseaddr(email_info) + + # more than likely a string was passed here instead of an email address + if "@" not in email: + name = email + email = None + + if not name: + name = None + + if not email: + email = None + + self.name = name + self.email = email + return name, email class Content(object):
Inconsistent format of email from header “name <email>” #### Issue Summary Current implementation restrict use default `email_from` behavior. For more info please see https://tools.ietf.org/html/rfc2822#section-3.4 #### Steps to Reproduce 1. When user try to create `Email` instance from https://github.com/sendgrid/sendgrid-python/blob/master/sendgrid/helpers/mail/mail.py#L173-L179. You must specify `from_email` and `from_name` as seperate parametrs ``` def get(self): email = {} if self.name != None: email["name"] = self.name if self.email != None: email["email"] = self.email return email from_email = Email("[email protected]", "Example Name") ``` As a result client should split `from_email`: ``` try: import rfc822 except ImportError: import email.utils as rfc822 from_name, from_email = rfc822.parseaddr(email.from_email) # Python sendgrid client should improve # sendgrid/helpers/mail/mail.py:164 if not from_name: from_name = None Mail(from_email, from_name) ``` Proposal to how to improve this bahaviour. Because if user now create `Email` from: ``` from_email = Email("Andrii Soldatenko <[email protected]>") ``` **Actual result:** ![image](https://cloud.githubusercontent.com/assets/1374633/20647168/e2159a04-b495-11e6-95f9-20c2bb5fc21f.png) #### Technical details: * sendgrid-python Version: master * Python Version: all #### References: - https://github.com/sendgrid/sendgrid-python/issues/277 - RFC: https://tools.ietf.org/html/rfc2822#section-3.4 - Django docs: https://docs.djangoproject.com/en/1.10/topics/email/#emailmessage-objects **NOTE:** In `sengrid-python==1.4` this feature is working
sendgrid/sendgrid-python
diff --git a/test/test_email.py b/test/test_email.py new file mode 100644 index 0000000..92ae10a --- /dev/null +++ b/test/test_email.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +import json + +from sendgrid.helpers.mail import (Email) + +try: + import unittest2 as unittest +except ImportError: + import unittest + + +class TestEmailObject(unittest.TestCase): + def test_add_email_address(self): + address = "[email protected]" + email = Email(address) + + self.assertEqual(email.email, "[email protected]") + + def test_add_name(self): + name = "SomeName" + email = Email(name=name) + + self.assertEqual(email.name, name) + + def test_add_name_email(self): + name = "SomeName" + address = "[email protected]" + email = Email(email=address, name=name) + self.assertEqual(email.name, name) + self.assertEqual(email.email, "[email protected]") + + def test_add_rfc_function_finds_name_not_email(self): + name = "SomeName" + email = Email(name) + + self.assertEqual(email.name, name) + self.assertIsNone(email.email) + + def test_add_rfc_email(self): + name = "SomeName" + address = "[email protected]" + name_address = "{0} <{1}>".format(name, address) + email = Email(name_address) + self.assertEqual(email.name, name) + self.assertEqual(email.email, "[email protected]") + + def test_empty_obj_add_name(self): + email = Email() + name = "SomeName" + email.name = name + + self.assertEqual(email.name, name) + + def test_empty_obj_add_email(self): + email = Email() + address = "[email protected]" + email.email = address + + self.assertEqual(email.email, address) \ No newline at end of file
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_media" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
5.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 dataclasses==0.8 Flask==0.10.1 importlib-metadata==4.8.3 iniconfig==1.1.1 itsdangerous==2.0.1 Jinja2==3.0.3 MarkupSafe==2.0.1 packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 python-http-client==3.3.7 PyYAML==3.11 -e git+https://github.com/sendgrid/sendgrid-python.git@08845196565bee2cdc1efafdd9d27018c2530eea#egg=sendgrid six==1.10.0 tomli==1.2.3 typing_extensions==4.1.1 Werkzeug==2.0.3 zipp==3.6.0
name: sendgrid-python channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - dataclasses==0.8 - flask==0.10.1 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - itsdangerous==2.0.1 - jinja2==3.0.3 - markupsafe==2.0.1 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - python-http-client==3.3.7 - pyyaml==3.11 - six==1.10.0 - tomli==1.2.3 - typing-extensions==4.1.1 - werkzeug==2.0.3 - zipp==3.6.0 prefix: /opt/conda/envs/sendgrid-python
[ "test/test_email.py::TestEmailObject::test_add_rfc_email", "test/test_email.py::TestEmailObject::test_add_rfc_function_finds_name_not_email" ]
[]
[ "test/test_email.py::TestEmailObject::test_add_email_address", "test/test_email.py::TestEmailObject::test_add_name", "test/test_email.py::TestEmailObject::test_add_name_email", "test/test_email.py::TestEmailObject::test_empty_obj_add_email", "test/test_email.py::TestEmailObject::test_empty_obj_add_name" ]
[]
MIT License
1,718
[ "sendgrid/helpers/mail/mail.py" ]
[ "sendgrid/helpers/mail/mail.py" ]
lamenezes__simple-model-7
f811032749a8c0f5b3415d7b25e2b76fcc16b877
2017-10-01 22:53:28
f811032749a8c0f5b3415d7b25e2b76fcc16b877
coveralls: [![Coverage Status](https://coveralls.io/builds/13518910/badge)](https://coveralls.io/builds/13518910) Coverage remained the same at 100.0% when pulling **83f3b000306665f385e93e4fecb8daf2ae8c3696 on dirtycoder:add-build-many** into **b81430b959e88963cf2a46145491a30a1de03dd1 on lamenezes:master**. lamenezes: Thanks for the PR @dirtycoder! :smile: lamenezes: Also, would you please document this new functionality to the README? Thanks again @dirtycoder! coveralls: [![Coverage Status](https://coveralls.io/builds/13693764/badge)](https://coveralls.io/builds/13693764) Coverage remained the same at 100.0% when pulling **783c9cd8cca5fd50a5a671fbff9cb23b3fd07b4c on dirtycoder:add-build-many** into **b81430b959e88963cf2a46145491a30a1de03dd1 on lamenezes:master**. coveralls: [![Coverage Status](https://coveralls.io/builds/13693897/badge)](https://coveralls.io/builds/13693897) Coverage remained the same at 100.0% when pulling **18afe6a2a2820c1e97996b755b77426a00ddb054 on dirtycoder:add-build-many** into **f811032749a8c0f5b3415d7b25e2b76fcc16b877 on lamenezes:master**.
diff --git a/.gitignore b/.gitignore index 0ad2f42..ce23a29 100644 --- a/.gitignore +++ b/.gitignore @@ -14,3 +14,4 @@ coverage.xml *~ [._]*.s[a-w][a-z] .mypy_cache +.idea diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9947a37..3705fe3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,5 +1,5 @@ - repo: [email protected]:pre-commit/pre-commit-hooks - sha: 516cc9fa72ad09699f2c03ffbd0aa7f60d75b59a + sha: v0.9.5 hooks: - id: debug-statements - id: trailing-whitespace diff --git a/README.rst b/README.rst index 1341afd..b0deefc 100644 --- a/README.rst +++ b/README.rst @@ -14,9 +14,9 @@ plenty of lists and dicts. It has simple objectives: -- Define your fields easily (just a tuple, nor dicts or instances of type classes whatever) +- Define your fields easily (just a tuple, not dicts or instances of type classes whatever) - Support for field validation -- Conversion to dict +- Convert to dict That's it. If you want something more complex there are plenty of libraries and frameworks that does a lot of cool stuff. @@ -114,6 +114,18 @@ easily done using simple-model: ('John Doe', 10) +Build many models +----------------- + +It's possible to build many models in a single step, it can be done by passing an iterable +to the `build_many` method. + +.. 
code:: python + + people = [{'name': 'John Doe'}, {'name': 'John Doe II'}] + models = Person.build_many(people) + + Conversion to Dict ------------------ @@ -149,7 +161,6 @@ It also supports nested models as lists: >> person = Person(name='Jane Doe', age=60) >> other_person = Person(name='John Doe', age=15) >> social_person = MoreSocialPerson(name='Foo Bar', friends=[person, other_person]) - >> dict(social_person) { 'name': 'Foo Bar', 'friends': [ diff --git a/simple_model/models.py b/simple_model/models.py index 0b04d0b..2ba0244 100644 --- a/simple_model/models.py +++ b/simple_model/models.py @@ -30,6 +30,17 @@ class BaseModel(Iterable[Tuple[str, Any]]): ) return '{class_name}({attrs})'.format(class_name=type(self).__name__, attrs=attrs) + @classmethod + def build_many(cls, source: Iterable) -> list: + if not source: + raise ValueError('source should have at least one item') + + keys_sets = [set(d.keys()) for d in source] + if any(map(lambda x: x ^ keys_sets[0], keys_sets)): + raise ValueError('All elements in source should have the same keys') + + return [cls(**item) for item in source] + def _get_fields(self) -> Iterator[ModelField]: for field_name in self.get_fields(): field_value = getattr(self, field_name)
Allow model creation from list In order to create models from a list currently we have to do the following: ```python my_list = [{'foo': 1, 'bar': 2}, {'foo': 0, 'bar': -10}] models = [MyModel(**data) for data in my_list] ``` It would be nice to have a SimpleModel classmethod that did it for us in order to avoid repetition and minor mistakes: ```python my_list = [{'foo': 1, 'bar': 2}, {'foo': 0, 'bar': -10}] models = MyModel.build_many(my_list) ```
lamenezes/simple-model
diff --git a/tests/test_model.py b/tests/test_model.py index 6b8d74b..99179c3 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -23,6 +23,15 @@ def nested_model(): return child [email protected] +def many_source(): + return ( + {'foo': '1 foo', 'bar': '1 bar', 'qux': '1 qux'}, + {'foo': '2 foo', 'bar': '2 bar', 'qux': '2 qux'}, + {'foo': '3 foo', 'bar': '3 bar', 'qux': '3 qux'}, + ) + + def test_base_model(base_model): assert base_model.foo == 'foo' assert base_model.bar == 'bar' @@ -337,3 +346,22 @@ def test_dynamic_model_get_fields(): fields = model.get_fields() assert '_private' not in fields assert len(fields) == 3 + + +def test_build_many(many_source): + models = BaseModel.build_many(many_source) + + assert len(models) == 3 + assert models[0].foo == '1 foo' + assert models[1].bar == '2 bar' + assert models[2].qux == '3 qux' + + +def test_build_many_empty_iterable(): + with pytest.raises(ValueError): + BaseModel.build_many([]) + + +def test_build_many_different_items(): + with pytest.raises(ValueError): + BaseModel.build_many([{'a': 1}, {'b': 2}])
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 4 }
0.12
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-cov", "flake8", "mypy" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
coverage==7.8.0 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work flake8==7.2.0 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work mccabe==0.7.0 mypy==1.15.0 mypy-extensions==1.0.0 packaging @ file:///croot/packaging_1734472117206/work pluggy @ file:///croot/pluggy_1733169602837/work pycodestyle==2.13.0 pyflakes==3.3.2 -e git+https://github.com/lamenezes/simple-model.git@f811032749a8c0f5b3415d7b25e2b76fcc16b877#egg=pysimplemodel pytest @ file:///croot/pytest_1738938843180/work pytest-cov==6.0.0 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work typing_extensions==4.13.0
name: simple-model channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.8.0 - flake8==7.2.0 - mccabe==0.7.0 - mypy==1.15.0 - mypy-extensions==1.0.0 - pycodestyle==2.13.0 - pyflakes==3.3.2 - pytest-cov==6.0.0 - typing-extensions==4.13.0 prefix: /opt/conda/envs/simple-model
[ "tests/test_model.py::test_build_many", "tests/test_model.py::test_build_many_empty_iterable", "tests/test_model.py::test_build_many_different_items" ]
[ "tests/test_model.py::test_model_fields_validate_allow_empty_error[None]", "tests/test_model.py::test_model_fields_validate_allow_empty_error[]" ]
[ "tests/test_model.py::test_base_model", "tests/test_model.py::test_base_model_iter", "tests/test_model.py::test_base_model_repr", "tests/test_model.py::test_base_model_get_fields", "tests/test_model.py::test_base_model__get_fields", "tests/test_model.py::test_base_model_is_empty_false[False]", "tests/test_model.py::test_base_model_is_empty_false[0]", "tests/test_model.py::test_base_model_is_empty_false[foo]", "tests/test_model.py::test_base_model_is_empty_false[10]", "tests/test_model.py::test_base_model_is_empty_false[value4]", "tests/test_model.py::test_base_model_is_empty_false[value5]", "tests/test_model.py::test_base_model_is_empty_true[]", "tests/test_model.py::test_base_model_is_empty_true[value1]", "tests/test_model.py::test_base_model_is_empty_true[value2]", "tests/test_model.py::test_base_model_clean", "tests/test_model.py::test_base_model_validate_success", "tests/test_model.py::test_base_model_validate_fail", "tests/test_model.py::test_base_model___eq___equals", "tests/test_model.py::test_base_model___eq___not_equals", "tests/test_model.py::test_model", "tests/test_model.py::test_model_fields_allow_empty", "tests/test_model.py::test_model_fields_allow_empty__all__", "tests/test_model.py::test_model__get_fields", "tests/test_model.py::test_model_get_fields", "tests/test_model.py::test_model_get_fields_without_fields", "tests/test_model.py::test_model_get_allow_empty", "tests/test_model.py::test_model_get_allow_empty_without_fields", "tests/test_model.py::test_model_validate_empty[False]", "tests/test_model.py::test_model_validate_empty[0]", "tests/test_model.py::test_model_fields_field_validation", "tests/test_model.py::test_model_fields_field_validation_without_raise", "tests/test_model.py::test_model_fields_field_validation_error", "tests/test_model.py::test_model_fields_field_validation_error_without_raise", "tests/test_model.py::test_model_validate_nested", "tests/test_model.py::test_model_iter_simple", 
"tests/test_model.py::test_model_iter_nested_list[list]", "tests/test_model.py::test_model_iter_nested_list[tuple]", "tests/test_model.py::test_model_clean_without_clean_method", "tests/test_model.py::test_model_clean", "tests/test_model.py::test_model_clean_nested", "tests/test_model.py::test_model_iter_clean", "tests/test_model.py::test_model_get_fields_invalid", "tests/test_model.py::test_dynamic_model_get_fields" ]
[]
MIT License
1,719
[ "README.rst", ".gitignore", "simple_model/models.py", ".pre-commit-config.yaml" ]
[ "README.rst", ".gitignore", "simple_model/models.py", ".pre-commit-config.yaml" ]
ucfopen__canvasapi-88
de1c930ec61cde44163133846740454905b9677c
2017-10-02 01:01:09
f2faa1835e104aae764a1fc7638c284d2888639f
diff --git a/.gitignore b/.gitignore index 1987e91..c8fa7a6 100644 --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,7 @@ /build/ /coverage.xml /dist/ +/docs/_build/ /env/ /htmlcov/ \#*# diff --git a/canvasapi/course.py b/canvasapi/course.py index 7b87e2d..d5b64d9 100644 --- a/canvasapi/course.py +++ b/canvasapi/course.py @@ -231,8 +231,6 @@ class Course(CanvasObject): :calls: `POST /api/v1/courses/:course_id/files \ <https://canvas.instructure.com/doc/api/courses.html#method.courses.create_file>`_ - :param path: The path of the file to upload. - :type path: str :param file: The file or path of the file to upload. :type file: file or str :returns: True if the file uploaded successfully, False otherwise, \ @@ -1101,12 +1099,14 @@ class Course(CanvasObject): 'courses/%s/assignments/%s/submissions' % (self.id, assignment_id), _kwargs=combine_kwargs(**kwargs) ) + response_json = response.json() + response_json.update(course_id=self.id) - return Submission(self._requester, response.json()) + return Submission(self._requester, response_json) def list_submissions(self, assignment_id, **kwargs): """ - Makes a submission for an assignment. + Get all existing submissions for an assignment. 
:calls: `GET /api/v1/courses/:course_id/assignments/:assignment_id/submissions \ <https://canvas.instructure.com/doc/api/submissions.html#method.submissions_api.index>`_ @@ -1121,6 +1121,7 @@ class Course(CanvasObject): self._requester, 'GET', 'courses/%s/assignments/%s/submissions' % (self.id, assignment_id), + {'course_id': self.id}, _kwargs=combine_kwargs(**kwargs) ) @@ -1144,6 +1145,7 @@ class Course(CanvasObject): self._requester, 'GET', 'courses/%s/students/submissions' % (self.id), + {'course_id': self.id}, _kwargs=combine_kwargs(**kwargs) ) @@ -1165,7 +1167,10 @@ class Course(CanvasObject): 'courses/%s/assignments/%s/submissions/%s' % (self.id, assignment_id, user_id), _kwargs=combine_kwargs(**kwargs) ) - return Submission(self._requester, response.json()) + response_json = response.json() + response_json.update(course_id=self.id) + + return Submission(self._requester, response_json) def update_submission(self, assignment_id, user_id, **kwargs): """ @@ -1186,12 +1191,15 @@ class Course(CanvasObject): _kwargs=combine_kwargs(**kwargs) ) + response_json = response.json() + response_json.update(course_id=self.id) + submission = self.get_submission(assignment_id, user_id) - if 'submission_type' in response.json(): - super(Submission, submission).set_attributes(response.json()) + if 'submission_type' in response_json: + super(Submission, submission).set_attributes(response_json) - return Submission(self._requester, response.json()) + return Submission(self._requester, response_json) def list_gradeable_students(self, assignment_id): """ diff --git a/canvasapi/section.py b/canvasapi/section.py index f9f86ca..89fe900 100644 --- a/canvasapi/section.py +++ b/canvasapi/section.py @@ -124,12 +124,14 @@ class Section(CanvasObject): 'sections/%s/assignments/%s/submissions' % (self.id, assignment_id), _kwargs=combine_kwargs(**kwargs) ) + response_json = response.json() + response_json.update(section_id=self.id) - return Submission(self._requester, response.json()) + return 
Submission(self._requester, response_json) def list_submissions(self, assignment_id, **kwargs): """ - Makes a submission for an assignment. + Get all existing submissions for an assignment. :calls: `GET /api/v1/sections/:section_id/assignments/:assignment_id/submissions \ <https://canvas.instructure.com/doc/api/submissions.html#method.submissions_api.index>`_ @@ -144,6 +146,7 @@ class Section(CanvasObject): self._requester, 'GET', 'sections/%s/assignments/%s/submissions' % (self.id, assignment_id), + {'section_id': self.id}, _kwargs=combine_kwargs(**kwargs) ) @@ -167,6 +170,7 @@ class Section(CanvasObject): self._requester, 'GET', 'sections/%s/students/submissions' % (self.id), + {'section_id': self.id}, _kwargs=combine_kwargs(**kwargs) ) @@ -188,7 +192,10 @@ class Section(CanvasObject): 'sections/%s/assignments/%s/submissions/%s' % (self.id, assignment_id, user_id), _kwargs=combine_kwargs(**kwargs) ) - return Submission(self._requester, response.json()) + response_json = response.json() + response_json.update(section_id=self.id) + + return Submission(self._requester, response_json) def update_submission(self, assignment_id, user_id, **kwargs): """ @@ -211,10 +218,13 @@ class Section(CanvasObject): submission = self.get_submission(assignment_id, user_id) - if 'submission_type' in response.json(): - super(Submission, submission).set_attributes(response.json()) + response_json = response.json() + response_json.update(section_id=self.id) + + if 'submission_type' in response_json: + super(Submission, submission).set_attributes(response_json) - return Submission(self._requester, response.json()) + return Submission(self._requester, response_json) def mark_submission_as_read(self, assignment_id, user_id): """ diff --git a/canvasapi/submission.py b/canvasapi/submission.py index f1afb73..9592f55 100644 --- a/canvasapi/submission.py +++ b/canvasapi/submission.py @@ -3,6 +3,7 @@ from __future__ import absolute_import, division, print_function, unicode_litera from six import 
python_2_unicode_compatible from canvasapi.canvas_object import CanvasObject +from canvasapi.upload import Uploader @python_2_unicode_compatible @@ -10,3 +11,31 @@ class Submission(CanvasObject): def __str__(self): return "{}".format(self.id) + + def upload_comment(self, file, **kwargs): + """ + Upload a file to attach to this submission comment. + + :calls: `POST \ + /api/v1/courses/:course_id/assignments/:assignment_id/submissions/:user_id/comments/files \ + <https://canvas.instructure.com/doc/api/submission_comments.html#method.submission_comments_api.create_file>`_ + + :param file: The file or path of the file to upload. + :type file: file or str + :returns: True if the file uploaded successfully, False otherwise, \ + and the JSON response from the API. + :rtype: tuple + """ + if not hasattr(self, 'course_id'): + raise ValueError('Must use a course to upload file comments.') + + return Uploader( + self._requester, + 'courses/{}/assignments/{}/submissions/{}/comments/files'.format( + self.course_id, + self.assignment_id, + self.user_id + ), + file, + **kwargs + ).start()
Support uploading files for submission comments As far as I can tell, `canvasapi` 0.6.0 has no support for [uploading files to be used in submission comments](https://canvas.instructure.com/doc/api/submission_comments.html). I would like to automate parts of my grading workflow that require this capability. It would be nice for `canvasapi` to help me get that done. Note that [the complete file-upload workflow](https://canvas.instructure.com/doc/api/file.file_uploads.html) is rather complex. It would be great to have an elegant `canvasapi` interface that simplifies all of this. Barring that, access to each of the primitive steps would at least be better than nothing.
ucfopen/canvasapi
diff --git a/tests/fixtures/submission.json b/tests/fixtures/submission.json new file mode 100644 index 0000000..4ea46c1 --- /dev/null +++ b/tests/fixtures/submission.json @@ -0,0 +1,44 @@ +{ + "get_by_id_course": { + "method": "GET", + "endpoint": "courses/1/assignments/1/submissions/1", + "data": { + "id": 1, + "assignment_id": 1, + "user_id": 1, + "html_url": "http://example.com/courses/1/assignments/1/submissions/1", + "submission_type": "online_upload" + }, + "status_code": 200 + }, + "get_by_id_section": { + "method": "GET", + "endpoint": "sections/1/assignments/1/submissions/1", + "data": { + "id": 1, + "assignment_id": 1, + "user_id": 1, + "html_url": "http://example.com/sections/1/assignments/1/submissions/1", + "submission_type": "online_upload" + }, + "status_code": 200 + }, + "upload_comment": { + "method": "POST", + "endpoint": "courses/1/assignments/1/submissions/1/comments/files", + "data": { + "upload_url": "http://example.com/api/v1/files/upload_response_upload_url", + "upload_params": { + "some_param": "param123", + "a_different_param": "param456" + } + } + }, + "upload_comment_final": { + "method": "POST", + "endpoint": "files/upload_response_upload_url", + "data": { + "url": "great_url_success" + } + } +} \ No newline at end of file diff --git a/tests/test_course.py b/tests/test_course.py index b2bc030..f3b42ed 100644 --- a/tests/test_course.py +++ b/tests/test_course.py @@ -1,5 +1,4 @@ from __future__ import absolute_import, division, print_function, unicode_literals -import os import unittest import uuid import warnings @@ -26,7 +25,7 @@ from canvasapi.user import User from canvasapi.submission import Submission from canvasapi.user import UserDisplay from tests import settings -from tests.util import register_uris +from tests.util import cleanup_file, register_uris @requests_mock.Mocker() @@ -169,12 +168,7 @@ class TestCourse(unittest.TestCase): self.assertIsInstance(response[1], dict) self.assertIn('url', response[1]) - # 
http://stackoverflow.com/a/10840586 - # Not as stupid as it looks. - try: - os.remove(filename) - except OSError: - pass + cleanup_file(filename) # reset() def test_reset(self, m): diff --git a/tests/test_group.py b/tests/test_group.py index 4388ec2..3ecf80a 100644 --- a/tests/test_group.py +++ b/tests/test_group.py @@ -1,5 +1,4 @@ from __future__ import absolute_import, division, print_function, unicode_literals -import os import unittest import uuid @@ -15,7 +14,7 @@ from canvasapi.file import File from canvasapi.folder import Folder from canvasapi.tab import Tab from tests import settings -from tests.util import register_uris +from tests.util import cleanup_file, register_uris @requests_mock.Mocker() @@ -145,12 +144,8 @@ class TestGroup(unittest.TestCase): self.assertTrue(response[0]) self.assertIsInstance(response[1], dict) self.assertIn('url', response[1]) - # http://stackoverflow.com/a/10840586 - # Not as stupid as it looks. - try: - os.remove(filename) - except OSError: - pass + + cleanup_file(filename) # preview_processed_html() def test_preview_processed_html(self, m): diff --git a/tests/test_submission.py b/tests/test_submission.py index 8f178c1..e526424 100644 --- a/tests/test_submission.py +++ b/tests/test_submission.py @@ -1,11 +1,12 @@ from __future__ import absolute_import, division, print_function, unicode_literals import unittest +import uuid import requests_mock from canvasapi import Canvas from tests import settings -from tests.util import register_uris +from tests.util import cleanup_file, register_uris @requests_mock.Mocker() @@ -16,13 +17,36 @@ class TestSubmission(unittest.TestCase): with requests_mock.Mocker() as m: register_uris({ - 'section': ['get_by_id', 'get_submission'] + 'course': ['get_by_id'], + 'section': ['get_by_id'], + 'submission': ['get_by_id_course', 'get_by_id_section'] }, m) + self.course = self.canvas.get_course(1) + self.submission_course = self.course.get_submission(1, 1) self.section = self.canvas.get_section(1) - 
self.submission = self.section.get_submission(1, 1) + self.submission_section = self.section.get_submission(1, 1) # __str__() def test__str__(self, m): - string = str(self.submission) + string = str(self.submission_course) self.assertIsInstance(string, str) + + # upload_comment() + def test_upload_comment(self, m): + register_uris({'submission': ['upload_comment', 'upload_comment_final']}, m) + + filename = 'testfile_submission_%s' % uuid.uuid4().hex + with open(filename, 'w+') as file: + response = self.submission_course.upload_comment(file) + + self.assertTrue(response[0]) + self.assertIsInstance(response[1], dict) + self.assertIn('url', response[1]) + + cleanup_file(filename) + + def test_upload_comment_section(self, m): + # Sections do not support uploading file comments + with self.assertRaises(ValueError): + self.submission_section.upload_comment('fakefilename.txt') diff --git a/tests/test_uploader.py b/tests/test_uploader.py index ad7a964..bf04942 100644 --- a/tests/test_uploader.py +++ b/tests/test_uploader.py @@ -1,5 +1,4 @@ from __future__ import absolute_import, division, print_function, unicode_literals -import os import unittest import uuid @@ -8,7 +7,7 @@ import requests_mock from canvasapi.canvas import Canvas from canvasapi.upload import Uploader from tests import settings -from tests.util import register_uris +from tests.util import cleanup_file, register_uris @requests_mock.Mocker() @@ -24,12 +23,7 @@ class TestUploader(unittest.TestCase): def tearDown(self): self.file.close() - # http://stackoverflow.com/a/10840586 - # Not as stupid as it looks. 
- try: - os.remove(self.filename) - except OSError: - pass + cleanup_file(self.filename) # start() def test_start(self, m): diff --git a/tests/test_user.py b/tests/test_user.py index 1a4517e..f13a5ef 100644 --- a/tests/test_user.py +++ b/tests/test_user.py @@ -1,5 +1,4 @@ from __future__ import absolute_import, division, print_function, unicode_literals -import os import unittest import uuid @@ -20,7 +19,7 @@ from canvasapi.page_view import PageView from canvasapi.user import User from canvasapi.login import Login from tests import settings -from tests.util import register_uris +from tests.util import cleanup_file, register_uris @requests_mock.Mocker() @@ -202,12 +201,7 @@ class TestUser(unittest.TestCase): self.assertIsInstance(response[1], dict) self.assertIn('url', response[1]) - # http://stackoverflow.com/a/10840586 - # Not as stupid as it looks. - try: - os.remove(filename) - except OSError: - pass + cleanup_file(filename) # list_groups() def test_list_groups(self, m): diff --git a/tests/util.py b/tests/util.py index c7d74ea..3b92514 100644 --- a/tests/util.py +++ b/tests/util.py @@ -1,5 +1,6 @@ from __future__ import absolute_import, division, print_function, unicode_literals import json +import os import requests_mock @@ -51,3 +52,15 @@ def register_uris(requirements, requests_mocker): ) except Exception as e: print(e) + + +def cleanup_file(filename): + """ + Remove a test file from the system. If the file doesn't exist, ignore. + + `Not as stupid as it looks. <http://stackoverflow.com/a/10840586>_` + """ + try: + os.remove(filename) + except OSError: + pass
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 4 }
0.6
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "coverage", "flake8", "pyflakes", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements.txt", "dev_requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 attrs==22.2.0 Babel==2.11.0 -e git+https://github.com/ucfopen/canvasapi.git@de1c930ec61cde44163133846740454905b9677c#egg=canvasapi certifi==2021.5.30 charset-normalizer==2.0.12 coverage==6.2 docutils==0.17.1 flake8==5.0.4 idna==3.10 imagesize==1.4.1 importlib-metadata==4.2.0 iniconfig==1.1.1 Jinja2==3.0.3 MarkupSafe==2.0.1 mccabe==0.7.0 packaging==21.3 pluggy==1.0.0 py==1.11.0 pycodestyle==2.9.1 pyflakes==2.5.0 Pygments==2.14.0 pyparsing==3.1.4 pytest==7.0.1 pytz==2025.2 requests==2.27.1 requests-mock==1.12.1 six==1.17.0 snowballstemmer==2.2.0 Sphinx==4.3.2 sphinx-rtd-theme==1.3.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 zipp==3.6.0
name: canvasapi channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - attrs==22.2.0 - babel==2.11.0 - charset-normalizer==2.0.12 - coverage==6.2 - docutils==0.17.1 - flake8==5.0.4 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.2.0 - iniconfig==1.1.1 - jinja2==3.0.3 - markupsafe==2.0.1 - mccabe==0.7.0 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pycodestyle==2.9.1 - pyflakes==2.5.0 - pygments==2.14.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytz==2025.2 - requests==2.27.1 - requests-mock==1.12.1 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==4.3.2 - sphinx-rtd-theme==1.3.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jquery==4.1 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - zipp==3.6.0 prefix: /opt/conda/envs/canvasapi
[ "tests/test_submission.py::TestSubmission::test_upload_comment", "tests/test_submission.py::TestSubmission::test_upload_comment_section" ]
[]
[ "tests/test_course.py::TestCourse::test__str__", "tests/test_course.py::TestCourse::test_conclude", "tests/test_course.py::TestCourse::test_course_files", "tests/test_course.py::TestCourse::test_create_assignment", "tests/test_course.py::TestCourse::test_create_assignment_fail", "tests/test_course.py::TestCourse::test_create_assignment_group", "tests/test_course.py::TestCourse::test_create_course_section", "tests/test_course.py::TestCourse::test_create_discussion_topic", "tests/test_course.py::TestCourse::test_create_external_feed", "tests/test_course.py::TestCourse::test_create_external_tool", "tests/test_course.py::TestCourse::test_create_folder", "tests/test_course.py::TestCourse::test_create_group_category", "tests/test_course.py::TestCourse::test_create_module", "tests/test_course.py::TestCourse::test_create_module_fail", "tests/test_course.py::TestCourse::test_create_page", "tests/test_course.py::TestCourse::test_create_page_fail", "tests/test_course.py::TestCourse::test_create_quiz", "tests/test_course.py::TestCourse::test_create_quiz_fail", "tests/test_course.py::TestCourse::test_delete", "tests/test_course.py::TestCourse::test_delete_external_feed", "tests/test_course.py::TestCourse::test_edit_front_page", "tests/test_course.py::TestCourse::test_enroll_user", "tests/test_course.py::TestCourse::test_get_assignment", "tests/test_course.py::TestCourse::test_get_assignment_group", "tests/test_course.py::TestCourse::test_get_assignments", "tests/test_course.py::TestCourse::test_get_course_level_assignment_data", "tests/test_course.py::TestCourse::test_get_course_level_participation_data", "tests/test_course.py::TestCourse::test_get_course_level_student_summary_data", "tests/test_course.py::TestCourse::test_get_discussion_topic", "tests/test_course.py::TestCourse::test_get_discussion_topics", "tests/test_course.py::TestCourse::test_get_enrollments", "tests/test_course.py::TestCourse::test_get_external_tool", 
"tests/test_course.py::TestCourse::test_get_external_tools", "tests/test_course.py::TestCourse::test_get_file", "tests/test_course.py::TestCourse::test_get_folder", "tests/test_course.py::TestCourse::test_get_full_discussion_topic", "tests/test_course.py::TestCourse::test_get_module", "tests/test_course.py::TestCourse::test_get_modules", "tests/test_course.py::TestCourse::test_get_page", "tests/test_course.py::TestCourse::test_get_pages", "tests/test_course.py::TestCourse::test_get_quiz", "tests/test_course.py::TestCourse::test_get_quiz_fail", "tests/test_course.py::TestCourse::test_get_quizzes", "tests/test_course.py::TestCourse::test_get_recent_students", "tests/test_course.py::TestCourse::test_get_section", "tests/test_course.py::TestCourse::test_get_settings", "tests/test_course.py::TestCourse::test_get_submission", "tests/test_course.py::TestCourse::test_get_user", "tests/test_course.py::TestCourse::test_get_user_id_type", "tests/test_course.py::TestCourse::test_get_user_in_a_course_level_assignment_data", "tests/test_course.py::TestCourse::test_get_user_in_a_course_level_messaging_data", "tests/test_course.py::TestCourse::test_get_user_in_a_course_level_participation_data", "tests/test_course.py::TestCourse::test_get_users", "tests/test_course.py::TestCourse::test_list_assignment_groups", "tests/test_course.py::TestCourse::test_list_external_feeds", "tests/test_course.py::TestCourse::test_list_folders", "tests/test_course.py::TestCourse::test_list_gradeable_students", "tests/test_course.py::TestCourse::test_list_group_categories", "tests/test_course.py::TestCourse::test_list_groups", "tests/test_course.py::TestCourse::test_list_multiple_submissions", "tests/test_course.py::TestCourse::test_list_multiple_submissions_grouped_param", "tests/test_course.py::TestCourse::test_list_sections", "tests/test_course.py::TestCourse::test_list_submissions", "tests/test_course.py::TestCourse::test_list_tabs", "tests/test_course.py::TestCourse::test_mark_submission_as_read", 
"tests/test_course.py::TestCourse::test_mark_submission_as_unread", "tests/test_course.py::TestCourse::test_preview_html", "tests/test_course.py::TestCourse::test_reorder_pinned_topics", "tests/test_course.py::TestCourse::test_reorder_pinned_topics_no_list", "tests/test_course.py::TestCourse::test_reset", "tests/test_course.py::TestCourse::test_show_front_page", "tests/test_course.py::TestCourse::test_subit_assignment_fail", "tests/test_course.py::TestCourse::test_submit_assignment", "tests/test_course.py::TestCourse::test_update", "tests/test_course.py::TestCourse::test_update_settings", "tests/test_course.py::TestCourse::test_update_submission", "tests/test_course.py::TestCourse::test_update_tab", "tests/test_course.py::TestCourse::test_upload", "tests/test_course.py::TestCourseNickname::test__str__", "tests/test_course.py::TestCourseNickname::test_remove", "tests/test_group.py::TestGroup::test__str__", "tests/test_group.py::TestGroup::test_create_discussion_topic", "tests/test_group.py::TestGroup::test_create_external_feed", "tests/test_group.py::TestGroup::test_create_folder", "tests/test_group.py::TestGroup::test_create_membership", "tests/test_group.py::TestGroup::test_create_page", "tests/test_group.py::TestGroup::test_create_page_fail", "tests/test_group.py::TestGroup::test_delete", "tests/test_group.py::TestGroup::test_delete_external_feed", "tests/test_group.py::TestGroup::test_edit", "tests/test_group.py::TestGroup::test_edit_front_page", "tests/test_group.py::TestGroup::test_get_activity_stream_summary", "tests/test_group.py::TestGroup::test_get_discussion_topic", "tests/test_group.py::TestGroup::test_get_discussion_topics", "tests/test_group.py::TestGroup::test_get_file", "tests/test_group.py::TestGroup::test_get_folder", "tests/test_group.py::TestGroup::test_get_full_discussion_topic", "tests/test_group.py::TestGroup::test_get_membership", "tests/test_group.py::TestGroup::test_get_page", "tests/test_group.py::TestGroup::test_get_pages", 
"tests/test_group.py::TestGroup::test_group_files", "tests/test_group.py::TestGroup::test_invite", "tests/test_group.py::TestGroup::test_list_external_feeds", "tests/test_group.py::TestGroup::test_list_folders", "tests/test_group.py::TestGroup::test_list_memberships", "tests/test_group.py::TestGroup::test_list_tabs", "tests/test_group.py::TestGroup::test_list_users", "tests/test_group.py::TestGroup::test_preview_processed_html", "tests/test_group.py::TestGroup::test_remove_user", "tests/test_group.py::TestGroup::test_reorder_pinned_topics", "tests/test_group.py::TestGroup::test_reorder_pinned_topics_no_list", "tests/test_group.py::TestGroup::test_show_front_page", "tests/test_group.py::TestGroup::test_update_membership", "tests/test_group.py::TestGroup::test_upload", "tests/test_group.py::TestGroupMembership::test__str__", "tests/test_group.py::TestGroupMembership::test_remove_self", "tests/test_group.py::TestGroupMembership::test_remove_user", "tests/test_group.py::TestGroupMembership::test_update", "tests/test_group.py::TestGroupCategory::test__str__", "tests/test_group.py::TestGroupCategory::test_assign_members", "tests/test_group.py::TestGroupCategory::test_create_group", "tests/test_group.py::TestGroupCategory::test_delete_category", "tests/test_group.py::TestGroupCategory::test_list_groups", "tests/test_group.py::TestGroupCategory::test_list_users", "tests/test_group.py::TestGroupCategory::test_update", "tests/test_submission.py::TestSubmission::test__str__", "tests/test_uploader.py::TestUploader::test_start", "tests/test_uploader.py::TestUploader::test_start_file_does_not_exist", "tests/test_uploader.py::TestUploader::test_start_path", "tests/test_uploader.py::TestUploader::test_upload_fail", "tests/test_uploader.py::TestUploader::test_upload_no_upload_params", "tests/test_uploader.py::TestUploader::test_upload_no_upload_url", "tests/test_user.py::TestUser::test__str__", "tests/test_user.py::TestUser::test_add_observee", 
"tests/test_user.py::TestUser::test_add_observee_with_credentials", "tests/test_user.py::TestUser::test_create_bookmark", "tests/test_user.py::TestUser::test_create_folder", "tests/test_user.py::TestUser::test_edit", "tests/test_user.py::TestUser::test_get_avatars", "tests/test_user.py::TestUser::test_get_bookmark", "tests/test_user.py::TestUser::test_get_color", "tests/test_user.py::TestUser::test_get_colors", "tests/test_user.py::TestUser::test_get_courses", "tests/test_user.py::TestUser::test_get_file", "tests/test_user.py::TestUser::test_get_folder", "tests/test_user.py::TestUser::test_get_missing_submissions", "tests/test_user.py::TestUser::test_get_page_views", "tests/test_user.py::TestUser::test_get_profile", "tests/test_user.py::TestUser::test_list_bookmarks", "tests/test_user.py::TestUser::test_list_calendar_events_for_user", "tests/test_user.py::TestUser::test_list_communication_channels", "tests/test_user.py::TestUser::test_list_enrollments", "tests/test_user.py::TestUser::test_list_folders", "tests/test_user.py::TestUser::test_list_groups", "tests/test_user.py::TestUser::test_list_observees", "tests/test_user.py::TestUser::test_list_user_logins", "tests/test_user.py::TestUser::test_merge_into_id", "tests/test_user.py::TestUser::test_merge_into_user", "tests/test_user.py::TestUser::test_remove_observee", "tests/test_user.py::TestUser::test_show_observee", "tests/test_user.py::TestUser::test_update_color", "tests/test_user.py::TestUser::test_update_color_no_hashtag", "tests/test_user.py::TestUser::test_update_settings", "tests/test_user.py::TestUser::test_upload", "tests/test_user.py::TestUser::test_user_assignments", "tests/test_user.py::TestUser::test_user_files", "tests/test_user.py::TestUserDisplay::test__str__" ]
[]
MIT License
1,720
[ ".gitignore", "canvasapi/section.py", "canvasapi/submission.py", "canvasapi/course.py" ]
[ ".gitignore", "canvasapi/section.py", "canvasapi/submission.py", "canvasapi/course.py" ]
altair-viz__altair-398
dfed1d404821e21c25413579f8506b4be05561ad
2017-10-02 18:54:28
e37000c8f54bc5e0e98ea8457b9a3c913cd58ccb
diff --git a/altair/v1/__init__.py b/altair/v1/__init__.py index 6239d9e1..62db76eb 100644 --- a/altair/v1/__init__.py +++ b/altair/v1/__init__.py @@ -43,6 +43,7 @@ from .api import ( OneOfFilter, MaxRowsExceeded, enable_mime_rendering, + disable_mime_rendering ) from ..datasets import ( diff --git a/altair/v1/api.py b/altair/v1/api.py index 51135a9e..1b0ecea0 100644 --- a/altair/v1/api.py +++ b/altair/v1/api.py @@ -64,6 +64,8 @@ DEFAULT_MAX_ROWS = 5000 # Rendering configuration #************************************************************************* +_original_ipython_display_ = None + # This is added to TopLevelMixin as a method if MIME rendering is enabled def _repr_mimebundle_(self, include, exclude, **kwargs): """Return a MIME-bundle for rich display in the Jupyter Notebook.""" @@ -75,8 +77,22 @@ def _repr_mimebundle_(self, include, exclude, **kwargs): def enable_mime_rendering(): """Enable MIME bundle based rendering used in JupyterLab/nteract.""" # This is what makes Python fun! - delattr(TopLevelMixin, '_ipython_display_') - TopLevelMixin._repr_mimebundle_ = _repr_mimebundle_ + global _original_ipython_display_ + if _original_ipython_display_ is None: + TopLevelMixin._repr_mimebundle_ = _repr_mimebundle_ + _original_ipython_display_ = TopLevelMixin._ipython_display_ + delattr(TopLevelMixin, '_ipython_display_') + + +def disable_mime_rendering(): + """Disable MIME bundle based rendering used in JupyterLab/nteract.""" + global _original_ipython_display_ + if _original_ipython_display_ is not None: + delattr(TopLevelMixin, '_repr_mimebundle_') + TopLevelMixin._ipython_display_ = _original_ipython_display_ + _original_ipython_display_ = None + + #************************************************************************* # Channel Aliases
Safer enabling of MIME rendering Right now the `enable_mime_rendering()` function is not very safe: * Can't call twice. * Can't disable. Easy to fix, but need to wait for #377 to be merged.
altair-viz/altair
diff --git a/altair/expr/tests/test_expr.py b/altair/expr/tests/test_expr.py index 2c3b981b..328d64e5 100644 --- a/altair/expr/tests/test_expr.py +++ b/altair/expr/tests/test_expr.py @@ -112,7 +112,7 @@ def test_getitem_list(data): assert set(dir(df2)) == {'xxx', 'yyy', 'calculated'} # changing df2 shouldn't affect df1 - df2['qqq'] = df2.xxx / df2.yyy + df2['qqq'] = df2.xxx // df2.yyy assert set(dir(df2)) == {'xxx', 'yyy', 'calculated', 'qqq'} assert set(dir(df)) == {'xxx', 'yyy', 'zzz', 'calculated'} diff --git a/altair/v1/tests/test_api.py b/altair/v1/tests/test_api.py index b1e14e3a..75dacfce 100644 --- a/altair/v1/tests/test_api.py +++ b/altair/v1/tests/test_api.py @@ -448,7 +448,7 @@ def test_chart_serve(): def test_formula_expression(): - formula = Formula('blah', expr.log(expr.df.value) / expr.LN10) + formula = Formula('blah', expr.log(expr.df.value) // expr.LN10) assert formula.field == 'blah' assert formula.expr == '(log(datum.value)/LN10)' @@ -579,3 +579,11 @@ def test_schema_url(): # Make sure that $schema chart = Chart.from_dict(dct) + + +def test_enable_mime_rendering(): + # Make sure these functions are safe to call multiple times. + enable_mime_rendering() + enable_mime_rendering() + disable_mime_rendering() + disable_mime_rendering()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 2 }
1.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": [], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
-e git+https://github.com/altair-viz/altair.git@dfed1d404821e21c25413579f8506b4be05561ad#egg=altair attrs==22.2.0 backcall==0.2.0 certifi==2021.5.30 coverage==6.2 decorator==5.1.1 importlib-metadata==4.8.3 iniconfig==1.1.1 ipython==7.16.3 ipython-genutils==0.2.0 jedi==0.17.2 numpy==1.19.5 packaging==21.3 pandas==1.1.5 parso==0.7.1 pexpect==4.9.0 pickleshare==0.7.5 pluggy==1.0.0 prompt-toolkit==3.0.36 ptyprocess==0.7.0 py==1.11.0 Pygments==2.14.0 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 six==1.17.0 tomli==1.2.3 traitlets==4.3.3 typing_extensions==4.1.1 vega==0.4.4 wcwidth==0.2.13 zipp==3.6.0
name: altair channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - backcall==0.2.0 - coverage==6.2 - decorator==5.1.1 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - ipython==7.16.3 - ipython-genutils==0.2.0 - jedi==0.17.2 - numpy==1.19.5 - packaging==21.3 - pandas==1.1.5 - parso==0.7.1 - pexpect==4.9.0 - pickleshare==0.7.5 - pluggy==1.0.0 - prompt-toolkit==3.0.36 - ptyprocess==0.7.0 - py==1.11.0 - pygments==2.14.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - six==1.17.0 - tomli==1.2.3 - traitlets==4.3.3 - typing-extensions==4.1.1 - vega==0.4.4 - wcwidth==0.2.13 - zipp==3.6.0 prefix: /opt/conda/envs/altair
[ "altair/v1/tests/test_api.py::test_enable_mime_rendering" ]
[ "altair/v1/tests/test_api.py::test_savechart_html", "altair/v1/tests/test_api.py::test_savechart_json", "altair/v1/tests/test_api.py::test_Chart_from_dict", "altair/v1/tests/test_api.py::test_to_python", "altair/v1/tests/test_api.py::test_to_python_stocks", "altair/v1/tests/test_api.py::test_data_finalization", "altair/v1/tests/test_api.py::test_layered_chart_iadd", "altair/v1/tests/test_api.py::test_chart_add", "altair/v1/tests/test_api.py::test_chart_to_json", "altair/v1/tests/test_api.py::test_chart_serve", "altair/v1/tests/test_api.py::test_max_rows", "altair/v1/tests/test_api.py::test_schema_url" ]
[ "altair/expr/tests/test_expr.py::test_dataframe_namespace", "altair/expr/tests/test_expr.py::test_dataframe_newcols", "altair/expr/tests/test_expr.py::test_unary_operations", "altair/expr/tests/test_expr.py::test_binary_operations", "altair/expr/tests/test_expr.py::test_abs", "altair/expr/tests/test_expr.py::test_expr_funcs", "altair/expr/tests/test_expr.py::test_expr_consts", "altair/expr/tests/test_expr.py::test_getitem_list", "altair/expr/tests/test_expr.py::test_json_reprs", "altair/v1/tests/test_api.py::test_default_mark", "altair/v1/tests/test_api.py::test_mark_methods", "altair/v1/tests/test_api.py::test_chart_url_input", "altair/v1/tests/test_api.py::test_chart_to_html", "altair/v1/tests/test_api.py::test_chart_to_json_round_trip", "altair/v1/tests/test_api.py::test_encode_update", "altair/v1/tests/test_api.py::test_configure_update", "altair/v1/tests/test_api.py::test_configure_axis_update", "altair/v1/tests/test_api.py::test_configure_cell_update", "altair/v1/tests/test_api.py::test_configure_legend_update", "altair/v1/tests/test_api.py::test_configure_mark_update", "altair/v1/tests/test_api.py::test_configure_scale_update", "altair/v1/tests/test_api.py::test_configure_facet_axis_update", "altair/v1/tests/test_api.py::test_configure_facet_cell_update", "altair/v1/tests/test_api.py::test_configure_facet_grid_update", "altair/v1/tests/test_api.py::test_configure_facet_scale_update", "altair/v1/tests/test_api.py::test_transform_update", "altair/v1/tests/test_api.py::test_Chart_load_example", "altair/v1/tests/test_api.py::test_to_python_with_methods", "altair/v1/tests/test_api.py::test_mark_config[area]", "altair/v1/tests/test_api.py::test_mark_config[bar]", "altair/v1/tests/test_api.py::test_mark_config[line]", "altair/v1/tests/test_api.py::test_mark_config[point]", "altair/v1/tests/test_api.py::test_mark_config[text]", "altair/v1/tests/test_api.py::test_mark_config[tick]", "altair/v1/tests/test_api.py::test_mark_config[rule]", 
"altair/v1/tests/test_api.py::test_mark_config[circle]", "altair/v1/tests/test_api.py::test_mark_config[square]", "altair/v1/tests/test_api.py::test_mark_config[errorBar]", "altair/v1/tests/test_api.py::test_config_methods[params0]", "altair/v1/tests/test_api.py::test_config_methods[params1]", "altair/v1/tests/test_api.py::test_config_methods[params2]", "altair/v1/tests/test_api.py::test_config_methods[params3]", "altair/v1/tests/test_api.py::test_config_facet_grid", "altair/v1/tests/test_api.py::test_finalize", "altair/v1/tests/test_api.py::test_formula_expression", "altair/v1/tests/test_api.py::test_filter_expression", "altair/v1/tests/test_api.py::test_df_formula", "altair/v1/tests/test_api.py::test_df_filter", "altair/v1/tests/test_api.py::test_df_filter_multiple", "altair/v1/tests/test_api.py::test_chart_dir", "altair/v1/tests/test_api.py::test_empty_traits" ]
[]
BSD 3-Clause "New" or "Revised" License
1,721
[ "altair/v1/__init__.py", "altair/v1/api.py" ]
[ "altair/v1/__init__.py", "altair/v1/api.py" ]
altair-viz__altair-399
ea129b3b43bc6768a8a66d09830731ed8197c4b8
2017-10-02 22:01:45
e37000c8f54bc5e0e98ea8457b9a3c913cd58ccb
jakevdp: Another thing this needs is to check data within compound charts. Unfortunately, in vega-lite 1.X, that behavior is not well-defined. I think it's better handled in 2.X. jakevdp: I'm not certain, but I think this change would cause a failure if you have a layered chart that contains a Chart object without its own data ellisonbg: Do you think it is worth trying to get it working with the 1.x layered chart API? On Fri, Oct 6, 2017 at 1:48 PM, Jake Vanderplas <[email protected]> wrote: > I'm not certain, but I think this change would cause a failure if you have > a layered chart that contains a Chart object without its own data > > — > You are receiving this because you were assigned. > Reply to this email directly, view it on GitHub > <https://github.com/altair-viz/altair/pull/399#issuecomment-334866274>, > or mute the thread > <https://github.com/notifications/unsubscribe-auth/AABr0C6zqgPO7aRT-ZkdYqHW75dm4Po5ks5sppI3gaJpZM4PrUHY> > . > -- Brian E. Granger Associate Professor of Physics and Data Science Cal Poly State University, San Luis Obispo @ellisonbg on Twitter and GitHub [email protected] and [email protected] jakevdp: Hard to say... I worry we'd end up in a place where we raise an error on input that should work, because we're not looking in the right place for the column names.
diff --git a/altair/v1/__init__.py b/altair/v1/__init__.py index 62db76eb..f67374d6 100644 --- a/altair/v1/__init__.py +++ b/altair/v1/__init__.py @@ -42,6 +42,7 @@ from .api import ( RangeFilter, OneOfFilter, MaxRowsExceeded, + FieldError, enable_mime_rendering, disable_mime_rendering ) diff --git a/altair/v1/api.py b/altair/v1/api.py index 925d2dfa..d50d4723 100644 --- a/altair/v1/api.py +++ b/altair/v1/api.py @@ -58,6 +58,16 @@ class MaxRowsExceeded(Exception): """Raised if the number of rows in the dataset is too large.""" pass +class FieldError(Exception): + """Raised if a channel has a field related error. + + This is raised if a channel has no field name or if the field name is + not found as the column name of the ``DataFrame``. + """ + + + + DEFAULT_MAX_ROWS = 5000 #************************************************************************* @@ -69,7 +79,7 @@ _original_ipython_display_ = None # This is added to TopLevelMixin as a method if MIME rendering is enabled def _repr_mimebundle_(self, include, exclude, **kwargs): """Return a MIME-bundle for rich display in the Jupyter Notebook.""" - spec = self.to_dict() + spec = self.to_dict(validate_columns=True) bundle = create_vegalite_mime_bundle(spec) return bundle @@ -97,6 +107,7 @@ def disable_mime_rendering(): #************************************************************************* # Channel Aliases #************************************************************************* + from .schema import X, Y, Row, Column, Color, Size, Shape, Text, Label, Detail, Opacity, Order, Path from .schema import Encoding, Facet @@ -120,6 +131,7 @@ def use_signature(Obj): # - makes field a required first argument of initialization # - allows expr trait to be an Expression and processes it properly #************************************************************************* + class Formula(schema.Formula): expr = jst.JSONUnion([jst.JSONString(), jst.JSONInstance(expr.Expression)], @@ -139,6 +151,7 @@ class 
Formula(schema.Formula): # Transform wrapper # - allows filter trait to be an Expression and processes it properly #************************************************************************* + class Transform(schema.Transform): filter = jst.JSONUnion([jst.JSONString(), jst.JSONInstance(expr.Expression), @@ -165,6 +178,7 @@ class Transform(schema.Transform): #************************************************************************* # Top-level Objects #************************************************************************* + class TopLevelMixin(object): @staticmethod @@ -253,22 +267,26 @@ class TopLevelMixin(object): including HTML """ from ..utils.html import to_html - return to_html(self.to_dict(), template=template, title=title, **kwargs) + return to_html(self.to_dict(validate_columns=True), template=template, title=title, **kwargs) - def to_dict(self, data=True): + def to_dict(self, data=True, validate_columns=False): """Emit the JSON representation for this object as as dict. Parameters ---------- data : bool If True (default) then include data in the representation. + validate_columns : bool + If True (default is False) raise FieldError if there are missing or misspelled + column names. This only actually raises if self.validate_columns is also set + (it defaults to True). Returns ------- spec : dict The JSON specification of the chart object. """ - dct = super(TopLevelMixin, self.clone()).to_dict(data=data) + dct = super(TopLevelMixin, self.clone()).to_dict(data=data, validate_columns=validate_columns) dct['$schema'] = schema.vegalite_schema_url return dct @@ -424,7 +442,7 @@ class TopLevelMixin(object): """Use the vega package to display in the classic Jupyter Notebook.""" from IPython.display import display from vega import VegaLite - display(VegaLite(self.to_dict())) + display(VegaLite(self.to_dict(validate_columns=True))) def display(self): """Display the Chart using the Jupyter Notebook's rich output. 
@@ -471,6 +489,21 @@ class TopLevelMixin(object): files=files, jupyter_warning=jupyter_warning, open_browser=open_browser, http_server=http_server) + def _finalize(self, **kwargs): + self._finalize_data() + # data comes from wrappers, but self.data overrides this if defined + if self.data is not None: + kwargs['data'] = self.data + super(TopLevelMixin, self)._finalize(**kwargs) + + # Validate columns after the rest of _finalize() has run. This is last as + # field names are not yet filled in from shortcuts until now. + validate_columns = kwargs.get('validate_columns') + # Only do validation if the requested as a keyword arg to `_finalize` + # and the Chart allows it. + if validate_columns and self.validate_columns: + self._validate_columns() + def _finalize_data(self): """ This function is called by _finalize() below. @@ -481,19 +514,10 @@ class TopLevelMixin(object): * Whether the data attribute contains expressions, and if so it extracts the appropriate data object and generates the appropriate transforms. """ - # Check to see if data has too many rows. - if isinstance(self.data, pd.DataFrame): - if len(self.data) > self.max_rows: - raise MaxRowsExceeded( - "Your dataset has too many rows and could take a long " - "time to send to the frontend or to render. To override the " - "default maximum rows (%s), set the max_rows property of " - "your Chart to an integer larger than the number of rows " - "in your dataset. Alternatively you could perform aggregations " - "or other data reductions before using it with Altair" % DEFAULT_MAX_ROWS - ) - # Handle expressions. + # Handle expressions. This transforms expr.DataFrame object into a set + # of transforms and an actual pd.DataFrame. After this block runs, + # self.data is either a URL or a pd.DataFrame or None. 
if isinstance(self.data, expr.DataFrame): columns = self.data._cols calculated_cols = self.data._calculated_cols @@ -512,6 +536,68 @@ class TopLevelMixin(object): else: self.transform_data(filter=filters) + # If self.data is a pd.DataFrame, check to see if data has too many rows. + if isinstance(self.data, pd.DataFrame): + if len(self.data) > self.max_rows: + raise MaxRowsExceeded( + "Your dataset has too many rows and could take a long " + "time to send to the frontend or to render. To override the " + "default maximum rows (%s), set the max_rows property of " + "your Chart to an integer larger than the number of rows " + "in your dataset. Alternatively you could perform aggregations " + "or other data reductions before using it with Altair" % DEFAULT_MAX_ROWS + ) + + + def _validate_columns(self): + """Validate the columns in the encoding, but only if if the data is a ``DataFrame``. + + This has to be called after the rest of the ``_finalize()`` logic, which fills in the + shortcut field names and also processes the expressions for computed fields. + + This validates: + + 1. That each encoding channel has a field (column name). + 2. That the specified field name is present the column names of the ``DataFrame`` or + computed field from transform expressions. + + This logic only runs when the dataset is a ``DataFrame``. + """ + + # Only validate columns if the data is a pd.DataFrame. 
+ if isinstance(self.data, pd.DataFrame): + # Find columns with visual encodings + encoded_columns = set() + encoding = self.encoding + if encoding is not jst.undefined: + for channel_name in encoding.channel_names: + channel = getattr(encoding, channel_name) + if channel is not jst.undefined: + field = channel.field + if field is jst.undefined: + raise FieldError( + "Missing field/column name for channel: {}".format(channel_name) + ) + else: + if field != '*': + encoded_columns.add(field) + # Find columns in the data + data_columns = set(self.data.columns.values) + transform = self.transform + if transform is not jst.undefined: + calculate = transform.calculate + if calculate is not jst.undefined: + for formula in calculate: + field = formula.field + if field is not jst.undefined: + data_columns.add(field) + # Find columns in the visual encoding that are not in the data + missing_columns = encoded_columns - data_columns + if missing_columns: + raise FieldError( + "Fields/columns not found in the data: {}".format(missing_columns) + ) + class Chart(TopLevelMixin, schema.ExtendedUnitSpec): _data = None @@ -522,11 +608,14 @@ class Chart(TopLevelMixin, schema.ExtendedUnitSpec): transform = jst.JSONInstance(Transform, help=schema.ExtendedUnitSpec.transform.help) mark = schema.Mark(default_value='point', help="""The mark type.""") - max_rows = T.Int( default_value=DEFAULT_MAX_ROWS, help="Maximum number of rows in the dataset to accept." ) + validate_columns = T.Bool( + default_value=True, + help="Raise FieldError if the data is a DataFrame and there are missing columns." 
+ ) def clone(self): """ @@ -550,7 +639,7 @@ class Chart(TopLevelMixin, schema.ExtendedUnitSpec): else: raise TypeError('Expected DataFrame or altair.Data, got: {0}'.format(new)) - _skip_on_export = ['data', '_data', 'max_rows'] + _skip_on_export = ['data', '_data', 'max_rows', 'validate_columns'] def __init__(self, data=None, **kwargs): super(Chart, self).__init__(**kwargs) @@ -624,13 +713,6 @@ class Chart(TopLevelMixin, schema.ExtendedUnitSpec): """Define the encoding for the Chart.""" return update_subtraits(self, 'encoding', *args, **kwargs) - def _finalize(self, **kwargs): - self._finalize_data() - # data comes from wrappers, but self.data overrides this if defined - if self.data is not None: - kwargs['data'] = self.data - super(Chart, self)._finalize(**kwargs) - def __add__(self, other): if isinstance(other, Chart): lc = LayeredChart() @@ -682,6 +764,10 @@ class LayeredChart(TopLevelMixin, schema.LayerSpec): default_value=DEFAULT_MAX_ROWS, help="Maximum number of rows in the dataset to accept." ) + validate_columns = T.Bool( + default_value=True, + help="Raise FieldError if the data is a DataFrame and there are missing columns." 
+ ) def clone(self): """ @@ -705,7 +791,7 @@ class LayeredChart(TopLevelMixin, schema.LayerSpec): else: raise TypeError('Expected DataFrame or altair.Data, got: {0}'.format(new)) - _skip_on_export = ['data', '_data', 'max_rows'] + _skip_on_export = ['data', '_data', 'max_rows', 'validate_columns'] def __init__(self, data=None, **kwargs): super(LayeredChart, self).__init__(**kwargs) @@ -718,13 +804,6 @@ class LayeredChart(TopLevelMixin, schema.LayerSpec): self.layers = list(layers) return self - def _finalize(self, **kwargs): - self._finalize_data() - # data comes from wrappers, but self.data overrides this if defined - if self.data is not None: - kwargs['data'] = self.data - super(LayeredChart, self)._finalize(**kwargs) - def __iadd__(self, layer): if self.layers is jst.undefined: self.layers = [layer] @@ -747,6 +826,10 @@ class FacetedChart(TopLevelMixin, schema.FacetSpec): default_value=DEFAULT_MAX_ROWS, help="Maximum number of rows in the dataset to accept." ) + validate_columns = T.Bool( + default_value=True, + help="Raise FieldError if the data is a DataFrame and there are missing columns." + ) def clone(self): """ @@ -770,7 +853,7 @@ class FacetedChart(TopLevelMixin, schema.FacetSpec): else: raise TypeError('Expected DataFrame or altair.Data, got: {0}'.format(new)) - _skip_on_export = ['data', '_data', 'max_rows'] + _skip_on_export = ['data', '_data', 'max_rows', 'validate_columns'] def __init__(self, data=None, **kwargs): super(FacetedChart, self).__init__(**kwargs) @@ -783,10 +866,3 @@ class FacetedChart(TopLevelMixin, schema.FacetSpec): def set_facet(self, *args, **kwargs): """Define the facet encoding for the Chart.""" return update_subtraits(self, 'facet', *args, **kwargs) - - def _finalize(self, **kwargs): - self._finalize_data() - # data comes from wrappers, but self.data overrides this if defined - if self.data is not None: - kwargs['data'] = self.data - super(FacetedChart, self)._finalize(**kwargs)
Raise exception when a user specifies a field not in the data or expressions. Right now if a user creates a spec that has column name misspelled, the chart renders with nothing and no error messages are shown. This is probably the most common error we see in teaching with Altair.
altair-viz/altair
diff --git a/altair/v1/examples/tests/test_examples.py b/altair/v1/examples/tests/test_examples.py index 745fda94..cd383298 100644 --- a/altair/v1/examples/tests/test_examples.py +++ b/altair/v1/examples/tests/test_examples.py @@ -19,7 +19,7 @@ def test_json_examples_round_trip(example): filename, json_dict = example v = Chart.from_dict(json_dict) - v_dict = v.to_dict() + v_dict = v.to_dict(validate_columns=True) if '$schema' not in json_dict: v_dict.pop('$schema') assert v_dict == json_dict @@ -27,7 +27,7 @@ def test_json_examples_round_trip(example): # code generation discards empty function calls, and so we # filter these out before comparison v2 = eval(v.to_python()) - v2_dict = v2.to_dict() + v2_dict = v2.to_dict(validate_columns=True) if '$schema' not in json_dict: v2_dict.pop('$schema') assert v2_dict == remove_empty_fields(json_dict) diff --git a/altair/v1/tests/test_api.py b/altair/v1/tests/test_api.py index 75dacfce..4d0b2743 100644 --- a/altair/v1/tests/test_api.py +++ b/altair/v1/tests/test_api.py @@ -587,3 +587,40 @@ def test_enable_mime_rendering(): enable_mime_rendering() disable_mime_rendering() disable_mime_rendering() + + +def test_validate_spec(): + + # Make sure we catch channels with no field specified + c = make_chart() + c.encode(Color()) + assert isinstance(c.to_dict(), dict) + assert isinstance(c.to_dict(validate_columns=False), dict) + with pytest.raises(FieldError): + c.to_dict(validate_columns=True) + c.validate_columns = False + assert isinstance(c.to_dict(validate_columns=True), dict) + + # Make sure we catch encoded fields not in the data + c = make_chart() + c.encode(x='x', y='y', color='z') + c.encode(color='z') + assert isinstance(c.to_dict(), dict) + assert isinstance(c.to_dict(validate_columns=False), dict) + with pytest.raises(FieldError): + c.to_dict(validate_columns=True) + c.validate_columns = False + assert isinstance(c.to_dict(validate_columns=True), dict) + + c = make_chart() + c.encode(x='x', y='count(*)') + assert 
isinstance(c.to_dict(validate_columns=True), dict) + + # Make sure we can resolve computed fields + c = make_chart() + c.encode(x='x', y='y', color='z') + c.encode(color='z') + c.transform_data( + calculate=[Formula('z', 'sin(((2*PI)*datum.x))')] + ) + assert isinstance(c.to_dict(), dict)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 2 }
1.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
-e git+https://github.com/altair-viz/altair.git@ea129b3b43bc6768a8a66d09830731ed8197c4b8#egg=altair asttokens==3.0.0 decorator==5.2.1 exceptiongroup==1.2.2 executing==2.2.0 iniconfig==2.1.0 ipython==8.18.1 jedi==0.19.2 matplotlib-inline==0.1.7 numpy==2.0.2 packaging==24.2 pandas==2.2.3 parso==0.8.4 pexpect==4.9.0 pluggy==1.5.0 prompt_toolkit==3.0.50 ptyprocess==0.7.0 pure_eval==0.2.3 Pygments==2.19.1 pytest==8.3.5 python-dateutil==2.9.0.post0 pytz==2025.2 six==1.17.0 stack-data==0.6.3 tomli==2.2.1 traitlets==5.14.3 typing_extensions==4.13.0 tzdata==2025.2 vega==0.4.4 wcwidth==0.2.13
name: altair channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - asttokens==3.0.0 - decorator==5.2.1 - exceptiongroup==1.2.2 - executing==2.2.0 - iniconfig==2.1.0 - ipython==8.18.1 - jedi==0.19.2 - matplotlib-inline==0.1.7 - numpy==2.0.2 - packaging==24.2 - pandas==2.2.3 - parso==0.8.4 - pexpect==4.9.0 - pluggy==1.5.0 - prompt-toolkit==3.0.50 - ptyprocess==0.7.0 - pure-eval==0.2.3 - pygments==2.19.1 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - pytz==2025.2 - six==1.17.0 - stack-data==0.6.3 - tomli==2.2.1 - traitlets==5.14.3 - typing-extensions==4.13.0 - tzdata==2025.2 - vega==0.4.4 - wcwidth==0.2.13 prefix: /opt/conda/envs/altair
[ "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example0]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example1]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example2]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example3]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example4]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example5]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example6]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example7]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example8]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example9]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example10]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example11]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example12]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example13]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example14]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example15]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example16]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example17]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example18]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example19]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example20]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example21]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example22]", 
"altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example23]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example24]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example25]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example26]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example27]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example28]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example29]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example30]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example31]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example32]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example33]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example34]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example35]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example36]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example37]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example38]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example39]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example40]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example41]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example42]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example43]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example44]", 
"altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example45]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example46]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example47]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example48]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example49]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example50]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example51]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example52]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example53]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example54]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example55]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example56]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example57]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example58]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example59]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example60]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example61]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example62]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example63]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example64]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example65]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example66]", 
"altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example67]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example68]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example69]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example70]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example71]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example72]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example73]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example74]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example75]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example76]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example77]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example78]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example79]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example80]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example81]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example82]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example83]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example84]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example85]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example86]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example87]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example88]", 
"altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example89]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example90]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example91]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example92]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example93]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example94]", "altair/v1/examples/tests/test_examples.py::test_json_examples_round_trip[example95]" ]
[ "altair/v1/tests/test_api.py::test_savechart_html", "altair/v1/tests/test_api.py::test_savechart_json", "altair/v1/tests/test_api.py::test_Chart_from_dict", "altair/v1/tests/test_api.py::test_to_python", "altair/v1/tests/test_api.py::test_to_python_stocks", "altair/v1/tests/test_api.py::test_data_finalization", "altair/v1/tests/test_api.py::test_layered_chart_iadd", "altair/v1/tests/test_api.py::test_chart_add", "altair/v1/tests/test_api.py::test_chart_to_json", "altair/v1/tests/test_api.py::test_chart_serve", "altair/v1/tests/test_api.py::test_max_rows", "altair/v1/tests/test_api.py::test_schema_url", "altair/v1/tests/test_api.py::test_validate_spec" ]
[ "altair/v1/examples/tests/test_examples.py::test_load_example", "altair/v1/examples/tests/test_examples.py::test_metadata[D0]", "altair/v1/examples/tests/test_examples.py::test_metadata[D1]", "altair/v1/examples/tests/test_examples.py::test_metadata[D2]", "altair/v1/examples/tests/test_examples.py::test_metadata[D3]", "altair/v1/examples/tests/test_examples.py::test_metadata[D4]", "altair/v1/examples/tests/test_examples.py::test_metadata[D5]", "altair/v1/examples/tests/test_examples.py::test_metadata[D6]", "altair/v1/examples/tests/test_examples.py::test_metadata[D7]", "altair/v1/examples/tests/test_examples.py::test_metadata[D8]", "altair/v1/examples/tests/test_examples.py::test_metadata[D9]", "altair/v1/examples/tests/test_examples.py::test_metadata[D10]", "altair/v1/examples/tests/test_examples.py::test_metadata[D11]", "altair/v1/examples/tests/test_examples.py::test_metadata[D12]", "altair/v1/examples/tests/test_examples.py::test_metadata[D13]", "altair/v1/examples/tests/test_examples.py::test_metadata[D14]", "altair/v1/examples/tests/test_examples.py::test_metadata[D15]", "altair/v1/examples/tests/test_examples.py::test_metadata[D16]", "altair/v1/examples/tests/test_examples.py::test_metadata[D17]", "altair/v1/examples/tests/test_examples.py::test_metadata[D18]", "altair/v1/examples/tests/test_examples.py::test_metadata[D19]", "altair/v1/examples/tests/test_examples.py::test_metadata[D20]", "altair/v1/examples/tests/test_examples.py::test_metadata[D21]", "altair/v1/examples/tests/test_examples.py::test_metadata[D22]", "altair/v1/examples/tests/test_examples.py::test_metadata[D23]", "altair/v1/examples/tests/test_examples.py::test_metadata[D24]", "altair/v1/examples/tests/test_examples.py::test_metadata[D25]", "altair/v1/examples/tests/test_examples.py::test_metadata[D26]", "altair/v1/examples/tests/test_examples.py::test_metadata[D27]", "altair/v1/examples/tests/test_examples.py::test_metadata[D28]", 
"altair/v1/examples/tests/test_examples.py::test_metadata[D29]", "altair/v1/examples/tests/test_examples.py::test_metadata[D30]", "altair/v1/examples/tests/test_examples.py::test_metadata[D31]", "altair/v1/examples/tests/test_examples.py::test_metadata[D32]", "altair/v1/tests/test_api.py::test_default_mark", "altair/v1/tests/test_api.py::test_mark_methods", "altair/v1/tests/test_api.py::test_chart_url_input", "altair/v1/tests/test_api.py::test_chart_to_html", "altair/v1/tests/test_api.py::test_chart_to_json_round_trip", "altair/v1/tests/test_api.py::test_encode_update", "altair/v1/tests/test_api.py::test_configure_update", "altair/v1/tests/test_api.py::test_configure_axis_update", "altair/v1/tests/test_api.py::test_configure_cell_update", "altair/v1/tests/test_api.py::test_configure_legend_update", "altair/v1/tests/test_api.py::test_configure_mark_update", "altair/v1/tests/test_api.py::test_configure_scale_update", "altair/v1/tests/test_api.py::test_configure_facet_axis_update", "altair/v1/tests/test_api.py::test_configure_facet_cell_update", "altair/v1/tests/test_api.py::test_configure_facet_grid_update", "altair/v1/tests/test_api.py::test_configure_facet_scale_update", "altair/v1/tests/test_api.py::test_transform_update", "altair/v1/tests/test_api.py::test_Chart_load_example", "altair/v1/tests/test_api.py::test_to_python_with_methods", "altair/v1/tests/test_api.py::test_mark_config[area]", "altair/v1/tests/test_api.py::test_mark_config[bar]", "altair/v1/tests/test_api.py::test_mark_config[line]", "altair/v1/tests/test_api.py::test_mark_config[point]", "altair/v1/tests/test_api.py::test_mark_config[text]", "altair/v1/tests/test_api.py::test_mark_config[tick]", "altair/v1/tests/test_api.py::test_mark_config[rule]", "altair/v1/tests/test_api.py::test_mark_config[circle]", "altair/v1/tests/test_api.py::test_mark_config[square]", "altair/v1/tests/test_api.py::test_mark_config[errorBar]", "altair/v1/tests/test_api.py::test_config_methods[params0]", 
"altair/v1/tests/test_api.py::test_config_methods[params1]", "altair/v1/tests/test_api.py::test_config_methods[params2]", "altair/v1/tests/test_api.py::test_config_methods[params3]", "altair/v1/tests/test_api.py::test_config_facet_grid", "altair/v1/tests/test_api.py::test_finalize", "altair/v1/tests/test_api.py::test_formula_expression", "altair/v1/tests/test_api.py::test_filter_expression", "altair/v1/tests/test_api.py::test_df_formula", "altair/v1/tests/test_api.py::test_df_filter", "altair/v1/tests/test_api.py::test_df_filter_multiple", "altair/v1/tests/test_api.py::test_chart_dir", "altair/v1/tests/test_api.py::test_empty_traits", "altair/v1/tests/test_api.py::test_enable_mime_rendering" ]
[]
BSD 3-Clause "New" or "Revised" License
1,722
[ "altair/v1/__init__.py", "altair/v1/api.py" ]
[ "altair/v1/__init__.py", "altair/v1/api.py" ]
wright-group__WrightTools-318
2be1155987558d6c7358654e9f26068f5359df96
2017-10-03 00:06:45
592649ce55c9fa7847325c9e9b15b320a38f1389
pep8speaks: Hello @ksunden! Thanks for submitting the PR. - In the file [`WrightTools/data/_data.py`](https://github.com/wright-group/WrightTools/blob/6ebf5e1f7c3278d932b53a62d9ea7785dc2aafc7/WrightTools/data/_data.py), following are the PEP8 issues : > [Line 1506:91](https://github.com/wright-group/WrightTools/blob/6ebf5e1f7c3278d932b53a62d9ea7785dc2aafc7/WrightTools/data/_data.py#L1506): [W291](https://duckduckgo.com/?q=pep8%20W291) trailing whitespace > [Line 1507:25](https://github.com/wright-group/WrightTools/blob/6ebf5e1f7c3278d932b53a62d9ea7785dc2aafc7/WrightTools/data/_data.py#L1507): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent - In the file [`WrightTools/exceptions.py`](https://github.com/wright-group/WrightTools/blob/6ebf5e1f7c3278d932b53a62d9ea7785dc2aafc7/WrightTools/exceptions.py), following are the PEP8 issues : > [Line 29:1](https://github.com/wright-group/WrightTools/blob/6ebf5e1f7c3278d932b53a62d9ea7785dc2aafc7/WrightTools/exceptions.py#L29): [E302](https://duckduckgo.com/?q=pep8%20E302) expected 2 blank lines, found 1
diff --git a/WrightTools/data/_data.py b/WrightTools/data/_data.py index 8915e48..c023b51 100644 --- a/WrightTools/data/_data.py +++ b/WrightTools/data/_data.py @@ -1203,12 +1203,10 @@ class Data: Notes ----- - m-factors originally derived by Carlson and Wright. [1]_ References ---------- - .. [1] **Absorption and Coherent Interference Effects in Multiply Resonant Four-Wave Mixing Spectroscopy** Roger J. Carlson, and John C. Wright @@ -1520,6 +1518,30 @@ class Data: # finish self._update() + def rename_attrs(self, **kwargs): + """Rename a set of attributes. + + Keyword Arguments + ----------------- + Each argument should have the key of a current axis or channel, + and a value which is a string of its new name. + + The name will be set to str(val), and its natural naming identifier + will be wt.kit.string2identifier(str(val)) + """ + changed = kwargs.keys() + for k, v in kwargs.items(): + if getattr(self, k).__class__ not in (Channel, Axis): + raise TypeError("Attribute for key %s: expected {Channel, Axis}, got %s" % + (k, getattr(self, k).__class__)) + if v not in changed and hasattr(self, v): + raise wt_exceptions.NameNotUniqueError(v) + for k, v in kwargs.items(): + axis = getattr(self, k) + axis.name = str(v) + delattr(self, k) + self._update() + def save(self, filepath=None, verbose=True): """Save using the `pickle`__ module. diff --git a/WrightTools/exceptions.py b/WrightTools/exceptions.py index 3c25ce4..8f3fa78 100644 --- a/WrightTools/exceptions.py +++ b/WrightTools/exceptions.py @@ -44,6 +44,21 @@ class FileNotFound(Exception): Exception.__init__(self, message) +class NameNotUniqueError(Exception): + """NameNotUniqueError.""" + + def __init__(self, name): + """Format a Name Not Unique Error. + + Parameters + ---------- + name : string + Name of an attribute which causes a duplication. 
+ """ + message = 'Name {} results in a duplicate'.format(name) + Exception.__init__(self, message) + + # --- custom warnings -----------------------------------------------------------------------------
rename methods `data.rename_channel` and `data.rename_axis` accept kwargs with key oldname and value newname if newname already in names and not ALSO being renamed, raise helpful exception remember to call `data._update` not necessarily clear how to handle `axis.label_seed`
wright-group/WrightTools
diff --git a/tests/data/rename_attrs.py b/tests/data/rename_attrs.py new file mode 100644 index 0000000..f032f7d --- /dev/null +++ b/tests/data/rename_attrs.py @@ -0,0 +1,30 @@ +"""test rename_attrs.""" + + +# --- import -------------------------------------------------------------------------------------- + + +import WrightTools as wt +from WrightTools import datasets + + +# --- test ---------------------------------------------------------------------------------------- + + +def test_rename(): + p = datasets.PyCMDS.wm_w2_w1_000 + data = wt.data.from_PyCMDS(p) + data.rename_attrs(w1='w2', w2='w1') + assert data.shape == (35, 11, 11) + assert data.axis_names == ['wm', 'w1', 'w2'] + + +def test_error(): + p = datasets.PyCMDS.wm_w2_w1_000 + data = wt.data.from_PyCMDS(p) + try: + data.rename_attrs(w1='w2') + except wt.exceptions.NameNotUniqueError: + assert True + else: + assert False
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 2 }
2.13
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
appdirs==1.4.4 attrs==22.2.0 cached-property==1.5.2 certifi==2021.5.30 cycler==0.11.0 h5py==3.1.0 imageio==2.15.0 importlib-metadata==4.8.3 iniconfig==1.1.1 kiwisolver==1.3.1 matplotlib==3.3.4 numpy==1.19.5 packaging==21.3 Pillow==8.4.0 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 python-dateutil==2.9.0.post0 pytz==2025.2 scipy==1.5.4 six==1.17.0 tomli==1.2.3 typing_extensions==4.1.1 -e git+https://github.com/wright-group/WrightTools.git@2be1155987558d6c7358654e9f26068f5359df96#egg=WrightTools zipp==3.6.0
name: WrightTools channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - appdirs==1.4.4 - attrs==22.2.0 - cached-property==1.5.2 - cycler==0.11.0 - h5py==3.1.0 - imageio==2.15.0 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - kiwisolver==1.3.1 - matplotlib==3.3.4 - numpy==1.19.5 - packaging==21.3 - pillow==8.4.0 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - python-dateutil==2.9.0.post0 - pytz==2025.2 - scipy==1.5.4 - six==1.17.0 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/WrightTools
[ "tests/data/rename_attrs.py::test_rename", "tests/data/rename_attrs.py::test_error" ]
[]
[]
[]
MIT License
1,723
[ "WrightTools/exceptions.py", "WrightTools/data/_data.py" ]
[ "WrightTools/exceptions.py", "WrightTools/data/_data.py" ]
praekeltfoundation__marathon-acme-112
6f795e56fb80b6ce2e93ae3e33ec578eb13f6202
2017-10-03 12:22:02
6f795e56fb80b6ce2e93ae3e33ec578eb13f6202
diff --git a/.travis.yml b/.travis.yml index 2046992..c0f46b6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,7 +7,7 @@ branches: only: - develop - master - - /\d+\.\d+(\.\d+)?/ + - /^\d+\.\d+(\.\d+)?$/ language: python matrix: diff --git a/marathon_acme/marathon_util.py b/marathon_acme/marathon_util.py index 771939d..36b257c 100644 --- a/marathon_acme/marathon_util.py +++ b/marathon_acme/marathon_util.py @@ -2,46 +2,101 @@ def get_number_of_app_ports(app): """ Get the number of ports for the given app JSON. This roughly follows the logic in marathon-lb for finding app IPs/ports, although we are only - interested in the quantity of ports an app has: - https://github.com/mesosphere/marathon-lb/blob/v1.7.0/utils.py#L278-L350 + interested in the quantity of ports an app should have and don't consider + the specific IPs/ports of individual tasks: + https://github.com/mesosphere/marathon-lb/blob/v1.10.3/utils.py#L393-L415 :param app: The app JSON from the Marathon API. :return: The number of ports for the app. """ - if _is_ip_per_task(app): - if _is_user_network(app): - return len(app['container']['docker']['portMappings']) - else: - return len(app['ipAddress']['discovery']['ports']) + mode = _get_networking_mode(app) + ports_list = None + if mode == 'host': + ports_list = _get_port_definitions(app) + elif mode == 'container/bridge': + ports_list = _get_port_definitions(app) + if ports_list is None: + ports_list = _get_container_port_mappings(app) + elif mode == 'container': + ports_list = _get_ip_address_discovery_ports(app) + # Marathon 1.5+: the ipAddress field is missing -> ports_list is None + # Marathon <1.5: the ipAddress field can be present, but ports_list can + # still be empty while the container port mapping is not :-/ + if not ports_list: + ports_list = _get_container_port_mappings(app) else: - # Prefer the 'portDefinitions' field added in Marathon 1.0.0 but fall - # back to the deprecated 'ports' array if that's not present. 
- if 'portDefinitions' in app: - return len(app['portDefinitions']) - else: - return len(app['ports']) + raise RuntimeError( + "Unknown Marathon networking mode '{}'".format(mode)) + return len(ports_list) -def _is_ip_per_task(app): + +def _get_networking_mode(app): """ - Return whether the application is using IP-per-task. - :param app: The application to check. - :return: True if using IP per task, False otherwise. + Get the Marathon networking mode for the app. """ - return app.get('ipAddress') is not None + # Marathon 1.5+: there is a `networks` field + networks = app.get('networks') + if networks: + # Modes cannot be mixed, so assigning the last mode is fine + return networks[-1].get('mode', 'container') + + # Older Marathon: determine equivalent network mode + container = app.get('container') + if container is not None and 'docker' in container: + docker_network = container['docker'].get('network') + if docker_network == 'USER': + return 'container' + elif docker_network == 'BRIDGE': + return 'container/bridge' + + return 'container' if _is_legacy_ip_per_task(app) else 'host' -def _is_user_network(app): +def _get_container_port_mappings(app): """ - Returns True if container network mode is set to USER - :param app: The application to check. - :return: True if using USER network, False otherwise. + Get the ``portMappings`` field for the app container. """ - container = app.get('container') - if container is None: - return False + container = app['container'] + + # Marathon 1.5+: container.portMappings field + port_mappings = container.get('portMappings') + + # Older Marathon: container.docker.portMappings field + if port_mappings is None and 'docker' in container: + port_mappings = container['docker'].get('portMappings') - if container['type'] != 'DOCKER': - return False + return port_mappings - return container['docker']['network'] == 'USER' + +def _get_port_definitions(app): + """ + Get the ``portDefinitions`` field for the app if present. 
+ """ + if 'portDefinitions' in app: + return app['portDefinitions'] + + # In the worst case try use the old `ports` array + # Only useful on very old Marathons + if 'ports' in app: + return app['ports'] + + return None + + +def _get_ip_address_discovery_ports(app): + """ + Get the ports from the ``ipAddress`` field for the app if present. + """ + if not _is_legacy_ip_per_task(app): + return None + return app['ipAddress']['discovery']['ports'] + + +def _is_legacy_ip_per_task(app): + """ + Return whether the application is using IP-per-task on Marathon < 1.5. + :param app: The application to check. + :return: True if using IP per task, False otherwise. + """ + return app.get('ipAddress') is not None
Support Marathon 1.5 networking API Marathon 1.5 isn't out yet but changes things up a lot: https://github.com/mesosphere/marathon/blob/master/docs/docs/networking.md
praekeltfoundation/marathon-acme
diff --git a/marathon_acme/tests/test_marathon_util.py b/marathon_acme/tests/test_marathon_util.py index e793424..9eb7c2f 100644 --- a/marathon_acme/tests/test_marathon_util.py +++ b/marathon_acme/tests/test_marathon_util.py @@ -1,4 +1,5 @@ import pytest +from testtools import ExpectedException from testtools.assertions import assert_that from testtools.matchers import Equals @@ -53,6 +54,19 @@ TEST_APP = { 'deployments': [], } +CONTAINER_HOST_NETWORKING = { + 'type': 'DOCKER', + 'volumes': [], + 'docker': { + 'image': 'praekeltfoundation/marathon-lb:1.6.0', + 'network': 'HOST', + 'portMappings': [], + 'privileged': True, + 'parameters': [], + 'forcePullImage': False + } +} + CONTAINER_USER_NETWORKING = { 'type': 'DOCKER', 'volumes': [], @@ -115,6 +129,77 @@ CONTAINER_MESOS = { ], } +# https://github.com/mesosphere/marathon/blob/v1.5.1/docs/docs/networking.md#host-mode +NETWORKS_CONTAINER_HOST_MARATHON15 = [{'mode': 'host'}] +CONTAINER_MESOS_HOST_NETWORKING_MARATHON15 = { + 'type': 'MESOS', + 'docker': { + 'image': 'my-image:1.0' + }, +} + +# https://github.com/mesosphere/marathon/blob/v1.5.1/docs/docs/networking.md#specifying-ports-1 +NETWORKS_CONTAINER_BRIDGE_MARATHON15 = [{'mode': 'container/bridge'}] +CONTAINER_BRIDGE_NETWORKING_MARATHON15 = { + 'type': 'DOCKER', + 'docker': { + 'forcePullImage': True, + 'image': 'praekeltfoundation/mc2:release-3.11.2', + 'parameters': [ + { + 'key': 'add-host', + 'value': 'servicehost:172.17.0.1' + } + ], + 'privileged': False + }, + 'volumes': [], + 'portMappings': [ + { + 'containerPort': 80, + 'hostPort': 0, + 'labels': {}, + 'protocol': 'tcp', + 'servicePort': 10005 + } + ] +} +CONTAINER_MESOS_BRIDGE_NETWORKING_MARATHON15 = { + 'type': 'MESOS', + 'docker': { + 'image': 'my-image:1.0' + }, + 'portMappings': [ + {'containerPort': 80, 'hostPort': 0, 'name': 'http'}, + {'containerPort': 443, 'hostPort': 0, 'name': 'https'}, + {'containerPort': 4000, 'hostPort': 0, 'name': 'mon'} + ] +} + +# 
https://github.com/mesosphere/marathon/blob/v1.5.1/docs/docs/networking.md#enabling-container-mode +NETWORKS_CONTAINER_USER_MARATHON15 = [{'mode': 'container', 'name': 'dcos'}] +CONTAINER_USER_NETWORKING_MARATHON15 = { + 'type': 'DOCKER', + 'docker': { + 'forcePullImage': False, + 'image': 'python:3-alpine', + 'parameters': [], + 'privileged': False + }, + 'volumes': [], + 'portMappings': [ + { + 'containerPort': 8080, + 'labels': { + 'VIP_0': '/foovu1:8080' + }, + 'name': 'foovu1http', + 'protocol': 'tcp', + 'servicePort': 10004 + } + ], +} + IP_ADDRESS_NO_PORTS = { 'groups': [], 'labels': {}, @@ -151,6 +236,17 @@ def test_app(): class TestGetNumberOfAppPortsFunc(object): + def test_host_networking(self, test_app): + """ + When the app uses Docker containers with HOST networking, the ports + should be counted from the 'portDefinitions' field. + """ + test_app['container'] = CONTAINER_HOST_NETWORKING + test_app['portDefinitions'] = PORT_DEFINITIONS_ONE_PORT + + num_ports = get_number_of_app_ports(test_app) + assert_that(num_ports, Equals(1)) + def test_user_networking(self, test_app): """ When the app uses a Docker container with USER networking, it will have @@ -207,3 +303,64 @@ class TestGetNumberOfAppPortsFunc(object): num_ports = get_number_of_app_ports(test_app) assert_that(num_ports, Equals(2)) + + def test_host_networking_mesos_marathon15(self, test_app): + """ + For Marathon 1.5+, when the app uses Mesos containers with host + networking, the ports should be counted from the 'portDefinitions' + field. 
+ """ + test_app['container'] = CONTAINER_MESOS_HOST_NETWORKING_MARATHON15 + test_app['networks'] = NETWORKS_CONTAINER_HOST_MARATHON15 + test_app['portDefinitions'] = PORT_DEFINITIONS_ONE_PORT + + num_ports = get_number_of_app_ports(test_app) + assert_that(num_ports, Equals(1)) + + def test_bridge_networking_marathon15(self, test_app): + """ + For Marathon 1.5+, when the app uses Docker containers with + 'container/bridge' networking, the ports should be counted from the + ``container.portMappings`` field. + """ + test_app['container'] = CONTAINER_BRIDGE_NETWORKING_MARATHON15 + test_app['networks'] = NETWORKS_CONTAINER_BRIDGE_MARATHON15 + + num_ports = get_number_of_app_ports(test_app) + assert_that(num_ports, Equals(1)) + + def test_bridge_networking_mesos_marathon15(self, test_app): + """ + For Marathon 1.5+, when the app uses Mesos containers with + 'container/bridge' networking, the ports should be counted from the + ``container.portMappings`` field. + """ + test_app['container'] = CONTAINER_MESOS_BRIDGE_NETWORKING_MARATHON15 + test_app['networks'] = NETWORKS_CONTAINER_BRIDGE_MARATHON15 + + num_ports = get_number_of_app_ports(test_app) + assert_that(num_ports, Equals(3)) + + def test_user_networking_marathon15(self, test_app): + """ + For Marathon 1.5+, when the app uses Docker containers with 'container' + networking, the ports should be counted from the + ``container.portMappings`` field. + """ + test_app['container'] = CONTAINER_USER_NETWORKING_MARATHON15 + test_app['networks'] = NETWORKS_CONTAINER_USER_MARATHON15 + + num_ports = get_number_of_app_ports(test_app) + assert_that(num_ports, Equals(1)) + + def test_unknown_networking_mode(self, test_app): + """ + When an app is defined with an unknown networking mode, an error is + raised. + """ + test_app['networks'] = [{'mode': 'container/iptables'}] + + with ExpectedException( + RuntimeError, + r"Unknown Marathon networking mode 'container/iptables'"): + get_number_of_app_ports(test_app)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 2 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[test,pep8test]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "dev-requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
acme==0.40.1 aiocontextvars==0.2.2 attrs==22.2.0 Automat==22.10.0 boltons==23.1.1 certifi==2021.5.30 cffi==1.15.1 charset-normalizer==2.0.12 constantly==15.1.0 contextvars==2.4 coverage==6.2 cryptography==40.0.2 dataclasses==0.8 distlib==0.3.9 eliot==1.14.0 filelock==3.4.1 fixtures==4.0.1 flake8==5.0.4 flake8-import-order==0.18.2 hyperlink==21.0.0 idna==3.10 immutables==0.19 importlib-metadata==4.2.0 importlib-resources==5.4.0 incremental==22.10.0 iniconfig==1.1.1 josepy==1.13.0 klein==21.8.0 -e git+https://github.com/praekeltfoundation/marathon-acme.git@6f795e56fb80b6ce2e93ae3e33ec578eb13f6202#egg=marathon_acme mccabe==0.7.0 mock==5.2.0 packaging==21.3 pbr==6.1.1 pem==21.2.0 platformdirs==2.4.0 pluggy==1.0.0 py==1.11.0 pyasn1==0.5.1 pyasn1-modules==0.3.0 pycodestyle==2.9.1 pycparser==2.21 pyflakes==2.5.0 pyOpenSSL==23.2.0 pyparsing==3.1.4 pyRFC3339==2.0.1 pyrsistent==0.18.0 pytest==7.0.1 pytz==2025.2 requests==2.27.1 requests-toolbelt==1.0.0 service-identity==21.1.0 six==1.17.0 testtools==2.6.0 toml==0.10.2 tomli==1.2.3 tox==3.28.0 treq==22.2.0 Tubes==0.2.1 Twisted==22.4.0 txacme==0.9.3 txfake==0.1.1 TxSNI==0.2.0 typing_extensions==4.1.1 uritools==3.0.2 urllib3==1.26.20 virtualenv==20.17.1 Werkzeug==2.0.3 zipp==3.6.0 zope.interface==5.5.2
name: marathon-acme channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - acme==0.40.1 - aiocontextvars==0.2.2 - attrs==22.2.0 - automat==22.10.0 - boltons==23.1.1 - cffi==1.15.1 - charset-normalizer==2.0.12 - constantly==15.1.0 - contextvars==2.4 - coverage==6.2 - cryptography==40.0.2 - dataclasses==0.8 - distlib==0.3.9 - eliot==1.14.0 - filelock==3.4.1 - fixtures==4.0.1 - flake8==5.0.4 - flake8-import-order==0.18.2 - hyperlink==21.0.0 - idna==3.10 - immutables==0.19 - importlib-metadata==4.2.0 - importlib-resources==5.4.0 - incremental==22.10.0 - iniconfig==1.1.1 - josepy==1.13.0 - klein==21.8.0 - mccabe==0.7.0 - mock==5.2.0 - packaging==21.3 - pbr==6.1.1 - pem==21.2.0 - platformdirs==2.4.0 - pluggy==1.0.0 - py==1.11.0 - pyasn1==0.5.1 - pyasn1-modules==0.3.0 - pycodestyle==2.9.1 - pycparser==2.21 - pyflakes==2.5.0 - pyopenssl==23.2.0 - pyparsing==3.1.4 - pyrfc3339==2.0.1 - pyrsistent==0.18.0 - pytest==7.0.1 - pytz==2025.2 - requests==2.27.1 - requests-toolbelt==1.0.0 - service-identity==21.1.0 - six==1.17.0 - testtools==2.6.0 - toml==0.10.2 - tomli==1.2.3 - tox==3.28.0 - treq==22.2.0 - tubes==0.2.1 - twisted==22.4.0 - txacme==0.9.3 - txfake==0.1.1 - txsni==0.2.0 - typing-extensions==4.1.1 - uritools==3.0.2 - urllib3==1.26.20 - virtualenv==20.17.1 - werkzeug==2.0.3 - zipp==3.6.0 - zope-interface==5.5.2 prefix: 
/opt/conda/envs/marathon-acme
[ "marathon_acme/tests/test_marathon_util.py::TestGetNumberOfAppPortsFunc::test_bridge_networking_marathon15", "marathon_acme/tests/test_marathon_util.py::TestGetNumberOfAppPortsFunc::test_bridge_networking_mesos_marathon15", "marathon_acme/tests/test_marathon_util.py::TestGetNumberOfAppPortsFunc::test_user_networking_marathon15", "marathon_acme/tests/test_marathon_util.py::TestGetNumberOfAppPortsFunc::test_unknown_networking_mode" ]
[]
[ "marathon_acme/tests/test_marathon_util.py::TestGetNumberOfAppPortsFunc::test_host_networking", "marathon_acme/tests/test_marathon_util.py::TestGetNumberOfAppPortsFunc::test_user_networking", "marathon_acme/tests/test_marathon_util.py::TestGetNumberOfAppPortsFunc::test_ip_per_task_no_container", "marathon_acme/tests/test_marathon_util.py::TestGetNumberOfAppPortsFunc::test_ip_per_task_mesos_containerizer", "marathon_acme/tests/test_marathon_util.py::TestGetNumberOfAppPortsFunc::test_bridge_networking", "marathon_acme/tests/test_marathon_util.py::TestGetNumberOfAppPortsFunc::test_bridge_networking_no_port_definitions", "marathon_acme/tests/test_marathon_util.py::TestGetNumberOfAppPortsFunc::test_host_networking_mesos_marathon15" ]
[]
MIT License
1,724
[ ".travis.yml", "marathon_acme/marathon_util.py" ]
[ ".travis.yml", "marathon_acme/marathon_util.py" ]
jjhelmus__pyfive-35
bdddda56cfcc51548f8a553bac96adc9919594ad
2017-10-03 12:26:13
d21662e6c95ed08a0b909f9aef4ee3f2428c0036
diff --git a/pyfive/high_level.py b/pyfive/high_level.py index 11c90ff..973f49d 100644 --- a/pyfive/high_level.py +++ b/pyfive/high_level.py @@ -3,6 +3,7 @@ from collections import Mapping, deque, Sequence import os from io import open # Python 2.7 requires for a Buffered Reader +import posixpath import numpy as np @@ -49,33 +50,45 @@ class Group(Mapping): """ Number of links in the group. """ return len(self._links) + def _dereference(self, ref): + """ Deference a Reference object. """ + if not ref: + raise ValueError('cannot deference null reference') + obj = self.file._get_object_by_address(ref.address_of_reference) + if obj is None: + dataobjects = DataObjects(self.file._fh, ref.address_of_reference) + if dataobjects.is_dataset: + return Dataset(None, dataobjects, None, alt_file=self.file) + return Group(None, dataobjects, None, alt_file=self.file) + return obj + def __getitem__(self, y): """ x.__getitem__(y) <==> x[y] """ if isinstance(y, Reference): - if not y: - raise ValueError('cannot deference null reference') - obj = self.file._get_object_by_address(y.address_of_reference) - if obj is None: - dataobjs = DataObjects(self.file._fh, y.address_of_reference) - if dataobjs.is_dataset: - return Dataset(None, dataobjs, None, alt_file=self.file) - return Group(None, dataobjs, None, alt_file=self.file) - return obj - - y = y.strip('/') - - if y not in self._links: - raise KeyError('%s not found in group' % (y)) - - if self.name == '/': - sep = '' + return self._dereference(y) + + path = posixpath.normpath(y) + if path == '.': + return self + if path.startswith('/'): + return self.file[path[1:]] + + if posixpath.dirname(path) != '': + next_obj, additional_obj = path.split('/', 1) else: - sep = '/' + next_obj = path + additional_obj = '.' 
+ + if next_obj not in self._links: + raise KeyError('%s not found in group' % (next_obj)) - dataobjs = DataObjects(self.file._fh, self._links[y]) + obj_name = posixpath.join(self.name, next_obj) + dataobjs = DataObjects(self.file._fh, self._links[next_obj]) if dataobjs.is_dataset: - return Dataset(self.name + sep + y, dataobjs, self) - return Group(self.name + sep + y, dataobjs, self) + if additional_obj != '.': + raise KeyError('%s is a dataset, not a group' % (obj_name)) + return Dataset(obj_name, dataobjs, self) + return Group(obj_name, dataobjs, self)[additional_obj] def __iter__(self): for k in self._links.keys():
Access values with path One thing that is not yet possible and that would help a lot. For example, with version 0.2.0: Using Python 2.7.12 (default, Jul 1 2016, 15:12:24) [GCC 5.4.0 20160609] on linux2 > > > import h5py > > > import pyfive > > > > > > f5 = h5py.File('tests/latest.hdf5') > > > f5["group1/subgroup1/dataset3"].value > > > array([ 0., 1., 2., 3.], dtype=float32) > > > ffive = pyfive.File('tests/latest.hdf5') > > > ffive["group1/subgroup1/dataset3"].value > > > Traceback (most recent call last): > > > File "<stdin>", line 1, in <module> > > > File "pyfive/high_level.py", line 48, in **getitem** > > > raise KeyError('%s not found in group' % (y)) > > > KeyError: 'group1/subgroup1/dataset3 not found in group'
jjhelmus/pyfive
diff --git a/tests/test_high_level.py b/tests/test_high_level.py index 593ad24..9e14af3 100644 --- a/tests/test_high_level.py +++ b/tests/test_high_level.py @@ -138,3 +138,20 @@ def test_dataset_class(): assert dset1.parent.name == '/' assert dset2.parent.name == '/group1' + + +def test_get_objects_by_path(): + # gh-15 + + with pyfive.File(EARLIEST_HDF5_FILE) as hfile: + grp = hfile['/group1'] + + assert hfile['/group1/subgroup1'].name == '/group1/subgroup1' + assert grp['/group1/subgroup1'].name == '/group1/subgroup1' + + dset2 = hfile['group1/dataset2/'] + assert dset2.name == '/group1/dataset2' + + assert_raises(KeyError, hfile.__getitem__, 'group1/fake') + assert_raises(KeyError, hfile.__getitem__, 'group1/subgroup1/fake') + assert_raises(KeyError, hfile.__getitem__, 'group1/dataset2/fake')
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "nose", "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work nose==1.3.7 numpy==1.19.5 packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work -e git+https://github.com/jjhelmus/pyfive.git@bdddda56cfcc51548f8a553bac96adc9919594ad#egg=pyfive pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: pyfive channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - nose==1.3.7 - numpy==1.19.5 prefix: /opt/conda/envs/pyfive
[ "tests/test_high_level.py::test_get_objects_by_path" ]
[]
[ "tests/test_high_level.py::test_file_class", "tests/test_high_level.py::test_group_class", "tests/test_high_level.py::test_dataset_class" ]
[]
BSD 3-Clause "New" or "Revised" License
1,725
[ "pyfive/high_level.py" ]
[ "pyfive/high_level.py" ]
OpenMined__PySyft-285
28ee3b4ade61bbbf0cce1c745a0b9fac310841e9
2017-10-03 13:06:22
06ce023225dd613d8fb14ab2046135b93ab22376
nish21: @bharathgs addressed the review, thank you. nish21: @bharathgs please take a look now
diff --git a/syft/tensor.py b/syft/tensor.py index b2142167d4..6d040cf1ed 100644 --- a/syft/tensor.py +++ b/syft/tensor.py @@ -862,6 +862,25 @@ class TensorBase(object): out = np.random.uniform(low=low, high=high, size=self.shape()) return TensorBase(out) + def geometric_(self, p): + """Fills the given tensor in-place with samples from a geometric distribution + with given probability of success of an individual trial. + + Parameters + ---------- + p: float + Probability of success of an individual trial + + Returns + ------- + TensorBase + Caller with values in-place + """ + if self.encrypted: + return NotImplemented + self.data = np.random.geometric(p, size=self.shape()) + return self + def cauchy_(self, median=0, sigma=1): """Fills the tensor in-place with numbers drawn from the Cauchy distribution:
Implement Default geometric Functionality for Base Tensor Type **User Story A:** As a Data Scientist using Syft's Base Tensor type, we want to implement a default method for computing operations on a Tensor of arbitrary type. For this ticket to be complete, geometric_() should perform the operation inline. For a reference on the operation this performs check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation. **Acceptance Criteria:** - If the Base Tensor type's attribute "encrypted" is set to True, it should return a NotImplemented error. - a unit test demonstrating the correct operation on the Base Tensor type implemented over int and float Tensors. - inline documentation in the python code. For inspiration on inline documentation, please check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation for this operator.
OpenMined/PySyft
diff --git a/tests/test_tensor.py b/tests/test_tensor.py index 4c5598db5b..d2b2240133 100644 --- a/tests/test_tensor.py +++ b/tests/test_tensor.py @@ -716,6 +716,14 @@ class uniformTests(unittest.TestCase): self.assertTrue(np.all(t1.data > 0) and np.all(t1.data < 3)) +class geometricTests(unittest.TestCase): + def test_geometric_(self): + t1 = TensorBase(np.zeros((4, 4))) + out = t1.geometric_(p=0.5) + self.assertTupleEqual(t1.data.shape, out.data.shape) + self.assertTrue(np.all(out.data > 0)) + + class fillTests(unittest.TestCase): def test_fill_(self): t1 = TensorBase(np.array([1, 2, 3, 4]))
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
PySyft/hydrogen
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y musl-dev g++ libgmp3-dev libmpfr-dev ca-certificates" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
args==0.1.0 attrs==22.2.0 certifi==2021.5.30 clint==0.5.1 flake8==5.0.4 importlib-metadata==4.2.0 iniconfig==1.1.1 joblib==1.1.1 line-profiler==4.1.3 mccabe==0.7.0 numpy==1.19.5 packaging==21.3 phe==1.5.0 pluggy==1.0.0 py==1.11.0 pycodestyle==2.9.1 pyflakes==2.5.0 pyparsing==3.1.4 pyRserve==1.0.4 pytest==7.0.1 pytest-flake8==1.1.1 scikit-learn==0.24.2 scipy==1.5.4 sklearn==0.0 -e git+https://github.com/OpenMined/PySyft.git@28ee3b4ade61bbbf0cce1c745a0b9fac310841e9#egg=syft threadpoolctl==3.1.0 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: PySyft channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - args==0.1.0 - attrs==22.2.0 - clint==0.5.1 - flake8==5.0.4 - importlib-metadata==4.2.0 - iniconfig==1.1.1 - joblib==1.1.1 - line-profiler==4.1.3 - mccabe==0.7.0 - numpy==1.19.5 - packaging==21.3 - phe==1.5.0 - pluggy==1.0.0 - py==1.11.0 - pycodestyle==2.9.1 - pyflakes==2.5.0 - pyparsing==3.1.4 - pyrserve==1.0.4 - pytest==7.0.1 - pytest-flake8==1.1.1 - scikit-learn==0.24.2 - scipy==1.5.4 - sklearn==0.0 - threadpoolctl==3.1.0 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/PySyft
[ "tests/test_tensor.py::geometricTests::test_geometric_" ]
[]
[ "tests/test_tensor.py::DimTests::test_as_view", "tests/test_tensor.py::DimTests::test_dim_one", "tests/test_tensor.py::DimTests::test_nelement", "tests/test_tensor.py::DimTests::test_resize", "tests/test_tensor.py::DimTests::test_resize_as", "tests/test_tensor.py::DimTests::test_size", "tests/test_tensor.py::DimTests::test_view", "tests/test_tensor.py::AddTests::test_inplace", "tests/test_tensor.py::AddTests::test_scalar", "tests/test_tensor.py::AddTests::test_simple", "tests/test_tensor.py::CeilTests::test_ceil", "tests/test_tensor.py::CeilTests::test_ceil_", "tests/test_tensor.py::ZeroTests::test_zero", "tests/test_tensor.py::FloorTests::test_floor_", "tests/test_tensor.py::SubTests::test_inplace", "tests/test_tensor.py::SubTests::test_scalar", "tests/test_tensor.py::SubTests::test_simple", "tests/test_tensor.py::MaxTests::test_axis", "tests/test_tensor.py::MaxTests::test_no_dim", "tests/test_tensor.py::MultTests::test_inplace", "tests/test_tensor.py::MultTests::test_scalar", "tests/test_tensor.py::MultTests::test_simple", "tests/test_tensor.py::DivTests::test_inplace", "tests/test_tensor.py::DivTests::test_scalar", "tests/test_tensor.py::DivTests::test_simple", "tests/test_tensor.py::AbsTests::test_abs", "tests/test_tensor.py::AbsTests::test_abs_", "tests/test_tensor.py::ShapeTests::test_shape", "tests/test_tensor.py::SqrtTests::test_sqrt", "tests/test_tensor.py::SqrtTests::test_sqrt_", "tests/test_tensor.py::SumTests::test_dim_is_not_none_int", "tests/test_tensor.py::SumTests::test_dim_none_int", "tests/test_tensor.py::EqualTests::test_equal", "tests/test_tensor.py::EqualTests::test_equal_operation", "tests/test_tensor.py::EqualTests::test_inequality_operation", "tests/test_tensor.py::EqualTests::test_not_equal", "tests/test_tensor.py::IndexTests::test_indexing", "tests/test_tensor.py::sigmoidTests::test_sigmoid", "tests/test_tensor.py::addmm::test_addmm_1d", "tests/test_tensor.py::addmm::test_addmm_2d", "tests/test_tensor.py::addmm::test_addmm__1d", 
"tests/test_tensor.py::addmm::test_addmm__2d", "tests/test_tensor.py::addcmulTests::test_addcmul_1d", "tests/test_tensor.py::addcmulTests::test_addcmul_2d", "tests/test_tensor.py::addcmulTests::test_addcmul__1d", "tests/test_tensor.py::addcmulTests::test_addcmul__2d", "tests/test_tensor.py::addcdivTests::test_addcdiv_1d", "tests/test_tensor.py::addcdivTests::test_addcdiv_2d", "tests/test_tensor.py::addcdivTests::test_addcdiv__1d", "tests/test_tensor.py::addcdivTests::test_addcdiv__2d", "tests/test_tensor.py::addmvTests::test_addmv", "tests/test_tensor.py::addmvTests::test_addmv_", "tests/test_tensor.py::bmmTests::test_bmm", "tests/test_tensor.py::bmmTests::test_bmm_size", "tests/test_tensor.py::addbmmTests::test_addbmm", "tests/test_tensor.py::addbmmTests::test_addbmm_", "tests/test_tensor.py::baddbmmTests::test_baddbmm", "tests/test_tensor.py::baddbmmTests::test_baddbmm_", "tests/test_tensor.py::transposeTests::test_t", "tests/test_tensor.py::transposeTests::test_transpose", "tests/test_tensor.py::transposeTests::test_transpose_", "tests/test_tensor.py::unsqueezeTests::test_unsqueeze", "tests/test_tensor.py::unsqueezeTests::test_unsqueeze_", "tests/test_tensor.py::expTests::test_exp", "tests/test_tensor.py::expTests::test_exp_", "tests/test_tensor.py::fracTests::test_frac", "tests/test_tensor.py::fracTests::test_frac_", "tests/test_tensor.py::rsqrtTests::test_rsqrt", "tests/test_tensor.py::rsqrtTests::test_rsqrt_", "tests/test_tensor.py::signTests::test_sign", "tests/test_tensor.py::signTests::test_sign_", "tests/test_tensor.py::numpyTests::test_numpy", "tests/test_tensor.py::reciprocalTests::test_reciprocal", "tests/test_tensor.py::reciprocalTests::test_reciprocal_", "tests/test_tensor.py::logTests::test_log", "tests/test_tensor.py::logTests::test_log_", "tests/test_tensor.py::logTests::test_log_1p", "tests/test_tensor.py::logTests::test_log_1p_", "tests/test_tensor.py::clampTests::test_clamp_float", "tests/test_tensor.py::clampTests::test_clamp_float_in_place", 
"tests/test_tensor.py::clampTests::test_clamp_int", "tests/test_tensor.py::clampTests::test_clamp_int_in_place", "tests/test_tensor.py::cloneTests::test_clone", "tests/test_tensor.py::chunkTests::test_chunk", "tests/test_tensor.py::chunkTests::test_chunk_same_size", "tests/test_tensor.py::gtTests::test_gt__in_place_with_number", "tests/test_tensor.py::gtTests::test_gt__in_place_with_tensor", "tests/test_tensor.py::gtTests::test_gt_with_encrypted", "tests/test_tensor.py::gtTests::test_gt_with_number", "tests/test_tensor.py::gtTests::test_gt_with_tensor", "tests/test_tensor.py::geTests::test_ge__in_place_with_number", "tests/test_tensor.py::geTests::test_ge__in_place_with_tensor", "tests/test_tensor.py::geTests::test_ge_with_encrypted", "tests/test_tensor.py::geTests::test_ge_with_number", "tests/test_tensor.py::geTests::test_ge_with_tensor", "tests/test_tensor.py::ltTests::test_lt__in_place_with_number", "tests/test_tensor.py::ltTests::test_lt__in_place_with_tensor", "tests/test_tensor.py::ltTests::test_lt_with_encrypted", "tests/test_tensor.py::ltTests::test_lt_with_number", "tests/test_tensor.py::ltTests::test_lt_with_tensor", "tests/test_tensor.py::leTests::test_le__in_place_with_number", "tests/test_tensor.py::leTests::test_le__in_place_with_tensor", "tests/test_tensor.py::leTests::test_le_with_encrypted", "tests/test_tensor.py::leTests::test_le_with_number", "tests/test_tensor.py::leTests::test_le_with_tensor", "tests/test_tensor.py::bernoulliTests::test_bernoulli", "tests/test_tensor.py::bernoulliTests::test_bernoulli_", "tests/test_tensor.py::cauchyTests::test_cauchy_", "tests/test_tensor.py::uniformTests::test_uniform", "tests/test_tensor.py::uniformTests::test_uniform_", "tests/test_tensor.py::fillTests::test_fill_", "tests/test_tensor.py::topkTests::test_topK", "tests/test_tensor.py::tolistTests::test_to_list", "tests/test_tensor.py::traceTests::test_trace", "tests/test_tensor.py::roundTests::test_round", "tests/test_tensor.py::roundTests::test_round_", 
"tests/test_tensor.py::repeatTests::test_repeat", "tests/test_tensor.py::powTests::test_pow", "tests/test_tensor.py::powTests::test_pow_", "tests/test_tensor.py::prodTests::test_prod", "tests/test_tensor.py::randomTests::test_random_", "tests/test_tensor.py::nonzeroTests::test_non_zero", "tests/test_tensor.py::cumprodTest::test_cumprod", "tests/test_tensor.py::cumprodTest::test_cumprod_", "tests/test_tensor.py::splitTests::test_split", "tests/test_tensor.py::squeezeTests::test_squeeze", "tests/test_tensor.py::expandAsTests::test_expand_as", "tests/test_tensor.py::meanTests::test_mean", "tests/test_tensor.py::notEqualTests::test_ne", "tests/test_tensor.py::notEqualTests::test_ne_", "tests/test_tensor.py::index_selectTests::test_index_select", "tests/test_tensor.py::gatherTests::test_gather_numerical_1", "tests/test_tensor.py::gatherTests::test_gather_numerical_2", "tests/test_tensor.py::scatterTests::test_scatter_dim_out_Of_range", "tests/test_tensor.py::scatterTests::test_scatter_index_out_of_range", "tests/test_tensor.py::scatterTests::test_scatter_index_src_dimension_mismatch", "tests/test_tensor.py::scatterTests::test_scatter_index_type", "tests/test_tensor.py::scatterTests::test_scatter_numerical_0", "tests/test_tensor.py::scatterTests::test_scatter_numerical_1", "tests/test_tensor.py::scatterTests::test_scatter_numerical_2", "tests/test_tensor.py::scatterTests::test_scatter_numerical_3", "tests/test_tensor.py::scatterTests::test_scatter_numerical_4", "tests/test_tensor.py::scatterTests::test_scatter_numerical_5", "tests/test_tensor.py::scatterTests::test_scatter_numerical_6", "tests/test_tensor.py::remainderTests::test_remainder_", "tests/test_tensor.py::remainderTests::test_remainder_broadcasting", "tests/test_tensor.py::testMv::test_mv", "tests/test_tensor.py::testMv::test_mv_tensor", "tests/test_tensor.py::masked_scatter_Tests::test_masked_scatter_1", "tests/test_tensor.py::masked_scatter_Tests::test_masked_scatter_braodcasting_1", 
"tests/test_tensor.py::masked_scatter_Tests::test_masked_scatter_braodcasting_2", "tests/test_tensor.py::masked_fill_Tests::test_masked_fill_", "tests/test_tensor.py::masked_fill_Tests::test_masked_fill_broadcasting", "tests/test_tensor.py::masked_select_Tests::test_masked_select", "tests/test_tensor.py::masked_select_Tests::test_masked_select_broadcasting_1", "tests/test_tensor.py::masked_select_Tests::test_masked_select_broadcasting_2", "tests/test_tensor.py::masked_select_Tests::test_tensor_base_masked_select", "tests/test_tensor.py::eqTests::test_eq_in_place_with_number", "tests/test_tensor.py::eqTests::test_eq_in_place_with_tensor", "tests/test_tensor.py::eqTests::test_eq_with_number", "tests/test_tensor.py::eqTests::test_eq_with_tensor", "tests/test_tensor.py::mm_test::test_mm_1d", "tests/test_tensor.py::mm_test::test_mm_2d", "tests/test_tensor.py::mm_test::test_mm_3d" ]
[]
Apache License 2.0
1,726
[ "syft/tensor.py" ]
[ "syft/tensor.py" ]
PlasmaPy__PlasmaPy-138
47925baeaac18c58c758b4ab50fabb44087cd800
2017-10-03 13:09:48
47925baeaac18c58c758b4ab50fabb44087cd800
diff --git a/plasmapy/physics/transport.py b/plasmapy/physics/transport.py index 75073cbd..8af344d3 100644 --- a/plasmapy/physics/transport.py +++ b/plasmapy/physics/transport.py @@ -8,21 +8,22 @@ from .parameters import Debye_length -@check_quantity({"n_e": {"units": units.m**-3}, - "T": {"units": units.K, "can_be_negative": False} +@check_quantity({"T": {"units": units.K, "can_be_negative": False}, + "n_e": {"units": units.m**-3} }) -def Coulomb_logarithm(n_e, T, particles, V=None): +def Coulomb_logarithm(T, n_e, particles, V=None): r"""Estimates the Coulomb logarithm. Parameters ---------- - n_e : Quantity - The electron density in units convertible to per cubic meter. T : Quantity - Temperature in units of temperature or energy per particle, - which is assumed to be equal for both the test particle and - the target particle + Temperature in units of temperature or energy per particle, + which is assumed to be equal for both the test particle and + the target particle + + n_e : Quantity + The electron density in units convertible to per cubic meter. particles : tuple A tuple containing string representations of the test particle @@ -90,9 +91,9 @@ def Coulomb_logarithm(n_e, T, particles, V=None): Examples -------- >>> from astropy import units as u - >>> Coulomb_logarithm(T=1e6*units.K, n_e=1e19*units.m**-3, ('e', 'p')) + >>> Coulomb_logarithm(T=1e6*u.K, n_e=1e19*u.m**-3, ('e', 'p')) 14.748259780491056 - >>> Coulomb_logarithm(1e6*units.K, 1e19*units.m**-3, ('e', 'p'), + >>> Coulomb_logarithm(1e6*u.K, 1e19*u.m**-3, ('e', 'p'), V=1e6*u.m/u.s) References
Check consistency of argument ordering in physics Here are a few example signatures straight from `physics.transport`: ``` def Coulomb_logarithm(n_e, T, particles, V=None): def Debye_length(T_e, n_e): def Debye_number(T_e, n_e): def upper_hybrid_frequency(B, n_e): ``` It would be nice to ensure that non-keyword arguments, where applicable, have the same ordering - like in other scientific packages, like Numpy, a consistent API is helpful for being able to call multiple functions without having to check the signature each time you call them. Any consistent ordering would be welcome - but common sense takes precedence.
PlasmaPy/PlasmaPy
diff --git a/plasmapy/physics/tests/test_transport.py b/plasmapy/physics/tests/test_transport.py index 3e5333a0..a1553e55 100644 --- a/plasmapy/physics/tests/test_transport.py +++ b/plasmapy/physics/tests/test_transport.py @@ -18,37 +18,37 @@ def test_Coulomb_logarithm(): particles = ('e', 'p') for i in range(3): - assert np.isclose(Coulomb_logarithm(n_e[i], T[i], particles), + assert np.isclose(Coulomb_logarithm(T[i], n_e[i], particles), Lambda[i], atol=0.01) - assert np.isclose(Coulomb_logarithm(5*u.m**-3, 1*u.eV, ('e', 'e')), - Coulomb_logarithm(5*u.m**-3, 11604.5220*u.K, ('e', 'e'))) + assert np.isclose(Coulomb_logarithm(1*u.eV, 5*u.m**-3, ('e', 'e')), + Coulomb_logarithm(11604.5220*u.K, 5*u.m**-3, ('e', 'e'))) - assert np.isclose(Coulomb_logarithm(1e9*u.cm**-3, 1e2*u.K, ('e', 'p')), + assert np.isclose(Coulomb_logarithm(1e2*u.K, 1e9*u.cm**-3, ('e', 'p')), 5.97, atol=0.01) - assert np.isclose(Coulomb_logarithm(1e9*u.cm**-3, 1e7*u.K, ('e', 'p')), + assert np.isclose(Coulomb_logarithm(1e7*u.K, 1e9*u.cm**-3, ('e', 'p')), 21.6, atol=0.1) - assert np.isclose(Coulomb_logarithm(1e24*u.cm**-3, 1e8*u.K, ('e', 'p')), + assert np.isclose(Coulomb_logarithm(1e8*u.K, 1e24*u.cm**-3, ('e', 'p')), 6.69, atol=0.01) - assert np.allclose(Coulomb_logarithm(n_e, T, particles), Lambda, atol=0.01) + assert np.allclose(Coulomb_logarithm(T, n_e, particles), Lambda, atol=0.01) - assert np.isclose(Coulomb_logarithm(5*u.m**-3, 1e5*u.K, ('e', 'e'), + assert np.isclose(Coulomb_logarithm(1e5*u.K, 5*u.m**-3, ('e', 'e'), V=1e4*u.m/u.s), 21.379082011) with pytest.raises(UserWarning): - Coulomb_logarithm(1*u.m**-3, 1e5*u.K, ('e', 'p'), 299792458*u.m/u.s) + Coulomb_logarithm(1e5*u.K, 1*u.m**-3, ('e', 'p'), 299792458*u.m/u.s) with pytest.raises(u.UnitConversionError): - Coulomb_logarithm(1*u.m**-3, 1e5*u.g, ('e', 'p'), 29979245*u.m/u.s) + Coulomb_logarithm(1e5*u.g, 1*u.m**-3, ('e', 'p'), 29979245*u.m/u.s) with pytest.raises(ValueError): - Coulomb_logarithm(5*u.m**-3, 1*u.K, ('e')) + 
Coulomb_logarithm(1*u.K, 5*u.m**-3, ('e')) with pytest.raises(ValueError): - Coulomb_logarithm(5*u.m**-3, 1*u.K, ('e', 'g')) + Coulomb_logarithm(1*u.K, 5*u.m**-3, ('e', 'g')) with pytest.raises(ValueError): - Coulomb_logarithm(5*u.m**-3, 1*u.K, ('e', 'D')) + Coulomb_logarithm(1*u.K, 5*u.m**-3, ('e', 'D'))
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 1 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
astropy==6.0.1 astropy-iers-data==0.2025.3.31.0.36.18 exceptiongroup==1.2.2 iniconfig==2.1.0 numpy==1.26.4 packaging==24.2 -e git+https://github.com/PlasmaPy/PlasmaPy.git@47925baeaac18c58c758b4ab50fabb44087cd800#egg=plasmapy pluggy==1.5.0 pyerfa==2.0.1.5 pytest==8.3.5 PyYAML==6.0.2 scipy==1.13.1 tomli==2.2.1
name: PlasmaPy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - astropy==6.0.1 - astropy-iers-data==0.2025.3.31.0.36.18 - exceptiongroup==1.2.2 - iniconfig==2.1.0 - numpy==1.26.4 - packaging==24.2 - pluggy==1.5.0 - pyerfa==2.0.1.5 - pytest==8.3.5 - pyyaml==6.0.2 - scipy==1.13.1 - tomli==2.2.1 prefix: /opt/conda/envs/PlasmaPy
[ "plasmapy/physics/tests/test_transport.py::test_Coulomb_logarithm" ]
[]
[]
[]
BSD 3-Clause "New" or "Revised" License
1,727
[ "plasmapy/physics/transport.py" ]
[ "plasmapy/physics/transport.py" ]
oasis-open__cti-python-stix2-60
e2151659d7d68d9c6878f050e52d5d1af6bb0fd6
2017-10-03 14:48:23
58f39f80af5cbfe02879c2efa4b3b4ef7a504390
diff --git a/stix2/__init__.py b/stix2/__init__.py index 7be0904..53c2fb1 100644 --- a/stix2/__init__.py +++ b/stix2/__init__.py @@ -8,6 +8,8 @@ from .common import (TLP_AMBER, TLP_GREEN, TLP_RED, TLP_WHITE, CustomMarking, MarkingDefinition, StatementMarking, TLPMarking) from .core import Bundle, _register_type, parse from .environment import Environment, ObjectFactory +from .markings import (add_markings, clear_markings, get_markings, is_marked, + remove_markings, set_markings) from .observables import (URL, AlternateDataStream, ArchiveExt, Artifact, AutonomousSystem, CustomObservable, Directory, DomainName, EmailAddress, EmailMessage, diff --git a/stix2/common.py b/stix2/common.py index a2e6918..d7994c6 100644 --- a/stix2/common.py +++ b/stix2/common.py @@ -3,6 +3,7 @@ from collections import OrderedDict from .base import _STIXBase +from .markings import MarkingsMixin from .properties import (HashesProperty, IDProperty, ListProperty, Property, ReferenceProperty, SelectorProperty, StringProperty, TimestampProperty, TypeProperty) @@ -76,7 +77,7 @@ class MarkingProperty(Property): raise ValueError("must be a Statement, TLP Marking or a registered marking.") -class MarkingDefinition(_STIXBase): +class MarkingDefinition(_STIXBase, MarkingsMixin): _type = 'marking-definition' _properties = OrderedDict() _properties.update([ diff --git a/stix2/markings/__init__.py b/stix2/markings/__init__.py index 4f72e4c..41c761d 100644 --- a/stix2/markings/__init__.py +++ b/stix2/markings/__init__.py @@ -212,3 +212,16 @@ def is_marked(obj, marking=None, selectors=None, inherited=False, descendants=Fa result = result or object_markings.is_marked(obj, object_marks) return result + + +class MarkingsMixin(): + pass + + +# Note that all of these methods will return a new object because of immutability +MarkingsMixin.get_markings = get_markings +MarkingsMixin.set_markings = set_markings +MarkingsMixin.remove_markings = remove_markings +MarkingsMixin.add_markings = add_markings 
+MarkingsMixin.clear_markings = clear_markings +MarkingsMixin.is_marked = is_marked diff --git a/stix2/markings/granular_markings.py b/stix2/markings/granular_markings.py index 7e9ccc7..5afd1cc 100644 --- a/stix2/markings/granular_markings.py +++ b/stix2/markings/granular_markings.py @@ -88,6 +88,7 @@ def remove_markings(obj, marking, selectors): """ selectors = utils.convert_to_list(selectors) + marking = utils.convert_to_marking_list(marking) utils.validate(obj, selectors) granular_markings = obj.get("granular_markings") @@ -97,12 +98,9 @@ def remove_markings(obj, marking, selectors): granular_markings = utils.expand_markings(granular_markings) - if isinstance(marking, list): - to_remove = [] - for m in marking: - to_remove.append({"marking_ref": m, "selectors": selectors}) - else: - to_remove = [{"marking_ref": marking, "selectors": selectors}] + to_remove = [] + for m in marking: + to_remove.append({"marking_ref": m, "selectors": selectors}) remove = utils.build_granular_marking(to_remove).get("granular_markings") @@ -140,14 +138,12 @@ def add_markings(obj, marking, selectors): """ selectors = utils.convert_to_list(selectors) + marking = utils.convert_to_marking_list(marking) utils.validate(obj, selectors) - if isinstance(marking, list): - granular_marking = [] - for m in marking: - granular_marking.append({"marking_ref": m, "selectors": sorted(selectors)}) - else: - granular_marking = [{"marking_ref": marking, "selectors": sorted(selectors)}] + granular_marking = [] + for m in marking: + granular_marking.append({"marking_ref": m, "selectors": sorted(selectors)}) if obj.get("granular_markings"): granular_marking.extend(obj.get("granular_markings")) @@ -244,7 +240,7 @@ def is_marked(obj, marking=None, selectors=None, inherited=False, descendants=Fa raise TypeError("Required argument 'selectors' must be provided") selectors = utils.convert_to_list(selectors) - marking = utils.convert_to_list(marking) + marking = utils.convert_to_marking_list(marking) 
utils.validate(obj, selectors) granular_markings = obj.get("granular_markings", []) diff --git a/stix2/markings/object_markings.py b/stix2/markings/object_markings.py index c39c036..a775ddc 100644 --- a/stix2/markings/object_markings.py +++ b/stix2/markings/object_markings.py @@ -31,7 +31,7 @@ def add_markings(obj, marking): A new version of the given SDO or SRO with specified markings added. """ - marking = utils.convert_to_list(marking) + marking = utils.convert_to_marking_list(marking) object_markings = set(obj.get("object_marking_refs", []) + marking) @@ -55,7 +55,7 @@ def remove_markings(obj, marking): A new version of the given SDO or SRO with specified markings removed. """ - marking = utils.convert_to_list(marking) + marking = utils.convert_to_marking_list(marking) object_markings = obj.get("object_marking_refs", []) @@ -121,7 +121,7 @@ def is_marked(obj, marking=None): provided marking refs match, True is returned. """ - marking = utils.convert_to_list(marking) + marking = utils.convert_to_marking_list(marking) object_markings = obj.get("object_marking_refs", []) if marking: diff --git a/stix2/markings/utils.py b/stix2/markings/utils.py index d0d38bb..1154d19 100644 --- a/stix2/markings/utils.py +++ b/stix2/markings/utils.py @@ -37,6 +37,12 @@ def _validate_selector(obj, selector): return True +def _get_marking_id(marking): + if type(marking).__name__ is 'MarkingDefinition': # avoid circular import + return marking.id + return marking + + def validate(obj, selectors): """Given an SDO or SRO, check that each selector is valid.""" if selectors: @@ -57,6 +63,15 @@ def convert_to_list(data): return [data] +def convert_to_marking_list(data): + """Convert input into a list of marking identifiers.""" + if data is not None: + if isinstance(data, list): + return [_get_marking_id(x) for x in data] + else: + return [_get_marking_id(data)] + + def compress_markings(granular_markings): """ Compress granular markings list. 
If there is more than one marking diff --git a/stix2/sdo.py b/stix2/sdo.py index 77c781a..53f965d 100644 --- a/stix2/sdo.py +++ b/stix2/sdo.py @@ -6,6 +6,7 @@ import stix2 from .base import _STIXBase from .common import ExternalReference, GranularMarking, KillChainPhase +from .markings import MarkingsMixin from .observables import ObservableProperty from .properties import (BooleanProperty, IDProperty, IntegerProperty, ListProperty, PatternProperty, ReferenceProperty, @@ -13,7 +14,11 @@ from .properties import (BooleanProperty, IDProperty, IntegerProperty, from .utils import NOW -class AttackPattern(_STIXBase): +class STIXDomainObject(_STIXBase, MarkingsMixin): + pass + + +class AttackPattern(STIXDomainObject): _type = 'attack-pattern' _properties = OrderedDict() @@ -34,7 +39,7 @@ class AttackPattern(_STIXBase): ]) -class Campaign(_STIXBase): +class Campaign(STIXDomainObject): _type = 'campaign' _properties = OrderedDict() @@ -58,7 +63,7 @@ class Campaign(_STIXBase): ]) -class CourseOfAction(_STIXBase): +class CourseOfAction(STIXDomainObject): _type = 'course-of-action' _properties = OrderedDict() @@ -78,7 +83,7 @@ class CourseOfAction(_STIXBase): ]) -class Identity(_STIXBase): +class Identity(STIXDomainObject): _type = 'identity' _properties = OrderedDict() @@ -101,7 +106,7 @@ class Identity(_STIXBase): ]) -class Indicator(_STIXBase): +class Indicator(STIXDomainObject): _type = 'indicator' _properties = OrderedDict() @@ -125,7 +130,7 @@ class Indicator(_STIXBase): ]) -class IntrusionSet(_STIXBase): +class IntrusionSet(STIXDomainObject): _type = 'intrusion-set' _properties = OrderedDict() @@ -152,7 +157,7 @@ class IntrusionSet(_STIXBase): ]) -class Malware(_STIXBase): +class Malware(STIXDomainObject): _type = 'malware' _properties = OrderedDict() @@ -173,7 +178,7 @@ class Malware(_STIXBase): ]) -class ObservedData(_STIXBase): +class ObservedData(STIXDomainObject): _type = 'observed-data' _properties = OrderedDict() @@ -195,7 +200,7 @@ class ObservedData(_STIXBase): 
]) -class Report(_STIXBase): +class Report(STIXDomainObject): _type = 'report' _properties = OrderedDict() @@ -217,7 +222,7 @@ class Report(_STIXBase): ]) -class ThreatActor(_STIXBase): +class ThreatActor(STIXDomainObject): _type = 'threat-actor' _properties = OrderedDict() @@ -245,7 +250,7 @@ class ThreatActor(_STIXBase): ]) -class Tool(_STIXBase): +class Tool(STIXDomainObject): _type = 'tool' _properties = OrderedDict() @@ -267,7 +272,7 @@ class Tool(_STIXBase): ]) -class Vulnerability(_STIXBase): +class Vulnerability(STIXDomainObject): _type = 'vulnerability' _properties = OrderedDict() @@ -316,7 +321,7 @@ def CustomObject(type='x-custom-type', properties=None): def custom_builder(cls): - class _Custom(cls, _STIXBase): + class _Custom(cls, STIXDomainObject): _type = type _properties = OrderedDict() _properties.update([ diff --git a/stix2/sro.py b/stix2/sro.py index af483bc..4fa0465 100644 --- a/stix2/sro.py +++ b/stix2/sro.py @@ -4,13 +4,18 @@ from collections import OrderedDict from .base import _STIXBase from .common import ExternalReference, GranularMarking +from .markings import MarkingsMixin from .properties import (BooleanProperty, IDProperty, IntegerProperty, ListProperty, ReferenceProperty, StringProperty, TimestampProperty, TypeProperty) from .utils import NOW -class Relationship(_STIXBase): +class STIXRelationshipObject(_STIXBase, MarkingsMixin): + pass + + +class Relationship(STIXRelationshipObject): _type = 'relationship' _properties = OrderedDict() @@ -45,7 +50,7 @@ class Relationship(_STIXBase): super(Relationship, self).__init__(**kwargs) -class Sighting(_STIXBase): +class Sighting(STIXRelationshipObject): _type = 'sighting' _properties = OrderedDict() _properties.update([
Improve Data Markings Ease-of-Use Some ideas I had while writing documentation, for how to make data markings a little easier to use: - [ ] Maybe `StatementMarking()` should return a `MarkingDefinition` object and not require the user to create both a `MarkingDefinition` and `StatementMarking` object. Would a user ever create a `StatementMarking` object without also creating a `MarkingDefinition`? - [x] We should add a shortcut to be able to call `add_markings()` (and the other marking functions) on STIX objects. So for example ```indicator.remove_markings('marking--id')``` instead of ```markings.remove_markings(indicator, 'marking--id')``` As we implement these, be explicit that they return *new* versions of the objects since they are immutable. - [x] stix2/\_\_init\_\_.py should import the functions from stix2.markings so they're in the top-level namespace. - [x] `add_markings()` and `remove_markings()` should support passing in a `MarkingDefinition` object, not just an ID string.
oasis-open/cti-python-stix2
diff --git a/stix2/test/test_granular_markings.py b/stix2/test/test_granular_markings.py index e910ad3..f8fc803 100644 --- a/stix2/test/test_granular_markings.py +++ b/stix2/test/test_granular_markings.py @@ -1,7 +1,7 @@ import pytest -from stix2 import Malware, markings +from stix2 import TLP_RED, Malware, markings from .constants import MALWARE_MORE_KWARGS as MALWARE_KWARGS_CONST from .constants import MARKING_IDS @@ -45,6 +45,7 @@ def test_add_marking_mark_one_selector_multiple_refs(): }, ], **MALWARE_KWARGS), + MARKING_IDS[0], ), ( MALWARE_KWARGS, @@ -56,13 +57,26 @@ def test_add_marking_mark_one_selector_multiple_refs(): }, ], **MALWARE_KWARGS), + MARKING_IDS[0], + ), + ( + Malware(**MALWARE_KWARGS), + Malware( + granular_markings=[ + { + "selectors": ["description", "name"], + "marking_ref": TLP_RED.id, + }, + ], + **MALWARE_KWARGS), + TLP_RED, ), ]) def test_add_marking_mark_multiple_selector_one_refs(data): before = data[0] after = data[1] - before = markings.add_markings(before, [MARKING_IDS[0]], ["description", "name"]) + before = markings.add_markings(before, data[2], ["description", "name"]) for m in before["granular_markings"]: assert m in after["granular_markings"] @@ -347,36 +361,42 @@ def test_get_markings_positional_arguments_combinations(data): assert set(markings.get_markings(data, "x.z.foo2", False, True)) == set(["10"]) [email protected]("before", [ - Malware( - granular_markings=[ - { - "selectors": ["description"], - "marking_ref": MARKING_IDS[0] - }, - { - "selectors": ["description"], - "marking_ref": MARKING_IDS[1] - }, - ], - **MALWARE_KWARGS [email protected]("data", [ + ( + Malware( + granular_markings=[ + { + "selectors": ["description"], + "marking_ref": MARKING_IDS[0] + }, + { + "selectors": ["description"], + "marking_ref": MARKING_IDS[1] + }, + ], + **MALWARE_KWARGS + ), + [MARKING_IDS[0], MARKING_IDS[1]], ), - dict( - granular_markings=[ - { - "selectors": ["description"], - "marking_ref": MARKING_IDS[0] - }, - { - "selectors": 
["description"], - "marking_ref": MARKING_IDS[1] - }, - ], - **MALWARE_KWARGS + ( + dict( + granular_markings=[ + { + "selectors": ["description"], + "marking_ref": MARKING_IDS[0] + }, + { + "selectors": ["description"], + "marking_ref": MARKING_IDS[1] + }, + ], + **MALWARE_KWARGS + ), + [MARKING_IDS[0], MARKING_IDS[1]], ), ]) -def test_remove_marking_remove_one_selector_with_multiple_refs(before): - before = markings.remove_markings(before, [MARKING_IDS[0], MARKING_IDS[1]], ["description"]) +def test_remove_marking_remove_one_selector_with_multiple_refs(data): + before = markings.remove_markings(data[0], data[1], ["description"]) assert "granular_markings" not in before diff --git a/stix2/test/test_markings.py b/stix2/test/test_markings.py index 0c6069a..456bf92 100644 --- a/stix2/test/test_markings.py +++ b/stix2/test/test_markings.py @@ -241,4 +241,14 @@ def test_marking_wrong_type_construction(): assert str(excinfo.value) == "Must supply a list, containing tuples. For example, [('property1', IntegerProperty())]" -# TODO: Add other examples +def test_campaign_add_markings(): + campaign = stix2.Campaign( + id="campaign--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f", + created_by_ref="identity--f431f809-377b-45e0-aa1c-6a4751cae5ff", + created="2016-04-06T20:03:00Z", + modified="2016-04-06T20:03:00Z", + name="Green Group Attacks Against Finance", + description="Campaign by Green Group against a series of targets in the financial services sector.", + ) + campaign = campaign.add_markings(TLP_WHITE) + assert campaign.object_marking_refs[0] == TLP_WHITE.id diff --git a/stix2/test/test_object_markings.py b/stix2/test/test_object_markings.py index 36e8e4d..10949ab 100644 --- a/stix2/test/test_object_markings.py +++ b/stix2/test/test_object_markings.py @@ -1,7 +1,7 @@ import pytest -from stix2 import Malware, exceptions, markings +from stix2 import TLP_AMBER, Malware, exceptions, markings from .constants import FAKE_TIME, MALWARE_ID, MARKING_IDS from .constants import 
MALWARE_KWARGS as MALWARE_KWARGS_CONST @@ -21,18 +21,26 @@ MALWARE_KWARGS.update({ Malware(**MALWARE_KWARGS), Malware(object_marking_refs=[MARKING_IDS[0]], **MALWARE_KWARGS), + MARKING_IDS[0], ), ( MALWARE_KWARGS, dict(object_marking_refs=[MARKING_IDS[0]], **MALWARE_KWARGS), + MARKING_IDS[0], + ), + ( + Malware(**MALWARE_KWARGS), + Malware(object_marking_refs=[TLP_AMBER.id], + **MALWARE_KWARGS), + TLP_AMBER, ), ]) def test_add_markings_one_marking(data): before = data[0] after = data[1] - before = markings.add_markings(before, MARKING_IDS[0], None) + before = markings.add_markings(before, data[2], None) for m in before["object_marking_refs"]: assert m in after["object_marking_refs"] @@ -280,19 +288,28 @@ def test_remove_markings_object_level(data): **MALWARE_KWARGS), Malware(object_marking_refs=[MARKING_IDS[1]], **MALWARE_KWARGS), + [MARKING_IDS[0], MARKING_IDS[2]], ), ( dict(object_marking_refs=[MARKING_IDS[0], MARKING_IDS[1], MARKING_IDS[2]], **MALWARE_KWARGS), dict(object_marking_refs=[MARKING_IDS[1]], **MALWARE_KWARGS), + [MARKING_IDS[0], MARKING_IDS[2]], + ), + ( + Malware(object_marking_refs=[MARKING_IDS[0], MARKING_IDS[1], TLP_AMBER.id], + **MALWARE_KWARGS), + Malware(object_marking_refs=[MARKING_IDS[1]], + **MALWARE_KWARGS), + [MARKING_IDS[0], TLP_AMBER], ), ]) def test_remove_markings_multiple(data): before = data[0] after = data[1] - before = markings.remove_markings(before, [MARKING_IDS[0], MARKING_IDS[2]], None) + before = markings.remove_markings(before, data[2], None) assert before['object_marking_refs'] == after['object_marking_refs']
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 8 }
0.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "flake8", "pycodestyle", "isort" ], "pre_install": null, "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 antlr4-python3-runtime==4.9.3 appdirs==1.4.4 attrs==21.4.0 Babel==2.11.0 bump2version==1.0.1 bumpversion==0.6.0 certifi==2021.5.30 cfgv==3.3.1 charset-normalizer==2.0.12 colorama==0.4.5 coverage==6.2 cpe==1.3.1 distlib==0.3.9 docutils==0.18.1 filelock==3.4.1 flake8==5.0.4 identify==2.4.4 idna==3.10 imagesize==1.4.1 importlib-metadata==4.2.0 importlib-resources==5.2.3 iniconfig==1.1.1 isort==5.10.1 itsdangerous==2.0.1 Jinja2==3.0.3 jsonpointer==2.3 jsonschema==3.2.0 MarkupSafe==2.0.1 mccabe==0.7.0 nodeenv==1.6.0 packaging==21.3 platformdirs==2.4.0 pluggy==1.0.0 pre-commit==2.17.0 py==1.11.0 pycodestyle==2.9.1 pyflakes==2.5.0 Pygments==2.14.0 pyparsing==3.1.4 pyrsistent==0.18.0 pytest==7.0.1 pytest-cov==4.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==6.0.1 requests==2.27.1 requests-cache==0.7.5 rfc3339-validator==0.1.4 rfc3986-validator==0.1.1 simplejson==3.20.1 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinx-prompt==1.5.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 -e git+https://github.com/oasis-open/cti-python-stix2.git@e2151659d7d68d9c6878f050e52d5d1af6bb0fd6#egg=stix2 stix2-patterns==2.0.0 stix2-validator==3.0.2 taxii2-client==2.3.0 toml==0.10.2 tomli==1.2.3 tox==3.28.0 typing_extensions==4.1.1 url-normalize==1.4.3 urllib3==1.26.20 virtualenv==20.16.2 webcolors==1.11.1 zipp==3.6.0
name: cti-python-stix2 channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - antlr4-python3-runtime==4.9.3 - appdirs==1.4.4 - attrs==21.4.0 - babel==2.11.0 - bump2version==1.0.1 - bumpversion==0.6.0 - cfgv==3.3.1 - charset-normalizer==2.0.12 - colorama==0.4.5 - coverage==6.2 - cpe==1.3.1 - distlib==0.3.9 - docutils==0.18.1 - filelock==3.4.1 - flake8==5.0.4 - identify==2.4.4 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.2.0 - importlib-resources==5.2.3 - iniconfig==1.1.1 - isort==5.10.1 - itsdangerous==2.0.1 - jinja2==3.0.3 - jsonpointer==2.3 - jsonschema==3.2.0 - markupsafe==2.0.1 - mccabe==0.7.0 - nodeenv==1.6.0 - packaging==21.3 - platformdirs==2.4.0 - pluggy==1.0.0 - pre-commit==2.17.0 - py==1.11.0 - pycodestyle==2.9.1 - pyflakes==2.5.0 - pygments==2.14.0 - pyparsing==3.1.4 - pyrsistent==0.18.0 - pytest==7.0.1 - pytest-cov==4.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==6.0.1 - requests==2.27.1 - requests-cache==0.7.5 - rfc3339-validator==0.1.4 - rfc3986-validator==0.1.1 - simplejson==3.20.1 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinx-prompt==1.5.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - stix2-patterns==2.0.0 - 
stix2-validator==3.0.2 - taxii2-client==2.3.0 - toml==0.10.2 - tomli==1.2.3 - tox==3.28.0 - typing-extensions==4.1.1 - url-normalize==1.4.3 - urllib3==1.26.20 - virtualenv==20.16.2 - webcolors==1.11.1 - zipp==3.6.0 prefix: /opt/conda/envs/cti-python-stix2
[ "stix2/test/test_granular_markings.py::test_add_marking_mark_multiple_selector_one_refs[data2]", "stix2/test/test_markings.py::test_campaign_add_markings", "stix2/test/test_object_markings.py::test_add_markings_one_marking[data2]", "stix2/test/test_object_markings.py::test_remove_markings_multiple[data2]" ]
[]
[ "stix2/test/test_granular_markings.py::test_add_marking_mark_one_selector_multiple_refs", "stix2/test/test_granular_markings.py::test_add_marking_mark_multiple_selector_one_refs[data0]", "stix2/test/test_granular_markings.py::test_add_marking_mark_multiple_selector_one_refs[data1]", "stix2/test/test_granular_markings.py::test_add_marking_mark_multiple_selector_multiple_refs", "stix2/test/test_granular_markings.py::test_add_marking_mark_another_property_same_marking", "stix2/test/test_granular_markings.py::test_add_marking_mark_same_property_same_marking", "stix2/test/test_granular_markings.py::test_add_marking_bad_selector[data0-marking0]", "stix2/test/test_granular_markings.py::test_get_markings_smoke[data0]", "stix2/test/test_granular_markings.py::test_get_markings_not_marked[data0]", "stix2/test/test_granular_markings.py::test_get_markings_not_marked[data1]", "stix2/test/test_granular_markings.py::test_get_markings_multiple_selectors[data0]", "stix2/test/test_granular_markings.py::test_get_markings_bad_selector[data0-foo]", "stix2/test/test_granular_markings.py::test_get_markings_bad_selector[data1-]", "stix2/test/test_granular_markings.py::test_get_markings_bad_selector[data2-selector2]", "stix2/test/test_granular_markings.py::test_get_markings_bad_selector[data3-selector3]", "stix2/test/test_granular_markings.py::test_get_markings_bad_selector[data4-x.z.[-2]]", "stix2/test/test_granular_markings.py::test_get_markings_bad_selector[data5-c.f]", "stix2/test/test_granular_markings.py::test_get_markings_bad_selector[data6-c.[2].i]", "stix2/test/test_granular_markings.py::test_get_markings_bad_selector[data7-c.[3]]", "stix2/test/test_granular_markings.py::test_get_markings_bad_selector[data8-d]", "stix2/test/test_granular_markings.py::test_get_markings_bad_selector[data9-x.[0]]", "stix2/test/test_granular_markings.py::test_get_markings_bad_selector[data10-z.y.w]", "stix2/test/test_granular_markings.py::test_get_markings_bad_selector[data11-x.z.[1]]", 
"stix2/test/test_granular_markings.py::test_get_markings_bad_selector[data12-x.z.foo3]", "stix2/test/test_granular_markings.py::test_get_markings_positional_arguments_combinations[data0]", "stix2/test/test_granular_markings.py::test_remove_marking_remove_one_selector_with_multiple_refs[data0]", "stix2/test/test_granular_markings.py::test_remove_marking_remove_one_selector_with_multiple_refs[data1]", "stix2/test/test_granular_markings.py::test_remove_marking_remove_multiple_selector_one_ref", "stix2/test/test_granular_markings.py::test_remove_marking_mark_one_selector_from_multiple_ones", "stix2/test/test_granular_markings.py::test_remove_marking_mark_one_selector_markings_from_multiple_ones", "stix2/test/test_granular_markings.py::test_remove_marking_mark_mutilple_selector_multiple_refs", "stix2/test/test_granular_markings.py::test_remove_marking_mark_another_property_same_marking", "stix2/test/test_granular_markings.py::test_remove_marking_mark_same_property_same_marking", "stix2/test/test_granular_markings.py::test_remove_no_markings", "stix2/test/test_granular_markings.py::test_remove_marking_bad_selector", "stix2/test/test_granular_markings.py::test_is_marked_smoke[data0]", "stix2/test/test_granular_markings.py::test_is_marked_smoke[data1]", "stix2/test/test_granular_markings.py::test_is_marked_invalid_selector[data0-foo]", "stix2/test/test_granular_markings.py::test_is_marked_invalid_selector[data1-]", "stix2/test/test_granular_markings.py::test_is_marked_invalid_selector[data2-selector2]", "stix2/test/test_granular_markings.py::test_is_marked_invalid_selector[data3-selector3]", "stix2/test/test_granular_markings.py::test_is_marked_invalid_selector[data4-x.z.[-2]]", "stix2/test/test_granular_markings.py::test_is_marked_invalid_selector[data5-c.f]", "stix2/test/test_granular_markings.py::test_is_marked_invalid_selector[data6-c.[2].i]", "stix2/test/test_granular_markings.py::test_is_marked_invalid_selector[data7-c.[3]]", 
"stix2/test/test_granular_markings.py::test_is_marked_invalid_selector[data8-d]", "stix2/test/test_granular_markings.py::test_is_marked_invalid_selector[data9-x.[0]]", "stix2/test/test_granular_markings.py::test_is_marked_invalid_selector[data10-z.y.w]", "stix2/test/test_granular_markings.py::test_is_marked_invalid_selector[data11-x.z.[1]]", "stix2/test/test_granular_markings.py::test_is_marked_invalid_selector[data12-x.z.foo3]", "stix2/test/test_granular_markings.py::test_is_marked_mix_selector[data0]", "stix2/test/test_granular_markings.py::test_is_marked_mix_selector[data1]", "stix2/test/test_granular_markings.py::test_is_marked_valid_selector_no_refs[data0]", "stix2/test/test_granular_markings.py::test_is_marked_valid_selector_no_refs[data1]", "stix2/test/test_granular_markings.py::test_is_marked_valid_selector_and_refs[data0]", "stix2/test/test_granular_markings.py::test_is_marked_valid_selector_and_refs[data1]", "stix2/test/test_granular_markings.py::test_is_marked_valid_selector_multiple_refs[data0]", "stix2/test/test_granular_markings.py::test_is_marked_valid_selector_multiple_refs[data1]", "stix2/test/test_granular_markings.py::test_is_marked_no_marking_refs[data0]", "stix2/test/test_granular_markings.py::test_is_marked_no_marking_refs[data1]", "stix2/test/test_granular_markings.py::test_is_marked_no_selectors[data0]", "stix2/test/test_granular_markings.py::test_is_marked_no_selectors[data1]", "stix2/test/test_granular_markings.py::test_is_marked_positional_arguments_combinations", "stix2/test/test_granular_markings.py::test_create_sdo_with_invalid_marking", "stix2/test/test_granular_markings.py::test_set_marking_mark_one_selector_multiple_refs", "stix2/test/test_granular_markings.py::test_set_marking_mark_multiple_selector_one_refs", "stix2/test/test_granular_markings.py::test_set_marking_mark_multiple_selector_multiple_refs_from_none", "stix2/test/test_granular_markings.py::test_set_marking_mark_another_property_same_marking", 
"stix2/test/test_granular_markings.py::test_set_marking_bad_selector[marking0]", "stix2/test/test_granular_markings.py::test_set_marking_bad_selector[marking1]", "stix2/test/test_granular_markings.py::test_set_marking_bad_selector[marking2]", "stix2/test/test_granular_markings.py::test_set_marking_bad_selector[marking3]", "stix2/test/test_granular_markings.py::test_set_marking_mark_same_property_same_marking", "stix2/test/test_granular_markings.py::test_clear_marking_smoke[data0]", "stix2/test/test_granular_markings.py::test_clear_marking_smoke[data1]", "stix2/test/test_granular_markings.py::test_clear_marking_multiple_selectors[data0]", "stix2/test/test_granular_markings.py::test_clear_marking_multiple_selectors[data1]", "stix2/test/test_granular_markings.py::test_clear_marking_one_selector[data0]", "stix2/test/test_granular_markings.py::test_clear_marking_one_selector[data1]", "stix2/test/test_granular_markings.py::test_clear_marking_all_selectors[data0]", "stix2/test/test_granular_markings.py::test_clear_marking_all_selectors[data1]", "stix2/test/test_granular_markings.py::test_clear_marking_bad_selector[data0-foo]", "stix2/test/test_granular_markings.py::test_clear_marking_bad_selector[data1-]", "stix2/test/test_granular_markings.py::test_clear_marking_bad_selector[data2-selector2]", "stix2/test/test_granular_markings.py::test_clear_marking_bad_selector[data3-selector3]", "stix2/test/test_markings.py::test_marking_def_example_with_tlp", "stix2/test/test_markings.py::test_marking_def_example_with_statement_positional_argument", "stix2/test/test_markings.py::test_marking_def_example_with_kwargs_statement", "stix2/test/test_markings.py::test_marking_def_invalid_type", "stix2/test/test_markings.py::test_campaign_with_markings_example", "stix2/test/test_markings.py::test_granular_example", "stix2/test/test_markings.py::test_granular_example_with_bad_selector", "stix2/test/test_markings.py::test_campaign_with_granular_markings_example", 
"stix2/test/test_markings.py::test_parse_marking_definition[{\\n", "stix2/test/test_markings.py::test_parse_marking_definition[data1]", "stix2/test/test_markings.py::test_registered_custom_marking", "stix2/test/test_markings.py::test_not_registered_marking_raises_exception", "stix2/test/test_markings.py::test_marking_wrong_type_construction", "stix2/test/test_object_markings.py::test_add_markings_one_marking[data0]", "stix2/test/test_object_markings.py::test_add_markings_one_marking[data1]", "stix2/test/test_object_markings.py::test_add_markings_multiple_marking", "stix2/test/test_object_markings.py::test_add_markings_combination", "stix2/test/test_object_markings.py::test_add_markings_bad_markings[data0]", "stix2/test/test_object_markings.py::test_add_markings_bad_markings[]", "stix2/test/test_object_markings.py::test_add_markings_bad_markings[data2]", "stix2/test/test_object_markings.py::test_add_markings_bad_markings[data3]", "stix2/test/test_object_markings.py::test_get_markings_object_marking[data0]", "stix2/test/test_object_markings.py::test_get_markings_object_and_granular_combinations[data0]", "stix2/test/test_object_markings.py::test_remove_markings_object_level[data0]", "stix2/test/test_object_markings.py::test_remove_markings_object_level[data1]", "stix2/test/test_object_markings.py::test_remove_markings_multiple[data0]", "stix2/test/test_object_markings.py::test_remove_markings_multiple[data1]", "stix2/test/test_object_markings.py::test_remove_markings_bad_markings", "stix2/test/test_object_markings.py::test_clear_markings[data0]", "stix2/test/test_object_markings.py::test_clear_markings[data1]", "stix2/test/test_object_markings.py::test_is_marked_object_and_granular_combinations", "stix2/test/test_object_markings.py::test_is_marked_no_markings[data0]", "stix2/test/test_object_markings.py::test_is_marked_no_markings[data1]", "stix2/test/test_object_markings.py::test_set_marking", "stix2/test/test_object_markings.py::test_set_marking_bad_input[data0]", 
"stix2/test/test_object_markings.py::test_set_marking_bad_input[data1]", "stix2/test/test_object_markings.py::test_set_marking_bad_input[]", "stix2/test/test_object_markings.py::test_set_marking_bad_input[data3]" ]
[]
BSD 3-Clause "New" or "Revised" License
1,728
[ "stix2/common.py", "stix2/sdo.py", "stix2/markings/__init__.py", "stix2/sro.py", "stix2/markings/granular_markings.py", "stix2/__init__.py", "stix2/markings/utils.py", "stix2/markings/object_markings.py" ]
[ "stix2/common.py", "stix2/sdo.py", "stix2/markings/__init__.py", "stix2/sro.py", "stix2/markings/granular_markings.py", "stix2/__init__.py", "stix2/markings/utils.py", "stix2/markings/object_markings.py" ]
OpenMined__PySyft-286
40261aab99e6857e0ed2e34c26a315f16d9500f7
2017-10-03 19:11:06
06ce023225dd613d8fb14ab2046135b93ab22376
diff --git a/syft/tensor.py b/syft/tensor.py index 9dc2bab3ba..c01a50dd83 100644 --- a/syft/tensor.py +++ b/syft/tensor.py @@ -109,6 +109,30 @@ class TensorBase(object): self.data = _ensure_ndarray(arr_like) self.encrypted = encrypted + def new(self, *args, **kwargs): + """Constructs a new tensor instance of the same data type. + + Parameters + ---------- + *args + Variable length argument list used to instantiate + new TensorBase object. + **kwargs + Arbitrary keyword arguments used to instantiate + new TensorBase object. + + Returns + ------- + TensorBase class instance if parent TensorBase + has self.encrypted = False, otherwise return NotImplemented + error. + + """ + if self.encrypted: + return NotImplemented + + return self.__class__(*args, **kwargs) + def _calc_mul_depth(self, tensor1, tensor2): if isinstance(tensor1, TensorBase) and isinstance(tensor2, TensorBase): self._mul_depth = max(tensor1._mul_depth, tensor2._mul_depth) + 1
Implement Default new Functionality for Base Tensor Type **User Story A:** As a Data Scientist using Syft's Base Tensor type, I want to leverage a default method for computing operations on a Tensor of arbitrary type. For this ticket to be complete, new() should return a new tensor. For a reference on the operation this performs check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation. **Acceptance Criteria:** - If the Base Tensor type's attribute "encrypted" is set to True, it should return a NotImplemented error. - a unit test demonstrating the correct operation on the Base Tensor type implemented over int and float Tensors. - inline documentation in the python code. For inspiration on inline documentation, please check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation for this operator.
OpenMined/PySyft
diff --git a/tests/test_tensor.py b/tests/test_tensor.py index d063417e29..32614ce752 100644 --- a/tests/test_tensor.py +++ b/tests/test_tensor.py @@ -1119,6 +1119,29 @@ class mm_test(unittest.TestCase): self.assertTrue(np.alltrue(out.data == [[5, 8, 11], [8, 13, 18], [11, 18, 25]])) +class newTensorTests(unittest.TestCase): + def test_encrypted_error(self): + + t1 = TensorBase(np.array([1, 1, 1]), encrypted=True) + t2 = t1.new([1, 1, 2], encrypted=True) + + self.assertEqual(t2, NotImplemented) + + def test_return_new_float_tensor(self): + + t1 = TensorBase(np.array([1, 1, 1])) + t2 = t1.new(np.array([1., 1., 2.])) + + self.assertTrue(t2.data.dtype == np.float64) + + def test_return_new_int_tensor(self): + + t1 = TensorBase(np.array([1, 1, 1])) + t2 = t1.new(np.array([1, 1, 2])) + + self.assertTrue(t2.data.dtype == np.int64) + + class half(unittest.TestCase): def half_test_1(self): t1 = TensorBase(np.array([2, 3, 4]))
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 1 }
PySyft/hydrogen
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-flake8" ], "pre_install": [ "apt-get update", "apt-get install -y gcc musl-dev g++ libgmp3-dev libmpfr-dev ca-certificates libmpc-dev" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
args==0.1.0 attrs==22.2.0 certifi==2021.5.30 clint==0.5.1 flake8==5.0.4 importlib-metadata==4.2.0 iniconfig==1.1.1 joblib==1.1.1 line-profiler==4.1.3 mccabe==0.7.0 numpy==1.19.5 packaging==21.3 phe==1.5.0 pluggy==1.0.0 py==1.11.0 pycodestyle==2.9.1 pyflakes==2.5.0 pyparsing==3.1.4 pyRserve==1.0.4 pytest==7.0.1 pytest-flake8==1.1.1 scikit-learn==0.24.2 scipy==1.5.4 sklearn==0.0 -e git+https://github.com/OpenMined/PySyft.git@40261aab99e6857e0ed2e34c26a315f16d9500f7#egg=syft threadpoolctl==3.1.0 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: PySyft channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - args==0.1.0 - attrs==22.2.0 - clint==0.5.1 - flake8==5.0.4 - importlib-metadata==4.2.0 - iniconfig==1.1.1 - joblib==1.1.1 - line-profiler==4.1.3 - mccabe==0.7.0 - numpy==1.19.5 - packaging==21.3 - phe==1.5.0 - pluggy==1.0.0 - py==1.11.0 - pycodestyle==2.9.1 - pyflakes==2.5.0 - pyparsing==3.1.4 - pyrserve==1.0.4 - pytest==7.0.1 - pytest-flake8==1.1.1 - scikit-learn==0.24.2 - scipy==1.5.4 - sklearn==0.0 - threadpoolctl==3.1.0 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/PySyft
[ "tests/test_tensor.py::newTensorTests::test_encrypted_error", "tests/test_tensor.py::newTensorTests::test_return_new_float_tensor", "tests/test_tensor.py::newTensorTests::test_return_new_int_tensor" ]
[]
[ "tests/test_tensor.py::DimTests::test_as_view", "tests/test_tensor.py::DimTests::test_dim_one", "tests/test_tensor.py::DimTests::test_nelement", "tests/test_tensor.py::DimTests::test_resize", "tests/test_tensor.py::DimTests::test_resize_as", "tests/test_tensor.py::DimTests::test_size", "tests/test_tensor.py::DimTests::test_view", "tests/test_tensor.py::AddTests::test_inplace", "tests/test_tensor.py::AddTests::test_scalar", "tests/test_tensor.py::AddTests::test_simple", "tests/test_tensor.py::CeilTests::test_ceil", "tests/test_tensor.py::CeilTests::test_ceil_", "tests/test_tensor.py::ZeroTests::test_zero", "tests/test_tensor.py::FloorTests::test_floor_", "tests/test_tensor.py::SubTests::test_inplace", "tests/test_tensor.py::SubTests::test_scalar", "tests/test_tensor.py::SubTests::test_simple", "tests/test_tensor.py::MaxTests::test_axis", "tests/test_tensor.py::MaxTests::test_no_dim", "tests/test_tensor.py::MultTests::test_inplace", "tests/test_tensor.py::MultTests::test_scalar", "tests/test_tensor.py::MultTests::test_simple", "tests/test_tensor.py::DivTests::test_inplace", "tests/test_tensor.py::DivTests::test_scalar", "tests/test_tensor.py::DivTests::test_simple", "tests/test_tensor.py::AbsTests::test_abs", "tests/test_tensor.py::AbsTests::test_abs_", "tests/test_tensor.py::ShapeTests::test_shape", "tests/test_tensor.py::SqrtTests::test_sqrt", "tests/test_tensor.py::SqrtTests::test_sqrt_", "tests/test_tensor.py::SumTests::test_dim_is_not_none_int", "tests/test_tensor.py::SumTests::test_dim_none_int", "tests/test_tensor.py::EqualTests::test_equal", "tests/test_tensor.py::EqualTests::test_equal_operation", "tests/test_tensor.py::EqualTests::test_inequality_operation", "tests/test_tensor.py::EqualTests::test_not_equal", "tests/test_tensor.py::IndexTests::test_indexing", "tests/test_tensor.py::sigmoidTests::test_sigmoid", "tests/test_tensor.py::addmm::test_addmm_1d", "tests/test_tensor.py::addmm::test_addmm_2d", "tests/test_tensor.py::addmm::test_addmm__1d", 
"tests/test_tensor.py::addmm::test_addmm__2d", "tests/test_tensor.py::addcmulTests::test_addcmul_1d", "tests/test_tensor.py::addcmulTests::test_addcmul_2d", "tests/test_tensor.py::addcmulTests::test_addcmul__1d", "tests/test_tensor.py::addcmulTests::test_addcmul__2d", "tests/test_tensor.py::addcdivTests::test_addcdiv_1d", "tests/test_tensor.py::addcdivTests::test_addcdiv_2d", "tests/test_tensor.py::addcdivTests::test_addcdiv__1d", "tests/test_tensor.py::addcdivTests::test_addcdiv__2d", "tests/test_tensor.py::addmvTests::test_addmv", "tests/test_tensor.py::addmvTests::test_addmv_", "tests/test_tensor.py::bmmTests::test_bmm", "tests/test_tensor.py::bmmTests::test_bmm_size", "tests/test_tensor.py::addbmmTests::test_addbmm", "tests/test_tensor.py::addbmmTests::test_addbmm_", "tests/test_tensor.py::baddbmmTests::test_baddbmm", "tests/test_tensor.py::baddbmmTests::test_baddbmm_", "tests/test_tensor.py::transposeTests::test_t", "tests/test_tensor.py::transposeTests::test_transpose", "tests/test_tensor.py::transposeTests::test_transpose_", "tests/test_tensor.py::unsqueezeTests::test_unsqueeze", "tests/test_tensor.py::unsqueezeTests::test_unsqueeze_", "tests/test_tensor.py::expTests::test_exp", "tests/test_tensor.py::expTests::test_exp_", "tests/test_tensor.py::fracTests::test_frac", "tests/test_tensor.py::fracTests::test_frac_", "tests/test_tensor.py::rsqrtTests::test_rsqrt", "tests/test_tensor.py::rsqrtTests::test_rsqrt_", "tests/test_tensor.py::signTests::test_sign", "tests/test_tensor.py::signTests::test_sign_", "tests/test_tensor.py::numpyTests::test_numpy", "tests/test_tensor.py::reciprocalTests::test_reciprocal", "tests/test_tensor.py::reciprocalTests::test_reciprocal_", "tests/test_tensor.py::logTests::test_log", "tests/test_tensor.py::logTests::test_log_", "tests/test_tensor.py::logTests::test_log_1p", "tests/test_tensor.py::logTests::test_log_1p_", "tests/test_tensor.py::clampTests::test_clamp_float", "tests/test_tensor.py::clampTests::test_clamp_float_in_place", 
"tests/test_tensor.py::clampTests::test_clamp_int", "tests/test_tensor.py::clampTests::test_clamp_int_in_place", "tests/test_tensor.py::cloneTests::test_clone", "tests/test_tensor.py::chunkTests::test_chunk", "tests/test_tensor.py::chunkTests::test_chunk_same_size", "tests/test_tensor.py::gtTests::test_gt__in_place_with_number", "tests/test_tensor.py::gtTests::test_gt__in_place_with_tensor", "tests/test_tensor.py::gtTests::test_gt_with_encrypted", "tests/test_tensor.py::gtTests::test_gt_with_number", "tests/test_tensor.py::gtTests::test_gt_with_tensor", "tests/test_tensor.py::geTests::test_ge__in_place_with_number", "tests/test_tensor.py::geTests::test_ge__in_place_with_tensor", "tests/test_tensor.py::geTests::test_ge_with_encrypted", "tests/test_tensor.py::geTests::test_ge_with_number", "tests/test_tensor.py::geTests::test_ge_with_tensor", "tests/test_tensor.py::ltTests::test_lt__in_place_with_number", "tests/test_tensor.py::ltTests::test_lt__in_place_with_tensor", "tests/test_tensor.py::ltTests::test_lt_with_encrypted", "tests/test_tensor.py::ltTests::test_lt_with_number", "tests/test_tensor.py::ltTests::test_lt_with_tensor", "tests/test_tensor.py::leTests::test_le__in_place_with_number", "tests/test_tensor.py::leTests::test_le__in_place_with_tensor", "tests/test_tensor.py::leTests::test_le_with_encrypted", "tests/test_tensor.py::leTests::test_le_with_number", "tests/test_tensor.py::leTests::test_le_with_tensor", "tests/test_tensor.py::bernoulliTests::test_bernoulli", "tests/test_tensor.py::bernoulliTests::test_bernoulli_", "tests/test_tensor.py::cauchyTests::test_cauchy_", "tests/test_tensor.py::uniformTests::test_uniform", "tests/test_tensor.py::uniformTests::test_uniform_", "tests/test_tensor.py::geometricTests::test_geometric_", "tests/test_tensor.py::fillTests::test_fill_", "tests/test_tensor.py::topkTests::test_topK", "tests/test_tensor.py::tolistTests::test_to_list", "tests/test_tensor.py::traceTests::test_trace", 
"tests/test_tensor.py::roundTests::test_round", "tests/test_tensor.py::roundTests::test_round_", "tests/test_tensor.py::repeatTests::test_repeat", "tests/test_tensor.py::powTests::test_pow", "tests/test_tensor.py::powTests::test_pow_", "tests/test_tensor.py::prodTests::test_prod", "tests/test_tensor.py::randomTests::test_random_", "tests/test_tensor.py::nonzeroTests::test_non_zero", "tests/test_tensor.py::cumprodTest::test_cumprod", "tests/test_tensor.py::cumprodTest::test_cumprod_", "tests/test_tensor.py::splitTests::test_split", "tests/test_tensor.py::squeezeTests::test_squeeze", "tests/test_tensor.py::expandAsTests::test_expand_as", "tests/test_tensor.py::meanTests::test_mean", "tests/test_tensor.py::notEqualTests::test_ne", "tests/test_tensor.py::notEqualTests::test_ne_", "tests/test_tensor.py::index_selectTests::test_index_select", "tests/test_tensor.py::gatherTests::test_gather_numerical_1", "tests/test_tensor.py::gatherTests::test_gather_numerical_2", "tests/test_tensor.py::scatterTests::test_scatter_dim_out_Of_range", "tests/test_tensor.py::scatterTests::test_scatter_index_out_of_range", "tests/test_tensor.py::scatterTests::test_scatter_index_src_dimension_mismatch", "tests/test_tensor.py::scatterTests::test_scatter_index_type", "tests/test_tensor.py::scatterTests::test_scatter_numerical_0", "tests/test_tensor.py::scatterTests::test_scatter_numerical_1", "tests/test_tensor.py::scatterTests::test_scatter_numerical_2", "tests/test_tensor.py::scatterTests::test_scatter_numerical_3", "tests/test_tensor.py::scatterTests::test_scatter_numerical_4", "tests/test_tensor.py::scatterTests::test_scatter_numerical_5", "tests/test_tensor.py::scatterTests::test_scatter_numerical_6", "tests/test_tensor.py::remainderTests::test_remainder_", "tests/test_tensor.py::remainderTests::test_remainder_broadcasting", "tests/test_tensor.py::testMv::test_mv", "tests/test_tensor.py::testMv::test_mv_tensor", "tests/test_tensor.py::masked_scatter_Tests::test_masked_scatter_1", 
"tests/test_tensor.py::masked_scatter_Tests::test_masked_scatter_braodcasting_1", "tests/test_tensor.py::masked_scatter_Tests::test_masked_scatter_braodcasting_2", "tests/test_tensor.py::masked_fill_Tests::test_masked_fill_", "tests/test_tensor.py::masked_fill_Tests::test_masked_fill_broadcasting", "tests/test_tensor.py::masked_select_Tests::test_masked_select", "tests/test_tensor.py::masked_select_Tests::test_masked_select_broadcasting_1", "tests/test_tensor.py::masked_select_Tests::test_masked_select_broadcasting_2", "tests/test_tensor.py::masked_select_Tests::test_tensor_base_masked_select", "tests/test_tensor.py::eqTests::test_eq_in_place_with_number", "tests/test_tensor.py::eqTests::test_eq_in_place_with_tensor", "tests/test_tensor.py::eqTests::test_eq_with_number", "tests/test_tensor.py::eqTests::test_eq_with_tensor", "tests/test_tensor.py::mm_test::test_mm_1d", "tests/test_tensor.py::mm_test::test_mm_2d", "tests/test_tensor.py::mm_test::test_mm_3d", "tests/test_tensor.py::fmodTest::test_fmod_number", "tests/test_tensor.py::fmodTest::test_fmod_tensor", "tests/test_tensor.py::fmod_Test::test_fmod_number", "tests/test_tensor.py::fmod_Test::test_fmod_tensor" ]
[]
Apache License 2.0
1,729
[ "syft/tensor.py" ]
[ "syft/tensor.py" ]
oasis-open__cti-taxii-client-11
405bbbaa58d86371adc401ee4fe8830f429fb6b2
2017-10-04 14:31:24
7bfafa96153442b8dac5fe643e256127ec6304c5
diff --git a/taxii2client/__init__.py b/taxii2client/__init__.py index bee06a1..0efc770 100644 --- a/taxii2client/__init__.py +++ b/taxii2client/__init__.py @@ -478,7 +478,7 @@ class _HTTPConnection(object): resp.raise_for_status() content_type = resp.headers['Content-Type'] - if content_type != accept: + if not content_type.startswith(accept): msg = "Unexpected Response Content-Type: {}" raise TAXIIServiceException(msg.format(content_type))
Accept more flexible content type strings When checking the content type of packets the client receives, it checks if it is an exact match (https://github.com/oasis-open/cti-taxii-client/blob/master/taxii2client/__init__.py#L481). This fails if for example "; charset=utf-8" is appended to the content type.
oasis-open/cti-taxii-client
diff --git a/taxii2client/test/test_client.py b/taxii2client/test/test_client.py index 597ebb3..3a02747 100644 --- a/taxii2client/test/test_client.py +++ b/taxii2client/test/test_client.py @@ -3,7 +3,7 @@ import responses from taxii2client import ( MEDIA_TYPE_STIX_V20, MEDIA_TYPE_TAXII_V20, AccessError, ApiRoot, - Collection, Server + Collection, Server, TAXIIServiceException ) TAXII_SERVER = 'example.com' @@ -394,3 +394,23 @@ def test_get_status(api_root): assert len(status.failures) == 1 assert status.pending_count == 2 assert len(status.pendings) == 2 + + [email protected] +def test_content_type_valid(collection): + responses.add(responses.GET, GET_OBJECT_URL, GET_OBJECT_RESPONSE, + status=200, content_type="%s; charset=utf-8" % MEDIA_TYPE_STIX_V20) + + response = collection.get_object('indicator--252c7c11-daf2-42bd-843b-be65edca9f61') + indicator = response['objects'][0] + assert indicator['id'] == 'indicator--252c7c11-daf2-42bd-843b-be65edca9f61' + + [email protected] +def test_content_type_invalid(collection): + responses.add(responses.GET, GET_OBJECT_URL, GET_OBJECT_RESPONSE, + status=200, content_type="taxii") + + with pytest.raises(TAXIIServiceException) as excinfo: + collection.get_object('indicator--252c7c11-daf2-42bd-843b-be65edca9f61') + assert "Unexpected Response Content-Type" in str(excinfo.value)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 1 }
0.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[test]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "responses", "tox" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 charset-normalizer==2.0.12 coverage==6.2 distlib==0.3.9 filelock==3.4.1 idna==3.10 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 packaging==21.3 platformdirs==2.4.0 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 requests==2.27.1 responses==0.17.0 six==1.17.0 -e git+https://github.com/oasis-open/cti-taxii-client.git@405bbbaa58d86371adc401ee4fe8830f429fb6b2#egg=taxii2_client toml==0.10.2 tomli==1.2.3 tox==3.28.0 typing_extensions==4.1.1 urllib3==1.26.20 virtualenv==20.17.1 zipp==3.6.0
name: cti-taxii-client channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - charset-normalizer==2.0.12 - coverage==6.2 - distlib==0.3.9 - filelock==3.4.1 - idna==3.10 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - packaging==21.3 - platformdirs==2.4.0 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - requests==2.27.1 - responses==0.17.0 - six==1.17.0 - toml==0.10.2 - tomli==1.2.3 - tox==3.28.0 - typing-extensions==4.1.1 - urllib3==1.26.20 - virtualenv==20.17.1 - zipp==3.6.0 prefix: /opt/conda/envs/cti-taxii-client
[ "taxii2client/test/test_client.py::test_content_type_valid" ]
[]
[ "taxii2client/test/test_client.py::test_server_discovery", "taxii2client/test/test_client.py::test_minimal_discovery_response", "taxii2client/test/test_client.py::test_discovery_with_no_default", "taxii2client/test/test_client.py::test_api_root", "taxii2client/test/test_client.py::test_api_root_collections", "taxii2client/test/test_client.py::test_collection", "taxii2client/test/test_client.py::test_collection_unexpected_kwarg", "taxii2client/test/test_client.py::test_get_collection_objects", "taxii2client/test/test_client.py::test_get_object", "taxii2client/test/test_client.py::test_cannot_write_to_readonly_collection", "taxii2client/test/test_client.py::test_add_object_to_collection", "taxii2client/test/test_client.py::test_cannot_read_from_writeonly_collection", "taxii2client/test/test_client.py::test_get_manifest", "taxii2client/test/test_client.py::test_get_status", "taxii2client/test/test_client.py::test_content_type_invalid" ]
[]
BSD 3-Clause "New" or "Revised" License
1,730
[ "taxii2client/__init__.py" ]
[ "taxii2client/__init__.py" ]
ucfopen__canvasapi-92
7b7ec3c76028bc0ae186cb870deaadaf4da5e7a6
2017-10-04 17:18:31
f2faa1835e104aae764a1fc7638c284d2888639f
diff --git a/canvasapi/course.py b/canvasapi/course.py index 7cd4f40..d12d049 100644 --- a/canvasapi/course.py +++ b/canvasapi/course.py @@ -1,7 +1,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals from warnings import warn -from six import python_2_unicode_compatible +from six import python_2_unicode_compatible, text_type from canvasapi.canvas_object import CanvasObject from canvasapi.discussion_topic import DiscussionTopic @@ -899,15 +899,19 @@ class Course(CanvasObject): <https://canvas.instructure.com/doc/api/discussion_topics.html#method.discussion_topics.reorder>`_ :param order: The ids of the pinned discussion topics in the desired order. - e.g. [104, 102, 103] - :type order: list + e.g. [104, 102, 103], (104, 102, 103), or "104,102,103" + :type order: list, tuple, or string :rtype: :class:`canvasapi.paginated_list.PaginatedList` of :class:`canvasapi.discussion_topic.DiscussionTopic` """ + # Convert list or tuple to comma-separated string + if isinstance(order, (list, tuple)): + order = ",".join([text_type(topic_id) for topic_id in order]) - if not isinstance(order, list): - raise ValueError("Param order needs to be string or a list.") + # Check if is a string with commas + if not isinstance(order, text_type) or "," not in order: + raise ValueError("Param `order` must be a list, tuple, or string.") response = self._requester.request( 'POST', diff --git a/canvasapi/group.py b/canvasapi/group.py index c6336f0..7376731 100644 --- a/canvasapi/group.py +++ b/canvasapi/group.py @@ -1,6 +1,6 @@ from __future__ import absolute_import, division, print_function, unicode_literals -from six import python_2_unicode_compatible +from six import python_2_unicode_compatible, text_type from canvasapi.canvas_object import CanvasObject from canvasapi.discussion_topic import DiscussionTopic @@ -474,8 +474,13 @@ class Group(CanvasObject): :rtype: :class:`canvasapi.paginated_list.PaginatedList` of 
:class:`canvasapi.discussion_topic.DiscussionTopic` """ - if not isinstance(order, list): - raise ValueError("Param order needs to be string or a list.") + # Convert list or tuple to comma-separated string + if isinstance(order, (list, tuple)): + order = ",".join([text_type(topic_id) for topic_id in order]) + + # Check if is a string with commas + if not isinstance(order, text_type) or "," not in order: + raise ValueError("Param `order` must be a list, tuple, or string.") response = self._requester.request( 'POST',
Course.reorder_pinned_topics cannot be called correctly In `canvasapi` 0.6.0, the `Course.reorder_pinned_topics` method has two defects that prevent it from being called correctly: 1. When called with a list of discussion topic ids, this method performs a `POST` call with the topic ids given as repeated `order` parameters. For example, an order list of `[1, 2, 3]` turns into a `POST` request body of `order=1,order=2,order=3`. However, the [documentation for the corresponding Canvas API method](https://canvas.instructure.com/doc/api/discussion_topics.html#method.discussion_topics.reorder) indicates that the ordering should actually be given as a single comma-delimited parameter: `order=1,2,3`. 1. To work around the problem described above, one might consider turning the id list into a comma-delimited string before making the `Course.reorder_pinned_topics` call. For example, one might pass the `order` parameter to this call as the string `"1,2,3"` instead of the list `[1, 2, 3]`. However, the implementation of `Course.reorder_pinned_topics` uses `isinstance(order, list)` to check whether `order` is a list or not. If the argument is not a list, this method raises `ValueError("Param order needs to be string or a list.")`. Although the error message explains that either a string or a list would be accepted, the code in question really allows only a list. Taken together, these effectively prevent one from using `canvasapi` to reorder pinned discussions.
ucfopen/canvasapi
diff --git a/tests/fixtures/course.json b/tests/fixtures/course.json index 7ee017d..0226b68 100644 --- a/tests/fixtures/course.json +++ b/tests/fixtures/course.json @@ -1003,15 +1003,6 @@ }, "status_code": 200 }, - "reorder_pinned_topics_no_list": { - "method": "POST", - "endpoint": "courses/1/discussion_topics/reorder", - "data": { - "reorder": true, - "order": "1, 2, 3" - }, - "status_code": 200 - }, "get_course_level_participation_data": { "method": "GET", "endpoint": "courses/1/analytics/activity", diff --git a/tests/fixtures/group.json b/tests/fixtures/group.json index 6e542af..fbecbf8 100644 --- a/tests/fixtures/group.json +++ b/tests/fixtures/group.json @@ -679,16 +679,7 @@ }, "status_code": 200 }, - "reorder_pinned_topics_no_list": { - "method": "POST", - "endpoint": "groups/1/discussion_topics/reorder", - "data": { - "reorder": true, - "order": "1, 2, 3" - }, - "status_code": 200 - }, - "delete_external_feed": { + "delete_external_feed": { "method": "DELETE", "endpoint": "groups/1/external_feeds/1", "data": { diff --git a/tests/test_course.py b/tests/test_course.py index 367d304..ad15005 100644 --- a/tests/test_course.py +++ b/tests/test_course.py @@ -3,8 +3,10 @@ import unittest import uuid import warnings +import requests import requests_mock from six import text_type +from six.moves.urllib.parse import quote from canvasapi import Canvas from canvasapi.assignment import Assignment, AssignmentGroup @@ -491,18 +493,37 @@ class TestCourse(unittest.TestCase): # reorder_pinned_topics() def test_reorder_pinned_topics(self, m): - register_uris({'course': ['reorder_pinned_topics']}, m) + # Custom matcher to test that params are set correctly + def custom_matcher(request): + match_text = '1,2,3' + if request.text == 'order={}'.format(quote(match_text)): + resp = requests.Response() + resp._content = b'{"reorder": true, "order": [1, 2, 3]}' + resp.status_code = 200 + return resp + + m.add_matcher(custom_matcher) order = [1, 2, 3] + discussions = 
self.course.reorder_pinned_topics(order=order) + self.assertTrue(discussions) + def test_reorder_pinned_topics_tuple(self, m): + register_uris({'course': ['reorder_pinned_topics']}, m) + + order = (1, 2, 3) discussions = self.course.reorder_pinned_topics(order=order) self.assertTrue(discussions) - def test_reorder_pinned_topics_no_list(self, m): - register_uris({'course': ['reorder_pinned_topics_no_list']}, m) + def test_reorder_pinned_topics_comma_separated_string(self, m): + register_uris({'course': ['reorder_pinned_topics']}, m) - order = "1, 2, 3" + order = "1,2,3" + discussions = self.course.reorder_pinned_topics(order=order) + self.assertTrue(discussions) + def test_reorder_pinned_topics_invalid_input(self, m): + order = "invalid string" with self.assertRaises(ValueError): self.course.reorder_pinned_topics(order=order) diff --git a/tests/test_group.py b/tests/test_group.py index f32a529..0ab3687 100644 --- a/tests/test_group.py +++ b/tests/test_group.py @@ -2,7 +2,9 @@ from __future__ import absolute_import, division, print_function, unicode_litera import unittest import uuid +import requests import requests_mock +from six.moves.urllib.parse import quote from canvasapi import Canvas from canvasapi.group import Group, GroupMembership, GroupCategory @@ -249,18 +251,37 @@ class TestGroup(unittest.TestCase): # reorder_pinned_topics() def test_reorder_pinned_topics(self, m): - register_uris({'group': ['reorder_pinned_topics']}, m) + # Custom matcher to test that params are set correctly + def custom_matcher(request): + match_text = '1,2,3' + if request.text == 'order={}'.format(quote(match_text)): + resp = requests.Response() + resp._content = b'{"reorder": true, "order": [1, 2, 3]}' + resp.status_code = 200 + return resp + + m.add_matcher(custom_matcher) order = [1, 2, 3] + discussions = self.group.reorder_pinned_topics(order=order) + self.assertTrue(discussions) + def test_reorder_pinned_topics_tuple(self, m): + register_uris({'group': 
['reorder_pinned_topics']}, m) + + order = (1, 2, 3) discussions = self.group.reorder_pinned_topics(order=order) self.assertTrue(discussions) - def test_reorder_pinned_topics_no_list(self, m): - register_uris({'group': ['reorder_pinned_topics_no_list']}, m) + def test_reorder_pinned_topics_comma_separated_string(self, m): + register_uris({'group': ['reorder_pinned_topics']}, m) - order = "1, 2, 3" + order = "1,2,3" + discussions = self.group.reorder_pinned_topics(order=order) + self.assertTrue(discussions) + def test_reorder_pinned_topics_invalid_input(self, m): + order = "invalid string" with self.assertRaises(ValueError): self.group.reorder_pinned_topics(order=order)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 2 }
0.6
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements.txt", "dev_requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 attrs==22.2.0 Babel==2.11.0 -e git+https://github.com/ucfopen/canvasapi.git@7b7ec3c76028bc0ae186cb870deaadaf4da5e7a6#egg=canvasapi certifi==2021.5.30 charset-normalizer==2.0.12 coverage==6.2 docutils==0.17.1 flake8==5.0.4 idna==3.10 imagesize==1.4.1 importlib-metadata==4.2.0 iniconfig==1.1.1 Jinja2==3.0.3 MarkupSafe==2.0.1 mccabe==0.7.0 packaging==21.3 pluggy==1.0.0 py==1.11.0 pycodestyle==2.9.1 pyflakes==2.5.0 Pygments==2.14.0 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 pytz==2025.2 requests==2.27.1 requests-mock==1.12.1 six==1.17.0 snowballstemmer==2.2.0 Sphinx==4.3.2 sphinx-rtd-theme==1.3.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 zipp==3.6.0
name: canvasapi channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - attrs==22.2.0 - babel==2.11.0 - charset-normalizer==2.0.12 - coverage==6.2 - docutils==0.17.1 - flake8==5.0.4 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.2.0 - iniconfig==1.1.1 - jinja2==3.0.3 - markupsafe==2.0.1 - mccabe==0.7.0 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pycodestyle==2.9.1 - pyflakes==2.5.0 - pygments==2.14.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - pytz==2025.2 - requests==2.27.1 - requests-mock==1.12.1 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==4.3.2 - sphinx-rtd-theme==1.3.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jquery==4.1 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - zipp==3.6.0 prefix: /opt/conda/envs/canvasapi
[ "tests/test_course.py::TestCourse::test_reorder_pinned_topics", "tests/test_course.py::TestCourse::test_reorder_pinned_topics_comma_separated_string", "tests/test_course.py::TestCourse::test_reorder_pinned_topics_tuple", "tests/test_group.py::TestGroup::test_reorder_pinned_topics", "tests/test_group.py::TestGroup::test_reorder_pinned_topics_comma_separated_string", "tests/test_group.py::TestGroup::test_reorder_pinned_topics_tuple" ]
[]
[ "tests/test_course.py::TestCourse::test__str__", "tests/test_course.py::TestCourse::test_conclude", "tests/test_course.py::TestCourse::test_course_files", "tests/test_course.py::TestCourse::test_create_assignment", "tests/test_course.py::TestCourse::test_create_assignment_fail", "tests/test_course.py::TestCourse::test_create_assignment_group", "tests/test_course.py::TestCourse::test_create_course_section", "tests/test_course.py::TestCourse::test_create_discussion_topic", "tests/test_course.py::TestCourse::test_create_external_feed", "tests/test_course.py::TestCourse::test_create_external_tool", "tests/test_course.py::TestCourse::test_create_folder", "tests/test_course.py::TestCourse::test_create_group_category", "tests/test_course.py::TestCourse::test_create_module", "tests/test_course.py::TestCourse::test_create_module_fail", "tests/test_course.py::TestCourse::test_create_page", "tests/test_course.py::TestCourse::test_create_page_fail", "tests/test_course.py::TestCourse::test_create_quiz", "tests/test_course.py::TestCourse::test_create_quiz_fail", "tests/test_course.py::TestCourse::test_delete", "tests/test_course.py::TestCourse::test_delete_external_feed", "tests/test_course.py::TestCourse::test_edit_front_page", "tests/test_course.py::TestCourse::test_enroll_user", "tests/test_course.py::TestCourse::test_get_assignment", "tests/test_course.py::TestCourse::test_get_assignment_group", "tests/test_course.py::TestCourse::test_get_assignments", "tests/test_course.py::TestCourse::test_get_course_level_assignment_data", "tests/test_course.py::TestCourse::test_get_course_level_participation_data", "tests/test_course.py::TestCourse::test_get_course_level_student_summary_data", "tests/test_course.py::TestCourse::test_get_discussion_topic", "tests/test_course.py::TestCourse::test_get_discussion_topics", "tests/test_course.py::TestCourse::test_get_enrollments", "tests/test_course.py::TestCourse::test_get_external_tool", 
"tests/test_course.py::TestCourse::test_get_external_tools", "tests/test_course.py::TestCourse::test_get_file", "tests/test_course.py::TestCourse::test_get_folder", "tests/test_course.py::TestCourse::test_get_full_discussion_topic", "tests/test_course.py::TestCourse::test_get_module", "tests/test_course.py::TestCourse::test_get_modules", "tests/test_course.py::TestCourse::test_get_page", "tests/test_course.py::TestCourse::test_get_pages", "tests/test_course.py::TestCourse::test_get_quiz", "tests/test_course.py::TestCourse::test_get_quiz_fail", "tests/test_course.py::TestCourse::test_get_quizzes", "tests/test_course.py::TestCourse::test_get_recent_students", "tests/test_course.py::TestCourse::test_get_section", "tests/test_course.py::TestCourse::test_get_settings", "tests/test_course.py::TestCourse::test_get_submission", "tests/test_course.py::TestCourse::test_get_user", "tests/test_course.py::TestCourse::test_get_user_id_type", "tests/test_course.py::TestCourse::test_get_user_in_a_course_level_assignment_data", "tests/test_course.py::TestCourse::test_get_user_in_a_course_level_messaging_data", "tests/test_course.py::TestCourse::test_get_user_in_a_course_level_participation_data", "tests/test_course.py::TestCourse::test_get_users", "tests/test_course.py::TestCourse::test_list_assignment_groups", "tests/test_course.py::TestCourse::test_list_external_feeds", "tests/test_course.py::TestCourse::test_list_folders", "tests/test_course.py::TestCourse::test_list_gradeable_students", "tests/test_course.py::TestCourse::test_list_group_categories", "tests/test_course.py::TestCourse::test_list_groups", "tests/test_course.py::TestCourse::test_list_multiple_submissions", "tests/test_course.py::TestCourse::test_list_multiple_submissions_grouped_param", "tests/test_course.py::TestCourse::test_list_sections", "tests/test_course.py::TestCourse::test_list_submissions", "tests/test_course.py::TestCourse::test_list_tabs", "tests/test_course.py::TestCourse::test_mark_submission_as_read", 
"tests/test_course.py::TestCourse::test_mark_submission_as_unread", "tests/test_course.py::TestCourse::test_preview_html", "tests/test_course.py::TestCourse::test_reorder_pinned_topics_invalid_input", "tests/test_course.py::TestCourse::test_reset", "tests/test_course.py::TestCourse::test_show_front_page", "tests/test_course.py::TestCourse::test_subit_assignment_fail", "tests/test_course.py::TestCourse::test_submit_assignment", "tests/test_course.py::TestCourse::test_update", "tests/test_course.py::TestCourse::test_update_settings", "tests/test_course.py::TestCourse::test_update_submission", "tests/test_course.py::TestCourse::test_update_tab", "tests/test_course.py::TestCourse::test_upload", "tests/test_course.py::TestCourseNickname::test__str__", "tests/test_course.py::TestCourseNickname::test_remove", "tests/test_group.py::TestGroup::test__str__", "tests/test_group.py::TestGroup::test_create_discussion_topic", "tests/test_group.py::TestGroup::test_create_external_feed", "tests/test_group.py::TestGroup::test_create_folder", "tests/test_group.py::TestGroup::test_create_membership", "tests/test_group.py::TestGroup::test_create_page", "tests/test_group.py::TestGroup::test_create_page_fail", "tests/test_group.py::TestGroup::test_delete", "tests/test_group.py::TestGroup::test_delete_external_feed", "tests/test_group.py::TestGroup::test_edit", "tests/test_group.py::TestGroup::test_edit_front_page", "tests/test_group.py::TestGroup::test_get_activity_stream_summary", "tests/test_group.py::TestGroup::test_get_discussion_topic", "tests/test_group.py::TestGroup::test_get_discussion_topics", "tests/test_group.py::TestGroup::test_get_file", "tests/test_group.py::TestGroup::test_get_folder", "tests/test_group.py::TestGroup::test_get_full_discussion_topic", "tests/test_group.py::TestGroup::test_get_membership", "tests/test_group.py::TestGroup::test_get_page", "tests/test_group.py::TestGroup::test_get_pages", "tests/test_group.py::TestGroup::test_group_files", 
"tests/test_group.py::TestGroup::test_invite", "tests/test_group.py::TestGroup::test_list_external_feeds", "tests/test_group.py::TestGroup::test_list_folders", "tests/test_group.py::TestGroup::test_list_memberships", "tests/test_group.py::TestGroup::test_list_tabs", "tests/test_group.py::TestGroup::test_list_users", "tests/test_group.py::TestGroup::test_preview_processed_html", "tests/test_group.py::TestGroup::test_remove_user", "tests/test_group.py::TestGroup::test_reorder_pinned_topics_invalid_input", "tests/test_group.py::TestGroup::test_show_front_page", "tests/test_group.py::TestGroup::test_update_membership", "tests/test_group.py::TestGroup::test_upload", "tests/test_group.py::TestGroupMembership::test__str__", "tests/test_group.py::TestGroupMembership::test_remove_self", "tests/test_group.py::TestGroupMembership::test_remove_user", "tests/test_group.py::TestGroupMembership::test_update", "tests/test_group.py::TestGroupCategory::test__str__", "tests/test_group.py::TestGroupCategory::test_assign_members", "tests/test_group.py::TestGroupCategory::test_create_group", "tests/test_group.py::TestGroupCategory::test_delete_category", "tests/test_group.py::TestGroupCategory::test_list_groups", "tests/test_group.py::TestGroupCategory::test_list_users", "tests/test_group.py::TestGroupCategory::test_update" ]
[]
MIT License
1,732
[ "canvasapi/course.py", "canvasapi/group.py" ]
[ "canvasapi/course.py", "canvasapi/group.py" ]
frictionlessdata__goodtables-py-224
fa0cc77e216a4cabdc581e09ab0a398c40d0ae37
2017-10-05 15:12:08
fa0cc77e216a4cabdc581e09ab0a398c40d0ae37
diff --git a/goodtables/validate.py b/goodtables/validate.py index 19e85e4..f5d72f2 100644 --- a/goodtables/validate.py +++ b/goodtables/validate.py @@ -28,6 +28,15 @@ def validate(source, **options): settings['custom_checks'] = options.pop('custom_checks', None) settings = {key: value for key, value in settings.items() if value is not None} + # Support for pathlib.Path + if hasattr(source, 'joinpath'): + source = str(source) + if isinstance(source, list): + if source and isinstance(source[0], dict) and 'source' in source[0]: + for index, item in enumerate(source): + if hasattr(item['source'], 'joinpath'): + source[index]['source'] = str(item['source']) + # Extract/infer preset preset = options.pop('preset', None) if preset is None:
Posixpath support Would be great if `goodtables` supported a [pathlib](https://docs.python.org/3/library/pathlib.html) `Posixpath` ... Not sure, maybe it's only `stream.open` in `inspector.py`?
frictionlessdata/goodtables-py
diff --git a/tests/test_validate.py b/tests/test_validate.py index f1a78e6..7ae59e4 100644 --- a/tests/test_validate.py +++ b/tests/test_validate.py @@ -4,8 +4,10 @@ from __future__ import print_function from __future__ import absolute_import from __future__ import unicode_literals +import sys import json import pytest +from importlib import import_module from goodtables import validate @@ -123,6 +125,24 @@ def test_validate_datapackage_dialect_header_false(log): assert log(report) == [] +# Source as pathlib.Path + [email protected](sys.version_info < (3, 4), reason='not supported') +def test_source_pathlib_path_table(): + pathlib = import_module('pathlib') + report = validate(pathlib.Path('data/valid.csv')) + assert report['table-count'] == 1 + assert report['valid'] + + [email protected](sys.version_info < (3, 4), reason='not supported') +def test_source_pathlib_path_datapackage(): + pathlib = import_module('pathlib') + report = validate(pathlib.Path('data/datapackages/valid/datapackage.json')) + assert report['table-count'] == 2 + assert report['valid'] + + # Issues def test_composite_primary_key_unique_issue_215(log):
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 1 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[develop,ods]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-cov", "pytest-mock", "mock", "pyyaml" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work boto3==1.23.10 botocore==1.26.10 cached-property==1.5.2 certifi==2021.5.30 chardet==5.0.0 charset-normalizer==2.0.12 click==6.7 coverage==6.2 dataflows-tabulator==1.54.3 datapackage==1.15.4 distlib==0.3.9 docutils==0.18.1 et-xmlfile==1.1.0 ezodf==0.3.2 filelock==3.4.1 -e git+https://github.com/frictionlessdata/goodtables-py.git@fa0cc77e216a4cabdc581e09ab0a398c40d0ae37#egg=goodtables greenlet==2.0.2 idna==3.10 ijson==3.3.0 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work isodate==0.6.1 jmespath==0.10.0 jsonlines==3.1.0 jsonpointer==2.3 jsonschema==3.2.0 linear-tsv==1.1.0 lxml==3.8.0 mccabe==0.7.0 mock==5.2.0 more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work openpyxl==3.1.3 packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work platformdirs==2.4.0 pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pycodestyle==2.10.0 pydocstyle==6.3.0 pyflakes==3.0.1 pylama==7.7.1 pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pyrsistent==0.18.0 pytest==6.2.4 pytest-cov==4.0.0 pytest-mock==3.6.1 python-dateutil==2.9.0.post0 PyYAML==6.0.1 requests==2.27.1 rfc3986==1.5.0 s3transfer==0.5.2 simpleeval==0.9.13 six==1.17.0 snowballstemmer==2.2.0 SQLAlchemy==1.4.54 statistics==1.0.3.5 tableschema==1.21.0 tabulator==1.53.5 toml @ file:///tmp/build/80754af9/toml_1616166611790/work tomli==1.2.3 tox==3.28.0 typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work unicodecsv==0.14.1 urllib3==1.26.20 virtualenv==20.17.1 xlrd==2.0.1 zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: goodtables-py channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - boto3==1.23.10 - botocore==1.26.10 - cached-property==1.5.2 - chardet==5.0.0 - charset-normalizer==2.0.12 - click==6.7 - coverage==6.2 - dataflows-tabulator==1.54.3 - datapackage==1.15.4 - distlib==0.3.9 - docutils==0.18.1 - et-xmlfile==1.1.0 - ezodf==0.3.2 - filelock==3.4.1 - greenlet==2.0.2 - idna==3.10 - ijson==3.3.0 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - isodate==0.6.1 - jmespath==0.10.0 - jsonlines==3.1.0 - jsonpointer==2.3 - jsonschema==3.2.0 - linear-tsv==1.1.0 - lxml==3.8.0 - mccabe==0.7.0 - mock==5.2.0 - openpyxl==3.1.3 - platformdirs==2.4.0 - pycodestyle==2.10.0 - pydocstyle==6.3.0 - pyflakes==3.0.1 - pylama==7.7.1 - pyrsistent==0.18.0 - pytest-cov==4.0.0 - pytest-mock==3.6.1 - python-dateutil==2.9.0.post0 - pyyaml==6.0.1 - requests==2.27.1 - rfc3986==1.5.0 - s3transfer==0.5.2 - simpleeval==0.9.13 - six==1.17.0 - snowballstemmer==2.2.0 - sqlalchemy==1.4.54 - 
statistics==1.0.3.5 - tableschema==1.21.0 - tabulator==1.53.5 - tomli==1.2.3 - tox==3.28.0 - unicodecsv==0.14.1 - urllib3==1.26.20 - virtualenv==20.17.1 - xlrd==2.0.1 prefix: /opt/conda/envs/goodtables-py
[ "tests/test_validate.py::test_source_pathlib_path_datapackage" ]
[]
[ "tests/test_validate.py::test_validate_infer_table", "tests/test_validate.py::test_validate_infer_datapackage_path", "tests/test_validate.py::test_validate_infer_datapackage_dict", "tests/test_validate.py::test_validate_infer_nested", "tests/test_validate.py::test_validate_report_scheme_format_encoding", "tests/test_validate.py::test_validate_report_schema", "tests/test_validate.py::test_validate_report_schema_infer_schema", "tests/test_validate.py::test_validate_nested_checks", "tests/test_validate.py::test_validate_invalid_table_schema", "tests/test_validate.py::test_validate_datapackage_dialect_header_false", "tests/test_validate.py::test_source_pathlib_path_table", "tests/test_validate.py::test_composite_primary_key_unique_issue_215", "tests/test_validate.py::test_composite_primary_key_not_unique_issue_215", "tests/test_validate.py::test_validate_infer_fields_issue_223" ]
[]
MIT License
1,734
[ "goodtables/validate.py" ]
[ "goodtables/validate.py" ]
dpkp__kafka-python-1239
cec1bdc9965b3d6729d4415e31b4dac04d603873
2017-10-05 17:16:48
618c5051493693c1305aa9f08e8a0583d5fcf0e3
diff --git a/kafka/consumer/fetcher.py b/kafka/consumer/fetcher.py index b86c8ec..f552038 100644 --- a/kafka/consumer/fetcher.py +++ b/kafka/consumer/fetcher.py @@ -923,12 +923,17 @@ class Fetcher(six.Iterator): self._sensors.fetch_throttle_time_sensor.record(response.throttle_time_ms) self._sensors.fetch_latency.record((recv_time - send_time) * 1000) - class PartitionRecords(six.Iterator): + class PartitionRecords(object): def __init__(self, fetch_offset, tp, messages): self.fetch_offset = fetch_offset self.topic_partition = tp self.messages = messages - self.message_idx = 0 + # When fetching an offset that is in the middle of a + # compressed batch, we will get all messages in the batch. + # But we want to start 'take' at the fetch_offset + for i, msg in enumerate(messages): + if msg.offset == fetch_offset: + self.message_idx = i def discard(self): self.messages = None
Seek method returning incorrect messages on compressed topic when using max_poll_records While using seek method of `kafka.consumer.group.seek' for a given partition, offset, we are seeing the inconsistent behavior for the messages returned with the subsequent poll method. The issue is easily reproducible for the given topic (compacted). Part of Workflow: ``` from kafka.consumer.group import KafkaConsumer topic_partition = TopicPartition(topic, 0) consumer = KafkaConsumer(*consumer_config) consumer.assign([topic_partition]) start_offset = 100 # Example value: highwatermark - 10 consumer.seek(partition=topic_partition, offset=start_offset) messages = consumer.poll(timeout_ms=1000, max_records=1)[topic_partition] message = messages[0] print('Offset found:', message.offset, 'Expected offset:', start_offset) Sample Output: $ Offset found:80 Expected offset:100 ``` Observation: * If iterator interface is used instead of poll interface, the issue no longer exists. My guess is somewhere while polling for messages, the fetched offsets are not updated or fetched messages are not skipped. It looks like iterator method is not using fetched_records api that's why it works fine. * At times it does give correct messages (especially when given offset is closer to highwatermark) Please let me know if any other details are required.
dpkp/kafka-python
diff --git a/test/test_fetcher.py b/test/test_fetcher.py index 64eec1b..86d154f 100644 --- a/test/test_fetcher.py +++ b/test/test_fetcher.py @@ -7,7 +7,7 @@ import itertools from collections import OrderedDict from kafka.client_async import KafkaClient -from kafka.consumer.fetcher import Fetcher, NoOffsetForPartitionError +from kafka.consumer.fetcher import ConsumerRecord, Fetcher, NoOffsetForPartitionError from kafka.consumer.subscription_state import SubscriptionState from kafka.metrics import Metrics from kafka.protocol.fetch import FetchRequest @@ -282,3 +282,26 @@ def test__handle_offset_response(fetcher, mocker): fetcher._handle_offset_response(fut, res) assert fut.failed() assert isinstance(fut.exception, NotLeaderForPartitionError) + + +def test_partition_records_offset(): + """Test that compressed messagesets are handle correctly + when fetch offset is in the middle of the message list + """ + batch_start = 120 + batch_end = 130 + fetch_offset = 123 + tp = TopicPartition('foo', 0) + messages = [ConsumerRecord(tp.topic, tp.partition, i, + None, None, 'key', 'value', 'checksum', 0, 0) + for i in range(batch_start, batch_end)] + records = Fetcher.PartitionRecords(fetch_offset, None, messages) + assert records.has_more() + msgs = records.take(1) + assert msgs[0].offset == 123 + assert records.fetch_offset == 124 + msgs = records.take(2) + assert len(msgs) == 2 + assert records.has_more() + records.discard() + assert not records.has_more()
{ "commit_name": "merge_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 1 }
1.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-catchlog", "pytest-sugar", "pytest-mock", "mock", "python-snappy", "lz4", "xxhash" ], "pre_install": [ "apt-get update", "apt-get install -y libsnappy-dev" ], "python": "3.6", "reqs_path": [ "docs/requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 attrs==22.2.0 Babel==2.11.0 certifi==2021.5.30 charset-normalizer==2.0.12 coverage==6.2 cramjam==2.5.0 docutils==0.18.1 idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 iniconfig==1.1.1 Jinja2==3.0.3 -e git+https://github.com/dpkp/kafka-python.git@cec1bdc9965b3d6729d4415e31b4dac04d603873#egg=kafka_python lz4==3.1.10 MarkupSafe==2.0.1 mock==5.2.0 packaging==21.3 pluggy==1.0.0 pockets==0.9.1 py==1.11.0 Pygments==2.14.0 pyparsing==3.1.4 pytest==7.0.1 pytest-catchlog==1.2.2 pytest-cov==4.0.0 pytest-mock==3.6.1 pytest-sugar==0.9.6 python-snappy==0.7.3 pytz==2025.2 requests==2.27.1 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinx-rtd-theme==2.0.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-napoleon==0.7 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 termcolor==1.1.0 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 xxhash==3.2.0 zipp==3.6.0
name: kafka-python channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - attrs==22.2.0 - babel==2.11.0 - charset-normalizer==2.0.12 - coverage==6.2 - cramjam==2.5.0 - docutils==0.18.1 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - jinja2==3.0.3 - lz4==3.1.10 - markupsafe==2.0.1 - mock==5.2.0 - packaging==21.3 - pluggy==1.0.0 - pockets==0.9.1 - py==1.11.0 - pygments==2.14.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-catchlog==1.2.2 - pytest-cov==4.0.0 - pytest-mock==3.6.1 - pytest-sugar==0.9.6 - python-snappy==0.7.3 - pytz==2025.2 - requests==2.27.1 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinx-rtd-theme==2.0.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jquery==4.1 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-napoleon==0.7 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - termcolor==1.1.0 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - xxhash==3.2.0 - zipp==3.6.0 prefix: /opt/conda/envs/kafka-python
[ "test/test_fetcher.py::test_partition_records_offset" ]
[]
[ "test/test_fetcher.py::test_send_fetches", "test/test_fetcher.py::test_create_fetch_requests[api_version0-3]", "test/test_fetcher.py::test_create_fetch_requests[api_version1-2]", "test/test_fetcher.py::test_create_fetch_requests[api_version2-1]", "test/test_fetcher.py::test_create_fetch_requests[api_version3-0]", "test/test_fetcher.py::test_update_fetch_positions", "test/test_fetcher.py::test__reset_offset", "test/test_fetcher.py::test__send_offset_requests", "test/test_fetcher.py::test__send_offset_requests_multiple_nodes", "test/test_fetcher.py::test__handle_offset_response" ]
[]
Apache License 2.0
1,735
[ "kafka/consumer/fetcher.py" ]
[ "kafka/consumer/fetcher.py" ]
CORE-GATECH-GROUP__serpent-tools-24
4b93cd86c6149b94960984892dad25de6fbbb41f
2017-10-05 20:12:21
224ef748f519903554f346d48071e58b43dcf902
drewejohnson: Reopened to squash and merge
diff --git a/serpentTools/__init__.py b/serpentTools/__init__.py index fdaf895..8d07887 100644 --- a/serpentTools/__init__.py +++ b/serpentTools/__init__.py @@ -3,10 +3,6 @@ from serpentTools import parsers # List TODOS/feature requests here for now -# Messages/Errors -# TODO: Add verbosity control -# TODO: Add specific exceptions and warnings -# TODO: Add logging module to help with warnings/exceptions/info # Compatability # TODO: Python 2 support # TODO: Test compatability with earlier numpy releases @@ -14,9 +10,10 @@ from serpentTools import parsers # TODO: Update rc with dictionary # TODO: Update rc with yaml file into dictionary # TODO: Capture materials with underscores for depletion -# TODO: Better version string management from ._version import get_versions __version__ = get_versions()['version'] del get_versions + +settings.messages.info('Using version {}'.format(__version__)) diff --git a/serpentTools/parsers/depletion.py b/serpentTools/parsers/depletion.py index 002f516..6c2ff82 100644 --- a/serpentTools/parsers/depletion.py +++ b/serpentTools/parsers/depletion.py @@ -8,6 +8,8 @@ from drewtils.parsers import KeywordParser from serpentTools.objects.readers import MaterialReader from serpentTools.objects.materials import DepletedMaterial +from serpentTools.settings import messages + class DepletionReader(MaterialReader): """Parser responsible for reading and working with depletion files. 
@@ -60,10 +62,13 @@ class DepletionReader(MaterialReader): """Return the patterns by which to find the requested materials.""" patterns = self.settings['materials'] or ['.*'] # match all materials if nothing given + if any(['_' in pat for pat in patterns]): + messages.warning('Materials with underscores are not supported.') return [re.compile(mat) for mat in patterns] def read(self): """Read through the depletion file and store requested data.""" + messages.info('Preparing to read {}'.format(self.filePath)) keys = ['MAT', 'TOT'] if self.settings['processTotal'] else ['MAT'] keys.extend(self.settings['metadataKeys']) separators = ['\n', '];'] @@ -74,6 +79,8 @@ class DepletionReader(MaterialReader): elif (('TOT' in chunk[0] and self.settings['processTotal']) or 'MAT' in chunk[0]): self._addMaterial(chunk) + messages.info('Done reading depletion file') + messages.debug(' found {} materials'.format(len(self.materials))) def _addMetadata(self, chunk): options = {'ZAI': 'zai', 'NAMES': 'names', 'DAYS': 'days', diff --git a/serpentTools/settings/__init__.py b/serpentTools/settings/__init__.py index 2223fc0..09f5fdb 100644 --- a/serpentTools/settings/__init__.py +++ b/serpentTools/settings/__init__.py @@ -1,4 +1,5 @@ """Settings to yield control to the user.""" +from serpentTools.settings import messages defaultSettings = { 'depletion.metadataKeys': { @@ -23,6 +24,13 @@ defaultSettings = { 'default': True, 'description': 'Option to store the depletion data from the TOT block', 'type': bool + }, + 'verbosity': { + 'default': 'warning', + 'options': messages.LOG_OPTS, + 'type': str, + 'description': 'Set the level of errors to be shown.', + 'updater': messages.updateLevel } } @@ -30,12 +38,13 @@ defaultSettings = { class DefaultSetting(object): """Store a single setting.""" - def __init__(self, name, default, varType, description, options): + def __init__(self, name, default, varType, description, options, updater): self.name = name self.description = description 
self.default = default self.type = varType self.options = options + self.updater = updater def __repr__(self): return '<DefaultSetting {}: value: {}>'.format(self.name, self.default) @@ -82,7 +91,8 @@ class DefaultSettingsLoader(dict): dict.__init__(self, self._load()) self.__locked__ = True - def _load(self): + @staticmethod + def _load(): """Load the default setting objects.""" defaults = {} for name, value in defaultSettings.items(): @@ -95,7 +105,8 @@ class DefaultSettingsLoader(dict): 'default': value['default'], 'varType': value['type'], 'options': options, - 'description': value['description'] + 'description': value['description'], + 'updater': value.get('updater', None) } defaults[name] = DefaultSetting(**settingsOptions) return defaults @@ -163,14 +174,17 @@ class UserSettingsLoader(dict): raise KeyError('Setting {} does not exist'.format(name)) self._defaultLoader[name].validate(value) # if we've made it here, then the value is valid + if self._defaultLoader[name].updater is not None: + value = self._defaultLoader[name].updater(value) dict.__setitem__(self, name, value) + messages.debug('Updated setting {} to {}'.format(name, value)) def getReaderSettings(self, readerName): """Get all module-wide and reader-specific settings. Parameters ---------- - readerLevel: str + readerName: str Name of the specific reader. Will look for settings with lead with ``readerName``, e.g. ``depletion.metadataKeys`` diff --git a/serpentTools/settings/messages.py b/serpentTools/settings/messages.py new file mode 100644 index 0000000..a0fa0b0 --- /dev/null +++ b/serpentTools/settings/messages.py @@ -0,0 +1,84 @@ +""" +System-wide methods for producing status update and errors. 
+ +See Also +-------- +https://docs.python.org/2/library/logging.html +https://www.python.org/dev/peps/pep-0391/ +http://docs.python-guide.org/en/latest/writing/logging/ +https://docs.python.org/2/howto/logging-cookbook.html#logging-cookbook +https://fangpenlin.com/posts/2012/08/26/good-logging-practice-in-python/ +""" + + +import logging +from logging.config import dictConfig + + +class SerpentToolsException(Exception): + """Base-class for all exceptions in this project""" + pass + + +LOG_OPTS = ['critical', 'error', 'warning', 'info', 'debug'] + + +loggingConfig = { + 'version': 1, + 'formatters': { + 'brief': {'format': '%(levelname)-8s: %(name)-15s: %(message)s'}, + 'precise': { + 'format': '%(asctime)s %(name)-12s %(levelname)-8s %(message)s' + } + }, + 'handlers': { + 'console': { + 'class': 'logging.StreamHandler', + 'formatter': 'brief', + 'level': logging.DEBUG, + 'stream': 'ext://sys.stdout' + } + }, + 'root': { + 'handlers': ['console'], + 'level': logging.INFO + } +} + +dictConfig(loggingConfig) + +__logger__ = logging.getLogger('serpentTools') + + +def debug(message): + """Log a debug message.""" + __logger__.debug('%s', message) + + +def info(message): + """Log an info message, e.g. status update.""" + __logger__.info('%s', message) + + +def warning(message): + """Log a warning that something that could go wrong or be avoided.""" + __logger__.warning('%s', message) + + +def error(message, fatal=True): + """Log that something went wrong.""" + if fatal: + __logger__.critical('%s', message, exc_info=True) + raise SerpentToolsException('%s', message) + __logger__.error('%s', message) + + +def updateLevel(level): + """Set the level of the logger.""" + if level.lower() not in LOG_OPTS: + __logger__.setLevel('INFO') + warning('Logger option {} not in options. 
Set to warning.') + return 'warning' + else: + __logger__.setLevel(level.upper()) + return level diff --git a/setup.py b/setup.py index fdb65d3..5dd24b4 100644 --- a/setup.py +++ b/setup.py @@ -1,6 +1,8 @@ from setuptools import setup import versioneer +with open('README.md') as readme: + longDesc = readme.read() classifiers = [ 'License :: OSI Approved :: MIT License', @@ -23,6 +25,7 @@ setupArgs = { 'url': 'https://github.com/CORE-GATECH-GROUP/serpent-tools', 'description': ('A suite of parsers designed to make interacting with ' 'SERPENT output files simple, scriptable, and flawless'), + 'long_description': longDesc, 'test_suite': 'serpentTools.tests', 'author': 'Andrew Johnson', 'author_email': '[email protected]',
Feature: Implement a messaging and exception framework Implement a overarching data logger that controls warnings, errors, and debug statements that allows the user to set the verbosity through the `rc` system. Maybe piggyback off of the [logging module](https://docs.python.org/3.6/library/logging.html) Create a `SerpentToolsError` that is the base type for all critical errors thrown during operation. ## Usage ```from serpentTools.settings import rc rc['verbosity'] = 'debug' # print all status updates, errors, and debug statements rc['definitely not a setting'] = 'still not good' # raises SerpentToolsError or some subclass thereof rc['verbosity'] = 'quiet' # print only critical errors, same as `critical` ```
CORE-GATECH-GROUP/serpent-tools
diff --git a/serpentTools/tests/test_loaders.py b/serpentTools/tests/test_loaders.py index f6895e1..3bbd24e 100644 --- a/serpentTools/tests/test_loaders.py +++ b/serpentTools/tests/test_loaders.py @@ -57,7 +57,8 @@ class UserSettingsTester(unittest.TestCase): 'metadataKeys': ['ZAI', 'NAMES', 'DAYS', 'BU'], 'materialVariables': ['ADENS', 'MDENS', 'BURNUP'], 'materials': [], - 'processTotal': True + 'processTotal': True, + 'verbosity': 'warning' } actual = self.loader.getReaderSettings(readerName) self.assertDictEqual(expected, actual)
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 4 }
1.00
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "drewtils>=0.1.5" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
contourpy==1.3.0 cycler==0.12.1 drewtils==0.1.9 exceptiongroup==1.2.2 fonttools==4.56.0 importlib_resources==6.5.2 iniconfig==2.1.0 kiwisolver==1.4.7 matplotlib==3.9.4 numpy==2.0.2 packaging==24.2 pillow==11.1.0 pluggy==1.5.0 pyparsing==3.2.3 pytest==8.3.5 python-dateutil==2.9.0.post0 -e git+https://github.com/CORE-GATECH-GROUP/serpent-tools.git@4b93cd86c6149b94960984892dad25de6fbbb41f#egg=serpentTools six==1.17.0 tomli==2.2.1 versioneer==0.29 zipp==3.21.0
name: serpent-tools channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - contourpy==1.3.0 - cycler==0.12.1 - drewtils==0.1.9 - exceptiongroup==1.2.2 - fonttools==4.56.0 - importlib-resources==6.5.2 - iniconfig==2.1.0 - kiwisolver==1.4.7 - matplotlib==3.9.4 - numpy==2.0.2 - packaging==24.2 - pillow==11.1.0 - pluggy==1.5.0 - pyparsing==3.2.3 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - six==1.17.0 - tomli==2.2.1 - versioneer==0.29 - zipp==3.21.0 prefix: /opt/conda/envs/serpent-tools
[ "serpentTools/tests/test_loaders.py::UserSettingsTester::test_returnReaderSettings" ]
[]
[ "serpentTools/tests/test_loaders.py::DefaultSettingsTester::test_cannotChangeDefaults", "serpentTools/tests/test_loaders.py::DefaultSettingsTester::test_getDefault", "serpentTools/tests/test_loaders.py::UserSettingsTester::test_failAtBadSetting_options", "serpentTools/tests/test_loaders.py::UserSettingsTester::test_failAtBadSettings_type", "serpentTools/tests/test_loaders.py::UserSettingsTester::test_failAtNonexistentSetting", "serpentTools/tests/test_loaders.py::RCTester::test_readerWithUpdatedSettings" ]
[]
MIT License
1,736
[ "setup.py", "serpentTools/settings/__init__.py", "serpentTools/parsers/depletion.py", "serpentTools/__init__.py", "serpentTools/settings/messages.py" ]
[ "setup.py", "serpentTools/settings/__init__.py", "serpentTools/parsers/depletion.py", "serpentTools/__init__.py", "serpentTools/settings/messages.py" ]
pimutils__khal-720
e4fe38059c109c0d6efdec81c98e4e8abe80b2a2
2017-10-05 20:52:23
79fd6ea2535a7e6b1f7a4fd532a932aafa1e86df
diff --git a/khal/ui/editor.py b/khal/ui/editor.py index 4333004..94cadca 100644 --- a/khal/ui/editor.py +++ b/khal/ui/editor.py @@ -235,7 +235,7 @@ class StartEndEditor(urwid.WidgetWrap): return endval def _end_date_change(self, date): - self._enddt = self.localize_end(dt.datetime.combine(date, self._start_time)) + self._enddt = self.localize_end(dt.datetime.combine(date, self._end_time)) self.on_end_date_change(date) def toggle(self, checkbox, state): @@ -277,25 +277,23 @@ class StartEndEditor(urwid.WidgetWrap): self.widgets.endtime = urwid.Text('') elif state is False: timewidth = self._timewidth + 1 - edit = ValidatedEdit( + raw_start_time_widget = ValidatedEdit( dateformat=self.conf['locale']['timeformat'], EditWidget=TimeWidget, validate=self._validate_start_time, edit_text=self.startdt.strftime(self.conf['locale']['timeformat']), ) - edit = urwid.Padding( - edit, align='left', width=self._timewidth + 1, left=1) - self.widgets.starttime = edit + self.widgets.starttime = urwid.Padding( + raw_start_time_widget, align='left', width=self._timewidth + 1, left=1) - edit = ValidatedEdit( + raw_end_time_widget = ValidatedEdit( dateformat=self.conf['locale']['timeformat'], EditWidget=TimeWidget, validate=self._validate_end_time, edit_text=self.enddt.strftime(self.conf['locale']['timeformat']), ) - edit = urwid.Padding( - edit, align='left', width=self._timewidth + 1, left=1) - self.widgets.endtime = edit + self.widgets.endtime = urwid.Padding( + raw_end_time_widget, align='left', width=self._timewidth + 1, left=1) columns = NPile([ self.checkallday,
Double issue: invalid time range parsing + warning message display Two problems are shown in the attached screenshot: - when entering this event (using only default values, especially the start and end hours 8:00 to 9:00), the time range is not understood by khal - warning messages are displayed (very shortly, less than 1 sec) in the middle of the screen, and are erased as soon as the screen is being refreshed, which prevents the user to see them ![screenshot_20170608_082535](https://user-images.githubusercontent.com/412628/26914970-5213000a-4c24-11e7-86a0-877a53979524.png)
pimutils/khal
diff --git a/tests/ui/test_editor.py b/tests/ui/test_editor.py index 080a00d..27b7fa5 100644 --- a/tests/ui/test_editor.py +++ b/tests/ui/test_editor.py @@ -4,12 +4,21 @@ import icalendar from khal.ui.editor import RecurrenceEditor, StartEndEditor from ..utils import BERLIN, LOCALE_BERLIN +from .canvas_render import CanvasTranslator CONF = {'locale': LOCALE_BERLIN, 'keybindings': {}, 'view': {'monthdisplay': 'firstday'}} START = BERLIN.localize(dt.datetime(2015, 4, 26, 22, 23)) END = BERLIN.localize(dt.datetime(2015, 4, 27, 23, 23)) +palette = { + 'date header focused': 'blue', + 'date header': 'green', + 'default': 'black', + 'editf': 'red', + 'edit': 'blue', +} + def test_popup(monkeypatch): """making sure the popup calendar gets callend with the right inital value @@ -65,3 +74,60 @@ def test_check_understood_rrule(): assert not RecurrenceEditor.check_understood_rrule( icalendar.vRecur.from_ical('FREQ=MONTHLY;BYDAY=TH;BYSETPOS=3') ) + + +def test_editor(): + """test for the issue in #666""" + editor = StartEndEditor( + BERLIN.localize(dt.datetime(2017, 10, 2, 13)), + BERLIN.localize(dt.datetime(2017, 10, 4, 18)), + conf=CONF + ) + assert editor.startdt == BERLIN.localize(dt.datetime(2017, 10, 2, 13)) + assert editor.enddt == BERLIN.localize(dt.datetime(2017, 10, 4, 18)) + assert editor.changed is False + for _ in range(3): + editor.keypress((10, ), 'tab') + for _ in range(3): + editor.keypress((10, ), 'shift tab') + assert editor.startdt == BERLIN.localize(dt.datetime(2017, 10, 2, 13)) + assert editor.enddt == BERLIN.localize(dt.datetime(2017, 10, 4, 18)) + assert editor.changed is False + + +def test_convert_to_date(): + """test for the issue in #666""" + editor = StartEndEditor( + BERLIN.localize(dt.datetime(2017, 10, 2, 13)), + BERLIN.localize(dt.datetime(2017, 10, 4, 18)), + conf=CONF + ) + canvas = editor.render((50, ), True) + assert CanvasTranslator(canvas, palette).transform() == ( + '[ ] Allday\nFrom: \x1b[31m2.10.2017 \x1b[0m \x1b[34m13:00 \x1b[0m\n' + 
'To: \x1b[34m04.10.2017\x1b[0m \x1b[34m18:00 \x1b[0m\n' + ) + + assert editor.startdt == BERLIN.localize(dt.datetime(2017, 10, 2, 13)) + assert editor.enddt == BERLIN.localize(dt.datetime(2017, 10, 4, 18)) + assert editor.changed is False + assert editor.allday is False + + # set to all day event + editor.keypress((10, ), 'shift tab') + editor.keypress((10, ), ' ') + for _ in range(3): + editor.keypress((10, ), 'tab') + for _ in range(3): + editor.keypress((10, ), 'shift tab') + + canvas = editor.render((50, ), True) + assert CanvasTranslator(canvas, palette).transform() == ( + '[X] Allday\nFrom: \x1b[34m02.10.2017\x1b[0m \n' + 'To: \x1b[34m04.10.2017\x1b[0m \n' + ) + + assert editor.changed is True + assert editor.allday is True + assert editor.startdt == dt.date(2017, 10, 2) + assert editor.enddt == dt.date(2017, 10, 4)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_media" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 1 }
0.9
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "freezegun", "vdirsyncer" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
aiohappyeyeballs==2.6.1 aiohttp==3.11.14 aiosignal==1.3.2 aiostream==0.4.5 async-timeout==5.0.1 atomicwrites==1.4.1 attrs==25.3.0 certifi==2025.1.31 charset-normalizer==3.4.1 click==8.1.8 click-log==0.4.0 configobj==5.0.9 coverage==7.8.0 exceptiongroup==1.2.2 freezegun==1.5.1 frozenlist==1.5.0 icalendar==6.1.3 idna==3.10 iniconfig==2.1.0 -e git+https://github.com/pimutils/khal.git@e4fe38059c109c0d6efdec81c98e4e8abe80b2a2#egg=khal multidict==6.2.0 packaging==24.2 pluggy==1.5.0 propcache==0.3.1 pytest==8.3.5 pytest-cov==6.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 pyxdg==0.28 requests==2.32.3 six==1.17.0 tomli==2.2.1 typing_extensions==4.13.0 tzdata==2025.2 tzlocal==5.3.1 urllib3==2.3.0 urwid==2.6.16 vdirsyncer==0.19.3 wcwidth==0.2.13 yarl==1.18.3
name: khal channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - aiohappyeyeballs==2.6.1 - aiohttp==3.11.14 - aiosignal==1.3.2 - aiostream==0.4.5 - async-timeout==5.0.1 - atomicwrites==1.4.1 - attrs==25.3.0 - certifi==2025.1.31 - charset-normalizer==3.4.1 - click==8.1.8 - click-log==0.4.0 - configobj==5.0.9 - coverage==7.8.0 - exceptiongroup==1.2.2 - freezegun==1.5.1 - frozenlist==1.5.0 - icalendar==6.1.3 - idna==3.10 - iniconfig==2.1.0 - multidict==6.2.0 - packaging==24.2 - pluggy==1.5.0 - propcache==0.3.1 - pytest==8.3.5 - pytest-cov==6.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyxdg==0.28 - requests==2.32.3 - six==1.17.0 - tomli==2.2.1 - typing-extensions==4.13.0 - tzdata==2025.2 - tzlocal==5.3.1 - urllib3==2.3.0 - urwid==2.6.16 - vdirsyncer==0.19.3 - wcwidth==0.2.13 - yarl==1.18.3 prefix: /opt/conda/envs/khal
[ "tests/ui/test_editor.py::test_editor" ]
[ "tests/ui/test_editor.py::test_popup" ]
[ "tests/ui/test_editor.py::test_check_understood_rrule", "tests/ui/test_editor.py::test_convert_to_date" ]
[]
MIT License
1,737
[ "khal/ui/editor.py" ]
[ "khal/ui/editor.py" ]
oasis-open__cti-python-stix2-71
58f39f80af5cbfe02879c2efa4b3b4ef7a504390
2017-10-06 15:06:42
58f39f80af5cbfe02879c2efa4b3b4ef7a504390
diff --git a/stix2/__init__.py b/stix2/__init__.py index 53c2fb1..7be0904 100644 --- a/stix2/__init__.py +++ b/stix2/__init__.py @@ -8,8 +8,6 @@ from .common import (TLP_AMBER, TLP_GREEN, TLP_RED, TLP_WHITE, CustomMarking, MarkingDefinition, StatementMarking, TLPMarking) from .core import Bundle, _register_type, parse from .environment import Environment, ObjectFactory -from .markings import (add_markings, clear_markings, get_markings, is_marked, - remove_markings, set_markings) from .observables import (URL, AlternateDataStream, ArchiveExt, Artifact, AutonomousSystem, CustomObservable, Directory, DomainName, EmailAddress, EmailMessage, diff --git a/stix2/common.py b/stix2/common.py index d7994c6..a2e6918 100644 --- a/stix2/common.py +++ b/stix2/common.py @@ -3,7 +3,6 @@ from collections import OrderedDict from .base import _STIXBase -from .markings import MarkingsMixin from .properties import (HashesProperty, IDProperty, ListProperty, Property, ReferenceProperty, SelectorProperty, StringProperty, TimestampProperty, TypeProperty) @@ -77,7 +76,7 @@ class MarkingProperty(Property): raise ValueError("must be a Statement, TLP Marking or a registered marking.") -class MarkingDefinition(_STIXBase, MarkingsMixin): +class MarkingDefinition(_STIXBase): _type = 'marking-definition' _properties = OrderedDict() _properties.update([ diff --git a/stix2/markings/__init__.py b/stix2/markings/__init__.py index 41c761d..4f72e4c 100644 --- a/stix2/markings/__init__.py +++ b/stix2/markings/__init__.py @@ -212,16 +212,3 @@ def is_marked(obj, marking=None, selectors=None, inherited=False, descendants=Fa result = result or object_markings.is_marked(obj, object_marks) return result - - -class MarkingsMixin(): - pass - - -# Note that all of these methods will return a new object because of immutability -MarkingsMixin.get_markings = get_markings -MarkingsMixin.set_markings = set_markings -MarkingsMixin.remove_markings = remove_markings -MarkingsMixin.add_markings = add_markings 
-MarkingsMixin.clear_markings = clear_markings -MarkingsMixin.is_marked = is_marked diff --git a/stix2/markings/granular_markings.py b/stix2/markings/granular_markings.py index 5afd1cc..7e9ccc7 100644 --- a/stix2/markings/granular_markings.py +++ b/stix2/markings/granular_markings.py @@ -88,7 +88,6 @@ def remove_markings(obj, marking, selectors): """ selectors = utils.convert_to_list(selectors) - marking = utils.convert_to_marking_list(marking) utils.validate(obj, selectors) granular_markings = obj.get("granular_markings") @@ -98,9 +97,12 @@ def remove_markings(obj, marking, selectors): granular_markings = utils.expand_markings(granular_markings) - to_remove = [] - for m in marking: - to_remove.append({"marking_ref": m, "selectors": selectors}) + if isinstance(marking, list): + to_remove = [] + for m in marking: + to_remove.append({"marking_ref": m, "selectors": selectors}) + else: + to_remove = [{"marking_ref": marking, "selectors": selectors}] remove = utils.build_granular_marking(to_remove).get("granular_markings") @@ -138,12 +140,14 @@ def add_markings(obj, marking, selectors): """ selectors = utils.convert_to_list(selectors) - marking = utils.convert_to_marking_list(marking) utils.validate(obj, selectors) - granular_marking = [] - for m in marking: - granular_marking.append({"marking_ref": m, "selectors": sorted(selectors)}) + if isinstance(marking, list): + granular_marking = [] + for m in marking: + granular_marking.append({"marking_ref": m, "selectors": sorted(selectors)}) + else: + granular_marking = [{"marking_ref": marking, "selectors": sorted(selectors)}] if obj.get("granular_markings"): granular_marking.extend(obj.get("granular_markings")) @@ -240,7 +244,7 @@ def is_marked(obj, marking=None, selectors=None, inherited=False, descendants=Fa raise TypeError("Required argument 'selectors' must be provided") selectors = utils.convert_to_list(selectors) - marking = utils.convert_to_marking_list(marking) + marking = utils.convert_to_list(marking) 
utils.validate(obj, selectors) granular_markings = obj.get("granular_markings", []) diff --git a/stix2/markings/object_markings.py b/stix2/markings/object_markings.py index a775ddc..c39c036 100644 --- a/stix2/markings/object_markings.py +++ b/stix2/markings/object_markings.py @@ -31,7 +31,7 @@ def add_markings(obj, marking): A new version of the given SDO or SRO with specified markings added. """ - marking = utils.convert_to_marking_list(marking) + marking = utils.convert_to_list(marking) object_markings = set(obj.get("object_marking_refs", []) + marking) @@ -55,7 +55,7 @@ def remove_markings(obj, marking): A new version of the given SDO or SRO with specified markings removed. """ - marking = utils.convert_to_marking_list(marking) + marking = utils.convert_to_list(marking) object_markings = obj.get("object_marking_refs", []) @@ -121,7 +121,7 @@ def is_marked(obj, marking=None): provided marking refs match, True is returned. """ - marking = utils.convert_to_marking_list(marking) + marking = utils.convert_to_list(marking) object_markings = obj.get("object_marking_refs", []) if marking: diff --git a/stix2/markings/utils.py b/stix2/markings/utils.py index 1154d19..d0d38bb 100644 --- a/stix2/markings/utils.py +++ b/stix2/markings/utils.py @@ -37,12 +37,6 @@ def _validate_selector(obj, selector): return True -def _get_marking_id(marking): - if type(marking).__name__ is 'MarkingDefinition': # avoid circular import - return marking.id - return marking - - def validate(obj, selectors): """Given an SDO or SRO, check that each selector is valid.""" if selectors: @@ -63,15 +57,6 @@ def convert_to_list(data): return [data] -def convert_to_marking_list(data): - """Convert input into a list of marking identifiers.""" - if data is not None: - if isinstance(data, list): - return [_get_marking_id(x) for x in data] - else: - return [_get_marking_id(data)] - - def compress_markings(granular_markings): """ Compress granular markings list. 
If there is more than one marking diff --git a/stix2/patterns.py b/stix2/patterns.py index 03b7657..7858146 100644 --- a/stix2/patterns.py +++ b/stix2/patterns.py @@ -24,7 +24,7 @@ class TimestampConstant(_Constant): self.value = value def __str__(self): - return "t'%s'" % escape_quotes_and_backslashes(self.value) + return "'%s'" % escape_quotes_and_backslashes(self.value) class IntegerConstant(_Constant): diff --git a/stix2/sdo.py b/stix2/sdo.py index 53f965d..77c781a 100644 --- a/stix2/sdo.py +++ b/stix2/sdo.py @@ -6,7 +6,6 @@ import stix2 from .base import _STIXBase from .common import ExternalReference, GranularMarking, KillChainPhase -from .markings import MarkingsMixin from .observables import ObservableProperty from .properties import (BooleanProperty, IDProperty, IntegerProperty, ListProperty, PatternProperty, ReferenceProperty, @@ -14,11 +13,7 @@ from .properties import (BooleanProperty, IDProperty, IntegerProperty, from .utils import NOW -class STIXDomainObject(_STIXBase, MarkingsMixin): - pass - - -class AttackPattern(STIXDomainObject): +class AttackPattern(_STIXBase): _type = 'attack-pattern' _properties = OrderedDict() @@ -39,7 +34,7 @@ class AttackPattern(STIXDomainObject): ]) -class Campaign(STIXDomainObject): +class Campaign(_STIXBase): _type = 'campaign' _properties = OrderedDict() @@ -63,7 +58,7 @@ class Campaign(STIXDomainObject): ]) -class CourseOfAction(STIXDomainObject): +class CourseOfAction(_STIXBase): _type = 'course-of-action' _properties = OrderedDict() @@ -83,7 +78,7 @@ class CourseOfAction(STIXDomainObject): ]) -class Identity(STIXDomainObject): +class Identity(_STIXBase): _type = 'identity' _properties = OrderedDict() @@ -106,7 +101,7 @@ class Identity(STIXDomainObject): ]) -class Indicator(STIXDomainObject): +class Indicator(_STIXBase): _type = 'indicator' _properties = OrderedDict() @@ -130,7 +125,7 @@ class Indicator(STIXDomainObject): ]) -class IntrusionSet(STIXDomainObject): +class IntrusionSet(_STIXBase): _type = 'intrusion-set' 
_properties = OrderedDict() @@ -157,7 +152,7 @@ class IntrusionSet(STIXDomainObject): ]) -class Malware(STIXDomainObject): +class Malware(_STIXBase): _type = 'malware' _properties = OrderedDict() @@ -178,7 +173,7 @@ class Malware(STIXDomainObject): ]) -class ObservedData(STIXDomainObject): +class ObservedData(_STIXBase): _type = 'observed-data' _properties = OrderedDict() @@ -200,7 +195,7 @@ class ObservedData(STIXDomainObject): ]) -class Report(STIXDomainObject): +class Report(_STIXBase): _type = 'report' _properties = OrderedDict() @@ -222,7 +217,7 @@ class Report(STIXDomainObject): ]) -class ThreatActor(STIXDomainObject): +class ThreatActor(_STIXBase): _type = 'threat-actor' _properties = OrderedDict() @@ -250,7 +245,7 @@ class ThreatActor(STIXDomainObject): ]) -class Tool(STIXDomainObject): +class Tool(_STIXBase): _type = 'tool' _properties = OrderedDict() @@ -272,7 +267,7 @@ class Tool(STIXDomainObject): ]) -class Vulnerability(STIXDomainObject): +class Vulnerability(_STIXBase): _type = 'vulnerability' _properties = OrderedDict() @@ -321,7 +316,7 @@ def CustomObject(type='x-custom-type', properties=None): def custom_builder(cls): - class _Custom(cls, STIXDomainObject): + class _Custom(cls, _STIXBase): _type = type _properties = OrderedDict() _properties.update([ diff --git a/stix2/sources/filesystem.py b/stix2/sources/filesystem.py index c45f281..d80468f 100644 --- a/stix2/sources/filesystem.py +++ b/stix2/sources/filesystem.py @@ -102,7 +102,7 @@ class FileSystemSink(DataSink): # adding json encoded string of STIX content stix_data = parse(stix_data) if stix_data["type"] == "bundle": - for stix_obj in stix_data: + for stix_obj in stix_data["objects"]: self.add(stix_obj) else: self.add(stix_data) @@ -113,7 +113,7 @@ class FileSystemSink(DataSink): self.add(stix_obj) else: - raise ValueError("stix_data must be a STIX object(or list of, json formatted STIX(or list of) or a json formatted STIX bundle") + raise ValueError("stix_data must be a STIX object(or list 
of), json formatted STIX(or list of) or a json formatted STIX bundle") class FileSystemSource(DataSource): @@ -159,9 +159,13 @@ class FileSystemSource(DataSource): all_data = self.query(query=query, _composite_filters=_composite_filters) - stix_obj = sorted(all_data, key=lambda k: k['modified'])[0] + if all_data: + stix_obj = sorted(all_data, key=lambda k: k['modified'])[0] + stix_obj = parse(stix_obj) + else: + stix_obj = None - return parse(stix_obj) + return stix_obj def all_versions(self, stix_id, _composite_filters=None): """retrieve STIX object from file directory via STIX ID, all versions diff --git a/stix2/sources/filters.py b/stix2/sources/filters.py index a565006..3b476a3 100644 --- a/stix2/sources/filters.py +++ b/stix2/sources/filters.py @@ -250,9 +250,10 @@ def check_external_references_filter(filter_, stix_obj): for er in stix_obj["external_references"]: # grab er property name from filter field filter_field = filter_.field.split(".")[1] - r = _string_filter(filter_, er[filter_field]) - if r: - return r + if filter_field in er: + r = _string_filter(filter_, er[filter_field]) + if r: + return r return False diff --git a/stix2/sources/taxii.py b/stix2/sources/taxii.py index 63d5226..10ec745 100644 --- a/stix2/sources/taxii.py +++ b/stix2/sources/taxii.py @@ -130,10 +130,11 @@ class TAXIICollectionSource(DataSource): if len(stix_obj): stix_obj = stix_obj[0] + stix_obj = parse(stix_obj) else: stix_obj = None - return parse(stix_obj) + return stix_obj def all_versions(self, stix_id, _composite_filters=None): """retrieve STIX object from local/remote TAXII Collection diff --git a/stix2/sro.py b/stix2/sro.py index 4fa0465..af483bc 100644 --- a/stix2/sro.py +++ b/stix2/sro.py @@ -4,18 +4,13 @@ from collections import OrderedDict from .base import _STIXBase from .common import ExternalReference, GranularMarking -from .markings import MarkingsMixin from .properties import (BooleanProperty, IDProperty, IntegerProperty, ListProperty, ReferenceProperty, 
StringProperty, TimestampProperty, TypeProperty) from .utils import NOW -class STIXRelationshipObject(_STIXBase, MarkingsMixin): - pass - - -class Relationship(STIXRelationshipObject): +class Relationship(_STIXBase): _type = 'relationship' _properties = OrderedDict() @@ -50,7 +45,7 @@ class Relationship(STIXRelationshipObject): super(Relationship, self).__init__(**kwargs) -class Sighting(STIXRelationshipObject): +class Sighting(_STIXBase): _type = 'sighting' _properties = OrderedDict() _properties.update([
Add tests for FileSystem Data Stores, Sources, Sinks Specifically for adding/pushing STIX objects to these DataStores/Sinks as they can now be supplied with STIX content of a variety of forms
oasis-open/cti-python-stix2
diff --git a/stix2/test/stix2_data/attack-pattern/attack-pattern--0a3ead4e-6d47-4ccb-854c-a6a4f9d96b22.json b/stix2/test/stix2_data/attack-pattern/attack-pattern--0a3ead4e-6d47-4ccb-854c-a6a4f9d96b22.json new file mode 100755 index 0000000..47dd5f8 --- /dev/null +++ b/stix2/test/stix2_data/attack-pattern/attack-pattern--0a3ead4e-6d47-4ccb-854c-a6a4f9d96b22.json @@ -0,0 +1,42 @@ +{ + "id": "bundle--f68640b4-0cdc-42ae-b176-def1754a1ea0", + "objects": [ + { + "created": "2017-05-31T21:30:19.73501Z", + "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5", + "description": "Credential dumping is the process of obtaining account login and password information from the operating system and software. Credentials can be used to perform Windows Credential Editor, Mimikatz, and gsecdump. These tools are in use by both professional security testers and adversaries.\n\nPlaintext passwords can be obtained using tools such as Mimikatz to extract passwords stored by the Local Security Authority (LSA). If smart cards are used to authenticate to a domain using a personal identification number (PIN), then that PIN is also cached as a result and may be dumped.Mimikatz access the LSA Subsystem Service (LSASS) process by opening the process, locating the LSA secrets key, and decrypting the sections in memory where credential details are stored. Credential dumpers may also use methods for reflective DLL Injection to reduce potential indicators of malicious activity.\n\nNTLM hash dumpers open the Security Accounts Manager (SAM) on the local file system (%SystemRoot%/system32/config/SAM) or create a dump of the Registry SAM key to access stored account password hashes. Some hash dumpers will open the local file system as a device and parse to the SAM table to avoid file access defenses. Others will make an in-memory copy of the SAM table before reading hashes. Detection of compromised Legitimate Credentials in-use by adversaries may help as well. 
\n\nOn Windows 8.1 and Windows Server 2012 R2, monitor Windows Logs for LSASS.exe creation to verify that LSASS started as a protected process.\n\nMonitor processes and command-line arguments for program execution that may be indicative of credential dumping. Remote access tools may contain built-in features or incorporate existing tools like Mimikatz. PowerShell scripts also exist that contain credential dumping functionality, such as PowerSploit's Invoke-Mimikatz module,[[Citation: Powersploit]] which may require additional logging features to be configured in the operating system to collect necessary information for analysis.\n\nPlatforms: Windows Server 2003, Windows Server 2008, Windows Server 2012, Windows XP, Windows 7, Windows 8, Windows Server 2003 R2, Windows Server 2008 R2, Windows Server 2012 R2, Windows Vista, Windows 8.1\n\nData Sources: API monitoring, Process command-line parameters, Process monitoring, PowerShell logs", + "external_references": [ + { + "external_id": "T1003", + "source_name": "mitre-attack", + "url": "https://attack.mitre.org/wiki/Technique/T1003" + }, + { + "description": "Delpy, B. (2014, September 14). Mimikatz module ~ sekurlsa. Retrieved January 10, 2016.", + "source_name": "Github Mimikatz Module sekurlsa", + "url": "https://github.com/gentilkiwi/mimikatz/wiki/module-~-sekurlsa" + }, + { + "description": "PowerSploit. (n.d.). 
Retrieved December 4, 2014.", + "source_name": "Powersploit", + "url": "https://github.com/mattifestation/PowerSploit" + } + ], + "id": "attack-pattern--0a3ead4e-6d47-4ccb-854c-a6a4f9d96b22", + "kill_chain_phases": [ + { + "kill_chain_name": "mitre-attack", + "phase_name": "credential-access" + } + ], + "modified": "2017-05-31T21:30:19.73501Z", + "name": "Credential Dumping", + "object_marking_refs": [ + "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168" + ], + "type": "attack-pattern" + } + ], + "spec_version": "2.0", + "type": "bundle" +} diff --git a/stix2/test/stix2_data/attack-pattern/attack-pattern--0f20e3cb-245b-4a61-8a91-2d93f7cb0e9b.json b/stix2/test/stix2_data/attack-pattern/attack-pattern--0f20e3cb-245b-4a61-8a91-2d93f7cb0e9b.json new file mode 100755 index 0000000..13f900f --- /dev/null +++ b/stix2/test/stix2_data/attack-pattern/attack-pattern--0f20e3cb-245b-4a61-8a91-2d93f7cb0e9b.json @@ -0,0 +1,37 @@ +{ + "id": "bundle--b07d6fd6-7cc5-492d-a1eb-9ba956b329d5", + "objects": [ + { + "created": "2017-05-31T21:30:26.496201Z", + "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5", + "description": "Rootkits are programs that hide the existence of malware by intercepting and modifying operating system API calls that supply system information. Rootkits or rootkit enabling functionality may reside at the user or kernel level in the operating system or lower, to include a Hypervisor, Master Boot Record, or the Basic Input/Output System.[[Citation: Wikipedia Rootkit]]\n\nAdversaries may use rootkits to hide the presence of programs, files, network connections, services, drivers, and other system components.\n\nDetection: Some rootkit protections may be built into anti-virus or operating system software. There are dedicated rootkit detection tools that look for specific types of rootkit behavior. 
Monitor for the existence of unrecognized DLLs, devices, services, and changes to the MBR.[[Citation: Wikipedia Rootkit]]\n\nPlatforms: Windows Server 2003, Windows Server 2008, Windows Server 2012, Windows XP, Windows 7, Windows 8, Windows Server 2003 R2, Windows Server 2008 R2, Windows Server 2012 R2, Windows Vista, Windows 8.1\n\nData Sources: BIOS, MBR, System calls", + "external_references": [ + { + "external_id": "T1014", + "source_name": "mitre-attack", + "url": "https://attack.mitre.org/wiki/Technique/T1014" + }, + { + "description": "Wikipedia. (2016, June 1). Rootkit. Retrieved June 2, 2016.", + "source_name": "Wikipedia Rootkit", + "url": "https://en.wikipedia.org/wiki/Rootkit" + } + ], + "id": "attack-pattern--0f20e3cb-245b-4a61-8a91-2d93f7cb0e9b", + "kill_chain_phases": [ + { + "kill_chain_name": "mitre-attack", + "phase_name": "defense-evasion" + } + ], + "modified": "2017-05-31T21:30:26.496201Z", + "name": "Rootkit", + "object_marking_refs": [ + "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168" + ], + "type": "attack-pattern" + } + ], + "spec_version": "2.0", + "type": "bundle" +} diff --git a/stix2/test/stix2_data/attack-pattern/attack-pattern--774a3188-6ba9-4dc4-879d-d54ee48a5ce9.json b/stix2/test/stix2_data/attack-pattern/attack-pattern--774a3188-6ba9-4dc4-879d-d54ee48a5ce9.json new file mode 100755 index 0000000..db57e2c --- /dev/null +++ b/stix2/test/stix2_data/attack-pattern/attack-pattern--774a3188-6ba9-4dc4-879d-d54ee48a5ce9.json @@ -0,0 +1,32 @@ +{ + "id": "bundle--1a854c96-639e-4771-befb-e7b960a65974", + "objects": [ + { + "created": "2017-05-31T21:30:29.45894Z", + "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5", + "description": "Data, such as sensitive documents, may be exfiltrated through the use of automated processing or Scripting after being gathered during Exfiltration Over Command and Control Channel and Exfiltration Over Alternative Protocol.\n\nDetection: Monitor process file access patterns and network 
behavior. Unrecognized processes or scripts that appear to be traversing file systems and sending network traffic may be suspicious.\n\nPlatforms: Windows Server 2003, Windows Server 2008, Windows Server 2012, Windows XP, Windows 7, Windows 8, Windows Server 2003 R2, Windows Server 2008 R2, Windows Server 2012 R2, Windows Vista, Windows 8.1\n\nData Sources: File monitoring, Process monitoring, Process use of network", + "external_references": [ + { + "external_id": "T1020", + "source_name": "mitre-attack", + "url": "https://attack.mitre.org/wiki/Technique/T1020" + } + ], + "id": "attack-pattern--774a3188-6ba9-4dc4-879d-d54ee48a5ce9", + "kill_chain_phases": [ + { + "kill_chain_name": "mitre-attack", + "phase_name": "exfiltration" + } + ], + "modified": "2017-05-31T21:30:29.45894Z", + "name": "Automated Exfiltration", + "object_marking_refs": [ + "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168" + ], + "type": "attack-pattern" + } + ], + "spec_version": "2.0", + "type": "bundle" +} diff --git a/stix2/test/stix2_data/attack-pattern/attack-pattern--7e150503-88e7-4861-866b-ff1ac82c4475.json b/stix2/test/stix2_data/attack-pattern/attack-pattern--7e150503-88e7-4861-866b-ff1ac82c4475.json new file mode 100755 index 0000000..d48092d --- /dev/null +++ b/stix2/test/stix2_data/attack-pattern/attack-pattern--7e150503-88e7-4861-866b-ff1ac82c4475.json @@ -0,0 +1,32 @@ +{ + "id": "bundle--33e3e33a-38b8-4a37-9455-5b8c82d3b10a", + "objects": [ + { + "created": "2017-05-31T21:30:45.139269Z", + "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5", + "description": "Adversaries may attempt to get a listing of network connections to or from the compromised system.\nUtilities and commands that acquire this information include netstat, \"net use,\" and \"net session\" with Net.\n\nDetection: System and network discovery techniques normally occur throughout an operation as an adversary learns the environment. 
Data and events should not be viewed in isolation, but as part of a chain of behavior that could lead to other activities, such as Windows Management Instrumentation and PowerShell.\n\nPlatforms: Windows Server 2003, Windows Server 2008, Windows Server 2012, Windows XP, Windows 7, Windows 8, Windows Server 2003 R2, Windows Server 2008 R2, Windows Server 2012 R2, Windows Vista, Windows 8.1\n\nData Sources: Process command-line parameters, Process monitoring", + "external_references": [ + { + "external_id": "T1049", + "source_name": "mitre-attack", + "url": "https://attack.mitre.org/wiki/Technique/T1049" + } + ], + "id": "attack-pattern--7e150503-88e7-4861-866b-ff1ac82c4475", + "kill_chain_phases": [ + { + "kill_chain_name": "mitre-attack", + "phase_name": "discovery" + } + ], + "modified": "2017-05-31T21:30:45.139269Z", + "name": "Local Network Connections Discovery", + "object_marking_refs": [ + "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168" + ], + "type": "attack-pattern" + } + ], + "spec_version": "2.0", + "type": "bundle" +} diff --git a/stix2/test/stix2_data/attack-pattern/attack-pattern--ae676644-d2d2-41b7-af7e-9bed1b55898c.json b/stix2/test/stix2_data/attack-pattern/attack-pattern--ae676644-d2d2-41b7-af7e-9bed1b55898c.json new file mode 100755 index 0000000..031419e --- /dev/null +++ b/stix2/test/stix2_data/attack-pattern/attack-pattern--ae676644-d2d2-41b7-af7e-9bed1b55898c.json @@ -0,0 +1,32 @@ +{ + "id": "bundle--a87938c5-cc1e-4e06-a8a3-b10243ae397d", + "objects": [ + { + "created": "2017-05-31T21:30:41.022897Z", + "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5", + "description": "Sensitive data can be collected from remote systems via shared network drives (host shared directory, network file server, etc.) that are accessible from the current system prior to cmd may be used to gather information.\n\nDetection: Monitor processes and command-line arguments for actions that could be taken to collect files from a network share. 
Remote access tools with built-in features may interact directly with the Windows API to gather data. Data may also be acquired through Windows system management tools such as Windows Management Instrumentation and PowerShell.\n\nPlatforms: Windows Server 2003, Windows Server 2008, Windows Server 2012, Windows XP, Windows 7, Windows 8, Windows Server 2003 R2, Windows Server 2008 R2, Windows Server 2012 R2, Windows Vista, Windows 8.1\n\nData Sources: File monitoring, Process monitoring, Process command-line parameters", + "external_references": [ + { + "external_id": "T1039", + "source_name": "mitre-attack", + "url": "https://attack.mitre.org/wiki/Technique/T1039" + } + ], + "id": "attack-pattern--ae676644-d2d2-41b7-af7e-9bed1b55898c", + "kill_chain_phases": [ + { + "kill_chain_name": "mitre-attack", + "phase_name": "collection" + } + ], + "modified": "2017-05-31T21:30:41.022897Z", + "name": "Data from Network Shared Drive", + "object_marking_refs": [ + "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168" + ], + "type": "attack-pattern" + } + ], + "spec_version": "2.0", + "type": "bundle" +} diff --git a/stix2/test/stix2_data/attack-pattern/attack-pattern--b3d682b6-98f2-4fb0-aa3b-b4df007ca70a.json b/stix2/test/stix2_data/attack-pattern/attack-pattern--b3d682b6-98f2-4fb0-aa3b-b4df007ca70a.json new file mode 100755 index 0000000..67c380c --- /dev/null +++ b/stix2/test/stix2_data/attack-pattern/attack-pattern--b3d682b6-98f2-4fb0-aa3b-b4df007ca70a.json @@ -0,0 +1,32 @@ +{ + "id": "bundle--5ddaeff9-eca7-4094-9e65-4f53da21a444", + "objects": [ + { + "created": "2017-05-31T21:30:32.662702Z", + "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5", + "description": "Adversaries may attempt to make an executable or file difficult to discover or analyze by encrypting, encoding, or otherwise obfuscating its contents on the system.\n\nDetection: Detection of file obfuscation is difficult unless artifacts are left behind by the obfuscation process that are 
uniquely detectable with a signature. If detection of the obfuscation itself is not possible, it may be possible to detect the malicious activity that caused the obfuscated file (for example, the method that was used to write, read, or modify the file on the file system).\n\nPlatforms: Windows Server 2003, Windows Server 2008, Windows Server 2012, Windows XP, Windows 7, Windows 8, Windows Server 2003 R2, Windows Server 2008 R2, Windows Server 2012 R2, Windows Vista, Windows 8.1\n\nData Sources: Network protocol analysis, Process use of network, Binary file metadata, File monitoring, Malware reverse engineering", + "external_references": [ + { + "external_id": "T1027", + "source_name": "mitre-attack", + "url": "https://attack.mitre.org/wiki/Technique/T1027" + } + ], + "id": "attack-pattern--b3d682b6-98f2-4fb0-aa3b-b4df007ca70a", + "kill_chain_phases": [ + { + "kill_chain_name": "mitre-attack", + "phase_name": "defense-evasion" + } + ], + "modified": "2017-05-31T21:30:32.662702Z", + "name": "Obfuscated Files or Information", + "object_marking_refs": [ + "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168" + ], + "type": "attack-pattern" + } + ], + "spec_version": "2.0", + "type": "bundle" +} diff --git a/stix2/test/stix2_data/course-of-action/course-of-action--95ddb356-7ba0-4bd9-a889-247262b8946f.json b/stix2/test/stix2_data/course-of-action/course-of-action--95ddb356-7ba0-4bd9-a889-247262b8946f.json new file mode 100755 index 0000000..bf14aa7 --- /dev/null +++ b/stix2/test/stix2_data/course-of-action/course-of-action--95ddb356-7ba0-4bd9-a889-247262b8946f.json @@ -0,0 +1,16 @@ +{ + "id": "bundle--a42d26fe-c938-4074-a1b3-50d852e6f0bd", + "objects": [ + { + "created": "2017-05-31T21:30:26.495974Z", + "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5", + "description": "Identify potentially malicious software that may contain rootkit functionality, and audit and/or block it by using whitelisting[[CiteRef::Beechey 2010]] tools, like 
AppLocker,[[CiteRef::Windows Commands JPCERT]][[CiteRef::NSA MS AppLocker]] or Software Restriction Policies[[CiteRef::Corio 2008]] where appropriate.[[CiteRef::TechNet Applocker vs SRP]]", + "id": "course-of-action--95ddb356-7ba0-4bd9-a889-247262b8946f", + "modified": "2017-05-31T21:30:26.495974Z", + "name": "Rootkit Mitigation", + "type": "course-of-action" + } + ], + "spec_version": "2.0", + "type": "bundle" +} \ No newline at end of file diff --git a/stix2/test/stix2_data/course-of-action/course-of-action--d9727aee-48b8-4fdb-89e2-4c49746ba4dd.json b/stix2/test/stix2_data/course-of-action/course-of-action--d9727aee-48b8-4fdb-89e2-4c49746ba4dd.json new file mode 100755 index 0000000..5bfb8bb --- /dev/null +++ b/stix2/test/stix2_data/course-of-action/course-of-action--d9727aee-48b8-4fdb-89e2-4c49746ba4dd.json @@ -0,0 +1,16 @@ +{ + "id": "bundle--2ed6ab6a-ca68-414f-8493-e4db8b75dd51", + "objects": [ + { + "created": "2017-05-31T21:30:41.022744Z", + "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5", + "description": "Identify unnecessary system utilities or potentially malicious software that may be used to collect data from a network share, and audit and/or block them by using whitelisting[[CiteRef::Beechey 2010]] tools, like AppLocker,[[CiteRef::Windows Commands JPCERT]][[CiteRef::NSA MS AppLocker]] or Software Restriction Policies[[CiteRef::Corio 2008]] where appropriate.[[CiteRef::TechNet Applocker vs SRP]]", + "id": "course-of-action--d9727aee-48b8-4fdb-89e2-4c49746ba4dd", + "modified": "2017-05-31T21:30:41.022744Z", + "name": "Data from Network Shared Drive Mitigation", + "type": "course-of-action" + } + ], + "spec_version": "2.0", + "type": "bundle" +} \ No newline at end of file diff --git a/stix2/test/stix2_data/identity/identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5.json b/stix2/test/stix2_data/identity/identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5.json new file mode 100755 index 0000000..77d4464 --- /dev/null +++ 
b/stix2/test/stix2_data/identity/identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5.json @@ -0,0 +1,15 @@ +{ + "id": "bundle--81884287-2548-47fc-a997-39489ddd5462", + "objects": [ + { + "created": "2017-06-01T00:00:00Z", + "id": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5", + "identity_class": "organization", + "modified": "2017-06-01T00:00:00Z", + "name": "The MITRE Corporation", + "type": "identity" + } + ], + "spec_version": "2.0", + "type": "bundle" +} \ No newline at end of file diff --git a/stix2/test/stix2_data/intrusion-set/intrusion-set--a653431d-6a5e-4600-8ad3-609b5af57064.json b/stix2/test/stix2_data/intrusion-set/intrusion-set--a653431d-6a5e-4600-8ad3-609b5af57064.json new file mode 100755 index 0000000..10ef3a5 --- /dev/null +++ b/stix2/test/stix2_data/intrusion-set/intrusion-set--a653431d-6a5e-4600-8ad3-609b5af57064.json @@ -0,0 +1,54 @@ +{ + "id": "bundle--7790ee4c-2d57-419a-bc9d-8805b5bb4118", + "objects": [ + { + "aliases": [ + "Deep Panda", + "Shell Crew", + "WebMasters", + "KungFu Kittens", + "PinkPanther", + "Black Vine" + ], + "created": "2017-05-31T21:31:49.412497Z", + "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5", + "description": "Deep Panda is a suspected Chinese threat group known to target many industries, including government, defense, financial, and telecommunications.Deep Panda.Deep Panda also appears to be known as Black Vine based on the attribution of both group names to the Anthem intrusion.[[Citation: Symantec Black Vine]]", + "external_references": [ + { + "external_id": "G0009", + "source_name": "mitre-attack", + "url": "https://attack.mitre.org/wiki/Group/G0009" + }, + { + "description": "Alperovitch, D. (2014, July 7). Deep in Thought: Chinese Targeting of National Security Think Tanks. Retrieved November 12, 2014.", + "source_name": "Alperovitch 2014", + "url": "http://blog.crowdstrike.com/deep-thought-chinese-targeting-national-security-think-tanks/" + }, + { + "description": "DiMaggio, J.. 
(2015, August 6). The Black Vine cyberespionage group. Retrieved January 26, 2016.", + "source_name": "Symantec Black Vine", + "url": "http://www.symantec.com/content/en/us/enterprise/media/security%20response/whitepapers/the-black-vine-cyberespionage-group.pdf" + }, + { + "description": "RSA Incident Response. (2014, January). RSA Incident Response Emerging Threat Profile: Shell Crew. Retrieved January 14, 2016.", + "source_name": "RSA Shell Crew", + "url": "https://www.emc.com/collateral/white-papers/h12756-wp-shell-crew.pdf" + }, + { + "description": "ThreatConnect Research Team. (2015, February 27). The Anthem Hack: All Roads Lead to China. Retrieved January 26, 2016.", + "source_name": "ThreatConnect Anthem", + "url": "https://www.threatconnect.com/the-anthem-hack-all-roads-lead-to-china/" + } + ], + "id": "intrusion-set--a653431d-6a5e-4600-8ad3-609b5af57064", + "modified": "2017-05-31T21:31:49.412497Z", + "name": "Deep Panda", + "object_marking_refs": [ + "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168" + ], + "type": "intrusion-set" + } + ], + "spec_version": "2.0", + "type": "bundle" +} \ No newline at end of file diff --git a/stix2/test/stix2_data/intrusion-set/intrusion-set--f3bdec95-3d62-42d9-a840-29630f6cdc1a.json b/stix2/test/stix2_data/intrusion-set/intrusion-set--f3bdec95-3d62-42d9-a840-29630f6cdc1a.json new file mode 100755 index 0000000..84b75b1 --- /dev/null +++ b/stix2/test/stix2_data/intrusion-set/intrusion-set--f3bdec95-3d62-42d9-a840-29630f6cdc1a.json @@ -0,0 +1,44 @@ +{ + "id": "bundle--96a6ea7a-fcff-4aab-925b-a494bcdf0480", + "objects": [ + { + "aliases": [ + "DragonOK" + ], + "created": "2017-05-31T21:31:53.197755Z", + "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5", + "description": "DragonOK is a threat group that has targeted Japanese organizations with phishing emails. 
Due to overlapping TTPs, including similar custom tools, DragonOK is thought to have a direct or indirect relationship with the threat group Moafee. [[Citation: Operation Quantum Entanglement]][[Citation: Symbiotic APT Groups]] It is known to use a variety of malware, including Sysget/HelloBridge, PlugX, PoisonIvy, FormerFirstRat, NFlog, and NewCT. [[Citation: New DragonOK]]", + "external_references": [ + { + "external_id": "G0017", + "source_name": "mitre-attack", + "url": "https://attack.mitre.org/wiki/Group/G0017" + }, + { + "description": "Haq, T., Moran, N., Vashisht, S., Scott, M. (2014, September). OPERATION QUANTUM ENTANGLEMENT. Retrieved November 4, 2015.", + "source_name": "Operation Quantum Entanglement", + "url": "https://www.fireeye.com/content/dam/fireeye-www/global/en/current-threats/pdfs/wp-operation-quantum-entanglement.pdf" + }, + { + "description": "Haq, T. (2014, October). An Insight into Symbiotic APT Groups. Retrieved November 4, 2015.", + "source_name": "Symbiotic APT Groups", + "url": "https://dl.mandiant.com/EE/library/MIRcon2014/MIRcon%202014%20R&D%20Track%20Insight%20into%20Symbiotic%20APT.pdf" + }, + { + "description": "Miller-Osborn, J., Grunzweig, J.. (2015, April). Unit 42 Identifies New DragonOK Backdoor Malware Deployed Against Japanese Targets. 
Retrieved November 4, 2015.", + "source_name": "New DragonOK", + "url": "http://researchcenter.paloaltonetworks.com/2015/04/unit-42-identifies-new-dragonok-backdoor-malware-deployed-against-japanese-targets/" + } + ], + "id": "intrusion-set--f3bdec95-3d62-42d9-a840-29630f6cdc1a", + "modified": "2017-05-31T21:31:53.197755Z", + "name": "DragonOK", + "object_marking_refs": [ + "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168" + ], + "type": "intrusion-set" + } + ], + "spec_version": "2.0", + "type": "bundle" +} \ No newline at end of file diff --git a/stix2/test/stix2_data/malware/malware--6b616fc1-1505-48e3-8b2c-0d19337bff38.json b/stix2/test/stix2_data/malware/malware--6b616fc1-1505-48e3-8b2c-0d19337bff38.json new file mode 100755 index 0000000..669f00c --- /dev/null +++ b/stix2/test/stix2_data/malware/malware--6b616fc1-1505-48e3-8b2c-0d19337bff38.json @@ -0,0 +1,34 @@ +{ + "id": "bundle--f64de948-7067-4534-8018-85f03d470625", + "objects": [ + { + "created": "2017-05-31T21:32:58.226477Z", + "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5", + "description": "Rover is malware suspected of being used for espionage purposes. It was used in 2015 in a targeted email sent to an Indian Ambassador to Afghanistan.[[Citation: Palo Alto Rover]]", + "external_references": [ + { + "external_id": "S0090", + "source_name": "mitre-attack", + "url": "https://attack.mitre.org/wiki/Software/S0090" + }, + { + "description": "Ray, V., Hayashi, K. (2016, February 29). New Malware \u2018Rover\u2019 Targets Indian Ambassador to Afghanistan. 
Retrieved February 29, 2016.", + "source_name": "Palo Alto Rover", + "url": "http://researchcenter.paloaltonetworks.com/2016/02/new-malware-rover-targets-indian-ambassador-to-afghanistan/" + } + ], + "id": "malware--6b616fc1-1505-48e3-8b2c-0d19337bff38", + "labels": [ + "malware" + ], + "modified": "2017-05-31T21:32:58.226477Z", + "name": "Rover", + "object_marking_refs": [ + "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168" + ], + "type": "malware" + } + ], + "spec_version": "2.0", + "type": "bundle" +} \ No newline at end of file diff --git a/stix2/test/stix2_data/malware/malware--92ec0cbd-2c30-44a2-b270-73f4ec949841.json b/stix2/test/stix2_data/malware/malware--92ec0cbd-2c30-44a2-b270-73f4ec949841.json new file mode 100755 index 0000000..9eaf8ad --- /dev/null +++ b/stix2/test/stix2_data/malware/malware--92ec0cbd-2c30-44a2-b270-73f4ec949841.json @@ -0,0 +1,34 @@ +{ + "id": "bundle--c633942b-545c-4c87-91b7-9fe5740365e0", + "objects": [ + { + "created": "2017-05-31T21:33:26.565056Z", + "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5", + "description": "RTM is custom malware written in Delphi. It is used by the group of the same name (RTM).[[Citation: ESET RTM Feb 2017]]", + "external_references": [ + { + "external_id": "S0148", + "source_name": "mitre-attack", + "url": "https://attack.mitre.org/wiki/Software/S0148" + }, + { + "description": "Faou, M. and Boutin, J.. (2017, February). Read The Manual: A Guide to the RTM Banking Trojan. 
Retrieved March 9, 2017.", + "source_name": "ESET RTM Feb 2017", + "url": "https://www.welivesecurity.com/wp-content/uploads/2017/02/Read-The-Manual.pdf" + } + ], + "id": "malware--92ec0cbd-2c30-44a2-b270-73f4ec949841", + "labels": [ + "malware" + ], + "modified": "2017-05-31T21:33:26.565056Z", + "name": "RTM", + "object_marking_refs": [ + "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168" + ], + "type": "malware" + } + ], + "spec_version": "2.0", + "type": "bundle" +} \ No newline at end of file diff --git a/stix2/test/stix2_data/malware/malware--96b08451-b27a-4ff6-893f-790e26393a8e.json b/stix2/test/stix2_data/malware/malware--96b08451-b27a-4ff6-893f-790e26393a8e.json new file mode 100755 index 0000000..224f6a9 --- /dev/null +++ b/stix2/test/stix2_data/malware/malware--96b08451-b27a-4ff6-893f-790e26393a8e.json @@ -0,0 +1,34 @@ +{ + "id": "bundle--09ce4338-8741-4fcf-9738-d216c8e40974", + "objects": [ + { + "created": "2017-05-31T21:32:48.482655Z", + "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5", + "description": "Sakula is a remote access tool (RAT) that first surfaced in 2012 and was used in intrusions throughout 2015.[[Citation: Dell Sakula]]\n\nAliases: Sakula, Sakurel, VIPER", + "external_references": [ + { + "external_id": "S0074", + "source_name": "mitre-attack", + "url": "https://attack.mitre.org/wiki/Software/S0074" + }, + { + "description": "Dell SecureWorks Counter Threat Unit Threat Intelligence. (2015, July 30). Sakula Malware Family. 
Retrieved January 26, 2016.", + "source_name": "Dell Sakula", + "url": "http://www.secureworks.com/cyber-threat-intelligence/threats/sakula-malware-family/" + } + ], + "id": "malware--96b08451-b27a-4ff6-893f-790e26393a8e", + "labels": [ + "malware" + ], + "modified": "2017-05-31T21:32:48.482655Z", + "name": "Sakula", + "object_marking_refs": [ + "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168" + ], + "type": "malware" + } + ], + "spec_version": "2.0", + "type": "bundle" +} diff --git a/stix2/test/stix2_data/malware/malware--b42378e0-f147-496f-992a-26a49705395b.json b/stix2/test/stix2_data/malware/malware--b42378e0-f147-496f-992a-26a49705395b.json new file mode 100755 index 0000000..3e1c870 --- /dev/null +++ b/stix2/test/stix2_data/malware/malware--b42378e0-f147-496f-992a-26a49705395b.json @@ -0,0 +1,34 @@ +{ + "id": "bundle--611947ce-ae3b-4fdb-b297-aed8eab22e4f", + "objects": [ + { + "created": "2017-05-31T21:32:15.263882Z", + "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5", + "description": "PoisonIvy is a popular remote access tool (RAT) that has been used by many groups.[[Citation: FireEye Poison Ivy]]\n\nAliases: PoisonIvy, Poison Ivy", + "external_references": [ + { + "external_id": "S0012", + "source_name": "mitre-attack", + "url": "https://attack.mitre.org/wiki/Software/S0012" + }, + { + "description": "FireEye. (2014). POISON IVY: Assessing Damage and Extracting Intelligence. 
Retrieved November 12, 2014.", + "source_name": "FireEye Poison Ivy", + "url": "https://www.fireeye.com/content/dam/fireeye-www/global/en/current-threats/pdfs/rpt-poison-ivy.pdf" + } + ], + "id": "malware--b42378e0-f147-496f-992a-26a49705395b", + "labels": [ + "malware" + ], + "modified": "2017-05-31T21:32:15.263882Z", + "name": "PoisonIvy", + "object_marking_refs": [ + "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168" + ], + "type": "malware" + } + ], + "spec_version": "2.0", + "type": "bundle" +} diff --git a/stix2/test/stix2_data/marking-definition/marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168.json b/stix2/test/stix2_data/marking-definition/marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168.json new file mode 100755 index 0000000..bcae183 --- /dev/null +++ b/stix2/test/stix2_data/marking-definition/marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168.json @@ -0,0 +1,16 @@ +{ + "id": "bundle--0f4a3025-7788-4f25-a0c7-26171056dfae", + "objects": [ + { + "created": "2017-06-01T00:00:00Z", + "definition": { + "statement": "Copyright 2017, The MITRE Corporation" + }, + "definition_type": "statement", + "id": "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168", + "type": "marking-definition" + } + ], + "spec_version": "2.0", + "type": "bundle" +} \ No newline at end of file diff --git a/stix2/test/stix2_data/relationship/relationship--0d4a7788-7f3b-4df8-a498-31a38003c883.json b/stix2/test/stix2_data/relationship/relationship--0d4a7788-7f3b-4df8-a498-31a38003c883.json new file mode 100755 index 0000000..ac59925 --- /dev/null +++ b/stix2/test/stix2_data/relationship/relationship--0d4a7788-7f3b-4df8-a498-31a38003c883.json @@ -0,0 +1,20 @@ +{ + "id": "bundle--7e715462-dd9d-40b9-968a-10ef0ecf126d", + "objects": [ + { + "created": "2017-05-31T21:33:27.182784Z", + "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5", + "id": "relationship--0d4a7788-7f3b-4df8-a498-31a38003c883", + "modified": "2017-05-31T21:33:27.182784Z", + 
"object_marking_refs": [ + "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168" + ], + "relationship_type": "uses", + "source_ref": "attack-pattern--b3d682b6-98f2-4fb0-aa3b-b4df007ca70a", + "target_ref": "malware--92ec0cbd-2c30-44a2-b270-73f4ec949841", + "type": "relationship" + } + ], + "spec_version": "2.0", + "type": "bundle" +} \ No newline at end of file diff --git a/stix2/test/stix2_data/relationship/relationship--0e55ee98-0c6d-43d4-b424-b18a0036b227.json b/stix2/test/stix2_data/relationship/relationship--0e55ee98-0c6d-43d4-b424-b18a0036b227.json new file mode 100755 index 0000000..ee97edf --- /dev/null +++ b/stix2/test/stix2_data/relationship/relationship--0e55ee98-0c6d-43d4-b424-b18a0036b227.json @@ -0,0 +1,20 @@ +{ + "id": "bundle--a53eef35-abfc-4bcd-b84e-a048f7b4a9bf", + "objects": [ + { + "created": "2017-05-31T21:33:27.082801Z", + "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5", + "id": "relationship--0e55ee98-0c6d-43d4-b424-b18a0036b227", + "modified": "2017-05-31T21:33:27.082801Z", + "object_marking_refs": [ + "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168" + ], + "relationship_type": "uses", + "source_ref": "attack-pattern--0a3ead4e-6d47-4ccb-854c-a6a4f9d96b22", + "target_ref": "tool--242f3da3-4425-4d11-8f5c-b842886da966", + "type": "relationship" + } + ], + "spec_version": "2.0", + "type": "bundle" +} \ No newline at end of file diff --git a/stix2/test/stix2_data/relationship/relationship--1e91cd45-a725-4965-abe3-700694374432.json b/stix2/test/stix2_data/relationship/relationship--1e91cd45-a725-4965-abe3-700694374432.json new file mode 100755 index 0000000..ff0d8cc --- /dev/null +++ b/stix2/test/stix2_data/relationship/relationship--1e91cd45-a725-4965-abe3-700694374432.json @@ -0,0 +1,20 @@ +{ + "id": "bundle--0b9f6412-314f-44e3-8779-9738c9578ef5", + "objects": [ + { + "created": "2017-05-31T21:33:27.018782Z", + "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5", + "id": 
"relationship--1e91cd45-a725-4965-abe3-700694374432", + "modified": "2017-05-31T21:33:27.018782Z", + "object_marking_refs": [ + "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168" + ], + "relationship_type": "mitigates", + "source_ref": "course-of-action--95ddb356-7ba0-4bd9-a889-247262b8946f", + "target_ref": "attack-pattern--0f20e3cb-245b-4a61-8a91-2d93f7cb0e9b", + "type": "relationship" + } + ], + "spec_version": "2.0", + "type": "bundle" +} \ No newline at end of file diff --git a/stix2/test/stix2_data/relationship/relationship--3a3084f9-0302-4fd5-9b8a-e0db10f5345e.json b/stix2/test/stix2_data/relationship/relationship--3a3084f9-0302-4fd5-9b8a-e0db10f5345e.json new file mode 100755 index 0000000..36d1482 --- /dev/null +++ b/stix2/test/stix2_data/relationship/relationship--3a3084f9-0302-4fd5-9b8a-e0db10f5345e.json @@ -0,0 +1,20 @@ +{ + "id": "bundle--6d5b04a8-efb2-4179-990e-74f1dcc76e0c", + "objects": [ + { + "created": "2017-05-31T21:33:27.100701Z", + "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5", + "id": "relationship--3a3084f9-0302-4fd5-9b8a-e0db10f5345e", + "modified": "2017-05-31T21:33:27.100701Z", + "object_marking_refs": [ + "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168" + ], + "relationship_type": "uses", + "source_ref": "attack-pattern--7e150503-88e7-4861-866b-ff1ac82c4475", + "target_ref": "tool--03342581-f790-4f03-ba41-e82e67392e23", + "type": "relationship" + } + ], + "spec_version": "2.0", + "type": "bundle" +} \ No newline at end of file diff --git a/stix2/test/stix2_data/relationship/relationship--3a3ed0b2-0c38-441f-ac40-53b873e545d1.json b/stix2/test/stix2_data/relationship/relationship--3a3ed0b2-0c38-441f-ac40-53b873e545d1.json new file mode 100755 index 0000000..888cc3b --- /dev/null +++ b/stix2/test/stix2_data/relationship/relationship--3a3ed0b2-0c38-441f-ac40-53b873e545d1.json @@ -0,0 +1,20 @@ +{ + "id": "bundle--a7efc025-040d-49c7-bf97-e5a1120ecacc", + "objects": [ + { + "created": 
"2017-05-31T21:33:27.143973Z", + "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5", + "id": "relationship--3a3ed0b2-0c38-441f-ac40-53b873e545d1", + "modified": "2017-05-31T21:33:27.143973Z", + "object_marking_refs": [ + "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168" + ], + "relationship_type": "uses", + "source_ref": "attack-pattern--774a3188-6ba9-4dc4-879d-d54ee48a5ce9", + "target_ref": "malware--6b616fc1-1505-48e3-8b2c-0d19337bff38", + "type": "relationship" + } + ], + "spec_version": "2.0", + "type": "bundle" +} \ No newline at end of file diff --git a/stix2/test/stix2_data/relationship/relationship--592d0c31-e61f-495e-a60e-70d7be59a719.json b/stix2/test/stix2_data/relationship/relationship--592d0c31-e61f-495e-a60e-70d7be59a719.json new file mode 100755 index 0000000..d9078d1 --- /dev/null +++ b/stix2/test/stix2_data/relationship/relationship--592d0c31-e61f-495e-a60e-70d7be59a719.json @@ -0,0 +1,20 @@ +{ + "id": "bundle--9f013d47-7704-41c2-9749-23d0d94af94d", + "objects": [ + { + "created": "2017-05-31T21:33:27.021562Z", + "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5", + "id": "relationship--592d0c31-e61f-495e-a60e-70d7be59a719", + "modified": "2017-05-31T21:33:27.021562Z", + "object_marking_refs": [ + "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168" + ], + "relationship_type": "mitigates", + "source_ref": "course-of-action--d9727aee-48b8-4fdb-89e2-4c49746ba4dd", + "target_ref": "attack-pattern--ae676644-d2d2-41b7-af7e-9bed1b55898c", + "type": "relationship" + } + ], + "spec_version": "2.0", + "type": "bundle" +} \ No newline at end of file diff --git a/stix2/test/stix2_data/relationship/relationship--70dc6b5c-c524-429e-a6ab-0dd40f0482c1.json b/stix2/test/stix2_data/relationship/relationship--70dc6b5c-c524-429e-a6ab-0dd40f0482c1.json new file mode 100755 index 0000000..ef1c4b2 --- /dev/null +++ b/stix2/test/stix2_data/relationship/relationship--70dc6b5c-c524-429e-a6ab-0dd40f0482c1.json @@ -0,0 +1,20 @@ 
+{ + "id": "bundle--15167b24-4cee-4c96-a140-32a6c37df4b4", + "objects": [ + { + "created": "2017-05-31T21:33:27.044387Z", + "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5", + "id": "relationship--70dc6b5c-c524-429e-a6ab-0dd40f0482c1", + "modified": "2017-05-31T21:33:27.044387Z", + "object_marking_refs": [ + "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168" + ], + "relationship_type": "uses", + "source_ref": "intrusion-set--a653431d-6a5e-4600-8ad3-609b5af57064", + "target_ref": "malware--96b08451-b27a-4ff6-893f-790e26393a8e", + "type": "relationship" + } + ], + "spec_version": "2.0", + "type": "bundle" +} \ No newline at end of file diff --git a/stix2/test/stix2_data/relationship/relationship--8797579b-e3be-4209-a71b-255a4d08243d.json b/stix2/test/stix2_data/relationship/relationship--8797579b-e3be-4209-a71b-255a4d08243d.json new file mode 100755 index 0000000..1f20179 --- /dev/null +++ b/stix2/test/stix2_data/relationship/relationship--8797579b-e3be-4209-a71b-255a4d08243d.json @@ -0,0 +1,20 @@ +{ + "id": "bundle--ff845dca-7036-416f-aae0-95030994c49f", + "objects": [ + { + "created": "2017-05-31T21:33:27.051532Z", + "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5", + "id": "relationship--8797579b-e3be-4209-a71b-255a4d08243d", + "modified": "2017-05-31T21:33:27.051532Z", + "object_marking_refs": [ + "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168" + ], + "relationship_type": "uses", + "source_ref": "intrusion-set--f3bdec95-3d62-42d9-a840-29630f6cdc1a", + "target_ref": "malware--b42378e0-f147-496f-992a-26a49705395b", + "type": "relationship" + } + ], + "spec_version": "2.0", + "type": "bundle" +} \ No newline at end of file diff --git a/stix2/test/stix2_data/tool/tool--03342581-f790-4f03-ba41-e82e67392e23.json b/stix2/test/stix2_data/tool/tool--03342581-f790-4f03-ba41-e82e67392e23.json new file mode 100755 index 0000000..9d47880 --- /dev/null +++ 
b/stix2/test/stix2_data/tool/tool--03342581-f790-4f03-ba41-e82e67392e23.json @@ -0,0 +1,39 @@ +{ + "id": "bundle--d8826afc-1561-4362-a4e3-05a4c2c3ac3c", + "objects": [ + { + "created": "2017-05-31T21:32:31.601148Z", + "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5", + "description": "The Net utility is a component of the Windows operating system. It is used in command-line operations for control of users, groups, services, and network connections.Net has a great deal of functionality,[[Citation: Savill 1999]] much of which is useful for an adversary, such as gathering system and network information for [[Discovery]], moving laterally through [[Windows admin shares]] using <code>net use</code> commands, and interacting with services.\n\nAliases: Net, net.exe", + "external_references": [ + { + "external_id": "S0039", + "source_name": "mitre-attack", + "url": "https://attack.mitre.org/wiki/Software/S0039" + }, + { + "description": "Microsoft. (2006, October 18). Net.exe Utility. Retrieved September 22, 2015.", + "source_name": "Microsoft Net Utility", + "url": "https://msdn.microsoft.com/en-us/library/aa939914" + }, + { + "description": "Savill, J. (1999, March 4). Net.exe reference. 
Retrieved September 22, 2015.", + "source_name": "Savill 1999", + "url": "http://windowsitpro.com/windows/netexe-reference" + } + ], + "id": "tool--03342581-f790-4f03-ba41-e82e67392e23", + "labels": [ + "tool" + ], + "modified": "2017-05-31T21:32:31.601148Z", + "name": "Net", + "object_marking_refs": [ + "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168" + ], + "type": "tool" + } + ], + "spec_version": "2.0", + "type": "bundle" +} diff --git a/stix2/test/stix2_data/tool/tool--242f3da3-4425-4d11-8f5c-b842886da966.json b/stix2/test/stix2_data/tool/tool--242f3da3-4425-4d11-8f5c-b842886da966.json new file mode 100755 index 0000000..281888e --- /dev/null +++ b/stix2/test/stix2_data/tool/tool--242f3da3-4425-4d11-8f5c-b842886da966.json @@ -0,0 +1,34 @@ +{ + "id": "bundle--7dbde18f-6f14-4bf0-8389-505c89d6d5a6", + "objects": [ + { + "created": "2017-05-31T21:32:12.684914Z", + "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5", + "description": "Windows Credential Editor is a password dumping tool.[[Citation: Amplia WCE]]\n\nAliases: Windows Credential Editor, WCE", + "external_references": [ + { + "external_id": "S0005", + "source_name": "mitre-attack", + "url": "https://attack.mitre.org/wiki/Software/S0005" + }, + { + "description": "Amplia Security. (n.d.). Windows Credentials Editor (WCE) F.A.Q.. 
Retrieved December 17, 2015.", + "source_name": "Amplia WCE", + "url": "http://www.ampliasecurity.com/research/wcefaq.html" + } + ], + "id": "tool--242f3da3-4425-4d11-8f5c-b842886da966", + "labels": [ + "tool" + ], + "modified": "2017-05-31T21:32:12.684914Z", + "name": "Windows Credential Editor", + "object_marking_refs": [ + "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168" + ], + "type": "tool" + } + ], + "spec_version": "2.0", + "type": "bundle" +} diff --git a/stix2/test/test_data_sources.py b/stix2/test/test_data_sources.py index e34d603..35e1e23 100644 --- a/stix2/test/test_data_sources.py +++ b/stix2/test/test_data_sources.py @@ -1,13 +1,17 @@ +import os + import pytest from taxii2client import Collection +from stix2 import (Campaign, FileSystemSink, FileSystemSource, FileSystemStore, + Filter, MemorySource, MemoryStore) from stix2.sources import (CompositeDataSource, DataSink, DataSource, DataStore, make_id, taxii) -from stix2.sources.filters import Filter, apply_common_filters -from stix2.sources.memory import MemorySource, MemoryStore +from stix2.sources.filters import apply_common_filters from stix2.utils import deduplicate COLLECTION_URL = 'https://example.com/api1/collections/91a7b528-80eb-42ed-a74d-c6fbd5a26116/' +FS_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "stix2_data") class MockTAXIIClient(object): @@ -508,3 +512,207 @@ def test_composite_datasource_operations(): # STIX_OBJS2 has indicator with later time, one with different id, one with # original time in STIX_OBJS1 assert len(results) == 3 + + +def test_filesytem_source(): + # creation + fs_source = FileSystemSource(FS_PATH) + assert fs_source.stix_dir == FS_PATH + + # get object + mal = fs_source.get("malware--6b616fc1-1505-48e3-8b2c-0d19337bff38") + assert mal.id == "malware--6b616fc1-1505-48e3-8b2c-0d19337bff38" + assert mal.name == "Rover" + + # all versions - (currently not a true all versions call as FileSystem cant have multiple versions) + id_ = 
fs_source.get("identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5") + assert id_.id == "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5" + assert id_.name == "The MITRE Corporation" + assert id_.type == "identity" + + # query + intrusion_sets = fs_source.query([Filter("type", '=', "intrusion-set")]) + assert len(intrusion_sets) == 2 + assert "intrusion-set--a653431d-6a5e-4600-8ad3-609b5af57064" in [is_.id for is_ in intrusion_sets] + assert "intrusion-set--f3bdec95-3d62-42d9-a840-29630f6cdc1a" in [is_.id for is_ in intrusion_sets] + + is_1 = [is_ for is_ in intrusion_sets if is_.id == "intrusion-set--f3bdec95-3d62-42d9-a840-29630f6cdc1a"][0] + assert "DragonOK" in is_1.aliases + assert len(is_1.external_references) == 4 + + # query2 + is_2 = fs_source.query([Filter("external_references.external_id", '=', "T1027")]) + assert len(is_2) == 1 + + is_2 = is_2[0] + assert is_2.id == "attack-pattern--b3d682b6-98f2-4fb0-aa3b-b4df007ca70a" + assert is_2.type == "attack-pattern" + + +def test_filesystem_sink(): + # creation + fs_sink = FileSystemSink(FS_PATH) + assert fs_sink.stix_dir == FS_PATH + + fs_source = FileSystemSource(FS_PATH) + + # Test all the ways stix objects can be added (via different supplied forms) + + # add python stix object + camp1 = Campaign(name="Hannibal", + objective="Targeting Italian and Spanish Diplomat internet accounts", + aliases=["War Elephant"]) + + fs_sink.add(camp1) + + assert os.path.exists(os.path.join(FS_PATH, "campaign", camp1.id + ".json")) + + camp1_r = fs_source.get(camp1.id) + assert camp1_r.id == camp1.id + assert camp1_r.name == "Hannibal" + assert "War Elephant" in camp1_r.aliases + + # add stix object dict + camp2 = { + "name": "Aurelius", + "type": "campaign", + "objective": "German and French Intelligence Services", + "aliases": ["Purple Robes"], + "id": "campaign--111111b6-1112-4fb0-111b-b111107ca70a", + "created": "2017-05-31T21:31:53.197755Z" + } + + fs_sink.add(camp2) + + assert os.path.exists(os.path.join(FS_PATH, "campaign", 
camp2["id"] + ".json")) + + camp2_r = fs_source.get(camp2["id"]) + assert camp2_r.id == camp2["id"] + assert camp2_r.name == camp2["name"] + assert "Purple Robes" in camp2_r.aliases + + # add stix bundle dict + bund = { + "type": "bundle", + "id": "bundle--112211b6-1112-4fb0-111b-b111107ca70a", + "spec_version": "2.0", + "objects": [ + { + "name": "Atilla", + "type": "campaign", + "objective": "Bulgarian, Albanian and Romanian Intelligence Services", + "aliases": ["Huns"], + "id": "campaign--133111b6-1112-4fb0-111b-b111107ca70a", + "created": "2017-05-31T21:31:53.197755Z" + } + ] + } + + fs_sink.add(bund) + + assert os.path.exists(os.path.join(FS_PATH, "campaign", bund["objects"][0]["id"] + ".json")) + + camp3_r = fs_source.get(bund["objects"][0]["id"]) + assert camp3_r.id == bund["objects"][0]["id"] + assert camp3_r.name == bund["objects"][0]["name"] + assert "Huns" in camp3_r.aliases + + # add json-encoded stix obj + camp4 = '{"type": "campaign", "id":"campaign--144111b6-1112-4fb0-111b-b111107ca70a",'\ + ' "created":"2017-05-31T21:31:53.197755Z", "name": "Ghengis Khan", "objective": "China and Russian infrastructure"}' + + fs_sink.add(camp4) + + assert os.path.exists(os.path.join(FS_PATH, "campaign", "campaign--144111b6-1112-4fb0-111b-b111107ca70a" + ".json")) + + camp4_r = fs_source.get("campaign--144111b6-1112-4fb0-111b-b111107ca70a") + assert camp4_r.id == "campaign--144111b6-1112-4fb0-111b-b111107ca70a" + assert camp4_r.name == "Ghengis Khan" + + # add json-encoded stix bundle + bund2 = '{"type": "bundle", "id": "bundle--332211b6-1132-4fb0-111b-b111107ca70a",' \ + ' "spec_version": "2.0", "objects": [{"type": "campaign", "id": "campaign--155155b6-1112-4fb0-111b-b111107ca70a",' \ + ' "created":"2017-05-31T21:31:53.197755Z", "name": "Spartacus", "objective": "Oppressive regimes of Africa and Middle East"}]}' + fs_sink.add(bund2) + + assert os.path.exists(os.path.join(FS_PATH, "campaign", "campaign--155155b6-1112-4fb0-111b-b111107ca70a" + ".json")) + + camp5_r = 
fs_source.get("campaign--155155b6-1112-4fb0-111b-b111107ca70a") + assert camp5_r.id == "campaign--155155b6-1112-4fb0-111b-b111107ca70a" + assert camp5_r.name == "Spartacus" + + # add list of objects + camp6 = Campaign(name="Comanche", + objective="US Midwest manufacturing firms, oil refineries, and businesses", + aliases=["Horse Warrior"]) + + camp7 = { + "name": "Napolean", + "type": "campaign", + "objective": "Central and Eastern Europe military commands and departments", + "aliases": ["The Frenchmen"], + "id": "campaign--122818b6-1112-4fb0-111b-b111107ca70a", + "created": "2017-05-31T21:31:53.197755Z" + } + + fs_sink.add([camp6, camp7]) + + assert os.path.exists(os.path.join(FS_PATH, "campaign", camp6.id + ".json")) + assert os.path.exists(os.path.join(FS_PATH, "campaign", "campaign--122818b6-1112-4fb0-111b-b111107ca70a" + ".json")) + + camp6_r = fs_source.get(camp6.id) + assert camp6_r.id == camp6.id + assert "Horse Warrior" in camp6_r.aliases + + camp7_r = fs_source.get(camp7["id"]) + assert camp7_r.id == camp7["id"] + assert "The Frenchmen" in camp7_r.aliases + + # remove all added objects + os.remove(os.path.join(FS_PATH, "campaign", camp1_r.id + ".json")) + os.remove(os.path.join(FS_PATH, "campaign", camp2_r.id + ".json")) + os.remove(os.path.join(FS_PATH, "campaign", camp3_r.id + ".json")) + os.remove(os.path.join(FS_PATH, "campaign", camp4_r.id + ".json")) + os.remove(os.path.join(FS_PATH, "campaign", camp5_r.id + ".json")) + os.remove(os.path.join(FS_PATH, "campaign", camp6_r.id + ".json")) + os.remove(os.path.join(FS_PATH, "campaign", camp7_r.id + ".json")) + + # remove campaign dir (that was added in course of testing) + os.rmdir(os.path.join(FS_PATH, "campaign")) + + +def test_filesystem_store(): + # creation + fs_store = FileSystemStore(FS_PATH) + + # get() + coa = fs_store.get("course-of-action--d9727aee-48b8-4fdb-89e2-4c49746ba4dd") + assert coa.id == "course-of-action--d9727aee-48b8-4fdb-89e2-4c49746ba4dd" + assert coa.type == "course-of-action" + 
+ # all versions() - (note at this time, all_versions() is still not applicable to FileSystem, as only one version is ever stored) + rel = fs_store.all_versions("relationship--70dc6b5c-c524-429e-a6ab-0dd40f0482c1")[0] + assert rel.id == "relationship--70dc6b5c-c524-429e-a6ab-0dd40f0482c1" + assert rel.type == "relationship" + + # query() + tools = fs_store.query([Filter("labels", "in", "tool")]) + assert len(tools) == 2 + assert "tool--242f3da3-4425-4d11-8f5c-b842886da966" in [tool.id for tool in tools] + assert "tool--03342581-f790-4f03-ba41-e82e67392e23" in [tool.id for tool in tools] + + # add() + camp1 = Campaign(name="Great Heathen Army", + objective="Targeting the government of United Kingdom and insitutions affiliated with the Church Of Englang", + aliases=["Ragnar"]) + fs_store.add(camp1) + + camp1_r = fs_store.get(camp1.id) + assert camp1_r.id == camp1.id + assert camp1_r.name == camp1.name + + # remove + os.remove(os.path.join(FS_PATH, "campaign", camp1_r.id + ".json")) + + # remove campaign dir + os.rmdir(os.path.join(FS_PATH, "campaign")) diff --git a/stix2/test/test_granular_markings.py b/stix2/test/test_granular_markings.py index f8fc803..e910ad3 100644 --- a/stix2/test/test_granular_markings.py +++ b/stix2/test/test_granular_markings.py @@ -1,7 +1,7 @@ import pytest -from stix2 import TLP_RED, Malware, markings +from stix2 import Malware, markings from .constants import MALWARE_MORE_KWARGS as MALWARE_KWARGS_CONST from .constants import MARKING_IDS @@ -45,7 +45,6 @@ def test_add_marking_mark_one_selector_multiple_refs(): }, ], **MALWARE_KWARGS), - MARKING_IDS[0], ), ( MALWARE_KWARGS, @@ -57,26 +56,13 @@ def test_add_marking_mark_one_selector_multiple_refs(): }, ], **MALWARE_KWARGS), - MARKING_IDS[0], - ), - ( - Malware(**MALWARE_KWARGS), - Malware( - granular_markings=[ - { - "selectors": ["description", "name"], - "marking_ref": TLP_RED.id, - }, - ], - **MALWARE_KWARGS), - TLP_RED, ), ]) def test_add_marking_mark_multiple_selector_one_refs(data): 
before = data[0] after = data[1] - before = markings.add_markings(before, data[2], ["description", "name"]) + before = markings.add_markings(before, [MARKING_IDS[0]], ["description", "name"]) for m in before["granular_markings"]: assert m in after["granular_markings"] @@ -361,42 +347,36 @@ def test_get_markings_positional_arguments_combinations(data): assert set(markings.get_markings(data, "x.z.foo2", False, True)) == set(["10"]) [email protected]("data", [ - ( - Malware( - granular_markings=[ - { - "selectors": ["description"], - "marking_ref": MARKING_IDS[0] - }, - { - "selectors": ["description"], - "marking_ref": MARKING_IDS[1] - }, - ], - **MALWARE_KWARGS - ), - [MARKING_IDS[0], MARKING_IDS[1]], [email protected]("before", [ + Malware( + granular_markings=[ + { + "selectors": ["description"], + "marking_ref": MARKING_IDS[0] + }, + { + "selectors": ["description"], + "marking_ref": MARKING_IDS[1] + }, + ], + **MALWARE_KWARGS ), - ( - dict( - granular_markings=[ - { - "selectors": ["description"], - "marking_ref": MARKING_IDS[0] - }, - { - "selectors": ["description"], - "marking_ref": MARKING_IDS[1] - }, - ], - **MALWARE_KWARGS - ), - [MARKING_IDS[0], MARKING_IDS[1]], + dict( + granular_markings=[ + { + "selectors": ["description"], + "marking_ref": MARKING_IDS[0] + }, + { + "selectors": ["description"], + "marking_ref": MARKING_IDS[1] + }, + ], + **MALWARE_KWARGS ), ]) -def test_remove_marking_remove_one_selector_with_multiple_refs(data): - before = markings.remove_markings(data[0], data[1], ["description"]) +def test_remove_marking_remove_one_selector_with_multiple_refs(before): + before = markings.remove_markings(before, [MARKING_IDS[0], MARKING_IDS[1]], ["description"]) assert "granular_markings" not in before diff --git a/stix2/test/test_markings.py b/stix2/test/test_markings.py index 456bf92..0c6069a 100644 --- a/stix2/test/test_markings.py +++ b/stix2/test/test_markings.py @@ -241,14 +241,4 @@ def test_marking_wrong_type_construction(): assert 
str(excinfo.value) == "Must supply a list, containing tuples. For example, [('property1', IntegerProperty())]" -def test_campaign_add_markings(): - campaign = stix2.Campaign( - id="campaign--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f", - created_by_ref="identity--f431f809-377b-45e0-aa1c-6a4751cae5ff", - created="2016-04-06T20:03:00Z", - modified="2016-04-06T20:03:00Z", - name="Green Group Attacks Against Finance", - description="Campaign by Green Group against a series of targets in the financial services sector.", - ) - campaign = campaign.add_markings(TLP_WHITE) - assert campaign.object_marking_refs[0] == TLP_WHITE.id +# TODO: Add other examples diff --git a/stix2/test/test_object_markings.py b/stix2/test/test_object_markings.py index 10949ab..36e8e4d 100644 --- a/stix2/test/test_object_markings.py +++ b/stix2/test/test_object_markings.py @@ -1,7 +1,7 @@ import pytest -from stix2 import TLP_AMBER, Malware, exceptions, markings +from stix2 import Malware, exceptions, markings from .constants import FAKE_TIME, MALWARE_ID, MARKING_IDS from .constants import MALWARE_KWARGS as MALWARE_KWARGS_CONST @@ -21,26 +21,18 @@ MALWARE_KWARGS.update({ Malware(**MALWARE_KWARGS), Malware(object_marking_refs=[MARKING_IDS[0]], **MALWARE_KWARGS), - MARKING_IDS[0], ), ( MALWARE_KWARGS, dict(object_marking_refs=[MARKING_IDS[0]], **MALWARE_KWARGS), - MARKING_IDS[0], - ), - ( - Malware(**MALWARE_KWARGS), - Malware(object_marking_refs=[TLP_AMBER.id], - **MALWARE_KWARGS), - TLP_AMBER, ), ]) def test_add_markings_one_marking(data): before = data[0] after = data[1] - before = markings.add_markings(before, data[2], None) + before = markings.add_markings(before, MARKING_IDS[0], None) for m in before["object_marking_refs"]: assert m in after["object_marking_refs"] @@ -288,28 +280,19 @@ def test_remove_markings_object_level(data): **MALWARE_KWARGS), Malware(object_marking_refs=[MARKING_IDS[1]], **MALWARE_KWARGS), - [MARKING_IDS[0], MARKING_IDS[2]], ), ( dict(object_marking_refs=[MARKING_IDS[0], 
MARKING_IDS[1], MARKING_IDS[2]], **MALWARE_KWARGS), dict(object_marking_refs=[MARKING_IDS[1]], **MALWARE_KWARGS), - [MARKING_IDS[0], MARKING_IDS[2]], - ), - ( - Malware(object_marking_refs=[MARKING_IDS[0], MARKING_IDS[1], TLP_AMBER.id], - **MALWARE_KWARGS), - Malware(object_marking_refs=[MARKING_IDS[1]], - **MALWARE_KWARGS), - [MARKING_IDS[0], TLP_AMBER], ), ]) def test_remove_markings_multiple(data): before = data[0] after = data[1] - before = markings.remove_markings(before, data[2], None) + before = markings.remove_markings(before, [MARKING_IDS[0], MARKING_IDS[2]], None) assert before['object_marking_refs'] == after['object_marking_refs'] diff --git a/stix2/test/test_pattern_expressions.py b/stix2/test/test_pattern_expressions.py index 0db1083..e806aa6 100644 --- a/stix2/test/test_pattern_expressions.py +++ b/stix2/test/test_pattern_expressions.py @@ -170,8 +170,3 @@ def test_set_op(): exp = stix2.ObservationExpression(stix2.IsSubsetComparisonExpression("network-traffic:dst_ref.value", "2001:0db8:dead:beef:0000:0000:0000:0000/64")) assert str(exp) == "[network-traffic:dst_ref.value ISSUBSET '2001:0db8:dead:beef:0000:0000:0000:0000/64']" - - -def test_timestamp(): - ts = stix2.TimestampConstant('2014-01-13T07:03:17Z') - assert str(ts) == "t'2014-01-13T07:03:17Z'"
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 12 }
0.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": null, "pre_install": null, "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 antlr4-python3-runtime==4.9.3 appdirs==1.4.4 attrs==21.4.0 Babel==2.11.0 bump2version==1.0.1 bumpversion==0.6.0 certifi==2021.5.30 cfgv==3.3.1 charset-normalizer==2.0.12 colorama==0.4.5 coverage==6.2 cpe==1.3.1 distlib==0.3.9 docutils==0.18.1 filelock==3.4.1 identify==2.4.4 idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 importlib-resources==5.2.3 iniconfig==1.1.1 itsdangerous==2.0.1 Jinja2==3.0.3 jsonpointer==2.3 jsonschema==3.2.0 MarkupSafe==2.0.1 nodeenv==1.6.0 packaging==21.3 platformdirs==2.4.0 pluggy==1.0.0 pre-commit==2.17.0 py==1.11.0 Pygments==2.14.0 pyparsing==3.1.4 pyrsistent==0.18.0 pytest==7.0.1 pytest-cov==4.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==6.0.1 requests==2.27.1 requests-cache==0.7.5 rfc3339-validator==0.1.4 rfc3986-validator==0.1.1 simplejson==3.20.1 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinx-prompt==1.5.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 -e git+https://github.com/oasis-open/cti-python-stix2.git@58f39f80af5cbfe02879c2efa4b3b4ef7a504390#egg=stix2 stix2-patterns==2.0.0 stix2-validator==3.0.2 taxii2-client==2.3.0 toml==0.10.2 tomli==1.2.3 tox==3.28.0 typing_extensions==4.1.1 url-normalize==1.4.3 urllib3==1.26.20 virtualenv==20.16.2 webcolors==1.11.1 zipp==3.6.0
name: cti-python-stix2 channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - antlr4-python3-runtime==4.9.3 - appdirs==1.4.4 - attrs==21.4.0 - babel==2.11.0 - bump2version==1.0.1 - bumpversion==0.6.0 - cfgv==3.3.1 - charset-normalizer==2.0.12 - colorama==0.4.5 - coverage==6.2 - cpe==1.3.1 - distlib==0.3.9 - docutils==0.18.1 - filelock==3.4.1 - identify==2.4.4 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - importlib-resources==5.2.3 - iniconfig==1.1.1 - itsdangerous==2.0.1 - jinja2==3.0.3 - jsonpointer==2.3 - jsonschema==3.2.0 - markupsafe==2.0.1 - nodeenv==1.6.0 - packaging==21.3 - platformdirs==2.4.0 - pluggy==1.0.0 - pre-commit==2.17.0 - py==1.11.0 - pygments==2.14.0 - pyparsing==3.1.4 - pyrsistent==0.18.0 - pytest==7.0.1 - pytest-cov==4.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==6.0.1 - requests==2.27.1 - requests-cache==0.7.5 - rfc3339-validator==0.1.4 - rfc3986-validator==0.1.1 - simplejson==3.20.1 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinx-prompt==1.5.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - stix2-patterns==2.0.0 - stix2-validator==3.0.2 - taxii2-client==2.3.0 - toml==0.10.2 - tomli==1.2.3 - 
tox==3.28.0 - typing-extensions==4.1.1 - url-normalize==1.4.3 - urllib3==1.26.20 - virtualenv==20.16.2 - webcolors==1.11.1 - zipp==3.6.0 prefix: /opt/conda/envs/cti-python-stix2
[ "stix2/test/test_data_sources.py::test_filesytem_source" ]
[ "stix2/test/test_data_sources.py::test_filesystem_sink", "stix2/test/test_data_sources.py::test_filesystem_store" ]
[ "stix2/test/test_data_sources.py::test_ds_abstract_class_smoke", "stix2/test/test_data_sources.py::test_memory_store_smoke", "stix2/test/test_data_sources.py::test_ds_taxii", "stix2/test/test_data_sources.py::test_ds_taxii_name", "stix2/test/test_data_sources.py::test_parse_taxii_filters", "stix2/test/test_data_sources.py::test_add_get_remove_filter", "stix2/test/test_data_sources.py::test_apply_common_filters", "stix2/test/test_data_sources.py::test_filters0", "stix2/test/test_data_sources.py::test_filters1", "stix2/test/test_data_sources.py::test_filters2", "stix2/test/test_data_sources.py::test_filters3", "stix2/test/test_data_sources.py::test_filters4", "stix2/test/test_data_sources.py::test_filters5", "stix2/test/test_data_sources.py::test_deduplicate", "stix2/test/test_data_sources.py::test_add_remove_composite_datasource", "stix2/test/test_data_sources.py::test_composite_datasource_operations", "stix2/test/test_granular_markings.py::test_add_marking_mark_one_selector_multiple_refs", "stix2/test/test_granular_markings.py::test_add_marking_mark_multiple_selector_one_refs[data0]", "stix2/test/test_granular_markings.py::test_add_marking_mark_multiple_selector_one_refs[data1]", "stix2/test/test_granular_markings.py::test_add_marking_mark_multiple_selector_multiple_refs", "stix2/test/test_granular_markings.py::test_add_marking_mark_another_property_same_marking", "stix2/test/test_granular_markings.py::test_add_marking_mark_same_property_same_marking", "stix2/test/test_granular_markings.py::test_add_marking_bad_selector[data0-marking0]", "stix2/test/test_granular_markings.py::test_get_markings_smoke[data0]", "stix2/test/test_granular_markings.py::test_get_markings_not_marked[data0]", "stix2/test/test_granular_markings.py::test_get_markings_not_marked[data1]", "stix2/test/test_granular_markings.py::test_get_markings_multiple_selectors[data0]", "stix2/test/test_granular_markings.py::test_get_markings_bad_selector[data0-foo]", 
"stix2/test/test_granular_markings.py::test_get_markings_bad_selector[data1-]", "stix2/test/test_granular_markings.py::test_get_markings_bad_selector[data2-selector2]", "stix2/test/test_granular_markings.py::test_get_markings_bad_selector[data3-selector3]", "stix2/test/test_granular_markings.py::test_get_markings_bad_selector[data4-x.z.[-2]]", "stix2/test/test_granular_markings.py::test_get_markings_bad_selector[data5-c.f]", "stix2/test/test_granular_markings.py::test_get_markings_bad_selector[data6-c.[2].i]", "stix2/test/test_granular_markings.py::test_get_markings_bad_selector[data7-c.[3]]", "stix2/test/test_granular_markings.py::test_get_markings_bad_selector[data8-d]", "stix2/test/test_granular_markings.py::test_get_markings_bad_selector[data9-x.[0]]", "stix2/test/test_granular_markings.py::test_get_markings_bad_selector[data10-z.y.w]", "stix2/test/test_granular_markings.py::test_get_markings_bad_selector[data11-x.z.[1]]", "stix2/test/test_granular_markings.py::test_get_markings_bad_selector[data12-x.z.foo3]", "stix2/test/test_granular_markings.py::test_get_markings_positional_arguments_combinations[data0]", "stix2/test/test_granular_markings.py::test_remove_marking_remove_one_selector_with_multiple_refs[before0]", "stix2/test/test_granular_markings.py::test_remove_marking_remove_one_selector_with_multiple_refs[before1]", "stix2/test/test_granular_markings.py::test_remove_marking_remove_multiple_selector_one_ref", "stix2/test/test_granular_markings.py::test_remove_marking_mark_one_selector_from_multiple_ones", "stix2/test/test_granular_markings.py::test_remove_marking_mark_one_selector_markings_from_multiple_ones", "stix2/test/test_granular_markings.py::test_remove_marking_mark_mutilple_selector_multiple_refs", "stix2/test/test_granular_markings.py::test_remove_marking_mark_another_property_same_marking", "stix2/test/test_granular_markings.py::test_remove_marking_mark_same_property_same_marking", "stix2/test/test_granular_markings.py::test_remove_no_markings", 
"stix2/test/test_granular_markings.py::test_remove_marking_bad_selector", "stix2/test/test_granular_markings.py::test_is_marked_smoke[data0]", "stix2/test/test_granular_markings.py::test_is_marked_smoke[data1]", "stix2/test/test_granular_markings.py::test_is_marked_invalid_selector[data0-foo]", "stix2/test/test_granular_markings.py::test_is_marked_invalid_selector[data1-]", "stix2/test/test_granular_markings.py::test_is_marked_invalid_selector[data2-selector2]", "stix2/test/test_granular_markings.py::test_is_marked_invalid_selector[data3-selector3]", "stix2/test/test_granular_markings.py::test_is_marked_invalid_selector[data4-x.z.[-2]]", "stix2/test/test_granular_markings.py::test_is_marked_invalid_selector[data5-c.f]", "stix2/test/test_granular_markings.py::test_is_marked_invalid_selector[data6-c.[2].i]", "stix2/test/test_granular_markings.py::test_is_marked_invalid_selector[data7-c.[3]]", "stix2/test/test_granular_markings.py::test_is_marked_invalid_selector[data8-d]", "stix2/test/test_granular_markings.py::test_is_marked_invalid_selector[data9-x.[0]]", "stix2/test/test_granular_markings.py::test_is_marked_invalid_selector[data10-z.y.w]", "stix2/test/test_granular_markings.py::test_is_marked_invalid_selector[data11-x.z.[1]]", "stix2/test/test_granular_markings.py::test_is_marked_invalid_selector[data12-x.z.foo3]", "stix2/test/test_granular_markings.py::test_is_marked_mix_selector[data0]", "stix2/test/test_granular_markings.py::test_is_marked_mix_selector[data1]", "stix2/test/test_granular_markings.py::test_is_marked_valid_selector_no_refs[data0]", "stix2/test/test_granular_markings.py::test_is_marked_valid_selector_no_refs[data1]", "stix2/test/test_granular_markings.py::test_is_marked_valid_selector_and_refs[data0]", "stix2/test/test_granular_markings.py::test_is_marked_valid_selector_and_refs[data1]", "stix2/test/test_granular_markings.py::test_is_marked_valid_selector_multiple_refs[data0]", 
"stix2/test/test_granular_markings.py::test_is_marked_valid_selector_multiple_refs[data1]", "stix2/test/test_granular_markings.py::test_is_marked_no_marking_refs[data0]", "stix2/test/test_granular_markings.py::test_is_marked_no_marking_refs[data1]", "stix2/test/test_granular_markings.py::test_is_marked_no_selectors[data0]", "stix2/test/test_granular_markings.py::test_is_marked_no_selectors[data1]", "stix2/test/test_granular_markings.py::test_is_marked_positional_arguments_combinations", "stix2/test/test_granular_markings.py::test_create_sdo_with_invalid_marking", "stix2/test/test_granular_markings.py::test_set_marking_mark_one_selector_multiple_refs", "stix2/test/test_granular_markings.py::test_set_marking_mark_multiple_selector_one_refs", "stix2/test/test_granular_markings.py::test_set_marking_mark_multiple_selector_multiple_refs_from_none", "stix2/test/test_granular_markings.py::test_set_marking_mark_another_property_same_marking", "stix2/test/test_granular_markings.py::test_set_marking_bad_selector[marking0]", "stix2/test/test_granular_markings.py::test_set_marking_bad_selector[marking1]", "stix2/test/test_granular_markings.py::test_set_marking_bad_selector[marking2]", "stix2/test/test_granular_markings.py::test_set_marking_bad_selector[marking3]", "stix2/test/test_granular_markings.py::test_set_marking_mark_same_property_same_marking", "stix2/test/test_granular_markings.py::test_clear_marking_smoke[data0]", "stix2/test/test_granular_markings.py::test_clear_marking_smoke[data1]", "stix2/test/test_granular_markings.py::test_clear_marking_multiple_selectors[data0]", "stix2/test/test_granular_markings.py::test_clear_marking_multiple_selectors[data1]", "stix2/test/test_granular_markings.py::test_clear_marking_one_selector[data0]", "stix2/test/test_granular_markings.py::test_clear_marking_one_selector[data1]", "stix2/test/test_granular_markings.py::test_clear_marking_all_selectors[data0]", 
"stix2/test/test_granular_markings.py::test_clear_marking_all_selectors[data1]", "stix2/test/test_granular_markings.py::test_clear_marking_bad_selector[data0-foo]", "stix2/test/test_granular_markings.py::test_clear_marking_bad_selector[data1-]", "stix2/test/test_granular_markings.py::test_clear_marking_bad_selector[data2-selector2]", "stix2/test/test_granular_markings.py::test_clear_marking_bad_selector[data3-selector3]", "stix2/test/test_markings.py::test_marking_def_example_with_tlp", "stix2/test/test_markings.py::test_marking_def_example_with_statement_positional_argument", "stix2/test/test_markings.py::test_marking_def_example_with_kwargs_statement", "stix2/test/test_markings.py::test_marking_def_invalid_type", "stix2/test/test_markings.py::test_campaign_with_markings_example", "stix2/test/test_markings.py::test_granular_example", "stix2/test/test_markings.py::test_granular_example_with_bad_selector", "stix2/test/test_markings.py::test_campaign_with_granular_markings_example", "stix2/test/test_markings.py::test_parse_marking_definition[{\\n", "stix2/test/test_markings.py::test_parse_marking_definition[data1]", "stix2/test/test_markings.py::test_registered_custom_marking", "stix2/test/test_markings.py::test_not_registered_marking_raises_exception", "stix2/test/test_markings.py::test_marking_wrong_type_construction", "stix2/test/test_object_markings.py::test_add_markings_one_marking[data0]", "stix2/test/test_object_markings.py::test_add_markings_one_marking[data1]", "stix2/test/test_object_markings.py::test_add_markings_multiple_marking", "stix2/test/test_object_markings.py::test_add_markings_combination", "stix2/test/test_object_markings.py::test_add_markings_bad_markings[data0]", "stix2/test/test_object_markings.py::test_add_markings_bad_markings[]", "stix2/test/test_object_markings.py::test_add_markings_bad_markings[data2]", "stix2/test/test_object_markings.py::test_add_markings_bad_markings[data3]", 
"stix2/test/test_object_markings.py::test_get_markings_object_marking[data0]", "stix2/test/test_object_markings.py::test_get_markings_object_and_granular_combinations[data0]", "stix2/test/test_object_markings.py::test_remove_markings_object_level[data0]", "stix2/test/test_object_markings.py::test_remove_markings_object_level[data1]", "stix2/test/test_object_markings.py::test_remove_markings_multiple[data0]", "stix2/test/test_object_markings.py::test_remove_markings_multiple[data1]", "stix2/test/test_object_markings.py::test_remove_markings_bad_markings", "stix2/test/test_object_markings.py::test_clear_markings[data0]", "stix2/test/test_object_markings.py::test_clear_markings[data1]", "stix2/test/test_object_markings.py::test_is_marked_object_and_granular_combinations", "stix2/test/test_object_markings.py::test_is_marked_no_markings[data0]", "stix2/test/test_object_markings.py::test_is_marked_no_markings[data1]", "stix2/test/test_object_markings.py::test_set_marking", "stix2/test/test_object_markings.py::test_set_marking_bad_input[data0]", "stix2/test/test_object_markings.py::test_set_marking_bad_input[data1]", "stix2/test/test_object_markings.py::test_set_marking_bad_input[]", "stix2/test/test_object_markings.py::test_set_marking_bad_input[data3]", "stix2/test/test_pattern_expressions.py::test_create_comparison_expression", "stix2/test/test_pattern_expressions.py::test_boolean_expression", "stix2/test/test_pattern_expressions.py::test_boolean_expression_with_parentheses", "stix2/test/test_pattern_expressions.py::test_hash_followed_by_registryKey_expression_python_constant", "stix2/test/test_pattern_expressions.py::test_hash_followed_by_registryKey_expression", "stix2/test/test_pattern_expressions.py::test_file_observable_expression", "stix2/test/test_pattern_expressions.py::test_multiple_file_observable_expression", "stix2/test/test_pattern_expressions.py::test_root_types", "stix2/test/test_pattern_expressions.py::test_artifact_payload", 
"stix2/test/test_pattern_expressions.py::test_greater_than_python_constant", "stix2/test/test_pattern_expressions.py::test_greater_than", "stix2/test/test_pattern_expressions.py::test_and_observable_expression", "stix2/test/test_pattern_expressions.py::test_hex", "stix2/test/test_pattern_expressions.py::test_multiple_qualifiers", "stix2/test/test_pattern_expressions.py::test_set_op" ]
[]
BSD 3-Clause "New" or "Revised" License
1,738
[ "stix2/common.py", "stix2/sdo.py", "stix2/sources/filters.py", "stix2/sources/taxii.py", "stix2/patterns.py", "stix2/markings/__init__.py", "stix2/sro.py", "stix2/markings/granular_markings.py", "stix2/__init__.py", "stix2/markings/utils.py", "stix2/sources/filesystem.py", "stix2/markings/object_markings.py" ]
[ "stix2/common.py", "stix2/sdo.py", "stix2/sources/filters.py", "stix2/sources/taxii.py", "stix2/patterns.py", "stix2/markings/__init__.py", "stix2/sro.py", "stix2/markings/granular_markings.py", "stix2/__init__.py", "stix2/markings/utils.py", "stix2/sources/filesystem.py", "stix2/markings/object_markings.py" ]
OpenMined__PySyft-296
65b9352e9e349c7f932f52904d6f913a09f9a025
2017-10-06 15:20:15
06ce023225dd613d8fb14ab2046135b93ab22376
samuxiii: Ok, I've added slices into the index as a possible type of parameter. But.. I don't understand the comment about index_add_, I've almost used the same pytorch example as my unit test. Please, could you be more explicit? Thanks bharathgs: @samuxiii ahh, my bad.did not see the `index_add_` properly. It looks good.
diff --git a/syft/tensor.py b/syft/tensor.py index f5313aa491..719ae2d409 100644 --- a/syft/tensor.py +++ b/syft/tensor.py @@ -2848,6 +2848,97 @@ class TensorBase(object): self.data = np.remainder(self.data, divisor) return self + def index(self, m): + """ + Returns a new Tensor with the element selected by position + + :param m: integer index or slice + :return: tensor of selected indices + """ + if self.encrypted: + return NotImplemented + if not isinstance(m, int) and not isinstance(m, slice): + raise ValueError("The value of index must be integer") + return TensorBase(self.data[m], self.encrypted) + + def index_add_(self, dim, index, tensor): + """ + Add the value of 'tensor' selecting the elements and ordered + by index. In-place operation. + + :param dim: dimension along which to index + :param index: 1D tensor containing the indices to select + :param tensor: tensor containing the values to add + """ + index = _ensure_tensorbase(index) + tensor = _ensure_tensorbase(tensor) + + if self.encrypted: + return NotImplemented + if index.data.dtype != np.dtype('int_'): + raise TypeError("The value of index must be integer") + if self.data.shape != tensor.data.shape: + raise IndexError("Tensor has different shape") + if self.data.shape[dim] != index.data.size: + raise ValueError("Index should have the same number of elements as dimension") + if np.argmax(index.data > self.data.shape[dim]) != 0: + raise ValueError("Index contains a value which is out of range") + if dim >= self.data.ndim or dim < -self.data.ndim: + raise IndexError("Dimension out of range") + + self.data += tensor.data.take(index, dim) + + def index_copy_(self, dim, index, tensor): + """ + Copy the values of 'tensor' selecting the elements and ordered + by index. In-place operation. 
+ + :para dim: dimension along which to index + :param index: 1D tensor containing the indices to select + :param tensor: tensor containing the values to add + """ + index = _ensure_tensorbase(index) + tensor = _ensure_tensorbase(tensor) + + if self.encrypted: + return NotImplemented + if index.data.dtype != np.dtype('int_'): + raise TypeError("The value of index must be integer") + if self.data.shape != tensor.data.shape: + raise IndexError("Tensor has different shape") + if self.data.shape[dim] != index.data.size: + raise ValueError("Index should have the same number of elements as dimension") + if np.argmax(index.data > self.data.shape[dim]) != 0: + raise ValueError("Index contains a value which is out of range") + if dim >= self.data.ndim or dim < -self.data.ndim: + raise IndexError("Dimension out of range") + + np.copyto(self.data, tensor.data.take(index, dim)) + + def index_fill_(self, dim, index, value): + """ + Fill the original tensor with the values of 'tensor' selecting + the elements and ordered by index. In-place operation. + + :param dim: dimension along which to inde + :param index: 1D tensor containing the indices to select + :param value: value to fill + """ + index = _ensure_tensorbase(index) + + if self.encrypted: + return NotImplemented + if index.data.dtype != np.dtype('int_'): + raise TypeError("The value of index must be integer") + if np.argmax(index.data > self.data.shape[dim]) != 0: + raise ValueError("Index contains a value which is out of range") + if dim >= self.data.ndim or dim < -self.data.ndim: + raise IndexError("Dimension out of range") + + idx = [slice(None)] * self.data.ndim + idx[dim] = index + self.data[tuple(idx)] = value + def index_select(self, dim, index): """ Returns a new Tensor which indexes the ``input`` Tensor along
Implement Default index Functionality for Base Tensor Type **User Story A:** As a Data Scientist using Syft's Base Tensor type, we want to implement a default method for computing operations on a Tensor of arbitrary type. For this ticket to be complete, index() and index_select() should return a new tensor and index_add_(), index_copy_(), and index_fill_() should perform the operation inline. For a reference on the operation this performs check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation. **Acceptance Criteria:** - If the Base Tensor type's attribute "encrypted" is set to True, it should return a NotImplemented error. - a unit test demonstrating the correct operation on the Base Tensor type implemented over int and float Tensors. - inline documentation in the python code. For inspiration on inline documentation, please check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation for this operator.
OpenMined/PySyft
diff --git a/tests/test_tensor.py b/tests/test_tensor.py index 465370d309..bf91ebdc3a 100644 --- a/tests/test_tensor.py +++ b/tests/test_tensor.py @@ -4,6 +4,7 @@ import unittest from syft import tensor import numpy as np import math +import pytest # Here's our "unit tests". @@ -250,13 +251,6 @@ class EqualTests(unittest.TestCase): self.assertTrue(t1 != t2) -class IndexTests(unittest.TestCase): - def test_indexing(self): - t1 = TensorBase(np.array([1.2, 2, 3])) - self.assertEqual(1.2, t1[0]) - self.assertEqual(3, t1[-1]) - - class sigmoidTests(unittest.TestCase): def test_sigmoid(self): t1 = TensorBase(np.array([1.2, 3.3, 4])) @@ -954,7 +948,90 @@ class notEqualTests(unittest.TestCase): self.assertTrue(syft.equal(t1, TensorBase([1, 1, 1, 0]))) -class index_selectTests(unittest.TestCase): +class IndexTests(unittest.TestCase): + def test_indexing(self): + t1 = TensorBase(np.array([1.2, 2, 3])) + self.assertEqual(1.2, t1[0]) + self.assertEqual(3, t1[-1]) + + def test_index(self): + t = TensorBase(np.array([1, 2, 3.5, 4, 5, 6, 3.5])) + expected1 = TensorBase(np.array(2)) + expected2 = TensorBase(np.array(3.5)) + expected3 = TensorBase(np.array([4, 5, 6])) + + self.assertEqual(expected1, t.index(1)) + self.assertEqual(expected2, t.index(2)) + self.assertEqual(expected2, t.index(-1)) + self.assertEqual(expected3, t.index(slice(3, 6))) + with pytest.raises(ValueError): + t.index(3.5) + + def test_index_add_(self): + t1 = TensorBase(np.array([[0, 0, 0], [1, 1, 1], [1, 1, 1]])) + t2 = TensorBase(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])) + + expected_0 = TensorBase(np.array([[1, 2, 3], [8, 9, 10], [5, 6, 7]])) + t1.index_add_(0, [0, 2, 1], t2) + self.assertEqual(expected_0, t1) + + t1 = TensorBase(np.array([[0, 0, 0], [1, 1, 1], [1, 1, 1]])) + expected_1 = TensorBase(np.array([[1, 3, 2], [5, 7, 6], [8, 10, 9]])) + t1.index_add_(1, [0, 2, 1], t2) + self.assertEqual(expected_1, t1) + + with pytest.raises(TypeError): + t1.index_add_(0, [1.0, 2, 2], t2) + with 
pytest.raises(IndexError): + t1.index_add_(0, [0, 1, 2], TensorBase([1, 2])) + with pytest.raises(ValueError): + t1.index_add_(0, [0, 1], t2) + with pytest.raises(ValueError): + t1.index_add_(0, [0, 1, 5], t2) + with pytest.raises(IndexError): + t1.index_add_(4, [0, 1, 2], t2) + + def test_index_copy_(self): + t1 = TensorBase(np.array([[0, 0, 0], [1, 1, 1], [1, 1, 1]])) + t2 = TensorBase(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])) + expected_0 = TensorBase(np.array([[1, 2, 3], [7, 8, 9], [4, 5, 6]])) + t1.index_copy_(0, [0, 2, 1], t2) + self.assertEqual(expected_0, t1) + + t1 = TensorBase(np.array([[0, 0, 0], [1, 1, 1], [1, 1, 1]])) + expected_1 = TensorBase(np.array([[3, 1, 2], [6, 4, 5], [9, 7, 8]])) + t1.index_copy_(1, [2, 0, 1], t2) + self.assertEqual(expected_1, t1) + + with pytest.raises(TypeError): + t1.index_copy_(0, [1.0, 2, 2], t2) + with pytest.raises(IndexError): + t1.index_copy_(0, [0, 1, 2], TensorBase([1, 2])) + with pytest.raises(ValueError): + t1.index_copy_(0, [0, 1], t2) + with pytest.raises(ValueError): + t1.index_copy_(0, [0, 1, 5], t2) + with pytest.raises(IndexError): + t1.index_copy_(4, [0, 1, 2], t2) + + def test_index_fill_(self): + t1 = TensorBase(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])) + expected_0 = TensorBase(np.array([[1, 1, 1], [1, 1, 1], [7, 8, 9]])) + t1.index_fill_(0, [0, 1], 1) + self.assertEqual(expected_0, t1) + + t1 = TensorBase(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])) + expected_1 = TensorBase(np.array([[-2, 2, -2], [-2, 5, -2], [-2, 8, -2]])) + t1.index_fill_(1, [0, 2], -2) + self.assertEqual(expected_1, t1) + + with pytest.raises(TypeError): + t1.index_fill_(0, [1.0, 2, 2], 1) + with pytest.raises(ValueError): + t1.index_fill_(0, [0, 1, 5], 1) + with pytest.raises(IndexError): + t1.index_fill_(4, [0, 1, 2], 1) + def test_index_select(self): t = TensorBase(np.reshape(np.arange(0, 2 * 3 * 4), (2, 3, 4))) idx = np.array([1, 0])
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 1 }
PySyft/hydrogen
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-flake8" ], "pre_install": null, "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
args==0.1.0 attrs==22.2.0 certifi==2021.5.30 clint==0.5.1 flake8==5.0.4 importlib-metadata==4.2.0 iniconfig==1.1.1 joblib==1.1.1 line-profiler==4.1.3 mccabe==0.7.0 numpy==1.19.5 packaging==21.3 phe==1.5.0 pluggy==1.0.0 py==1.11.0 pycodestyle==2.9.1 pyflakes==2.5.0 pyparsing==3.1.4 pyRserve==1.0.4 pytest==7.0.1 pytest-flake8==1.1.1 scikit-learn==0.24.2 scipy==1.5.4 sklearn==0.0 -e git+https://github.com/OpenMined/PySyft.git@65b9352e9e349c7f932f52904d6f913a09f9a025#egg=syft threadpoolctl==3.1.0 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: PySyft channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - args==0.1.0 - attrs==22.2.0 - clint==0.5.1 - flake8==5.0.4 - importlib-metadata==4.2.0 - iniconfig==1.1.1 - joblib==1.1.1 - line-profiler==4.1.3 - mccabe==0.7.0 - numpy==1.19.5 - packaging==21.3 - phe==1.5.0 - pluggy==1.0.0 - py==1.11.0 - pycodestyle==2.9.1 - pyflakes==2.5.0 - pyparsing==3.1.4 - pyrserve==1.0.4 - pytest==7.0.1 - pytest-flake8==1.1.1 - scikit-learn==0.24.2 - scipy==1.5.4 - sklearn==0.0 - threadpoolctl==3.1.0 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/PySyft
[ "tests/test_tensor.py::IndexTests::test_index", "tests/test_tensor.py::IndexTests::test_index_add_", "tests/test_tensor.py::IndexTests::test_index_copy_", "tests/test_tensor.py::IndexTests::test_index_fill_" ]
[]
[ "tests/test_tensor.py::DimTests::test_as_view", "tests/test_tensor.py::DimTests::test_dim_one", "tests/test_tensor.py::DimTests::test_nelement", "tests/test_tensor.py::DimTests::test_resize", "tests/test_tensor.py::DimTests::test_resize_as", "tests/test_tensor.py::DimTests::test_size", "tests/test_tensor.py::DimTests::test_view", "tests/test_tensor.py::DiagTests::test_one_dim_tensor_below_diag", "tests/test_tensor.py::DiagTests::test_one_dim_tensor_main_diag", "tests/test_tensor.py::DiagTests::test_one_dim_tensor_upper_diag", "tests/test_tensor.py::DiagTests::test_two_dim_tensor_below_diag", "tests/test_tensor.py::DiagTests::test_two_dim_tensor_main_diag", "tests/test_tensor.py::DiagTests::test_two_dim_tensor_upper_diag", "tests/test_tensor.py::AddTests::test_inplace", "tests/test_tensor.py::AddTests::test_scalar", "tests/test_tensor.py::AddTests::test_simple", "tests/test_tensor.py::CeilTests::test_ceil", "tests/test_tensor.py::CeilTests::test_ceil_", "tests/test_tensor.py::ZeroTests::test_zero", "tests/test_tensor.py::FloorTests::test_floor_", "tests/test_tensor.py::SubTests::test_inplace", "tests/test_tensor.py::SubTests::test_scalar", "tests/test_tensor.py::SubTests::test_simple", "tests/test_tensor.py::MaxTests::test_axis", "tests/test_tensor.py::MaxTests::test_no_dim", "tests/test_tensor.py::MultTests::test_inplace", "tests/test_tensor.py::MultTests::test_scalar", "tests/test_tensor.py::MultTests::test_simple", "tests/test_tensor.py::DivTests::test_inplace", "tests/test_tensor.py::DivTests::test_scalar", "tests/test_tensor.py::DivTests::test_simple", "tests/test_tensor.py::AbsTests::test_abs", "tests/test_tensor.py::AbsTests::test_abs_", "tests/test_tensor.py::ShapeTests::test_shape", "tests/test_tensor.py::SqrtTests::test_sqrt", "tests/test_tensor.py::SqrtTests::test_sqrt_", "tests/test_tensor.py::SumTests::test_dim_is_not_none_int", "tests/test_tensor.py::SumTests::test_dim_none_int", "tests/test_tensor.py::EqualTests::test_equal", 
"tests/test_tensor.py::EqualTests::test_equal_operation", "tests/test_tensor.py::EqualTests::test_inequality_operation", "tests/test_tensor.py::EqualTests::test_not_equal", "tests/test_tensor.py::sigmoidTests::test_sigmoid", "tests/test_tensor.py::addmm::test_addmm_1d", "tests/test_tensor.py::addmm::test_addmm_2d", "tests/test_tensor.py::addmm::test_addmm__1d", "tests/test_tensor.py::addmm::test_addmm__2d", "tests/test_tensor.py::addcmulTests::test_addcmul_1d", "tests/test_tensor.py::addcmulTests::test_addcmul_2d", "tests/test_tensor.py::addcmulTests::test_addcmul__1d", "tests/test_tensor.py::addcmulTests::test_addcmul__2d", "tests/test_tensor.py::addcdivTests::test_addcdiv_1d", "tests/test_tensor.py::addcdivTests::test_addcdiv_2d", "tests/test_tensor.py::addcdivTests::test_addcdiv__1d", "tests/test_tensor.py::addcdivTests::test_addcdiv__2d", "tests/test_tensor.py::addmvTests::test_addmv", "tests/test_tensor.py::addmvTests::test_addmv_", "tests/test_tensor.py::bmmTests::test_bmm", "tests/test_tensor.py::bmmTests::test_bmm_size", "tests/test_tensor.py::addbmmTests::test_addbmm", "tests/test_tensor.py::addbmmTests::test_addbmm_", "tests/test_tensor.py::baddbmmTests::test_baddbmm", "tests/test_tensor.py::baddbmmTests::test_baddbmm_", "tests/test_tensor.py::transposeTests::test_t", "tests/test_tensor.py::transposeTests::test_transpose", "tests/test_tensor.py::transposeTests::test_transpose_", "tests/test_tensor.py::unsqueezeTests::test_unsqueeze", "tests/test_tensor.py::unsqueezeTests::test_unsqueeze_", "tests/test_tensor.py::expTests::test_exp", "tests/test_tensor.py::expTests::test_exp_", "tests/test_tensor.py::fracTests::test_frac", "tests/test_tensor.py::fracTests::test_frac_", "tests/test_tensor.py::rsqrtTests::test_rsqrt", "tests/test_tensor.py::rsqrtTests::test_rsqrt_", "tests/test_tensor.py::signTests::test_sign", "tests/test_tensor.py::signTests::test_sign_", "tests/test_tensor.py::numpyTests::test_numpy", 
"tests/test_tensor.py::reciprocalTests::test_reciprocal", "tests/test_tensor.py::reciprocalTests::test_reciprocal_", "tests/test_tensor.py::logTests::test_log", "tests/test_tensor.py::logTests::test_log_", "tests/test_tensor.py::logTests::test_log_1p", "tests/test_tensor.py::logTests::test_log_1p_", "tests/test_tensor.py::clampTests::test_clamp_float", "tests/test_tensor.py::clampTests::test_clamp_float_in_place", "tests/test_tensor.py::clampTests::test_clamp_int", "tests/test_tensor.py::clampTests::test_clamp_int_in_place", "tests/test_tensor.py::cloneTests::test_clone", "tests/test_tensor.py::chunkTests::test_chunk", "tests/test_tensor.py::chunkTests::test_chunk_same_size", "tests/test_tensor.py::gtTests::test_gt__in_place_with_number", "tests/test_tensor.py::gtTests::test_gt__in_place_with_tensor", "tests/test_tensor.py::gtTests::test_gt_with_encrypted", "tests/test_tensor.py::gtTests::test_gt_with_number", "tests/test_tensor.py::gtTests::test_gt_with_tensor", "tests/test_tensor.py::geTests::test_ge__in_place_with_number", "tests/test_tensor.py::geTests::test_ge__in_place_with_tensor", "tests/test_tensor.py::geTests::test_ge_with_encrypted", "tests/test_tensor.py::geTests::test_ge_with_number", "tests/test_tensor.py::geTests::test_ge_with_tensor", "tests/test_tensor.py::ltTests::test_lt__in_place_with_number", "tests/test_tensor.py::ltTests::test_lt__in_place_with_tensor", "tests/test_tensor.py::ltTests::test_lt_with_encrypted", "tests/test_tensor.py::ltTests::test_lt_with_number", "tests/test_tensor.py::ltTests::test_lt_with_tensor", "tests/test_tensor.py::leTests::test_le__in_place_with_number", "tests/test_tensor.py::leTests::test_le__in_place_with_tensor", "tests/test_tensor.py::leTests::test_le_with_encrypted", "tests/test_tensor.py::leTests::test_le_with_number", "tests/test_tensor.py::leTests::test_le_with_tensor", "tests/test_tensor.py::bernoulliTests::test_bernoulli", "tests/test_tensor.py::bernoulliTests::test_bernoulli_", 
"tests/test_tensor.py::cauchyTests::test_cauchy_", "tests/test_tensor.py::uniformTests::test_uniform", "tests/test_tensor.py::uniformTests::test_uniform_", "tests/test_tensor.py::geometricTests::test_geometric_", "tests/test_tensor.py::normalTests::test_normal", "tests/test_tensor.py::normalTests::test_normal_", "tests/test_tensor.py::fillTests::test_fill_", "tests/test_tensor.py::topkTests::test_topK", "tests/test_tensor.py::tolistTests::test_to_list", "tests/test_tensor.py::traceTests::test_trace", "tests/test_tensor.py::roundTests::test_round", "tests/test_tensor.py::roundTests::test_round_", "tests/test_tensor.py::repeatTests::test_repeat", "tests/test_tensor.py::powTests::test_pow", "tests/test_tensor.py::powTests::test_pow_", "tests/test_tensor.py::negTests::test_neg", "tests/test_tensor.py::negTests::test_neg_", "tests/test_tensor.py::tanhTests::test_tanh_", "tests/test_tensor.py::prodTests::test_prod", "tests/test_tensor.py::randomTests::test_random_", "tests/test_tensor.py::nonzeroTests::test_non_zero", "tests/test_tensor.py::cumprodTest::test_cumprod", "tests/test_tensor.py::cumprodTest::test_cumprod_", "tests/test_tensor.py::splitTests::test_split", "tests/test_tensor.py::squeezeTests::test_squeeze", "tests/test_tensor.py::expandAsTests::test_expand_as", "tests/test_tensor.py::meanTests::test_mean", "tests/test_tensor.py::notEqualTests::test_ne", "tests/test_tensor.py::notEqualTests::test_ne_", "tests/test_tensor.py::IndexTests::test_index_select", "tests/test_tensor.py::IndexTests::test_indexing", "tests/test_tensor.py::gatherTests::test_gather_numerical_1", "tests/test_tensor.py::gatherTests::test_gather_numerical_2", "tests/test_tensor.py::scatterTests::test_scatter_dim_out_Of_range", "tests/test_tensor.py::scatterTests::test_scatter_index_out_of_range", "tests/test_tensor.py::scatterTests::test_scatter_index_src_dimension_mismatch", "tests/test_tensor.py::scatterTests::test_scatter_index_type", 
"tests/test_tensor.py::scatterTests::test_scatter_numerical_0", "tests/test_tensor.py::scatterTests::test_scatter_numerical_1", "tests/test_tensor.py::scatterTests::test_scatter_numerical_2", "tests/test_tensor.py::scatterTests::test_scatter_numerical_3", "tests/test_tensor.py::scatterTests::test_scatter_numerical_4", "tests/test_tensor.py::scatterTests::test_scatter_numerical_5", "tests/test_tensor.py::scatterTests::test_scatter_numerical_6", "tests/test_tensor.py::remainderTests::test_remainder_", "tests/test_tensor.py::remainderTests::test_remainder_broadcasting", "tests/test_tensor.py::testMv::test_mv", "tests/test_tensor.py::testMv::test_mv_tensor", "tests/test_tensor.py::masked_scatter_Tests::test_masked_scatter_1", "tests/test_tensor.py::masked_scatter_Tests::test_masked_scatter_braodcasting_1", "tests/test_tensor.py::masked_scatter_Tests::test_masked_scatter_braodcasting_2", "tests/test_tensor.py::masked_fill_Tests::test_masked_fill_", "tests/test_tensor.py::masked_fill_Tests::test_masked_fill_broadcasting", "tests/test_tensor.py::masked_select_Tests::test_masked_select", "tests/test_tensor.py::masked_select_Tests::test_masked_select_broadcasting_1", "tests/test_tensor.py::masked_select_Tests::test_masked_select_broadcasting_2", "tests/test_tensor.py::masked_select_Tests::test_tensor_base_masked_select", "tests/test_tensor.py::eqTests::test_eq_in_place_with_number", "tests/test_tensor.py::eqTests::test_eq_in_place_with_tensor", "tests/test_tensor.py::eqTests::test_eq_with_number", "tests/test_tensor.py::eqTests::test_eq_with_tensor", "tests/test_tensor.py::mm_test::test_mm_1d", "tests/test_tensor.py::mm_test::test_mm_2d", "tests/test_tensor.py::mm_test::test_mm_3d", "tests/test_tensor.py::newTensorTests::test_encrypted_error", "tests/test_tensor.py::newTensorTests::test_return_new_float_tensor", "tests/test_tensor.py::newTensorTests::test_return_new_int_tensor", "tests/test_tensor.py::fmodTest::test_fmod_number", 
"tests/test_tensor.py::fmodTest::test_fmod_tensor", "tests/test_tensor.py::fmod_Test::test_fmod_number", "tests/test_tensor.py::fmod_Test::test_fmod_tensor" ]
[]
Apache License 2.0
1,739
[ "syft/tensor.py" ]
[ "syft/tensor.py" ]
serge-sans-paille__gast-14
a43dae94ba014a82cf9303cf827ba1f17bc036a8
2017-10-07 08:35:46
a43dae94ba014a82cf9303cf827ba1f17bc036a8
diff --git a/README.rst b/README.rst index 1a82f5c..1fea29c 100644 --- a/README.rst +++ b/README.rst @@ -151,6 +151,11 @@ ASDL | Import(alias* names) | ImportFrom(identifier? module, alias* names, int? level) + -- Doesn't capture requirement that locals must be + -- defined if globals is + -- still supports use as a function! + | Exec(expr body, expr? globals, expr? locals) + | Global(identifier* names) | Nonlocal(identifier* names) | Expr(expr value) @@ -180,8 +185,11 @@ ASDL -- x < 4 < 3 and (x < 4) < 3 | Compare(expr left, cmpop* ops, expr* comparators) | Call(expr func, expr* args, keyword* keywords) + | Repr(expr value) | Num(object n) -- a number as a PyObject. | Str(string s) -- need to specify raw, unicode, etc? + | FormattedValue(expr value, int? conversion, expr? format_spec) + | JoinedStr(expr* values) | Bytes(bytes s) | NameConstant(singleton value) | Ellipsis diff --git a/gast/gast.py b/gast/gast.py index f1bf062..fcbd656 100644 --- a/gast/gast.py +++ b/gast/gast.py @@ -132,6 +132,9 @@ _nodes = { (expr,)), 'Str': (('s',), ('lineno', 'col_offset',), (expr,)), + 'FormattedValue': (('value', 'conversion', 'format_spec',), + ('lineno', 'col_offset',), (expr,)), + 'JoinedStr': (('values',), ('lineno', 'col_offset',), (expr,)), 'Bytes': (('s',), ('lineno', 'col_offset',), (expr,)), 'NameConstant': (('value',), ('lineno', 'col_offset',), diff --git a/tox.ini b/tox.ini index 7f0c4e1..4a2a146 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py27,py30,py31,py32,py33,py34,py35 +envlist = py27,py30,py31,py32,py33,py34,py35,py36 [testenv] deps = astunparse
Python 3.6 f-strings are not supported by gast What happens with `gast`: ```sh $ python3.6 -c "import gast; print(gast.dump(gast.parse('f\'{x}\'')))" Module(body=[Expr(value=None)]) ``` What happens with standard `ast`: ```sh $ python3.6 -c "import ast; print(ast.dump(ast.parse('f\'{x}\'')))" Module(body=[Expr(value=JoinedStr(values=[FormattedValue(value=Name(id='x', ctx=Load()), conversion=-1, format_spec=None)]))]) ```
serge-sans-paille/gast
diff --git a/tests/test_compat.py b/tests/test_compat.py index 9faed17..fdf0b0d 100644 --- a/tests/test_compat.py +++ b/tests/test_compat.py @@ -62,6 +62,18 @@ class CompatTestCase(unittest.TestCase): tree = gast.parse(code) compile(gast.gast_to_ast(tree), '<test>', 'exec') + if sys.version_info.minor >= 6: + + def test_FormattedValue(self): + code = 'e = 1; f"{e}"' + tree = gast.parse(code) + compile(gast.gast_to_ast(tree), '<test>', 'exec') + + def test_JoinedStr(self): + code = 'e = 1; f"e = {e}"' + tree = gast.parse(code) + compile(gast.gast_to_ast(tree), '<test>', 'exec') + if __name__ == '__main__': unittest.main()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 3 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-pep8" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 execnet==1.9.0 -e git+https://github.com/serge-sans-paille/gast.git@a43dae94ba014a82cf9303cf827ba1f17bc036a8#egg=gast importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pep8==1.7.1 pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 pytest-cache==1.0 pytest-pep8==1.0.6 toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: gast channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - execnet==1.9.0 - pep8==1.7.1 - pytest-cache==1.0 - pytest-pep8==1.0.6 prefix: /opt/conda/envs/gast
[ "tests/test_compat.py::CompatTestCase::test_FormattedValue", "tests/test_compat.py::CompatTestCase::test_JoinedStr" ]
[]
[ "tests/test_compat.py::CompatTestCase::test_ArgAnnotation", "tests/test_compat.py::CompatTestCase::test_Call", "tests/test_compat.py::CompatTestCase::test_KeywordOnlyArgument", "tests/test_compat.py::CompatTestCase::test_TryExceptNamed" ]
[]
BSD 3-Clause "New" or "Revised" License
1,740
[ "README.rst", "tox.ini", "gast/gast.py" ]
[ "README.rst", "tox.ini", "gast/gast.py" ]
cwacek__python-jsonschema-objects-89
7da07c489b698fe1df72ee219e5fd52cd37dbbb1
2017-10-07 21:49:59
ba178ce7680e14e4ac367a6fab5ea3655396668f
diff --git a/python_jsonschema_objects/wrapper_types.py b/python_jsonschema_objects/wrapper_types.py index 2801199..7a211db 100644 --- a/python_jsonschema_objects/wrapper_types.py +++ b/python_jsonschema_objects/wrapper_types.py @@ -275,10 +275,16 @@ class ArrayWrapper(collections.MutableSequence): item_constraint = classbuilder.TypeProxy(type_array) + elif isdict and item_constraint.get('type') == 'object': + """ We need to create a ProtocolBase object for this anonymous definition""" + uri = "{0}_{1}".format(name, "<anonymous_list_type>") + item_constraint = klassbuilder.construct( + uri, item_constraint) + props['__itemtype__'] = item_constraint props.update(addl_constraints) validator = type(str(name), (ArrayWrapper,), props) - return validator \ No newline at end of file + return validator
Top-level array elements are not built into objects Here is a working example of an _internal_ array of objects: `example1.json`: ``` { "$schema": "http://json-schema.org/draft-04/schema", "title": "Example1", "type": "object", "properties": { "name": { "type": "array", "items": { "type": "object", "properties": { "name": {"type": "string"} } } } } } ``` ``` ns1 = pjs.ObjectBuilder("example1.json").build_classes() j1 = ns1.Example1.from_json(json.dumps({'name': [{'value':'foo'}, {'value':'bar'}]})) j1.name[0] # Out[164]: <name_<anonymous_field> name=foo> j1.name[0].value # 'foo' ``` However a _top-level_ array causes a problem because the array elements do not seem to be "objectified" but remain as plain dicts: `example2.json` ``` { "$schema": "http://json-schema.org/draft-04/schema", "title": "Example2", "type": "array", "items": { "type": "object", "properties": { "name": { "type": "string" } } } } ``` ``` ns2 = pjs.ObjectBuilder("example2.json").build_classes() j2 = ns2.Example2.from_json(json.dumps([{'name': 'foo'}, {'name': 'bar'}])) j2[0] # Out[173]: {'name': 'foo'} type(j2[0]) # Out[179]: dict j2[0].name # AttributeError: 'dict' object has no attribute 'name' ``` `pjs._version.get_versions()` ``` {'dirty': False, 'error': None, 'full-revisionid': '71a6ad0becfb0b2bc2447015ec6ce90d7e3dc725', 'version': '0.2.2'} ```
cwacek/python-jsonschema-objects
diff --git a/test/test_regression_88.py b/test/test_regression_88.py new file mode 100644 index 0000000..7dd9268 --- /dev/null +++ b/test/test_regression_88.py @@ -0,0 +1,49 @@ +import pytest +import json + +import python_jsonschema_objects as pjs + + +def test_nested_arrays_work_fine(): + schema = { + "$schema": "http://json-schema.org/draft-04/schema", + "title": "Example1", + "type": "object", + "properties": { + "name": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"} + } + } + } + } + } + + ns1 = pjs.ObjectBuilder(schema).build_classes() + j1 = ns1.Example1.from_json(json.dumps({'name': [{'value':'foo'}, {'value':'bar'}]})) + assert j1.name[0].value == 'foo' + assert j1.name[1].value == 'bar' + + +def test_top_level_arrays_are_converted_to_objects_properly(): + schema = { + "$schema": "http://json-schema.org/draft-04/schema", + "title": "Example2", + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"} + } + } + } + + ns2 = pjs.ObjectBuilder(schema).build_classes() + j2 = ns2.Example2.from_json(json.dumps([{'name': 'foo'}, {'name': 'bar'}])) + assert not isinstance(j2[0], dict) # Out[173]: {'name': 'foo'} + assert j2[0].name == 'foo' + assert j2[1].name == 'bar' +
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_git_commit_hash" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
0.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio" ], "pre_install": null, "python": "3.5", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 coverage==6.2 execnet==1.9.0 importlib-metadata==4.8.3 inflection==0.2.0 iniconfig==1.1.1 jsonschema==2.6.0 Markdown==2.4 packaging==21.3 pandocfilters==1.5.1 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pyrsistent==0.18.0 pytest==7.0.1 pytest-asyncio==0.16.0 pytest-cov==4.0.0 pytest-mock==3.6.1 pytest-xdist==3.0.2 -e git+https://github.com/cwacek/python-jsonschema-objects.git@7da07c489b698fe1df72ee219e5fd52cd37dbbb1#egg=python_jsonschema_objects six==1.17.0 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: python-jsonschema-objects channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - coverage==6.2 - execnet==1.9.0 - importlib-metadata==4.8.3 - inflection==0.2.0 - iniconfig==1.1.1 - jsonschema==2.6.0 - markdown==2.4 - packaging==21.3 - pandocfilters==1.5.1 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pyrsistent==0.18.0 - pytest==7.0.1 - pytest-asyncio==0.16.0 - pytest-cov==4.0.0 - pytest-mock==3.6.1 - pytest-xdist==3.0.2 - six==1.17.0 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/python-jsonschema-objects
[ "test/test_regression_88.py::test_top_level_arrays_are_converted_to_objects_properly" ]
[]
[ "test/test_regression_88.py::test_nested_arrays_work_fine" ]
[]
MIT License
1,741
[ "python_jsonschema_objects/wrapper_types.py" ]
[ "python_jsonschema_objects/wrapper_types.py" ]
cwacek__python-jsonschema-objects-90
7da07c489b698fe1df72ee219e5fd52cd37dbbb1
2017-10-07 22:27:10
ba178ce7680e14e4ac367a6fab5ea3655396668f
diff --git a/python_jsonschema_objects/examples/README.md b/python_jsonschema_objects/examples/README.md index 2136079..700c7b6 100644 --- a/python_jsonschema_objects/examples/README.md +++ b/python_jsonschema_objects/examples/README.md @@ -174,6 +174,60 @@ schemas are unique. } ``` +## Generating Multiple Top Level Objects + +Sometimes what you really want to do is define a couple +of different objects in a schema, and then be able to use +them flexibly. + +Any object built as a reference can be obtained from the top +level namespace. Thus, to obtain multiple top level classes, +define them separately in a definitions structure, then simply +make the top level schema refer to each of them as a `oneOf`. + +The schema and code example below show how this works. + +``` schema +{ + "title": "MultipleObjects", + "id": "foo", + "type": "object", + "oneOf":[ + {"$ref": "#/definitions/ErrorResponse"}, + {"$ref": "#/definitions/VersionGetResponse"} + ], + "definitions": { + "ErrorResponse": { + "title": "Error Response", + "id": "Error Response", + "type": "object", + "properties": { + "message": {"type": "string"}, + "status": {"type": "integer"} + }, + "required": ["message", "status"] + }, + "VersionGetResponse": { + "title": "Version Get Response", + "type": "object", + "properties": { + "local": {"type": "boolean"}, + "version": {"type": "string"} + }, + "required": ["version"] + } + } +} +``` + +``` python +>>> builder = pjs.ObjectBuilder('multiple_objects.json') +>>> classes = builder.build_classes() +>>> print(dir(classes)) +[u'ErrorResponse', 'Local', 'Message', u'Multipleobjects', +'Status', 'Version', u'VersionGetResponse'] +``` + ## Installation pip install python_jsonschema_objects
Request: example for multiple top-level objects in a schema The readme suggests that multiple objects can be extracted from a schema by `ObjectBuilder.build_classes`, however it's really not clear to me how one can define multiple objects within a single schema in a way where this would work. Can you please give an example of how you see this working? For example, given a schema that (somehow) contains the definition of two top-level objects, the expectation might be that the result of `ObjectBuilder(schema).build_classes()` is a namespace with classes for both objects. Does this require combination of schemas with the `anyOf` directive? Or is the intention that there would be a separate schema for each object, and therefore `ObjectBuilder(schema).build_classes()` would need to be called within a loop over all schemas? Example: ``` schema = { "id": "My Schema", "properties": { "ErrorResponse": { "title": "Error Response", "type": "object", "properties": { "message": {"type": "string"}, "status": {"type": "integer"}, }, "required": ["message", "status"], }, "VersionGetResponse": { "title": "Version Get Response", "type": "object", "properties": { "local": {"type": "boolean"}, "version": {"type": "string"}, }, "required": ["version"], } } } print(dir(pjs.ObjectBuilder(schema).build_classes())) # Out: ['ErrorResponse<anonymous>', 'Local', 'Message', 'Status', 'MySchema', 'Version', 'VersionGetResponse<anonymous>'] ``` In this case, the two useful classes have `<anonymous>` suffixes which makes them difficult (but not impossible) to use.
cwacek/python-jsonschema-objects
diff --git a/test/test_pytest.py b/test/test_pytest.py index 867b63f..4e52b6f 100644 --- a/test/test_pytest.py +++ b/test/test_pytest.py @@ -64,9 +64,8 @@ def test_build_classes_is_idempotent(): } } builder = pjs.ObjectBuilder(schema) + x = builder.build_classes() builder.build_classes() - builder.build_classes() - def test_underscore_properties(): diff --git a/test/test_regression_87.py b/test/test_regression_87.py new file mode 100644 index 0000000..523b62f --- /dev/null +++ b/test/test_regression_87.py @@ -0,0 +1,15 @@ +import pytest + +import python_jsonschema_objects as pjs + + +def test_multiple_objects_are_defined(markdown_examples): + builder = pjs.ObjectBuilder( + markdown_examples['MultipleObjects'], + resolved=markdown_examples) + + assert builder + classes = builder.build_classes() + assert 'ErrorResponse' in classes + assert 'VersionGetResponse' in classes + print(dir(classes))
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
0.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio" ], "pre_install": null, "python": "3.5", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 coverage==6.2 execnet==1.9.0 importlib-metadata==4.8.3 inflection==0.2.0 iniconfig==1.1.1 jsonschema==2.6.0 Markdown==2.4 packaging==21.3 pandocfilters==1.5.1 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pyrsistent==0.18.0 pytest==7.0.1 pytest-asyncio==0.16.0 pytest-cov==4.0.0 pytest-mock==3.6.1 pytest-xdist==3.0.2 -e git+https://github.com/cwacek/python-jsonschema-objects.git@7da07c489b698fe1df72ee219e5fd52cd37dbbb1#egg=python_jsonschema_objects six==1.17.0 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: python-jsonschema-objects channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - coverage==6.2 - execnet==1.9.0 - importlib-metadata==4.8.3 - inflection==0.2.0 - iniconfig==1.1.1 - jsonschema==2.6.0 - markdown==2.4 - packaging==21.3 - pandocfilters==1.5.1 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pyrsistent==0.18.0 - pytest==7.0.1 - pytest-asyncio==0.16.0 - pytest-cov==4.0.0 - pytest-mock==3.6.1 - pytest-xdist==3.0.2 - six==1.17.0 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/python-jsonschema-objects
[ "test/test_regression_87.py::test_multiple_objects_are_defined" ]
[]
[ "test/test_pytest.py::test_schema_validation", "test/test_pytest.py::test_regression_9", "test/test_pytest.py::test_build_classes_is_idempotent", "test/test_pytest.py::test_underscore_properties", "test/test_pytest.py::test_array_regressions", "test/test_pytest.py::test_arrays_can_have_reffed_items_of_mixed_type", "test/test_pytest.py::test_regression_39", "test/test_pytest.py::test_loads_markdown_schema_extraction", "test/test_pytest.py::test_object_builder_loads_memory_references", "test/test_pytest.py::test_object_builder_reads_all_definitions", "test/test_pytest.py::test_oneOf_validates_against_any_valid[{\"MyData\":", "test/test_pytest.py::test_oneOf_fails_against_non_matching", "test/test_pytest.py::test_oneOfBare_validates_against_any_valid[{\"MyAddress\":", "test/test_pytest.py::test_oneOfBare_validates_against_any_valid[{\"firstName\":", "test/test_pytest.py::test_oneOfBare_fails_against_non_matching", "test/test_pytest.py::test_additional_props_allowed_by_default", "test/test_pytest.py::test_additional_props_permitted_explicitly", "test/test_pytest.py::test_still_raises_when_accessing_undefined_attrs", "test/test_pytest.py::test_permits_deletion_of_additional_properties", "test/test_pytest.py::test_additional_props_disallowed_explicitly", "test/test_pytest.py::test_objects_can_be_empty", "test/test_pytest.py::test_object_equality_should_compare_data", "test/test_pytest.py::test_object_allows_attributes_in_oncstructor", "test/test_pytest.py::test_object_validates_on_json_decode", "test/test_pytest.py::test_object_validates_enumerations", "test/test_pytest.py::test_validation_of_mixed_type_enums", "test/test_pytest.py::test_objects_allow_non_required_attrs_to_be_missing", "test/test_pytest.py::test_objects_require_required_attrs_on_validate", "test/test_pytest.py::test_attribute_access_via_dict", "test/test_pytest.py::test_attribute_set_via_dict", "test/test_pytest.py::test_numeric_attribute_validation", 
"test/test_pytest.py::test_objects_validate_prior_to_serializing", "test/test_pytest.py::test_serializing_removes_null_objects", "test/test_pytest.py::test_lists_get_serialized_correctly", "test/test_pytest.py::test_dictionary_transformation[pdict0]", "test/test_pytest.py::test_dictionary_transformation[pdict1]", "test/test_pytest.py::test_strict_mode", "test/test_pytest.py::test_boolean_in_child_object" ]
[]
MIT License
1,742
[ "python_jsonschema_objects/examples/README.md" ]
[ "python_jsonschema_objects/examples/README.md" ]
scrapy__scrapy-2956
895df937a3a18683836ca9e228982e9ea5842aef
2017-10-09 15:11:56
886513c3751b92e42dcc8cb180d4c15a5a11ccaf
codecov[bot]: # [Codecov](https://codecov.io/gh/scrapy/scrapy/pull/2956?src=pr&el=h1) Report > Merging [#2956](https://codecov.io/gh/scrapy/scrapy/pull/2956?src=pr&el=desc) into [master](https://codecov.io/gh/scrapy/scrapy/commit/f729d74886be2290fcbbcaa21d366b770ff21008?src=pr&el=desc) will **decrease** coverage by `0.01%`. > The diff coverage is `71.42%`. ```diff @@ Coverage Diff @@ ## master #2956 +/- ## ========================================== - Coverage 84.59% 84.58% -0.02% ========================================== Files 164 164 Lines 9249 9255 +6 Branches 1376 1378 +2 ========================================== + Hits 7824 7828 +4 - Misses 1167 1168 +1 - Partials 258 259 +1 ``` | [Impacted Files](https://codecov.io/gh/scrapy/scrapy/pull/2956?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [scrapy/dupefilters.py](https://codecov.io/gh/scrapy/scrapy/pull/2956?src=pr&el=tree#diff-c2NyYXB5L2R1cGVmaWx0ZXJzLnB5) | `90.56% <100%> (+0.36%)` | :arrow_up: | | [scrapy/core/scheduler.py](https://codecov.io/gh/scrapy/scrapy/pull/2956?src=pr&el=tree#diff-c2NyYXB5L2NvcmUvc2NoZWR1bGVyLnB5) | `61.68% <60%> (-0.46%)` | :arrow_down: | johtso: Any update on this? kmike: @johtso thanks for the ping :) @elacuesta I think this PR looks good, it needs just a small testing tweak. It'd be also good to add a test for dupefilters without from_crawler/from_settings methods. What's the reason for supporting them, by the way? elacuesta: Hello Mikhail! I think I addressed your latest comments (setting the string to compare in the class itself, add a test for dupefilters created without from_crawler/from_settings methods). Not sure what would be the case for the directly created dupefilters, maybe someone needs to do some initialization but doesn't need the crawler nor the settings and just a child class with a custom constructor is enough? kmike: Thanks @elacuesta! 
Test coverage is not complete because tests still don't check that dupefilter without from_crawler / from_settings methods work: dupefilter you're using inherits from a base class which has these methods. elacuesta: I didn't realize that before, thanks! I changed the test case, the new class doesn't implement any of the dupefilter methods but I think that's not the point of the test, it's just to ensure the right class is used. elacuesta: Ping @kmike :innocent: kmike: This looks good 👍 However, I'd prefer to avoid copy-paste, and use the same function to create middlewares/extensions and dupefilters. Such function can be found in https://github.com/scrapy/scrapy/pull/1605, which is almost ready to merge as well; I wonder if you're up to finishing it, and using as a base for your PR :) elacuesta: Updated to use `scrapy.utils.misc.create_instance`. The diff shows unrelated changes but that should go away after merging #3348 dangra: hi @elacuesta, can you rebase now that #3348 is merged? thanks
diff --git a/scrapy/core/scheduler.py b/scrapy/core/scheduler.py index a54b4daf0..eb790a67e 100644 --- a/scrapy/core/scheduler.py +++ b/scrapy/core/scheduler.py @@ -4,7 +4,7 @@ import logging from os.path import join, exists from scrapy.utils.reqser import request_to_dict, request_from_dict -from scrapy.utils.misc import load_object +from scrapy.utils.misc import load_object, create_instance from scrapy.utils.job import job_dir logger = logging.getLogger(__name__) @@ -26,7 +26,7 @@ class Scheduler(object): def from_crawler(cls, crawler): settings = crawler.settings dupefilter_cls = load_object(settings['DUPEFILTER_CLASS']) - dupefilter = dupefilter_cls.from_settings(settings) + dupefilter = create_instance(dupefilter_cls, settings, crawler) pqclass = load_object(settings['SCHEDULER_PRIORITY_QUEUE']) dqclass = load_object(settings['SCHEDULER_DISK_QUEUE']) mqclass = load_object(settings['SCHEDULER_MEMORY_QUEUE'])
Give `BaseDupeFilter` access to spider-object I am in a situation where a single item gets defined over a sequence of multiple pages, passing values between the particular callbacks using the `meta`-dict. I believe this is a common approach among scrapy-users. However, it feels like this approach is difficult to get right. With the default implementation of `RFPDupefilter`, my callback-chain is teared apart quite easy, as fingerprints don't take the meta-dict into account. The corresponding requests are thrown away, the information in the meta-dict which made this request unique is lost. I have currently implemented by own meta-aware DupeFilter, but I am still facing the problem that it lacks access to the specific spider in use - and only the Spider really knows the meta-attributes that make a request unique. I could now take it a step further and implement my own scheduler, but I'm afraid that all these custom extensions make my code very brittle wrt future versions of scrapy.
scrapy/scrapy
diff --git a/tests/test_dupefilters.py b/tests/test_dupefilters.py index 2d1a4bfff..db69597a2 100644 --- a/tests/test_dupefilters.py +++ b/tests/test_dupefilters.py @@ -5,11 +5,60 @@ import shutil from scrapy.dupefilters import RFPDupeFilter from scrapy.http import Request +from scrapy.core.scheduler import Scheduler from scrapy.utils.python import to_bytes +from scrapy.utils.job import job_dir +from scrapy.utils.test import get_crawler + + +class FromCrawlerRFPDupeFilter(RFPDupeFilter): + + @classmethod + def from_crawler(cls, crawler): + debug = crawler.settings.getbool('DUPEFILTER_DEBUG') + df = cls(job_dir(crawler.settings), debug) + df.method = 'from_crawler' + return df + + +class FromSettingsRFPDupeFilter(RFPDupeFilter): + + @classmethod + def from_settings(cls, settings): + debug = settings.getbool('DUPEFILTER_DEBUG') + df = cls(job_dir(settings), debug) + df.method = 'from_settings' + return df + + +class DirectDupeFilter(object): + method = 'n/a' class RFPDupeFilterTest(unittest.TestCase): + def test_df_from_crawler_scheduler(self): + settings = {'DUPEFILTER_DEBUG': True, + 'DUPEFILTER_CLASS': __name__ + '.FromCrawlerRFPDupeFilter'} + crawler = get_crawler(settings_dict=settings) + scheduler = Scheduler.from_crawler(crawler) + self.assertTrue(scheduler.df.debug) + self.assertEqual(scheduler.df.method, 'from_crawler') + + def test_df_from_settings_scheduler(self): + settings = {'DUPEFILTER_DEBUG': True, + 'DUPEFILTER_CLASS': __name__ + '.FromSettingsRFPDupeFilter'} + crawler = get_crawler(settings_dict=settings) + scheduler = Scheduler.from_crawler(crawler) + self.assertTrue(scheduler.df.debug) + self.assertEqual(scheduler.df.method, 'from_settings') + + def test_df_direct_scheduler(self): + settings = {'DUPEFILTER_CLASS': __name__ + '.DirectDupeFilter'} + crawler = get_crawler(settings_dict=settings) + scheduler = Scheduler.from_crawler(crawler) + self.assertEqual(scheduler.df.method, 'n/a') + def test_filter(self): dupefilter = RFPDupeFilter() 
dupefilter.open()
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 1 }
1.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==25.3.0 Automat==24.8.1 cffi==1.17.1 constantly==23.10.4 coverage==7.8.0 cryptography==44.0.2 cssselect==1.3.0 exceptiongroup==1.2.2 execnet==2.1.1 hyperlink==21.0.0 idna==3.10 incremental==24.7.2 iniconfig==2.1.0 jmespath==1.0.1 lxml==5.3.1 packaging==24.2 parsel==1.10.0 pluggy==1.5.0 pyasn1==0.6.1 pyasn1_modules==0.4.2 pycparser==2.22 PyDispatcher==2.0.7 pyOpenSSL==25.0.0 pytest==8.3.5 pytest-asyncio==0.26.0 pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-xdist==3.6.1 queuelib==1.7.0 -e git+https://github.com/scrapy/scrapy.git@895df937a3a18683836ca9e228982e9ea5842aef#egg=Scrapy service-identity==24.2.0 six==1.17.0 tomli==2.2.1 Twisted==24.11.0 typing_extensions==4.13.0 w3lib==2.3.1 zope.interface==7.2
name: scrapy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==25.3.0 - automat==24.8.1 - cffi==1.17.1 - constantly==23.10.4 - coverage==7.8.0 - cryptography==44.0.2 - cssselect==1.3.0 - exceptiongroup==1.2.2 - execnet==2.1.1 - hyperlink==21.0.0 - idna==3.10 - incremental==24.7.2 - iniconfig==2.1.0 - jmespath==1.0.1 - lxml==5.3.1 - packaging==24.2 - parsel==1.10.0 - pluggy==1.5.0 - pyasn1==0.6.1 - pyasn1-modules==0.4.2 - pycparser==2.22 - pydispatcher==2.0.7 - pyopenssl==25.0.0 - pytest==8.3.5 - pytest-asyncio==0.26.0 - pytest-cov==6.0.0 - pytest-mock==3.14.0 - pytest-xdist==3.6.1 - queuelib==1.7.0 - service-identity==24.2.0 - six==1.17.0 - tomli==2.2.1 - twisted==24.11.0 - typing-extensions==4.13.0 - w3lib==2.3.1 - zope-interface==7.2 prefix: /opt/conda/envs/scrapy
[ "tests/test_dupefilters.py::RFPDupeFilterTest::test_df_direct_scheduler", "tests/test_dupefilters.py::RFPDupeFilterTest::test_df_from_crawler_scheduler" ]
[]
[ "tests/test_dupefilters.py::RFPDupeFilterTest::test_df_from_settings_scheduler", "tests/test_dupefilters.py::RFPDupeFilterTest::test_dupefilter_path", "tests/test_dupefilters.py::RFPDupeFilterTest::test_filter", "tests/test_dupefilters.py::RFPDupeFilterTest::test_request_fingerprint" ]
[]
BSD 3-Clause "New" or "Revised" License
1,743
[ "scrapy/core/scheduler.py" ]
[ "scrapy/core/scheduler.py" ]
mjs__imapclient-300
42e118739e0ccca49c372bab35574acc9ec7e502
2017-10-09 21:32:17
2abdac690fa653fa2d0d55b7617be24101597698
NicolasLM: Awesome! Can you add a changelog entry? mlorant: Changelog added (in 2.0.0, new section "Fixed")
diff --git a/doc/src/api.rst b/doc/src/api.rst index 3bbfebd..50a2a0e 100644 --- a/doc/src/api.rst +++ b/doc/src/api.rst @@ -21,8 +21,8 @@ are encountered during parsing. Exceptions ~~~~~~~~~~ -IMAPClient wraps exceptions raised by imaplib to ease the error handling. -All the exceptions related to IMAP errors are defined in the module +IMAPClient wraps exceptions raised by imaplib to ease the error handling. +All the exceptions related to IMAP errors are defined in the module `imapclient.exceptions`. The following general exceptions may be raised: * IMAPClientError: the base class for IMAPClient's exceptions and the @@ -51,13 +51,9 @@ malformed exception. In particular: * backports.ssl.CertificateError: raised when TLS certification verification fails. This is *not* a subclass of SSLError. -Utilities -~~~~~~~~~ -.. automodule:: imapclient.testable_imapclient - :members: TLS Support ~~~~~~~~~~~ .. automodule:: imapclient.tls - :members: + :members: \ No newline at end of file diff --git a/doc/src/releases.rst b/doc/src/releases.rst index 29e3ef7..2bc6734 100644 --- a/doc/src/releases.rst +++ b/doc/src/releases.rst @@ -6,7 +6,7 @@ Added ----- -- Connection and read/write operations timeout can now be distinct, +- Connection and read/write operations timeout can now be distinct, using `imapclient.SocketTimeout` namedtuple as `timeout` parameter. - A context manager is introduced to automatically close connections to remote servers. @@ -20,6 +20,12 @@ Changed - More precise exceptions available in `imapclient.exceptions` are raised when an error happens +Fixed +----- +- Modified UTF-7 encoding function had quirks in its original algorithm, + leading to incorrect encoded output in some cases. The algorithm, described + in RFC 3501, has been reimplemented to fix #187 and is better documented. 
+ Other ----- - Drop support of OAUTH(1) diff --git a/imapclient/imap_utf7.py b/imapclient/imap_utf7.py index c689055..eeb5668 100644 --- a/imapclient/imap_utf7.py +++ b/imapclient/imap_utf7.py @@ -1,37 +1,17 @@ -# The contents of this file has been derived code from the Twisted project -# (http://twistedmatrix.com/). The original author is Jp Calderone. - -# Twisted project license follows: - -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: +# This file contains two main methods used to encode and decode UTF-7 +# string, described in the RFC 3501. There are some variations specific +# to IMAP4rev1, so the built-in Python UTF-7 codec can't be used instead. # -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - +# The main difference is the shift character (used to switch from ASCII to +# base64 encoding context), which is & in this modified UTF-7 convention, +# since + is considered as mainly used in mailbox names. +# Other variations and examples can be found in the RFC 3501, section 5.1.3. 
from __future__ import unicode_literals +import binascii from six import binary_type, text_type, byte2int, iterbytes, unichr -PRINTABLE = set(range(0x20, 0x26)) | set(range(0x27, 0x7f)) - -# TODO: module needs refactoring (e.g. variable names suck) - - def encode(s): """Encode a folder name using IMAP modified UTF-7 encoding. @@ -41,27 +21,36 @@ def encode(s): if not isinstance(s, text_type): return s - r = [] - _in = [] - - def extend_result_if_chars_buffered(): - if _in: - r.extend([b'&', modified_utf7(''.join(_in)), b'-']) - del _in[:] + res = [] + b64_buffer = [] + def consume_b64_buffer(buf): + """ + Consume the buffer by encoding it into a modified base 64 representation + and surround it with shift characters & and - + """ + if b64_buffer: + res.extend([b'&', base64_utf7_encode(buf), b'-']) + del buf[:] for c in s: - if ord(c) in PRINTABLE: - extend_result_if_chars_buffered() - r.append(c.encode('latin-1')) - elif c == '&': - extend_result_if_chars_buffered() - r.append(b'&-') + # printable ascii case should not be modified + if 0x20 <= ord(c) <= 0x7e: + consume_b64_buffer(b64_buffer) + # Special case: & is used as shift character so we need to escape it in ASCII + if c == '&': + res.append(b'&-') + else: + res.append(c.encode('ascii')) + + # Bufferize characters that will be encoded in base64 and append them later + # in the result, when iterating over ASCII character or the end of string else: - _in.append(c) + b64_buffer.append(c) - extend_result_if_chars_buffered() + # Consume the remaining buffer if the string finish with non-ASCII characters + consume_b64_buffer(b64_buffer) - return b''.join(r) + return b''.join(res) AMPERSAND_ORD = byte2int(b'&') @@ -75,35 +64,43 @@ def decode(s): unicode. If non-bytes/str input is provided, the input is returned unchanged. 
""" - if not isinstance(s, binary_type): return s - r = [] - _in = bytearray() + res = [] + # Store base64 substring that will be decoded once stepping on end shift character + b64_buffer = bytearray() for c in iterbytes(s): - if c == AMPERSAND_ORD and not _in: - _in.append(c) - elif c == DASH_ORD and _in: - if len(_in) == 1: - r.append('&') + # Shift character without anything in buffer -> starts storing base64 substring + if c == AMPERSAND_ORD and not b64_buffer: + b64_buffer.append(c) + # End shift char. -> append the decoded buffer to the result and reset it + elif c == DASH_ORD and b64_buffer: + # Special case &-, representing "&" escaped + if len(b64_buffer) == 1: + res.append('&') else: - r.append(modified_deutf7(_in[1:])) - _in = bytearray() - elif _in: - _in.append(c) + res.append(base64_utf7_decode(b64_buffer[1:])) + b64_buffer = bytearray() + # Still buffering between the shift character and the shift back to ASCII + elif b64_buffer: + b64_buffer.append(c) + # No buffer initialized yet, should be an ASCII printable char else: - r.append(unichr(c)) - if _in: - r.append(modified_deutf7(_in[1:])) - return ''.join(r) + res.append(unichr(c)) + + # Decode the remaining buffer if any + if b64_buffer: + res.append(base64_utf7_decode(b64_buffer[1:])) + + return ''.join(res) -def modified_utf7(s): - s_utf7 = s.encode('utf-7') - return s_utf7[1:-1].replace(b'/', b',') +def base64_utf7_encode(buffer): + s = ''.join(buffer).encode('utf-16be') + return binascii.b2a_base64(s).rstrip(b'\n=').replace(b'/', b',') -def modified_deutf7(s): +def base64_utf7_decode(s): s_utf7 = b'+' + s.replace(b',', b'/') + b'-' return s_utf7.decode('utf-7')
Investigate claims that UTF-7 encoding/decoding doesn't always work Originally reported by: **Menno Smits (Bitbucket: [mjs0](https://bitbucket.org/mjs0))** --- See: https://github.com/MarechJ/py3_imap_utf7 If/when any problems are fixed, ask the author to update the page. Also add comments to SO where this is mentioned. --- - Bitbucket: https://bitbucket.org/mjs0/imapclient/issue/190
mjs/imapclient
diff --git a/tests/imapclient_test.py b/tests/imapclient_test.py index 4f6c406..7bef2dc 100644 --- a/tests/imapclient_test.py +++ b/tests/imapclient_test.py @@ -1,4 +1,4 @@ -from imapclient.testable_imapclient import TestableIMAPClient as IMAPClient +from .testable_imapclient import TestableIMAPClient as IMAPClient from .util import unittest diff --git a/tests/test_imap_utf7.py b/tests/test_imap_utf7.py index 9fe64f1..29e7d0e 100644 --- a/tests/test_imap_utf7.py +++ b/tests/test_imap_utf7.py @@ -22,6 +22,7 @@ class IMAP4UTF7TestCase(unittest.TestCase): ['~peter/mail/\u65e5\u672c\u8a9e/\u53f0\u5317', b'~peter/mail/&ZeVnLIqe-/&U,BTFw-'], # example from RFC 2060 ['\x00foo', b'&AAA-foo'], + ['foo\r\n\nbar\n', b'foo&AA0ACgAK-bar&AAo-'] # see imapclient/#187 issue ] def test_encode(self): diff --git a/tests/test_imapclient.py b/tests/test_imapclient.py index 2ce58bf..4152725 100644 --- a/tests/test_imapclient.py +++ b/tests/test_imapclient.py @@ -15,8 +15,8 @@ import six from imapclient.exceptions import CapabilityError, IMAPClientError from imapclient.imapclient import IMAPlibLoggerAdapter from imapclient.fixed_offset import FixedOffset -from imapclient.testable_imapclient import TestableIMAPClient as IMAPClient +from .testable_imapclient import TestableIMAPClient as IMAPClient from .imapclient_test import IMAPClientTest from .util import patch, sentinel, Mock diff --git a/imapclient/testable_imapclient.py b/tests/testable_imapclient.py similarity index 54% rename from imapclient/testable_imapclient.py rename to tests/testable_imapclient.py index 64d6411..6efe29d 100644 --- a/imapclient/testable_imapclient.py +++ b/tests/testable_imapclient.py @@ -4,28 +4,11 @@ from __future__ import unicode_literals -from .imapclient import IMAPClient - -try: - from unittest.mock import Mock -except ImportError: - try: - from mock import Mock - except ImportError: - raise ImportError( - 'mock library could not be loaded. 
Please install Python 3.3 or newer ' - 'or install the `mock` third-party package through PyPi.' - ) +from imapclient.imapclient import IMAPClient +from .util import Mock class TestableIMAPClient(IMAPClient): - """Wrapper of :py:class:`imapclient.IMAPClient` that mocks all - interaction with real IMAP server. - - This class should only be used in tests, where you can safely - interact with imapclient without running commands on a real - IMAP account. - """ def __init__(self): super(TestableIMAPClient, self).__init__('somehost')
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 3, "test_score": 0 }, "num_modified_files": 3 }
1.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "mock>=1.3.0", "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 -e git+https://github.com/mjs/imapclient.git@42e118739e0ccca49c372bab35574acc9ec7e502#egg=IMAPClient importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work mock==5.2.0 more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 six==1.17.0 toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: imapclient channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - mock==5.2.0 - six==1.17.0 prefix: /opt/conda/envs/imapclient
[ "tests/test_imap_utf7.py::IMAP4UTF7TestCase::test_encode" ]
[ "tests/test_imapclient.py::TestDebugLogging::test_IMAP_is_patched", "tests/test_imapclient.py::TestDebugLogging::test_redacted_password" ]
[ "tests/test_imap_utf7.py::IMAP4UTF7TestCase::test_decode", "tests/test_imap_utf7.py::IMAP4UTF7TestCase::test_printable_singletons", "tests/test_imapclient.py::TestListFolders::test_blanks", "tests/test_imapclient.py::TestListFolders::test_empty_response", "tests/test_imapclient.py::TestListFolders::test_folder_encode_off", "tests/test_imapclient.py::TestListFolders::test_funky_characters", "tests/test_imapclient.py::TestListFolders::test_list_folders", "tests/test_imapclient.py::TestListFolders::test_list_folders_NO", "tests/test_imapclient.py::TestListFolders::test_list_sub_folders", "tests/test_imapclient.py::TestListFolders::test_list_sub_folders_NO", "tests/test_imapclient.py::TestListFolders::test_mixed", "tests/test_imapclient.py::TestListFolders::test_quoted_specials", "tests/test_imapclient.py::TestListFolders::test_simple", "tests/test_imapclient.py::TestListFolders::test_unquoted_numeric_folder_name", "tests/test_imapclient.py::TestListFolders::test_unquoted_numeric_folder_name_parsed_as_long", "tests/test_imapclient.py::TestListFolders::test_utf7_decoding", "tests/test_imapclient.py::TestListFolders::test_without_quotes", "tests/test_imapclient.py::TestSelectFolder::test_normal", "tests/test_imapclient.py::TestSelectFolder::test_unselect", "tests/test_imapclient.py::TestAppend::test_with_msg_time", "tests/test_imapclient.py::TestAppend::test_without_msg_time", "tests/test_imapclient.py::TestAclMethods::test_getacl", "tests/test_imapclient.py::TestAclMethods::test_setacl", "tests/test_imapclient.py::TestIdleAndNoop::test_consume_until_tagged_response", "tests/test_imapclient.py::TestIdleAndNoop::test_idle", "tests/test_imapclient.py::TestIdleAndNoop::test_idle_check_blocking", "tests/test_imapclient.py::TestIdleAndNoop::test_idle_check_timeout", "tests/test_imapclient.py::TestIdleAndNoop::test_idle_check_with_data", "tests/test_imapclient.py::TestIdleAndNoop::test_idle_done", "tests/test_imapclient.py::TestIdleAndNoop::test_noop", 
"tests/test_imapclient.py::TestTimeNormalisation::test_default", "tests/test_imapclient.py::TestTimeNormalisation::test_pass_through", "tests/test_imapclient.py::TestNamespace::test_complex", "tests/test_imapclient.py::TestNamespace::test_folder_decoding", "tests/test_imapclient.py::TestNamespace::test_other_only", "tests/test_imapclient.py::TestNamespace::test_simple", "tests/test_imapclient.py::TestNamespace::test_without_folder_decoding", "tests/test_imapclient.py::TestCapabilities::test_caching", "tests/test_imapclient.py::TestCapabilities::test_has_capability", "tests/test_imapclient.py::TestCapabilities::test_post_auth_request", "tests/test_imapclient.py::TestCapabilities::test_preauth", "tests/test_imapclient.py::TestCapabilities::test_server_returned_capability_after_auth", "tests/test_imapclient.py::TestCapabilities::test_with_starttls", "tests/test_imapclient.py::TestId::test_id", "tests/test_imapclient.py::TestId::test_invalid_parameters", "tests/test_imapclient.py::TestId::test_no_support", "tests/test_imapclient.py::TestRawCommand::test_complex", "tests/test_imapclient.py::TestRawCommand::test_embedded_literal", "tests/test_imapclient.py::TestRawCommand::test_failed_continuation_wait", "tests/test_imapclient.py::TestRawCommand::test_invalid_input_type", "tests/test_imapclient.py::TestRawCommand::test_literal_at_end", "tests/test_imapclient.py::TestRawCommand::test_multiple_literals", "tests/test_imapclient.py::TestRawCommand::test_not_uid", "tests/test_imapclient.py::TestRawCommand::test_plain", "tests/test_imapclient.py::TestExpunge::test_expunge", "tests/test_imapclient.py::TestExpunge::test_id_expunge", "tests/test_imapclient.py::TestShutdown::test_shutdown", "tests/test_imapclient.py::TestContextManager::test_context_manager", "tests/test_imapclient.py::TestContextManager::test_context_manager_fail_closing", "tests/test_imapclient.py::TestContextManager::test_exception_inside_context_manager" ]
[]
BSD License
1,744
[ "doc/src/releases.rst", "doc/src/api.rst", "imapclient/imap_utf7.py" ]
[ "doc/src/releases.rst", "doc/src/api.rst", "imapclient/imap_utf7.py" ]
oasis-open__cti-python-stix2-74
3c80e5e7ebb641b5feb55337f9470ec9d9d58572
2017-10-09 21:36:36
ef6dade6f6773edd14aa16a2e4566e50bf74cbb4
diff --git a/stix2/core.py b/stix2/core.py index 3eaabb0..8ee11f5 100644 --- a/stix2/core.py +++ b/stix2/core.py @@ -15,6 +15,10 @@ from .utils import get_dict class STIXObjectProperty(Property): + def __init__(self, allow_custom=False): + self.allow_custom = allow_custom + super(STIXObjectProperty, self).__init__() + def clean(self, value): try: dictified = get_dict(value) @@ -25,7 +29,10 @@ class STIXObjectProperty(Property): if 'type' in dictified and dictified['type'] == 'bundle': raise ValueError('This property may not contain a Bundle object') - parsed_obj = parse(dictified) + if self.allow_custom: + parsed_obj = parse(dictified, allow_custom=True) + else: + parsed_obj = parse(dictified) return parsed_obj @@ -48,6 +55,10 @@ class Bundle(_STIXBase): else: kwargs['objects'] = list(args) + kwargs.get('objects', []) + allow_custom = kwargs.get('allow_custom', False) + if allow_custom: + self._properties['objects'] = ListProperty(STIXObjectProperty(True)) + super(Bundle, self).__init__(**kwargs) diff --git a/stix2/properties.py b/stix2/properties.py index afe994f..ca7f04c 100644 --- a/stix2/properties.py +++ b/stix2/properties.py @@ -124,7 +124,11 @@ class ListProperty(Property): obj_type = self.contained.type elif type(self.contained).__name__ is 'STIXObjectProperty': # ^ this way of checking doesn't require a circular import - obj_type = type(valid) + # valid is already an instance of a python-stix2 class; no need + # to turn it into a dictionary and then pass it to the class + # constructor again + result.append(valid) + continue else: obj_type = self.contained
Unable to creat 'Bundle' when using custom fields with SDO Hi, When attempting to generate a bundle, a failure message is created when passing an SDO with custom objects even with `allow_custom=True` set on the SDO object. example: `v = factory.create( Vulnerability, name="Test Vulnerability", custom_field = "This is custom", allow_custom=True )` `print Bundle(v)` Will result in the following output: `File "stix.py", line 142, in <module> print Bundle(v) File "/usr/local/lib/python2.7/dist-packages/stix2/core.py", line 51, in __init__ super(Bundle, self).__init__(**kwargs) File "/usr/local/lib/python2.7/dist-packages/stix2/base.py", line 121, in __init__ self._check_property(prop_name, prop_metadata, setting_kwargs) File "/usr/local/lib/python2.7/dist-packages/stix2/base.py", line 55, in _check_property kwargs[prop_name] = prop.clean(kwargs[prop_name]) File "/usr/local/lib/python2.7/dist-packages/stix2/properties.py", line 115, in clean valid = self.contained.clean(item) File "/usr/local/lib/python2.7/dist-packages/stix2/core.py", line 28, in clean parsed_obj = parse(dictified) File "/usr/local/lib/python2.7/dist-packages/stix2/core.py", line 94, in parse return obj_class(allow_custom=allow_custom, **obj) File "/usr/local/lib/python2.7/dist-packages/stix2/base.py", line 104, in __init__ raise ExtraPropertiesError(cls, extra_kwargs) stix2.exceptions.ExtraPropertiesError: Unexpected properties for Vulnerability: (custom_field).`
oasis-open/cti-python-stix2
diff --git a/stix2/test/test_custom.py b/stix2/test/test_custom.py index ff432c1..48529b9 100644 --- a/stix2/test/test_custom.py +++ b/stix2/test/test_custom.py @@ -81,6 +81,18 @@ def test_parse_identity_custom_property(data): assert identity.foo == "bar" +def test_custom_property_in_bundled_object(): + identity = stix2.Identity( + name="John Smith", + identity_class="individual", + x_foo="bar", + allow_custom=True, + ) + bundle = stix2.Bundle(identity, allow_custom=True) + + assert bundle.objects[0].x_foo == "bar" + + @stix2.sdo.CustomObject('x-new-type', [ ('property1', stix2.properties.StringProperty(required=True)), ('property2', stix2.properties.IntegerProperty()),
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 2 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "pip install tox-travis pre-commit" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 antlr4-python3-runtime==4.9.3 appdirs==1.4.4 async-generator==1.10 attrs==21.4.0 Babel==2.11.0 bleach==4.1.0 bump2version==1.0.1 bumpversion==0.6.0 certifi==2021.5.30 cfgv==3.3.1 charset-normalizer==2.0.12 colorama==0.4.5 coverage==6.2 cpe==1.3.1 decorator==5.1.1 defusedxml==0.7.1 distlib==0.3.9 docutils==0.18.1 entrypoints==0.4 filelock==3.4.1 identify==2.4.4 idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 importlib-resources==5.2.3 iniconfig==1.1.1 ipython-genutils==0.2.0 itsdangerous==2.0.1 Jinja2==3.0.3 jsonpointer==2.3 jsonschema==3.2.0 jupyter-client==7.1.2 jupyter-core==4.9.2 jupyterlab-pygments==0.1.2 MarkupSafe==2.0.1 mistune==0.8.4 nbclient==0.5.9 nbconvert==6.0.7 nbformat==5.1.3 nbsphinx==0.8.8 nest-asyncio==1.6.0 nodeenv==1.6.0 packaging==21.3 pandocfilters==1.5.1 platformdirs==2.4.0 pluggy==1.0.0 pre-commit==2.17.0 py==1.11.0 Pygments==2.14.0 pyparsing==3.1.4 pyrsistent==0.18.0 pytest==7.0.1 pytest-cov==4.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==6.0.1 pyzmq==25.1.2 requests==2.27.1 requests-cache==0.7.5 rfc3339-validator==0.1.4 rfc3986-validator==0.1.1 simplejson==3.20.1 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinx-prompt==1.5.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 -e git+https://github.com/oasis-open/cti-python-stix2.git@3c80e5e7ebb641b5feb55337f9470ec9d9d58572#egg=stix2 stix2-patterns==2.0.0 stix2-validator==3.0.2 taxii2-client==2.3.0 testpath==0.6.0 toml==0.10.2 tomli==1.2.3 tornado==6.1 tox==3.28.0 tox-travis==0.13 traitlets==4.3.3 typing_extensions==4.1.1 url-normalize==1.4.3 urllib3==1.26.20 virtualenv==20.16.2 webcolors==1.11.1 webencodings==0.5.1 zipp==3.6.0
name: cti-python-stix2 channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - antlr4-python3-runtime==4.9.3 - appdirs==1.4.4 - async-generator==1.10 - attrs==21.4.0 - babel==2.11.0 - bleach==4.1.0 - bump2version==1.0.1 - bumpversion==0.6.0 - cfgv==3.3.1 - charset-normalizer==2.0.12 - colorama==0.4.5 - coverage==6.2 - cpe==1.3.1 - decorator==5.1.1 - defusedxml==0.7.1 - distlib==0.3.9 - docutils==0.18.1 - entrypoints==0.4 - filelock==3.4.1 - identify==2.4.4 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - importlib-resources==5.2.3 - iniconfig==1.1.1 - ipython-genutils==0.2.0 - itsdangerous==2.0.1 - jinja2==3.0.3 - jsonpointer==2.3 - jsonschema==3.2.0 - jupyter-client==7.1.2 - jupyter-core==4.9.2 - jupyterlab-pygments==0.1.2 - markupsafe==2.0.1 - mistune==0.8.4 - nbclient==0.5.9 - nbconvert==6.0.7 - nbformat==5.1.3 - nbsphinx==0.8.8 - nest-asyncio==1.6.0 - nodeenv==1.6.0 - packaging==21.3 - pandocfilters==1.5.1 - platformdirs==2.4.0 - pluggy==1.0.0 - pre-commit==2.17.0 - py==1.11.0 - pygments==2.14.0 - pyparsing==3.1.4 - pyrsistent==0.18.0 - pytest==7.0.1 - pytest-cov==4.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==6.0.1 - pyzmq==25.1.2 - requests==2.27.1 - requests-cache==0.7.5 - rfc3339-validator==0.1.4 - rfc3986-validator==0.1.1 - simplejson==3.20.1 - six==1.17.0 - 
snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinx-prompt==1.5.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - stix2-patterns==2.0.0 - stix2-validator==3.0.2 - taxii2-client==2.3.0 - testpath==0.6.0 - toml==0.10.2 - tomli==1.2.3 - tornado==6.1 - tox==3.28.0 - tox-travis==0.13 - traitlets==4.3.3 - typing-extensions==4.1.1 - url-normalize==1.4.3 - urllib3==1.26.20 - virtualenv==20.16.2 - webcolors==1.11.1 - webencodings==0.5.1 - zipp==3.6.0 prefix: /opt/conda/envs/cti-python-stix2
[ "stix2/test/test_custom.py::test_custom_property_in_bundled_object" ]
[]
[ "stix2/test/test_custom.py::test_identity_custom_property", "stix2/test/test_custom.py::test_identity_custom_property_invalid", "stix2/test/test_custom.py::test_identity_custom_property_allowed", "stix2/test/test_custom.py::test_parse_identity_custom_property[{\\n", "stix2/test/test_custom.py::test_custom_object_type", "stix2/test/test_custom.py::test_custom_object_no_init", "stix2/test/test_custom.py::test_parse_custom_object_type", "stix2/test/test_custom.py::test_parse_unregistered_custom_object_type", "stix2/test/test_custom.py::test_custom_observable_object", "stix2/test/test_custom.py::test_custom_observable_object_no_init", "stix2/test/test_custom.py::test_custom_observable_object_invalid_ref_property", "stix2/test/test_custom.py::test_custom_observable_object_invalid_refs_property", "stix2/test/test_custom.py::test_custom_observable_object_invalid_refs_list_property", "stix2/test/test_custom.py::test_custom_observable_object_invalid_valid_refs", "stix2/test/test_custom.py::test_custom_no_properties_raises_exception", "stix2/test/test_custom.py::test_custom_wrong_properties_arg_raises_exception", "stix2/test/test_custom.py::test_parse_custom_observable_object", "stix2/test/test_custom.py::test_parse_unregistered_custom_observable_object", "stix2/test/test_custom.py::test_parse_invalid_custom_observable_object", "stix2/test/test_custom.py::test_observable_custom_property", "stix2/test/test_custom.py::test_observable_custom_property_invalid", "stix2/test/test_custom.py::test_observable_custom_property_allowed", "stix2/test/test_custom.py::test_observed_data_with_custom_observable_object", "stix2/test/test_custom.py::test_custom_extension", "stix2/test/test_custom.py::test_custom_extension_wrong_observable_type", "stix2/test/test_custom.py::test_custom_extension_invalid_observable", "stix2/test/test_custom.py::test_custom_extension_no_properties", "stix2/test/test_custom.py::test_custom_extension_empty_properties", 
"stix2/test/test_custom.py::test_custom_extension_no_init", "stix2/test/test_custom.py::test_parse_observable_with_custom_extension", "stix2/test/test_custom.py::test_parse_observable_with_unregistered_custom_extension" ]
[]
BSD 3-Clause "New" or "Revised" License
1,745
[ "stix2/core.py", "stix2/properties.py" ]
[ "stix2/core.py", "stix2/properties.py" ]
peter-wangxu__persist-queue-25
10b8fa0e8bf5da6d44dbeb85f94a2b0779685d41
2017-10-10 02:39:16
7a2c4d3768dfd6528cc8c1599ef773ebf310697b
diff --git a/persistqueue/__init__.py b/persistqueue/__init__.py index c066e10..ef321ef 100644 --- a/persistqueue/__init__.py +++ b/persistqueue/__init__.py @@ -1,7 +1,7 @@ # coding=utf-8 __author__ = 'Peter Wang' __license__ = 'BSD License' -__version__ = '0.3.0' +__version__ = '0.3.1' from .exceptions import Empty, Full # noqa from .pdict import PDict # noqa diff --git a/persistqueue/sqlbase.py b/persistqueue/sqlbase.py index 22bdfe6..48955f7 100644 --- a/persistqueue/sqlbase.py +++ b/persistqueue/sqlbase.py @@ -27,15 +27,17 @@ def commit_ignore_error(conn): """Ignore the error of no transaction is active. The transaction may be already committed by user's task_done call. - It's safe to to ignore all errors of this kind. + It's safe to ignore all errors of this kind. """ try: conn.commit() except sqlite3.OperationalError as ex: if 'no transaction is active' in str(ex): - log.warning( + log.debug( 'Not able to commit the transaction, ' 'may already be committed.') + else: + raise class SQLiteBase(object): @@ -50,17 +52,23 @@ class SQLiteBase(object): _MEMORY = ':memory:' # flag indicating store DB in memory def __init__(self, path, name='default', multithreading=False, - timeout=10.0, auto_commit=False): + timeout=10.0, auto_commit=True): """Initiate a queue in sqlite3 or memory. :param path: path for storing DB file. + :param name: the suffix for the table name, + table name would be ${_TABLE_NAME}_${name} :param multithreading: if set to True, two db connections will be, one for **put** and one for **get**. :param timeout: timeout in second waiting for the database lock. :param auto_commit: Set to True, if commit is required on every - INSERT/UPDATE action. + INSERT/UPDATE action, otherwise False, whereas + a **task_done** is required to persist changes + after **put**. 
+ """ + self.memory_sql = False self.path = path self.name = name self.timeout = timeout @@ -71,19 +79,27 @@ class SQLiteBase(object): def _init(self): """Initialize the tables in DB.""" - if not os.path.exists(self.path): + if self.path == self._MEMORY: + self.memory_sql = True + log.debug("Initializing Sqlite3 Queue in memory.") + elif not os.path.exists(self.path): os.makedirs(self.path) - log.debug('Initializing Sqlite3 Queue with path {}'.format(self.path)) + log.debug( + 'Initializing Sqlite3 Queue with path {}'.format(self.path)) self._conn = self._new_db_connection( self.path, self.multithreading, self.timeout) self._getter = self._conn self._putter = self._conn - if self.multithreading: - self._putter = self._new_db_connection( - self.path, self.multithreading, self.timeout) + self._conn.execute(self._sql_create) self._conn.commit() + # Setup another session only for disk-based queue. + if self.multithreading: + if not self.memory_sql: + self._putter = self._new_db_connection( + self.path, self.multithreading, self.timeout) + # SQLite3 transaction lock self.tran_lock = threading.Lock() self.put_event = threading.Event()
FIFOSQLiteQueue: the get() method returns None instead of blocking and if I specify get(block=True) it raises the empty exception
peter-wangxu/persist-queue
diff --git a/tests/test_sqlqueue.py b/tests/test_sqlqueue.py index 61a31db..1e63431 100644 --- a/tests/test_sqlqueue.py +++ b/tests/test_sqlqueue.py @@ -150,23 +150,32 @@ class SQLite3QueueTest(unittest.TestCase): queue.put('var%d' % x) task_done_if_required(queue) - def consumer(): - for _ in range(100): + counter = [] + # Set all to 0 + for _ in range(1000): + counter.append(0) + + def consumer(index): + for i in range(200): data = queue.get(block=True) self.assertTrue('var' in data) + counter[index * 200 + i] = data p = Thread(target=producer) p.start() consumers = [] - for _ in range(10): - t = Thread(target=consumer) + for index in range(5): + t = Thread(target=consumer, args=(index,)) t.start() consumers.append(t) + p.join() for t in consumers: t.join() self.assertEqual(0, queue.qsize()) + for x in range(1000): + self.assertNotEqual(0, counter[x], "0 for counter's index %s" % x) class SQLite3QueueAutoCommitTest(SQLite3QueueTest): @@ -175,6 +184,24 @@ class SQLite3QueueAutoCommitTest(SQLite3QueueTest): self.auto_commit = True +class SQLite3QueueInMemory(SQLite3QueueTest): + def setUp(self): + self.path = ":memory:" + self.auto_commit = False + + def test_open_close_1000(self): + self.skipTest('Memory based sqlite is not persistent.') + + def test_open_close_single(self): + self.skipTest('Memory based sqlite is not persistent.') + + def test_multiple_consumers(self): + # TODO(peter) when the shared-cache feature is available in default + # Python of most Linux distros, this should be easy:). + self.skipTest('In-memory based sqlite needs the support ' + 'of shared-cache') + + class FILOSQLite3QueueTest(unittest.TestCase): def setUp(self): self.path = tempfile.mkdtemp(suffix='filo_sqlqueue')
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 2 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt", "test-requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
cov-core==1.15.0 coverage==7.8.0 distlib==0.3.9 dnspython==2.7.0 eventlet==0.39.1 exceptiongroup==1.2.2 filelock==3.18.0 flake8==7.2.0 greenlet==3.1.1 iniconfig==2.1.0 mccabe==0.7.0 mock==5.2.0 nose2==0.15.1 packaging==24.2 -e git+https://github.com/peter-wangxu/persist-queue.git@10b8fa0e8bf5da6d44dbeb85f94a2b0779685d41#egg=persist_queue platformdirs==4.3.7 pluggy==1.5.0 pycodestyle==2.13.0 pyflakes==3.3.1 pytest==8.3.5 tomli==2.2.1 virtualenv==20.29.3
name: persist-queue channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - cov-core==1.15.0 - coverage==7.8.0 - distlib==0.3.9 - dnspython==2.7.0 - eventlet==0.39.1 - exceptiongroup==1.2.2 - filelock==3.18.0 - flake8==7.2.0 - greenlet==3.1.1 - iniconfig==2.1.0 - mccabe==0.7.0 - mock==5.2.0 - nose2==0.15.1 - packaging==24.2 - platformdirs==4.3.7 - pluggy==1.5.0 - pycodestyle==2.13.0 - pyflakes==3.3.1 - pytest==8.3.5 - tomli==2.2.1 - virtualenv==20.29.3 prefix: /opt/conda/envs/persist-queue
[ "tests/test_sqlqueue.py::SQLite3QueueInMemory::test_multi_threaded_parallel" ]
[]
[ "tests/test_sqlqueue.py::SQLite3QueueTest::test_multi_threaded_multi_producer", "tests/test_sqlqueue.py::SQLite3QueueTest::test_multi_threaded_parallel", "tests/test_sqlqueue.py::SQLite3QueueTest::test_open_close_1000", "tests/test_sqlqueue.py::SQLite3QueueTest::test_open_close_single", "tests/test_sqlqueue.py::SQLite3QueueTest::test_raise_empty", "tests/test_sqlqueue.py::SQLite3QueueTest::test_random_read_write", "tests/test_sqlqueue.py::SQLite3QueueAutoCommitTest::test_multi_threaded_multi_producer", "tests/test_sqlqueue.py::SQLite3QueueAutoCommitTest::test_multi_threaded_parallel", "tests/test_sqlqueue.py::SQLite3QueueAutoCommitTest::test_multiple_consumers", "tests/test_sqlqueue.py::SQLite3QueueAutoCommitTest::test_open_close_1000", "tests/test_sqlqueue.py::SQLite3QueueAutoCommitTest::test_open_close_single", "tests/test_sqlqueue.py::SQLite3QueueAutoCommitTest::test_raise_empty", "tests/test_sqlqueue.py::SQLite3QueueAutoCommitTest::test_random_read_write", "tests/test_sqlqueue.py::SQLite3QueueInMemory::test_multi_threaded_multi_producer", "tests/test_sqlqueue.py::SQLite3QueueInMemory::test_raise_empty", "tests/test_sqlqueue.py::SQLite3QueueInMemory::test_random_read_write", "tests/test_sqlqueue.py::FILOSQLite3QueueTest::test_open_close_1000", "tests/test_sqlqueue.py::FILOSQLite3QueueAutoCommitTest::test_open_close_1000" ]
[ "tests/test_sqlqueue.py::SQLite3QueueTest::test_multiple_consumers" ]
BSD 3-Clause "New" or "Revised" License
1,746
[ "persistqueue/sqlbase.py", "persistqueue/__init__.py" ]
[ "persistqueue/sqlbase.py", "persistqueue/__init__.py" ]
OpenMined__PySyft-314
543a66153c17d6e2885d6b7d571749fe02629ba5
2017-10-10 10:13:12
06ce023225dd613d8fb14ab2046135b93ab22376
codecov[bot]: # [Codecov](https://codecov.io/gh/OpenMined/PySyft/pull/314?src=pr&el=h1) Report > Merging [#314](https://codecov.io/gh/OpenMined/PySyft/pull/314?src=pr&el=desc) into [master](https://codecov.io/gh/OpenMined/PySyft/commit/543a66153c17d6e2885d6b7d571749fe02629ba5?src=pr&el=desc) will **increase** coverage by `0.04%`. > The diff coverage is `100%`. [![Impacted file tree graph](https://codecov.io/gh/OpenMined/PySyft/pull/314/graphs/tree.svg?token=W0kQS1vaXB&width=650&height=150&src=pr)](https://codecov.io/gh/OpenMined/PySyft/pull/314?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #314 +/- ## ========================================== + Coverage 82.26% 82.31% +0.04% ========================================== Files 24 24 Lines 3243 3252 +9 ========================================== + Hits 2668 2677 +9 Misses 575 575 ``` | [Impacted Files](https://codecov.io/gh/OpenMined/PySyft/pull/314?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [tests/test\_tensor.py](https://codecov.io/gh/OpenMined/PySyft/pull/314?src=pr&el=tree#diff-dGVzdHMvdGVzdF90ZW5zb3IucHk=) | `94.21% <100%> (+0.04%)` | :arrow_up: | | [syft/tensor.py](https://codecov.io/gh/OpenMined/PySyft/pull/314?src=pr&el=tree#diff-c3lmdC90ZW5zb3IucHk=) | `77.82% <100%> (+0.02%)` | :arrow_up: | ------ [Continue to review full report at Codecov](https://codecov.io/gh/OpenMined/PySyft/pull/314?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/OpenMined/PySyft/pull/314?src=pr&el=footer). Last update [543a661...65e98a9](https://codecov.io/gh/OpenMined/PySyft/pull/314?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
diff --git a/syft/tensor.py b/syft/tensor.py index 081679d946..d374bb5ae0 100644 --- a/syft/tensor.py +++ b/syft/tensor.py @@ -449,7 +449,7 @@ class TensorBase(object): return NotImplemented else: out = self.data[position] - if (len(self.shape()) == 1): + if (len(self.shape()) == 1) and (type(position) != slice): return out else: return TensorBase(self.data[position], self.encrypted)
1dim TensorBase slice returns ndarray instead of TensorBase Currently a slice of TensorBase returns an ndarray if `dim() == 1`. This is not the behavior expected in PyTorch Example pytorch session: ```python t1 = Tensor([1,2,3,4]) t1[1:] 2 3 4 [torch.FloatTensor of size 3] ``` Same with TensorBase ```python t2 = TensorBase([1,2,3,4]) t2[1:] array([2, 3, 4]) ``` see https://github.com/OpenMined/PySyft/commit/7fb592dcd4ede5b0d0cc6bdc50b98a0445ad77c0#diff-796d7f13fb1460eef9cffdccebd38faeR148 Is this a bug or expected ?
OpenMined/PySyft
diff --git a/tests/test_tensor.py b/tests/test_tensor.py index 5c46a87951..7ec07030c8 100644 --- a/tests/test_tensor.py +++ b/tests/test_tensor.py @@ -957,6 +957,17 @@ class IndexTests(unittest.TestCase): with pytest.raises(ValueError): t.index(3.5) + def test_index_slice_notation(self): + t1 = TensorBase(np.array([1, 2, 3, 4])) + expected1 = TensorBase(np.array([2, 3, 4])) + expected2 = type(t1[1:]) + expected3 = 1 + + # Do not use "t.index" form in following test + self.assertEqual(expected1, t1[1:]) + self.assertEqual(expected2, TensorBase) + self.assertEqual(expected3, t1[0]) + def test_index_add_(self): t1 = TensorBase(np.array([[0, 0, 0], [1, 1, 1], [1, 1, 1]])) t2 = TensorBase(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
{ "commit_name": "merge_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 1 }
PySyft/hydrogen
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-flake8", "line_profiler", "clint", "coverage" ], "pre_install": null, "python": "3.6", "reqs_path": [ "requirements.txt", "dev-requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
anyio==3.6.2 argon2-cffi==21.3.0 argon2-cffi-bindings==21.2.0 args==0.1.0 async-generator==1.10 attrs==22.2.0 Babel==2.11.0 backcall==0.2.0 bleach==4.1.0 certifi==2021.5.30 cffi==1.15.1 charset-normalizer==2.0.12 clint==0.5.1 comm==0.1.4 contextvars==2.4 coverage==6.2 dataclasses==0.8 decorator==5.1.1 defusedxml==0.7.1 entrypoints==0.4 flake8==5.0.4 idna==3.10 immutables==0.19 importlib-metadata==4.2.0 iniconfig==1.1.1 ipykernel==5.5.6 ipython==7.16.3 ipython-genutils==0.2.0 ipywidgets==7.8.5 jedi==0.17.2 Jinja2==3.0.3 joblib==1.1.1 json5==0.9.16 jsonschema==3.2.0 jupyter==1.1.1 jupyter-client==7.1.2 jupyter-console==6.4.3 jupyter-core==4.9.2 jupyter-server==1.13.1 jupyterlab==3.2.9 jupyterlab-pygments==0.1.2 jupyterlab-server==2.10.3 jupyterlab_widgets==1.1.11 line-profiler==4.1.3 MarkupSafe==2.0.1 mccabe==0.7.0 mistune==0.8.4 nbclassic==0.3.5 nbclient==0.5.9 nbconvert==6.0.7 nbformat==5.1.3 nest-asyncio==1.6.0 notebook==6.4.10 numpy==1.19.5 packaging==21.3 pandocfilters==1.5.1 parso==0.7.1 pexpect==4.9.0 phe==1.5.0 pickleshare==0.7.5 pluggy==1.0.0 prometheus-client==0.17.1 prompt-toolkit==3.0.36 ptyprocess==0.7.0 py==1.11.0 pycodestyle==2.9.1 pycparser==2.21 pyflakes==2.5.0 Pygments==2.14.0 pyparsing==3.1.4 pyRserve==1.0.4 pyrsistent==0.18.0 pytest==7.0.1 pytest-flake8==1.1.1 python-dateutil==2.9.0.post0 pytz==2025.2 pyzmq==25.1.2 requests==2.27.1 scikit-learn==0.24.2 scipy==1.5.4 Send2Trash==1.8.3 six==1.17.0 sklearn==0.0 sniffio==1.2.0 -e git+https://github.com/OpenMined/PySyft.git@543a66153c17d6e2885d6b7d571749fe02629ba5#egg=syft terminado==0.12.1 testpath==0.6.0 threadpoolctl==3.1.0 tomli==1.2.3 tornado==6.1 traitlets==4.3.3 typing_extensions==4.1.1 urllib3==1.26.20 wcwidth==0.2.13 webencodings==0.5.1 websocket-client==1.3.1 widgetsnbextension==3.6.10 zipp==3.6.0
name: PySyft channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - anyio==3.6.2 - argon2-cffi==21.3.0 - argon2-cffi-bindings==21.2.0 - args==0.1.0 - async-generator==1.10 - attrs==22.2.0 - babel==2.11.0 - backcall==0.2.0 - bleach==4.1.0 - cffi==1.15.1 - charset-normalizer==2.0.12 - clint==0.5.1 - comm==0.1.4 - contextvars==2.4 - coverage==6.2 - dataclasses==0.8 - decorator==5.1.1 - defusedxml==0.7.1 - entrypoints==0.4 - flake8==5.0.4 - idna==3.10 - immutables==0.19 - importlib-metadata==4.2.0 - iniconfig==1.1.1 - ipykernel==5.5.6 - ipython==7.16.3 - ipython-genutils==0.2.0 - ipywidgets==7.8.5 - jedi==0.17.2 - jinja2==3.0.3 - joblib==1.1.1 - json5==0.9.16 - jsonschema==3.2.0 - jupyter==1.1.1 - jupyter-client==7.1.2 - jupyter-console==6.4.3 - jupyter-core==4.9.2 - jupyter-server==1.13.1 - jupyterlab==3.2.9 - jupyterlab-pygments==0.1.2 - jupyterlab-server==2.10.3 - jupyterlab-widgets==1.1.11 - line-profiler==4.1.3 - markupsafe==2.0.1 - mccabe==0.7.0 - mistune==0.8.4 - nbclassic==0.3.5 - nbclient==0.5.9 - nbconvert==6.0.7 - nbformat==5.1.3 - nest-asyncio==1.6.0 - notebook==6.4.10 - numpy==1.19.5 - packaging==21.3 - pandocfilters==1.5.1 - parso==0.7.1 - pexpect==4.9.0 - phe==1.5.0 - pickleshare==0.7.5 - pluggy==1.0.0 - prometheus-client==0.17.1 - prompt-toolkit==3.0.36 - ptyprocess==0.7.0 - py==1.11.0 - pycodestyle==2.9.1 - 
pycparser==2.21 - pyflakes==2.5.0 - pygments==2.14.0 - pyparsing==3.1.4 - pyrserve==1.0.4 - pyrsistent==0.18.0 - pytest==7.0.1 - pytest-flake8==1.1.1 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyzmq==25.1.2 - requests==2.27.1 - scikit-learn==0.24.2 - scipy==1.5.4 - send2trash==1.8.3 - six==1.17.0 - sklearn==0.0 - sniffio==1.2.0 - terminado==0.12.1 - testpath==0.6.0 - threadpoolctl==3.1.0 - tomli==1.2.3 - tornado==6.1 - traitlets==4.3.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - wcwidth==0.2.13 - webencodings==0.5.1 - websocket-client==1.3.1 - widgetsnbextension==3.6.10 - zipp==3.6.0 prefix: /opt/conda/envs/PySyft
[ "tests/test_tensor.py::IndexTests::test_index_slice_notation" ]
[]
[ "tests/test_tensor.py::DimTests::test_as_view", "tests/test_tensor.py::DimTests::test_dim_one", "tests/test_tensor.py::DimTests::test_resize", "tests/test_tensor.py::DimTests::test_resize_as", "tests/test_tensor.py::DimTests::test_view", "tests/test_tensor.py::DiagTests::test_one_dim_tensor_below_diag", "tests/test_tensor.py::DiagTests::test_one_dim_tensor_main_diag", "tests/test_tensor.py::DiagTests::test_one_dim_tensor_upper_diag", "tests/test_tensor.py::DiagTests::test_two_dim_tensor_below_diag", "tests/test_tensor.py::DiagTests::test_two_dim_tensor_main_diag", "tests/test_tensor.py::DiagTests::test_two_dim_tensor_upper_diag", "tests/test_tensor.py::AddTests::test_inplace", "tests/test_tensor.py::AddTests::test_scalar", "tests/test_tensor.py::AddTests::test_simple", "tests/test_tensor.py::CeilTests::test_ceil", "tests/test_tensor.py::CeilTests::test_ceil_", "tests/test_tensor.py::ZeroTests::test_zero", "tests/test_tensor.py::FloorTests::test_floor_", "tests/test_tensor.py::SubTests::test_inplace", "tests/test_tensor.py::SubTests::test_scalar", "tests/test_tensor.py::SubTests::test_simple", "tests/test_tensor.py::MaxTests::test_axis", "tests/test_tensor.py::MaxTests::test_no_dim", "tests/test_tensor.py::MultTests::test_inplace", "tests/test_tensor.py::MultTests::test_scalar", "tests/test_tensor.py::MultTests::test_simple", "tests/test_tensor.py::DivTests::test_inplace", "tests/test_tensor.py::DivTests::test_scalar", "tests/test_tensor.py::DivTests::test_simple", "tests/test_tensor.py::AbsTests::test_abs", "tests/test_tensor.py::AbsTests::test_abs_", "tests/test_tensor.py::ShapeTests::test_shape", "tests/test_tensor.py::SqrtTests::test_sqrt", "tests/test_tensor.py::SqrtTests::test_sqrt_", "tests/test_tensor.py::SumTests::test_dim_is_not_none_int", "tests/test_tensor.py::SumTests::test_dim_none_int", "tests/test_tensor.py::EqualTests::test_equal", "tests/test_tensor.py::EqualTests::test_equal_operation", 
"tests/test_tensor.py::EqualTests::test_inequality_operation", "tests/test_tensor.py::EqualTests::test_not_equal", "tests/test_tensor.py::sigmoidTests::test_sigmoid", "tests/test_tensor.py::addmm::test_addmm_1d", "tests/test_tensor.py::addmm::test_addmm_2d", "tests/test_tensor.py::addmm::test_addmm__1d", "tests/test_tensor.py::addmm::test_addmm__2d", "tests/test_tensor.py::addcmulTests::test_addcmul_1d", "tests/test_tensor.py::addcmulTests::test_addcmul_2d", "tests/test_tensor.py::addcmulTests::test_addcmul__1d", "tests/test_tensor.py::addcmulTests::test_addcmul__2d", "tests/test_tensor.py::addcdivTests::test_addcdiv_1d", "tests/test_tensor.py::addcdivTests::test_addcdiv_2d", "tests/test_tensor.py::addcdivTests::test_addcdiv__1d", "tests/test_tensor.py::addcdivTests::test_addcdiv__2d", "tests/test_tensor.py::addmvTests::test_addmv", "tests/test_tensor.py::addmvTests::test_addmv_", "tests/test_tensor.py::bmmTests::test_bmm", "tests/test_tensor.py::bmmTests::test_bmm_size", "tests/test_tensor.py::addbmmTests::test_addbmm", "tests/test_tensor.py::addbmmTests::test_addbmm_", "tests/test_tensor.py::baddbmmTests::test_baddbmm", "tests/test_tensor.py::baddbmmTests::test_baddbmm_", "tests/test_tensor.py::transposeTests::test_t", "tests/test_tensor.py::transposeTests::test_transpose", "tests/test_tensor.py::transposeTests::test_transpose_", "tests/test_tensor.py::unsqueezeTests::test_unsqueeze", "tests/test_tensor.py::unsqueezeTests::test_unsqueeze_", "tests/test_tensor.py::expTests::test_exp", "tests/test_tensor.py::expTests::test_exp_", "tests/test_tensor.py::fracTests::test_frac", "tests/test_tensor.py::fracTests::test_frac_", "tests/test_tensor.py::rsqrtTests::test_rsqrt", "tests/test_tensor.py::rsqrtTests::test_rsqrt_", "tests/test_tensor.py::signTests::test_sign", "tests/test_tensor.py::signTests::test_sign_", "tests/test_tensor.py::numpyTests::test_numpy", "tests/test_tensor.py::reciprocalTests::test_reciprocal", 
"tests/test_tensor.py::reciprocalTests::test_reciprocal_", "tests/test_tensor.py::logTests::test_log", "tests/test_tensor.py::logTests::test_log_", "tests/test_tensor.py::logTests::test_log_1p", "tests/test_tensor.py::logTests::test_log_1p_", "tests/test_tensor.py::clampTests::test_clamp_float", "tests/test_tensor.py::clampTests::test_clamp_float_in_place", "tests/test_tensor.py::clampTests::test_clamp_int", "tests/test_tensor.py::clampTests::test_clamp_int_in_place", "tests/test_tensor.py::cloneTests::test_clone", "tests/test_tensor.py::chunkTests::test_chunk", "tests/test_tensor.py::chunkTests::test_chunk_same_size", "tests/test_tensor.py::gtTests::test_gt__in_place_with_number", "tests/test_tensor.py::gtTests::test_gt__in_place_with_tensor", "tests/test_tensor.py::gtTests::test_gt_with_encrypted", "tests/test_tensor.py::gtTests::test_gt_with_number", "tests/test_tensor.py::gtTests::test_gt_with_tensor", "tests/test_tensor.py::geTests::test_ge__in_place_with_number", "tests/test_tensor.py::geTests::test_ge__in_place_with_tensor", "tests/test_tensor.py::geTests::test_ge_with_encrypted", "tests/test_tensor.py::geTests::test_ge_with_number", "tests/test_tensor.py::geTests::test_ge_with_tensor", "tests/test_tensor.py::ltTests::test_lt__in_place_with_number", "tests/test_tensor.py::ltTests::test_lt__in_place_with_tensor", "tests/test_tensor.py::ltTests::test_lt_with_encrypted", "tests/test_tensor.py::ltTests::test_lt_with_number", "tests/test_tensor.py::ltTests::test_lt_with_tensor", "tests/test_tensor.py::leTests::test_le__in_place_with_number", "tests/test_tensor.py::leTests::test_le__in_place_with_tensor", "tests/test_tensor.py::leTests::test_le_with_encrypted", "tests/test_tensor.py::leTests::test_le_with_number", "tests/test_tensor.py::leTests::test_le_with_tensor", "tests/test_tensor.py::bernoulliTests::test_bernoulli", "tests/test_tensor.py::bernoulliTests::test_bernoulli_", "tests/test_tensor.py::cauchyTests::test_cauchy_", 
"tests/test_tensor.py::uniformTests::test_uniform", "tests/test_tensor.py::uniformTests::test_uniform_", "tests/test_tensor.py::geometricTests::test_geometric_", "tests/test_tensor.py::normalTests::test_normal", "tests/test_tensor.py::normalTests::test_normal_", "tests/test_tensor.py::fillTests::test_fill_", "tests/test_tensor.py::topkTests::test_topK", "tests/test_tensor.py::tolistTests::test_to_list", "tests/test_tensor.py::traceTests::test_trace", "tests/test_tensor.py::roundTests::test_round", "tests/test_tensor.py::roundTests::test_round_", "tests/test_tensor.py::repeatTests::test_repeat", "tests/test_tensor.py::powTests::test_pow", "tests/test_tensor.py::powTests::test_pow_", "tests/test_tensor.py::negTests::test_neg", "tests/test_tensor.py::negTests::test_neg_", "tests/test_tensor.py::tanhTests::test_tanh_", "tests/test_tensor.py::prodTests::test_prod", "tests/test_tensor.py::randomTests::test_random_", "tests/test_tensor.py::nonzeroTests::test_non_zero", "tests/test_tensor.py::cumprodTest::test_cumprod", "tests/test_tensor.py::cumprodTest::test_cumprod_", "tests/test_tensor.py::splitTests::test_split", "tests/test_tensor.py::squeezeTests::test_squeeze", "tests/test_tensor.py::expandAsTests::test_expand_as", "tests/test_tensor.py::meanTests::test_mean", "tests/test_tensor.py::notEqualTests::test_ne", "tests/test_tensor.py::notEqualTests::test_ne_", "tests/test_tensor.py::IndexTests::test_index", "tests/test_tensor.py::IndexTests::test_index_add_", "tests/test_tensor.py::IndexTests::test_index_copy_", "tests/test_tensor.py::IndexTests::test_index_fill_", "tests/test_tensor.py::IndexTests::test_index_select", "tests/test_tensor.py::IndexTests::test_indexing", "tests/test_tensor.py::gatherTests::test_gather_numerical_1", "tests/test_tensor.py::gatherTests::test_gather_numerical_2", "tests/test_tensor.py::scatterTests::test_scatter_dim_out_Of_range", "tests/test_tensor.py::scatterTests::test_scatter_index_out_of_range", 
"tests/test_tensor.py::scatterTests::test_scatter_index_src_dimension_mismatch", "tests/test_tensor.py::scatterTests::test_scatter_index_type", "tests/test_tensor.py::scatterTests::test_scatter_numerical_0", "tests/test_tensor.py::scatterTests::test_scatter_numerical_1", "tests/test_tensor.py::scatterTests::test_scatter_numerical_2", "tests/test_tensor.py::scatterTests::test_scatter_numerical_3", "tests/test_tensor.py::scatterTests::test_scatter_numerical_4", "tests/test_tensor.py::scatterTests::test_scatter_numerical_5", "tests/test_tensor.py::scatterTests::test_scatter_numerical_6", "tests/test_tensor.py::remainderTests::test_remainder_", "tests/test_tensor.py::remainderTests::test_remainder_broadcasting", "tests/test_tensor.py::testMv::test_mv", "tests/test_tensor.py::testMv::test_mv_tensor", "tests/test_tensor.py::masked_scatter_Tests::test_masked_scatter_1", "tests/test_tensor.py::masked_scatter_Tests::test_masked_scatter_braodcasting_1", "tests/test_tensor.py::masked_scatter_Tests::test_masked_scatter_braodcasting_2", "tests/test_tensor.py::masked_fill_Tests::test_masked_fill_", "tests/test_tensor.py::masked_fill_Tests::test_masked_fill_broadcasting", "tests/test_tensor.py::masked_select_Tests::test_masked_select", "tests/test_tensor.py::masked_select_Tests::test_masked_select_broadcasting_1", "tests/test_tensor.py::masked_select_Tests::test_masked_select_broadcasting_2", "tests/test_tensor.py::masked_select_Tests::test_tensor_base_masked_select", "tests/test_tensor.py::eqTests::test_eq_in_place_with_number", "tests/test_tensor.py::eqTests::test_eq_in_place_with_tensor", "tests/test_tensor.py::eqTests::test_eq_with_number", "tests/test_tensor.py::eqTests::test_eq_with_tensor", "tests/test_tensor.py::mm_test::test_mm_1d", "tests/test_tensor.py::mm_test::test_mm_2d", "tests/test_tensor.py::mm_test::test_mm_3d", "tests/test_tensor.py::newTensorTests::test_encrypted_error", "tests/test_tensor.py::newTensorTests::test_return_new_float_tensor", 
"tests/test_tensor.py::newTensorTests::test_return_new_int_tensor", "tests/test_tensor.py::fmodTest::test_fmod_number", "tests/test_tensor.py::fmodTest::test_fmod_tensor", "tests/test_tensor.py::fmod_Test::test_fmod_number", "tests/test_tensor.py::fmod_Test::test_fmod_tensor", "tests/test_tensor.py::lerpTests::test_lerp", "tests/test_tensor.py::lerpTests::test_lerp_", "tests/test_tensor.py::RenormTests::testRenorm", "tests/test_tensor.py::RenormTests::testRenorm_" ]
[]
Apache License 2.0
1,747
[ "syft/tensor.py" ]
[ "syft/tensor.py" ]
zhmcclient__python-zhmcclient-456
f67d075e5f00522a32d60f5f5966956d3c66a5b2
2017-10-10 15:29:15
f67d075e5f00522a32d60f5f5966956d3c66a5b2
coveralls: [![Coverage Status](https://coveralls.io/builds/13651438/badge)](https://coveralls.io/builds/13651438) Coverage decreased (-18.4%) to 69.271% when pulling **b4050fc28c6175a6b93873b1262cbc3c95b9ddeb on andy/migrate-to-pytest-part2** into **9a19f326a9a6740884247a397327ca26e4c35811 on master**. coveralls: [![Coverage Status](https://coveralls.io/builds/13657534/badge)](https://coveralls.io/builds/13657534) Coverage decreased (-18.4%) to 69.271% when pulling **0d37be29cd898ab6360ba8d5e50493fca7d9cc35 on andy/migrate-to-pytest-part2** into **9a19f326a9a6740884247a397327ca26e4c35811 on master**. coveralls: [![Coverage Status](https://coveralls.io/builds/13666745/badge)](https://coveralls.io/builds/13666745) Coverage increased (+0.1%) to 87.733% when pulling **b14a51f1c795c20aa9db1ffb77aab7469311f3fb on andy/migrate-to-pytest-part2** into **9a19f326a9a6740884247a397327ca26e4c35811 on master**. coveralls: [![Coverage Status](https://coveralls.io/builds/13666797/badge)](https://coveralls.io/builds/13666797) Coverage increased (+0.1%) to 87.733% when pulling **b14a51f1c795c20aa9db1ffb77aab7469311f3fb on andy/migrate-to-pytest-part2** into **9a19f326a9a6740884247a397327ca26e4c35811 on master**. coveralls: [![Coverage Status](https://coveralls.io/builds/13666987/badge)](https://coveralls.io/builds/13666987) Coverage increased (+0.1%) to 87.733% when pulling **30c1282b4a894a75004a80fc422044d6ad014ba0 on andy/migrate-to-pytest-part2** into **9a19f326a9a6740884247a397327ca26e4c35811 on master**. coveralls: [![Coverage Status](https://coveralls.io/builds/13701235/badge)](https://coveralls.io/builds/13701235) Coverage increased (+0.1%) to 87.733% when pulling **41d26b5fcdf249386dd462580603f84447964e9d on andy/migrate-to-pytest-part2** into **5bb72418b2d42eae43b9c605bbad6034cf5aa8bf on master**.
diff --git a/.travis.yml b/.travis.yml index db14ec8..6ee16b1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -30,26 +30,41 @@ matrix: python: "3.4" env: - PACKAGE_LEVEL=minimum - - os: linux - language: python - python: "3.5" - env: - - PACKAGE_LEVEL=minimum - before_install: - - if [[ "$TRAVIS_EVENT_TYPE" != "cron" ]]; then exit 0; fi +# - os: linux +# language: python +# python: "3.4" +# env: +# - PACKAGE_LEVEL=latest +# - os: linux +# language: python +# python: "3.5" +# env: +# - PACKAGE_LEVEL=minimum +# - os: linux +# language: python +# python: "3.5" +# env: +# - PACKAGE_LEVEL=latest +# - os: linux +# language: python +# python: "3.6" +# env: +# - PACKAGE_LEVEL=minimum - os: linux language: python python: "3.6" env: - PACKAGE_LEVEL=latest - - os: linux - language: python - python: "pypy-5.3.1" # Python 2.7.10 - env: - - PACKAGE_LEVEL=minimum - before_install: - - if [[ "$TRAVIS_EVENT_TYPE" != "cron" ]]; then exit 0; fi -# TODO: Re-enable osx support once Travis has better OS-X capacity +# - os: linux +# language: python +# python: "pypy-5.3.1" # Python 2.7.10 +# env: +# - PACKAGE_LEVEL=minimum +# - os: linux +# language: python +# python: "pypy-5.3.1" # Python 2.7.10 +# env: +# - PACKAGE_LEVEL=latest # - os: osx # language: generic # python: @@ -62,16 +77,12 @@ matrix: # env: # - PACKAGE_LEVEL=latest # - PYTHON=2 -# before_install: -# - if [[ "$TRAVIS_EVENT_TYPE" != "cron" ]]; then exit 0; fi # - os: osx # language: generic # python: # env: # - PACKAGE_LEVEL=minimum # - PYTHON=3 -# before_install: -# - if [[ "$TRAVIS_EVENT_TYPE" != "cron" ]]; then exit 0; fi # - os: osx # language: generic # python: @@ -79,8 +90,20 @@ matrix: # - PACKAGE_LEVEL=latest # - PYTHON=3 +before_install: + - if [[ "$TRAVIS_BRANCH" == "manual-ci-run" ]]; then + export _NEED_REBASE=true; + fi + - if [[ -n $_NEED_REBASE ]]; then git fetch origin master; fi + - if [[ -n $_NEED_REBASE ]]; then git branch master FETCH_HEAD; fi + - if [[ -n $_NEED_REBASE ]]; then git rebase master; fi + - 
git branch -av + # commands to install dependencies install: + - if [[ "$TRAVIS_BRANCH" == "manual-ci-run" || "$TRAVIS_PULL_REQUEST_BRANCH" == "manual-ci-run" ]]; then + export _MANUAL_CI_RUN=true; + fi - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then if [[ "${PYTHON:0:1}" == "2" ]]; then export PYTHON_CMD=python; @@ -124,11 +147,10 @@ install: # commands to run builds & tests script: - make check + - if [[ -n $_MANUAL_CI_RUN ]]; then make pylint; fi - make test - - if [[ "$TRAVIS_EVENT_TYPE" == "cron" ]]; then - make build; - make builddoc; - fi + - if [[ -n $_MANUAL_CI_RUN ]]; then make build; fi + - if [[ -n $_MANUAL_CI_RUN ]]; then make builddoc; fi after_success: - if [[ "$TRAVIS_OS_NAME" == "linux" && "$TRAVIS_PYTHON_VERSION" == "2.7" && "$PACKAGE_LEVEL" == "latest" ]]; then diff --git a/Makefile b/Makefile index a957987..8793b55 100644 --- a/Makefile +++ b/Makefile @@ -79,6 +79,13 @@ win64_dist_file := $(dist_dir)/$(package_name)-$(package_version).win-amd64.exe # dist_files := $(bdist_file) $(sdist_file) $(win64_dist_file) dist_files := $(bdist_file) $(sdist_file) +# Source files in the packages +package_py_files := \ + $(wildcard $(package_name)/*.py) \ + $(wildcard $(package_name)/*/*.py) \ + $(wildcard zhmcclient_mock/*.py) \ + $(wildcard zhmcclient_mock/*/*.py) \ + # Directory for generated API documentation doc_build_dir := build_doc @@ -94,8 +101,19 @@ doc_dependent_files := \ $(doc_conf_dir)/conf.py \ $(wildcard $(doc_conf_dir)/*.rst) \ $(wildcard $(doc_conf_dir)/notebooks/*.ipynb) \ - $(wildcard $(package_name)/*.py) \ - $(wildcard zhmcclient_mock/*.py) \ + $(package_py_files) \ + +# Directory with test source files +test_dir := tests + +# Test log +test_log_file := test_$(python_version_fn).log + +# Source files with test code +test_py_files := \ + $(wildcard $(test_dir)/*.py) \ + $(wildcard $(test_dir)/*/*.py) \ + $(wildcard $(test_dir)/*/*/*.py) \ # Flake8 config file flake8_rc_file := setup.cfg @@ -106,32 +124,32 @@ pylint_rc_file := .pylintrc # 
Source files for check (with PyLint and Flake8) check_py_files := \ setup.py \ - $(wildcard $(package_name)/*.py) \ - $(wildcard zhmcclient_mock/*.py) \ - $(wildcard tests/unit/*.py) \ - $(wildcard tests/unit/zhmcclient_mock/*.py) \ - $(wildcard tests/function/*.py) \ + $(package_py_files) \ + $(test_py_files) \ $(wildcard docs/notebooks/*.py) \ $(wildcard tools/cpcinfo) \ $(wildcard tools/cpcdata) \ -# Test log -test_log_file := test_$(python_version_fn).log - ifdef TESTCASES pytest_opts := -k $(TESTCASES) else pytest_opts := endif +# Files to be built +ifeq ($(PLATFORM),Windows) +build_files := $(win64_dist_file) +else +build_files := $(bdist_file) $(sdist_file) +endif + # Files the distribution archive depends upon. dist_dependent_files := \ setup.py setup.cfg \ README.rst \ requirements.txt \ $(wildcard *.py) \ - $(wildcard $(package_name)/*.py) \ - $(wildcard zhmcclient_mock/*.py) \ + $(package_py_files) \ # No built-in rules needed: .SUFFIXES: @@ -143,15 +161,17 @@ help: @echo 'Uses the currently active Python environment: Python $(python_version_fn)' @echo 'Valid targets are (they do just what is stated, i.e. no automatic prereq targets):' @echo ' develop - Prepare the development environment by installing prerequisites' - @echo ' build - Build the distribution files in: $(dist_dir) (requires Linux or OSX)' - @echo ' buildwin - Build the Windows installable in: $(dist_dir) (requires Windows 64-bit)' - @echo ' builddoc - Build documentation in: $(doc_build_dir)' + @echo ' install - Install package in active Python environment' @echo ' check - Run Flake8 on sources and save results in: flake8.log' @echo ' pylint - Run PyLint on sources and save results in: pylint.log' - @echo ' test - Run unit tests (and test coverage) and save results in: $(test_log_file)' + @echo ' test - Run tests (and test coverage) and save results in: $(test_log_file)' + @echo ' Does not include install but depends on it, so make sure install is current.' 
@echo ' Env.var TESTCASES can be used to specify a py.test expression for its -k option' - @echo ' all - Do all of the above (except buildwin when not on Windows)' - @echo ' install - Install package in active Python environment and test import (includes build)' + @echo ' build - Build the distribution files in: $(dist_dir)' + @echo ' On Windows, builds: $(win64_dist_file)' + @echo ' On Linux + OSX, builds: $(bdist_file) $(sdist_file)' + @echo ' builddoc - Build documentation in: $(doc_build_dir)' + @echo ' all - Do all of the above' @echo ' uninstall - Uninstall package from active Python environment' @echo ' upload - Upload the distribution files to PyPI (includes uninstall+build)' @echo ' clean - Remove any temporary files' @@ -170,13 +190,13 @@ _pip: $(PIP_CMD) install $(pip_level_opts) pip setuptools wheel pbr .PHONY: develop -develop: _pip +develop: _pip dev-requirements.txt requirements.txt @echo 'Installing runtime and development requirements with PACKAGE_LEVEL=$(PACKAGE_LEVEL)' $(PIP_CMD) install $(pip_level_opts) -r dev-requirements.txt @echo '$@ done.' .PHONY: build -build: $(bdist_file) $(sdist_file) +build: $(build_files) @echo '$@ done.' .PHONY: buildwin @@ -249,9 +269,9 @@ pylint: pylint.log @echo '$@ done.' .PHONY: install -install: _pip +install: _pip requirements.txt setup.py setup.cfg $(package_py_files) @echo 'Installing runtime requirements with PACKAGE_LEVEL=$(PACKAGE_LEVEL)' - $(PIP_CMD) install $(pip_level_opts) . + $(PIP_CMD) install $(pip_level_opts) -r requirements.txt . $(PYTHON_CMD) -c "import zhmcclient; print('Import: ok')" @echo 'Done: Installed $(package_name) into current Python environment.' @echo '$@ done.' 
@@ -268,12 +288,10 @@ test: $(test_log_file) .PHONY: clobber clobber: uninstall clean rm -Rf $(doc_build_dir) htmlcov .tox - rm -fv pylint.log flake8.log test_*.log - rm -fv $(bdist_file) $(sdist_file) $(win64_dist_file) + rm -f pylint.log flake8.log test_*.log $(bdist_file) $(sdist_file) $(win64_dist_file) @echo 'Done: Removed all build products to get to a fresh state.' @echo '$@ done.' -# Also remove any build products that are dependent on the Python version .PHONY: clean clean: rm -Rf build .cache $(package_name).egg-info .eggs @@ -283,7 +301,7 @@ clean: @echo '$@ done.' .PHONY: all -all: develop check build builddoc test +all: develop install check pylint test build builddoc @echo '$@ done.' .PHONY: upload @@ -303,7 +321,7 @@ endif # Distribution archives. $(bdist_file): Makefile $(dist_dependent_files) ifneq ($(PLATFORM),Windows) - rm -Rfv $(package_name).egg-info .eggs + rm -Rfv $(package_name).egg-info .eggs build $(PYTHON_CMD) setup.py bdist_wheel -d $(dist_dir) --universal @echo 'Done: Created binary distribution archive: $@' else @@ -313,7 +331,7 @@ endif $(sdist_file): Makefile $(dist_dependent_files) ifneq ($(PLATFORM),Windows) - rm -Rfv $(package_name).egg-info .eggs + rm -Rfv $(package_name).egg-info .eggs build $(PYTHON_CMD) setup.py sdist -d $(dist_dir) @echo 'Done: Created source distribution archive: $@' else @@ -323,7 +341,7 @@ endif $(win64_dist_file): Makefile $(dist_dependent_files) ifeq ($(PLATFORM),Windows) - rm -Rfv $(package_name).egg-info .eggs + rm -Rfv $(package_name).egg-info .eggs build $(PYTHON_CMD) setup.py bdist_wininst -d $(dist_dir) -o -t "$(package_name) v$(package_version)" @echo 'Done: Created Windows installable: $@' else @@ -342,15 +360,14 @@ else @echo 'Info: PyLint requires Python 2; skipping this step on Python $(python_major_version)' endif -# TODO: Once Flake8 has no more errors, remove the dash "-" flake8.log: Makefile $(flake8_rc_file) $(check_py_files) rm -fv $@ bash -c 'set -o pipefail; flake8 $(check_py_files) 
2>&1 |tee [email protected]' mv -f [email protected] $@ @echo 'Done: Created Flake8 log file: $@' -$(test_log_file): Makefile $(package_name)/*.py zhmcclient_mock/*.py tests/unit/*.py tests/unit/zhmcclient_mock/*.py tests/function/*.py .coveragerc +$(test_log_file): Makefile $(package__py_files) $(test_py_files) .coveragerc rm -fv $@ - bash -c 'set -o pipefail; PYTHONWARNINGS=default py.test -s tests --cov $(package_name) --cov zhmcclient_mock --cov-config .coveragerc --cov-report=html $(pytest_opts) 2>&1 |tee [email protected]' + bash -c 'set -o pipefail; PYTHONWARNINGS=default py.test -s $(test_dir) --cov $(package_name) --cov zhmcclient_mock --cov-config .coveragerc --cov-report=html $(pytest_opts) 2>&1 |tee [email protected]' mv -f [email protected] $@ @echo 'Done: Created test log file: $@' diff --git a/appveyor.yml b/appveyor.yml index 92d8340..c4b18dd 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -6,12 +6,12 @@ environment: - PYTHON_VERSION: 2.7 PYTHON_ARCH: 32 PYTHON_HOME: C:\Python27 - - PYTHON_VERSION: 2.7 - PYTHON_ARCH: 64 - PYTHON_HOME: C:\Python27-x64 - - PYTHON_VERSION: 3.4 - PYTHON_ARCH: 32 - PYTHON_HOME: C:\Python34 +# - PYTHON_VERSION: 2.7 +# PYTHON_ARCH: 64 +# PYTHON_HOME: C:\Python27-x64 +# - PYTHON_VERSION: 3.4 +# PYTHON_ARCH: 32 +# PYTHON_HOME: C:\Python34 # - PYTHON_VERSION: 3.4 # PYTHON_ARCH: 64 # PYTHON_HOME: C:\Python34-x64 @@ -35,6 +35,16 @@ configuration: install: + - if %APPVEYOR_REPO_BRANCH%.==manual-ci-run. set _NEED_REBASE=true + - if %_NEED_REBASE%.==true. git fetch origin master + - if %_NEED_REBASE%.==true. git branch master FETCH_HEAD + - if %_NEED_REBASE%.==true. git rebase master + - git branch -av + + # TODO: Use the _MANUAL_CI_RUN variable in tox.ini to run certain parts only when set + - if %APPVEYOR_REPO_BRANCH%.==manual-ci-run. set _MANUAL_CI_RUN=true + - if %APPVEYOR_PULL_REQUEST_HEAD_REPO_BRANCH%.==manual-ci-run. 
set _MANUAL_CI_RUN=true + # Set PACKAGE_LEVEL for make - set PACKAGE_LEVEL=%configuration% diff --git a/docs/changes.rst b/docs/changes.rst index 8366dca..8c1d908 100644 --- a/docs/changes.rst +++ b/docs/changes.rst @@ -19,13 +19,45 @@ Change log ---------- -Version 0.18.0 +Version 0.19.0 ^^^^^^^^^^^^^^ Released: not yet **Incompatible changes:** +**Deprecations:** + +**Bug fixes:** + +* Fixed a flawed setup of setuptools in Python 2.7 on the Travis CI, where + the metadata directory of setuptools existed twice, by adding a script + `remove_duplicate_setuptools.py` that removes the moot copy of the metadata + directory (issue #434). + +* Fixed a bug where multiple Session objects shared the same set of + HTTP header fields, causing confusion in the logon status. + +**Enhancements:** + +* Migrated all remaining test cases from unittest to pytest, and started + improving the testcases using pytest specific features such as + parametrization. + +**Known issues:** + +* See `list of open issues`_. + +.. _`list of open issues`: https://github.com/zhmcclient/python-zhmcclient/issues + + +Version 0.18.0 +^^^^^^^^^^^^^^ + +Released: 2017-10-19 + +**Incompatible changes:** + * Removed the zhmc CLI support from this project, moving it into a new GitHub project ``zhmcclient/zhmccli``. @@ -40,8 +72,6 @@ Released: not yet - python-utils (from progressbar2) - wcwidth (from prompt-toolkit -> click-repl) -**Deprecations:** - **Bug fixes:** * Fixed a flawed setup of setuptools in Python 2.7 on the Travis CI, where @@ -49,14 +79,6 @@ Released: not yet `remove_duplicate_setuptools.py` that removes the moot copy of the metadata directory (issue #434). -**Enhancements:** - -**Known issues:** - -* See `list of open issues`_. - - .. 
_`list of open issues`: https://github.com/zhmcclient/python-zhmcclient/issues - Version 0.17.0 ^^^^^^^^^^^^^^ diff --git a/zhmcclient/_session.py b/zhmcclient/_session.py index 0b4c95e..937c238 100644 --- a/zhmcclient/_session.py +++ b/zhmcclient/_session.py @@ -23,6 +23,7 @@ import time import re import collections import six +from copy import copy try: from collections import OrderedDict except ImportError: @@ -340,7 +341,7 @@ class Session(object): scheme=_HMC_SCHEME, host=self._host, port=_HMC_PORT) - self._headers = _STD_HEADERS # dict with standard HTTP headers + self._headers = copy(_STD_HEADERS) # dict with standard HTTP headers if session_id is not None: # Create a logged-on state (same state as in _do_logon()) self._session_id = session_id
Error "fixture 'capture' not found" on Py34+36 when migrating test_logging to pytest ### Actual behavior When migrating the test_logging.py testcases from unittest to pytest, the following error happens on Python 3.4 and 3.6 (but not on 2.7): ``` $ PYTHONPATH=. py.test -s -x test_pt.py . . . file /media/share/Projects/zhmcclient/repos/python-zhmcclient/try/logcapture/test_pt.py, line 7 @log_capture(level=logging.DEBUG) def test_2(self, capture): E fixture 'capture' not found > available fixtures: cache, capfd, capsys, cov, doctest_namespace, monkeypatch, pytestconfig, record_xml_property, recwarn, tmpdir, tmpdir_factory > use 'pytest --fixtures [testpath]' for help on them. ``` The most simple source file reproducing this error, is: ``` import logging import pytest # not used here, but is actually used in the real life testcase from testfixtures import log_capture class Test2(object): @log_capture(level=logging.DEBUG) def test_2(self, capture): pass ``` Travis logs: https://travis-ci.org/zhmcclient/python-zhmcclient/builds/286095482 Appveyor logs: https://ci.appveyor.com/project/leopoldjuergen/python-zhmcclient/build/1.0.1145 As a comparison, the following unittest-based source file (with code that corresponds to the pytest-based file shown above) runs fine in all of these Python versions: ``` import logging import unittest from testfixtures import log_capture class Test1(unittest.TestCase): @log_capture(level=logging.DEBUG) def test1(self, capture): pass ``` This has been reported to the testfixtures project as issue Simplistix/testfixtures#65. ### Expected behavior Runs fine when using pytest, on all python versions. ### Execution environment * zhmcclient version: Git branch of PR #456. * Operating system (type+version): ubuntu 16.04 * HMC version: n/a * CPC version: n/a
zhmcclient/python-zhmcclient
diff --git a/tests/unit/common/test_utils.py b/tests/unit/common/test_utils.py index 1967f8f..692af7e 100644 --- a/tests/unit/common/test_utils.py +++ b/tests/unit/common/test_utils.py @@ -128,7 +128,7 @@ class TestUtilsAssertResources(object): exp_resources[0].properties['description'] = 'changed description' # Execute the code to be tested - with pytest.raises(AssertionError) as exc_info: + with pytest.raises(AssertionError): assert_resources(resources, exp_resources, prop_names) @pytest.mark.parametrize( @@ -148,5 +148,5 @@ class TestUtilsAssertResources(object): prop_names = exp_resources[0].properties.keys() # Execute the code to be tested - with pytest.raises(AssertionError) as exc_info: + with pytest.raises(AssertionError): assert_resources(resources, exp_resources, prop_names) diff --git a/tests/unit/test_client.py b/tests/unit/test_client.py old mode 100755 new mode 100644 index eedd9d5..29d4626 --- a/tests/unit/test_client.py +++ b/tests/unit/test_client.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016-2017 IBM Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/unit/test_exceptions.py b/tests/unit/test_exceptions.py old mode 100755 new mode 100644 index c620b6a..dbc13af --- a/tests/unit/test_exceptions.py +++ b/tests/unit/test_exceptions.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016-2017 IBM Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/unit/test_hba.py b/tests/unit/test_hba.py old mode 100755 new mode 100644 index 94878c6..a51654d --- a/tests/unit/test_hba.py +++ b/tests/unit/test_hba.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016-2017 IBM Corp. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/unit/test_logging.py b/tests/unit/test_logging.py old mode 100755 new mode 100644 index 33bd7e5..2c93d54 --- a/tests/unit/test_logging.py +++ b/tests/unit/test_logging.py @@ -19,8 +19,8 @@ Unit tests for _logging module. from __future__ import absolute_import, print_function import logging -import unittest -from testfixtures import log_capture +import pytest +from testfixtures import LogCapture from zhmcclient._logging import logged_api_call, get_logger @@ -126,32 +126,42 @@ _EXP_LOG_MSG_ENTER = "==> %s, args: %.500r, kwargs: %.500r" _EXP_LOG_MSG_LEAVE = "<== %s, result: %.1000r" [email protected]() +def capture(): + """ + This way of defining a fixture works around the issue that when + using the decorator testfixtures.log_capture() instead, pytest + fails with "fixture 'capture' not found". + """ + with LogCapture(level=logging.DEBUG) as log: + yield log + + # # Test cases # -class TestLoggingDecorator(unittest.TestCase): +class TestLoggingDecorator(object): """All test cases for the @logged_api_call decorator.""" def assert_log_capture(self, log_capture, exp_apifunc): - self.assertEqual(len(log_capture.records), 2) + assert len(log_capture.records) == 2 enter_record = log_capture.records[0] - self.assertEqual(enter_record.name, _EXP_LOGGER_NAME) - self.assertEqual(enter_record.levelname, _EXP_LOG_LEVEL) - self.assertEqual(enter_record.msg, _EXP_LOG_MSG_ENTER) - self.assertEqual(enter_record.args[0], exp_apifunc) + assert enter_record.name == _EXP_LOGGER_NAME + assert enter_record.levelname == _EXP_LOG_LEVEL + assert enter_record.msg == _EXP_LOG_MSG_ENTER + assert enter_record.args[0] == exp_apifunc # We don't check the positional args and keyword args leave_record = log_capture.records[1] - self.assertEqual(leave_record.name, _EXP_LOGGER_NAME) - self.assertEqual(leave_record.levelname, _EXP_LOG_LEVEL) - self.assertEqual(leave_record.msg, _EXP_LOG_MSG_LEAVE) - 
self.assertEqual(leave_record.args[0], exp_apifunc) + assert leave_record.name == _EXP_LOGGER_NAME + assert leave_record.levelname == _EXP_LOG_LEVEL + assert leave_record.msg == _EXP_LOG_MSG_LEAVE + assert leave_record.args[0] == exp_apifunc # We don't check the positional args and keyword args - @log_capture(level=logging.DEBUG) def test_1a_global_from_global(self, capture): """Simple test calling a decorated global function from a global function.""" @@ -160,7 +170,6 @@ class TestLoggingDecorator(unittest.TestCase): self.assert_log_capture(capture, 'decorated_global_function()') - @log_capture(level=logging.DEBUG) def test_1b_global_from_method(self, capture): """Simple test calling a decorated global function from a method.""" @@ -168,7 +177,6 @@ class TestLoggingDecorator(unittest.TestCase): self.assert_log_capture(capture, 'decorated_global_function()') - @log_capture(level=logging.DEBUG) def test_2a_global_inner1_from_global(self, capture): """Simple test calling a decorated inner function defined in a global function from a global function.""" @@ -180,7 +188,6 @@ class TestLoggingDecorator(unittest.TestCase): self.assert_log_capture(capture, 'global1_function.decorated_inner1_function()') - @log_capture(level=logging.DEBUG) def test_2b_global_inner1_from_method(self, capture): """Simple test calling a decorated inner function defined in a global function from a method.""" @@ -192,7 +199,6 @@ class TestLoggingDecorator(unittest.TestCase): self.assert_log_capture(capture, 'global1_function.decorated_inner1_function()') - @log_capture(level=logging.DEBUG) def test_3a_global_inner2_from_global(self, capture): """Simple test calling a decorated inner function defined in an inner function defined in a global function from a global function.""" @@ -204,7 +210,6 @@ class TestLoggingDecorator(unittest.TestCase): self.assert_log_capture(capture, 'inner1_function.decorated_inner2_function()') - @log_capture(level=logging.DEBUG) def 
test_3b_global_inner1_from_method(self, capture): """Simple test calling a decorated inner function defined in an inner function defined in a global function from a method.""" @@ -216,7 +221,6 @@ class TestLoggingDecorator(unittest.TestCase): self.assert_log_capture(capture, 'inner1_function.decorated_inner2_function()') - @log_capture(level=logging.DEBUG) def test_4a_method_from_global(self, capture): """Simple test calling a decorated method from a global function.""" @@ -227,7 +231,6 @@ class TestLoggingDecorator(unittest.TestCase): self.assert_log_capture(capture, 'Decorator1Class.decorated_method()') - @log_capture(level=logging.DEBUG) def test_4b_method_from_method(self, capture): """Simple test calling a decorated method from a method.""" @@ -238,7 +241,6 @@ class TestLoggingDecorator(unittest.TestCase): self.assert_log_capture(capture, 'Decorator1Class.decorated_method()') - @log_capture(level=logging.DEBUG) def test_5a_method_from_global(self, capture): """Simple test calling a decorated inner function defined in a method from a global function.""" @@ -251,7 +253,6 @@ class TestLoggingDecorator(unittest.TestCase): self.assert_log_capture(capture, 'method.decorated_inner_function()') - @log_capture(level=logging.DEBUG) def test_5b_method_from_method(self, capture): """Simple test calling a decorated inner function defined in a method from a method.""" @@ -267,7 +268,7 @@ class TestLoggingDecorator(unittest.TestCase): def test_decorated_class(self): """Test that using the decorator on a class raises TypeError.""" - with self.assertRaises(TypeError): + with pytest.raises(TypeError): @logged_api_call class DecoratedClass(object): @@ -276,7 +277,7 @@ class TestLoggingDecorator(unittest.TestCase): def test_decorated_property(self): """Test that using the decorator on a property raises TypeError.""" - with self.assertRaises(TypeError): + with pytest.raises(TypeError): class Class(object): @@ -286,7 +287,7 @@ class TestLoggingDecorator(unittest.TestCase): return 
self -class TestGetLogger(unittest.TestCase): +class TestGetLogger(object): """All test cases for get_logger().""" def test_root_logger(self): @@ -296,11 +297,10 @@ class TestGetLogger(unittest.TestCase): zhmc_logger = get_logger('') - self.assertTrue(isinstance(zhmc_logger, logging.Logger)) - self.assertEqual(zhmc_logger, py_logger) - self.assertTrue(len(zhmc_logger.handlers) >= 1, - "Unexpected list of logging handlers: %r" % - zhmc_logger.handlers) + assert isinstance(zhmc_logger, logging.Logger) + assert zhmc_logger == py_logger + assert len(zhmc_logger.handlers) >= 1, \ + "Unexpected list of logging handlers: %r" % zhmc_logger.handlers def test_foo_logger(self): """Test that get_logger('foo') returns the Python logger 'foo' @@ -309,8 +309,7 @@ class TestGetLogger(unittest.TestCase): zhmc_logger = get_logger('foo') - self.assertTrue(isinstance(zhmc_logger, logging.Logger)) - self.assertEqual(zhmc_logger, py_logger) - self.assertTrue(len(zhmc_logger.handlers) >= 1, - "Unexpected list of logging handlers: %r" % - zhmc_logger.handlers) + assert isinstance(zhmc_logger, logging.Logger) + assert zhmc_logger == py_logger + assert len(zhmc_logger.handlers) >= 1, \ + "Unexpected list of logging handlers: %r" % zhmc_logger.handlers diff --git a/tests/unit/test_lpar.py b/tests/unit/test_lpar.py old mode 100755 new mode 100644 index 46a743d..5883b79 --- a/tests/unit/test_lpar.py +++ b/tests/unit/test_lpar.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016-2017 IBM Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,16 +18,16 @@ Unit tests for _lpar module. from __future__ import absolute_import, print_function -import unittest +# FIXME: Migrate requests_mock to zhmcclient_mock. 
import requests_mock from zhmcclient import Session, Client -class LparTests(unittest.TestCase): +class TestLpar(object): """All tests for Lpar and LparManager classes.""" - def setUp(self): + def setup_method(self): self.session = Session('fake-host', 'fake-user', 'fake-id') self.client = Client(self.session) with requests_mock.mock() as m: @@ -52,7 +51,7 @@ class LparTests(unittest.TestCase): cpcs = self.cpc_mgr.list(full_properties=False) self.cpc = cpcs[0] - def tearDown(self): + def teardown_method(self): with requests_mock.mock() as m: m.delete('/api/sessions/this-session', status_code=204) self.session.logoff() @@ -60,7 +59,7 @@ class LparTests(unittest.TestCase): def test_init(self): """Test __init__() on LparManager instance in CPC.""" lpar_mgr = self.cpc.lpars - self.assertEqual(lpar_mgr.cpc, self.cpc) + assert lpar_mgr.cpc == self.cpc def test_list_short_ok(self): """ @@ -87,16 +86,13 @@ class LparTests(unittest.TestCase): lpars = lpar_mgr.list(full_properties=False) - self.assertEqual(len(lpars), len(result['logical-partitions'])) + assert len(lpars) == len(result['logical-partitions']) for idx, lpar in enumerate(lpars): - self.assertEqual( - lpar.properties, - result['logical-partitions'][idx]) - self.assertEqual( - lpar.uri, - result['logical-partitions'][idx]['object-uri']) - self.assertFalse(lpar.full_properties) - self.assertEqual(lpar.manager, lpar_mgr) + assert lpar.properties == result['logical-partitions'][idx] + assert lpar.uri == \ + result['logical-partitions'][idx]['object-uri'] + assert not lpar.full_properties + assert lpar.manager == lpar_mgr def test_list_full_ok(self): """ @@ -143,15 +139,14 @@ class LparTests(unittest.TestCase): lpars = lpar_mgr.list(full_properties=True) - self.assertEqual(len(lpars), len(result['logical-partitions'])) + assert len(lpars) == len(result['logical-partitions']) for idx, lpar in enumerate(lpars): - self.assertEqual(lpar.properties['name'], - result['logical-partitions'][idx]['name']) - self.assertEqual( 
- lpar.uri, - result['logical-partitions'][idx]['object-uri']) - self.assertTrue(lpar.full_properties) - self.assertEqual(lpar.manager, lpar_mgr) + assert lpar.properties['name'] == \ + result['logical-partitions'][idx]['name'] + assert lpar.uri == \ + result['logical-partitions'][idx]['object-uri'] + assert lpar.full_properties + assert lpar.manager == lpar_mgr def test_activate(self): """ @@ -185,7 +180,7 @@ class LparTests(unittest.TestCase): m.post("/api/logical-partitions/fake-lpar-id-1/operations/" "activate", json=result) status = lpar.activate(wait_for_completion=False) - self.assertEqual(status, result) + assert status == result def test_deactivate(self): """ @@ -219,7 +214,7 @@ class LparTests(unittest.TestCase): m.post("/api/logical-partitions/fake-lpar-id-1/operations/" "deactivate", json=result) status = lpar.deactivate(wait_for_completion=False) - self.assertEqual(status, result) + assert status == result def test_load(self): """ @@ -254,8 +249,4 @@ class LparTests(unittest.TestCase): m.post("/api/logical-partitions/fake-lpar-id-1/operations/load", json=result) status = lpar.load(load_address='5162', wait_for_completion=False) - self.assertEqual(status, result) - - -if __name__ == '__main__': - unittest.main() + assert status == result diff --git a/tests/unit/test_manager.py b/tests/unit/test_manager.py old mode 100755 new mode 100644 index 41b0e02..eb5febf --- a/tests/unit/test_manager.py +++ b/tests/unit/test_manager.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016-2017 IBM Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,10 +18,11 @@ Unit tests for _manager module. 
from __future__ import absolute_import, print_function -import unittest from datetime import datetime import time +import re import warnings +import pytest from zhmcclient import BaseResource, BaseManager, Session, NotFound, \ NoUniqueMatch @@ -76,12 +76,12 @@ class MyManager(BaseManager): return result_list -class Manager1Tests(unittest.TestCase): +class TestManager1(object): """ Tests for the BaseManager class with one resource. """ - def setUp(self): + def setup_method(self): self.session = Session(host='fake-host', userid='fake-user', password='fake-pw') self.manager = MyManager(self.session) @@ -103,18 +103,17 @@ class Manager1Tests(unittest.TestCase): repr_str = repr_str.replace('\n', '\\n') # We check just the begin of the string: - self.assertRegexpMatches( - repr_str, - r'^{classname}\s+at\s+0x{id:08x}\s+\(\\n.*'.format( - classname=manager.__class__.__name__, - id=id(manager))) + assert re.match(r'^{classname}\s+at\s+0x{id:08x}\s+\(\\n.*'. + format(classname=manager.__class__.__name__, + id=id(manager)), + repr_str) def test_init_properties(self): """Test BaseManager properties after initialization.""" - self.assertEqual(self.manager.resource_class, MyResource) - self.assertEqual(self.manager.session, self.session) - self.assertEqual(self.manager.parent, None) + assert self.manager.resource_class == MyResource + assert self.manager.session == self.session + assert self.manager.parent is None def test_invalidate_cache(self): """Test invalidate_cache().""" @@ -122,11 +121,11 @@ class Manager1Tests(unittest.TestCase): # Populate the cache by finding a resource by name. self.manager.find(**filter_args) - self.assertEqual(self.manager._list_called, 1) + assert self.manager._list_called == 1 # Check that on the second find by name, list() is not called again. self.manager.find(**filter_args) - self.assertEqual(self.manager._list_called, 1) + assert self.manager._list_called == 1 # Invalidate the cache via invalidate_cache(). 
self.manager.invalidate_cache() @@ -134,7 +133,7 @@ class Manager1Tests(unittest.TestCase): # Check that on the third find by name, list() is called again, because # the cache had been invalidated. self.manager.find(**filter_args) - self.assertEqual(self.manager._list_called, 2) + assert self.manager._list_called == 2 def test_flush(self): """Test flush() and verify that it raises a DeprecationWarning.""" @@ -142,25 +141,25 @@ class Manager1Tests(unittest.TestCase): # Populate the cache by finding a resource by name. self.manager.find(**filter_args) - self.assertEqual(self.manager._list_called, 1) + assert self.manager._list_called == 1 # Check that on the second find by name, list() is not called again. self.manager.find(**filter_args) - self.assertEqual(self.manager._list_called, 1) + assert self.manager._list_called == 1 # Invalidate the cache via flush(). with warnings.catch_warnings(record=True) as wngs: warnings.simplefilter("always") self.manager.flush() - self.assertEqual(len(wngs), 1) + assert len(wngs) == 1 wng = wngs[0] - self.assertTrue(issubclass(wng.category, DeprecationWarning), - "Unexpected warnings class: %s" % wng.category) + assert issubclass(wng.category, DeprecationWarning), \ + "Unexpected warnings class: %s" % wng.category # Check that on the third find by name, list() is called again, because # the cache had been invalidated. self.manager.find(**filter_args) - self.assertEqual(self.manager._list_called, 2) + assert self.manager._list_called == 2 def test_list_not_implemented(self): """Test that BaseManager.list() raises NotImplementedError.""" @@ -175,16 +174,16 @@ class Manager1Tests(unittest.TestCase): name_prop='fake_name_prop', query_props=[]) - with self.assertRaises(NotImplementedError): + with pytest.raises(NotImplementedError): manager.list() -class Manager2Tests(unittest.TestCase): +class TestManager2(object): """ Tests for the BaseManager class with two resources. 
""" - def setUp(self): + def setup_method(self): self.session = Session(host='fake-host', userid='fake-user', password='fake-pw') self.manager = MyManager(self.session) @@ -217,7 +216,7 @@ class Manager2Tests(unittest.TestCase): resources = self.manager.findall(**filter_args) - self.assertEqual(len(resources), 0) + assert len(resources) == 0 def test_findall_name_one(self): """Test BaseManager.findall() with one resource matching by the name @@ -226,9 +225,9 @@ class Manager2Tests(unittest.TestCase): resources = self.manager.findall(**filter_args) - self.assertEqual(len(resources), 1) - self.assertEqual(resources[0].uri, self.resource2.uri) - self.assertEqual(resources[0].name, self.resource2.name) + assert len(resources) == 1 + assert resources[0].uri == self.resource2.uri + assert resources[0].name == self.resource2.name def test_findall_str_none(self): """Test BaseManager.findall() with no resource matching by a @@ -236,7 +235,7 @@ class Manager2Tests(unittest.TestCase): resources = self.manager.findall(other="not-exists") - self.assertEqual(len(resources), 0) + assert len(resources) == 0 def test_findall_str_one(self): """Test BaseManager.findall() with one resource matching by a @@ -244,9 +243,9 @@ class Manager2Tests(unittest.TestCase): resources = self.manager.findall(other="fake-other-2") - self.assertEqual(len(resources), 1) - self.assertEqual(resources[0].uri, self.resource2.uri) - self.assertEqual(resources[0].name, self.resource2.name) + assert len(resources) == 1 + assert resources[0].uri == self.resource2.uri + assert resources[0].name == self.resource2.name def test_findall_str_one_and(self): """Test BaseManager.findall() with one resource matching by two @@ -255,9 +254,9 @@ class Manager2Tests(unittest.TestCase): resources = self.manager.findall(same="fake-same", other="fake-other-2") - self.assertEqual(len(resources), 1) - self.assertEqual(resources[0].uri, self.resource2.uri) - self.assertEqual(resources[0].name, self.resource2.name) + assert 
len(resources) == 1 + assert resources[0].uri == self.resource2.uri + assert resources[0].name == self.resource2.name def test_findall_str_two(self): """Test BaseManager.findall() with two resources matching by a @@ -265,9 +264,9 @@ class Manager2Tests(unittest.TestCase): resources = self.manager.findall(same="fake-same") - self.assertEqual(len(resources), 2) - self.assertEqual(set([res.uri for res in resources]), - set([self.resource1.uri, self.resource2.uri])) + assert len(resources) == 2 + assert set([res.uri for res in resources]) == \ + set([self.resource1.uri, self.resource2.uri]) def test_findall_str_two_or(self): """Test BaseManager.findall() with two resources matching by a @@ -277,9 +276,9 @@ class Manager2Tests(unittest.TestCase): resources = self.manager.findall(other=["fake-other-1", "fake-other-2"]) - self.assertEqual(len(resources), 2) - self.assertEqual(set([res.uri for res in resources]), - set([self.resource1.uri, self.resource2.uri])) + assert len(resources) == 2 + assert set([res.uri for res in resources]) == \ + set([self.resource1.uri, self.resource2.uri]) def test_findall_int_none(self): """Test BaseManager.findall() with no resource matching by a @@ -287,7 +286,7 @@ class Manager2Tests(unittest.TestCase): resources = self.manager.findall(int_other=815) - self.assertEqual(len(resources), 0) + assert len(resources) == 0 def test_findall_int_one(self): """Test BaseManager.findall() with one resource matching by a @@ -295,9 +294,9 @@ class Manager2Tests(unittest.TestCase): resources = self.manager.findall(int_other=24) - self.assertEqual(len(resources), 1) - self.assertEqual(resources[0].uri, self.resource2.uri) - self.assertEqual(resources[0].name, self.resource2.name) + assert len(resources) == 1 + assert resources[0].uri == self.resource2.uri + assert resources[0].name == self.resource2.name def test_findall_int_two(self): """Test BaseManager.findall() with two resources matching by a @@ -305,16 +304,16 @@ class 
Manager2Tests(unittest.TestCase): resources = self.manager.findall(int_same=42) - self.assertEqual(len(resources), 2) - self.assertEqual(set([res.uri for res in resources]), - set([self.resource1.uri, self.resource2.uri])) + assert len(resources) == 2 + assert set([res.uri for res in resources]) == \ + set([self.resource1.uri, self.resource2.uri]) def test_find_name_none(self): """Test BaseManager.find() with no resource matching by the name resource property.""" filter_args = {self.manager._name_prop: "not-exists"} - with self.assertRaises(NotFound): + with pytest.raises(NotFound): self.manager.find(**filter_args) def test_find_name_one(self): @@ -324,13 +323,13 @@ class Manager2Tests(unittest.TestCase): resource = self.manager.find(**filter_args) - self.assertEqual(resource.uri, self.resource2.uri) - self.assertEqual(resource.name, self.resource2.name) + assert resource.uri == self.resource2.uri + assert resource.name == self.resource2.name def test_find_str_none(self): """Test BaseManager.find() with no resource matching by a string-typed (non-name) resource property.""" - with self.assertRaises(NotFound): + with pytest.raises(NotFound): self.manager.find(other="not-exists") @@ -340,20 +339,20 @@ class Manager2Tests(unittest.TestCase): resource = self.manager.find(other="fake-other-2") - self.assertEqual(resource.uri, self.resource2.uri) - self.assertEqual(resource.name, self.resource2.name) + assert resource.uri == self.resource2.uri + assert resource.name == self.resource2.name def test_find_str_two(self): """Test BaseManager.find() with two resources matching by a string-typed (non-name) resource property.""" - with self.assertRaises(NoUniqueMatch): + with pytest.raises(NoUniqueMatch): self.manager.find(same="fake-same") def test_find_int_none(self): """Test BaseManager.find() with no resource matching by a string-typed (non-name) resource property.""" - with self.assertRaises(NotFound): + with pytest.raises(NotFound): self.manager.find(int_other=815) @@ 
-363,13 +362,13 @@ class Manager2Tests(unittest.TestCase): resource = self.manager.find(int_other=24) - self.assertEqual(resource.uri, self.resource2.uri) - self.assertEqual(resource.name, self.resource2.name) + assert resource.uri == self.resource2.uri + assert resource.name == self.resource2.name def test_find_int_two(self): """Test BaseManager.find() with two resources matching by a string-typed (non-name) resource property.""" - with self.assertRaises(NoUniqueMatch): + with pytest.raises(NoUniqueMatch): self.manager.find(int_same=42) @@ -377,7 +376,7 @@ class Manager2Tests(unittest.TestCase): """Test BaseManager.find_by_name() with no resource matching by the name resource property.""" - with self.assertRaises(NotFound): + with pytest.raises(NotFound): self.manager.find_by_name("not-exists") def test_find_by_name_one(self): @@ -386,21 +385,20 @@ class Manager2Tests(unittest.TestCase): resource = self.manager.find_by_name(self.resource2.name) - self.assertEqual(resource.uri, self.resource2.uri) - self.assertEqual(resource.name, self.resource2.name) + assert resource.uri == self.resource2.uri + assert resource.name == self.resource2.name -class NameUriCacheTests(unittest.TestCase): +class TestNameUriCache(object): """All tests for the _NameUriCache class.""" - def assertDatetimeNear(self, dt1, dt2, max_delta=0.1): + def assert_datetime_near(self, dt1, dt2, max_delta=0.1): delta = abs(dt2 - dt1).total_seconds() - if delta > max_delta: - self.fail( - "Datetime values are %s s apart, maximum is %s s" % - (delta, max_delta)) + assert delta <= max_delta, \ + "Datetime values are %s s apart, maximum is %s s" % \ + (delta, max_delta) - def setUp(self): + def setup_method(self): self.session = Session(host='fake-host', userid='fake-user', password='fake-pw') self.manager = MyManager(self.session) @@ -430,10 +428,10 @@ class NameUriCacheTests(unittest.TestCase): def test_initial(self): """Test initial cache state.""" - self.assertEqual(self.cache._manager, self.manager) - 
self.assertEqual(self.cache._timetolive, self.timetolive) - self.assertEqual(self.cache._uris, {}) - self.assertDatetimeNear(self.cache._invalidated, self.created) + assert self.cache._manager == self.manager + assert self.cache._timetolive == self.timetolive + assert self.cache._uris == {} + self.assert_datetime_near(self.cache._invalidated, self.created) def test_get_no_invalidate(self): """Tests for get() without auto-invalidating the cache.""" @@ -442,37 +440,37 @@ class NameUriCacheTests(unittest.TestCase): # cache brings all resources into the cache and causes list() to be # called once. resource1_uri = self.cache.get(self.resource1_name) - self.assertEqual(resource1_uri, self.resource1.uri) - self.assertEqual(set(self.cache._uris.keys()), self.all_names) - self.assertEqual(self.manager._list_called, 1) + assert resource1_uri == self.resource1.uri + assert set(self.cache._uris.keys()) == self.all_names + assert self.manager._list_called == 1 # Check that on the second access of the same name, list() is not # called again. resource1_uri = self.cache.get(self.resource1_name) - self.assertEqual(self.manager._list_called, 1) + assert self.manager._list_called == 1 def test_get_non_existing(self): """Tests for get() of a non-existing entry.""" # Check that accessing a non-existing resource name raises an # exception, but has populated the cache. - with self.assertRaises(NotFound): + with pytest.raises(NotFound): self.cache.get('non-existing') - self.assertEqual(set(self.cache._uris.keys()), self.all_names) - self.assertEqual(self.manager._list_called, 1) + assert set(self.cache._uris.keys()) == self.all_names + assert self.manager._list_called == 1 def test_get_auto_invalidate(self): """Tests for get() with auto-invalidating the cache.""" # Populate the cache. 
self.cache.get(self.resource1_name) - self.assertEqual(self.manager._list_called, 1) - self.assertDatetimeNear(self.cache._invalidated, self.created) + assert self.manager._list_called == 1 + self.assert_datetime_near(self.cache._invalidated, self.created) # Check that on the second access of the same name, list() is not # called again. self.cache.get(self.resource1_name) - self.assertEqual(self.manager._list_called, 1) + assert self.manager._list_called == 1 # Wait until the time-to-live has safely passed. time.sleep(self.timetolive + 0.2) @@ -481,33 +479,33 @@ class NameUriCacheTests(unittest.TestCase): # again, because the cache now has auto-invalidated. self.cache.get(self.resource1_name) invalidated = datetime.now() - self.assertEqual(self.manager._list_called, 2) - self.assertDatetimeNear(self.cache._invalidated, invalidated) + assert self.manager._list_called == 2 + self.assert_datetime_near(self.cache._invalidated, invalidated) def test_get_manual_invalidate(self): """Tests for get() and manual invalidate().""" # Populate the cache. self.cache.get(self.resource1_name) - self.assertEqual(self.manager._list_called, 1) - self.assertDatetimeNear(self.cache._invalidated, self.created) + assert self.manager._list_called == 1 + self.assert_datetime_near(self.cache._invalidated, self.created) # Check that on the second access of the same name, list() is not # called again. self.cache.get(self.resource1_name) - self.assertEqual(self.manager._list_called, 1) + assert self.manager._list_called == 1 # Manually invalidate the cache. self.cache.invalidate() invalidated = datetime.now() - self.assertDatetimeNear(self.cache._invalidated, invalidated) - self.assertEqual(self.cache._uris, {}) + self.assert_datetime_near(self.cache._invalidated, invalidated) + assert self.cache._uris == {} # Check that on the third access of the same name, list() is called # again, because the cache has been invalidated. 
self.cache.get(self.resource1_name) - self.assertEqual(self.manager._list_called, 2) - self.assertEqual(set(self.cache._uris.keys()), self.all_names) + assert self.manager._list_called == 2 + assert set(self.cache._uris.keys()) == self.all_names def test_refresh_empty(self): """Test refresh() on an empty cache.""" @@ -516,71 +514,71 @@ class NameUriCacheTests(unittest.TestCase): # re-populates it. self.cache.refresh() refreshed = datetime.now() - self.assertDatetimeNear(self.cache._invalidated, refreshed) - self.assertEqual(self.manager._list_called, 1) - self.assertEqual(set(self.cache._uris.keys()), self.all_names) + self.assert_datetime_near(self.cache._invalidated, refreshed) + assert self.manager._list_called == 1 + assert set(self.cache._uris.keys()) == self.all_names def test_refresh_populated(self): """Test refresh() on a fully populated cache.""" # Populate the cache. self.cache.get(self.resource1_name) - self.assertEqual(self.manager._list_called, 1) - self.assertDatetimeNear(self.cache._invalidated, self.created) + assert self.manager._list_called == 1 + self.assert_datetime_near(self.cache._invalidated, self.created) # Refresh the cache and check that this invalidates it and # re-populates it. self.cache.refresh() refreshed = datetime.now() - self.assertDatetimeNear(self.cache._invalidated, refreshed) - self.assertEqual(self.manager._list_called, 2) - self.assertEqual(set(self.cache._uris.keys()), self.all_names) + self.assert_datetime_near(self.cache._invalidated, refreshed) + assert self.manager._list_called == 2 + assert set(self.cache._uris.keys()) == self.all_names def test_delete_existing(self): """Test delete() of an existing cache entry, and re-accessing it.""" # Populate the cache. 
self.cache.get(self.resource1_name) - self.assertEqual(self.manager._list_called, 1) - self.assertDatetimeNear(self.cache._invalidated, self.created) + assert self.manager._list_called == 1 + self.assert_datetime_near(self.cache._invalidated, self.created) # Delete an existing cache entry and check that the entry is now gone. self.cache.delete(self.resource1_name) - self.assertEqual(set(self.cache._uris.keys()), {self.resource2_name}) + assert set(self.cache._uris.keys()) == {self.resource2_name} # Re-access the deleted entry, and check that list() is called again # to get that entry into the cache. self.cache.get(self.resource1_name) - self.assertEqual(self.manager._list_called, 2) - self.assertEqual(set(self.cache._uris.keys()), self.all_names) + assert self.manager._list_called == 2 + assert set(self.cache._uris.keys()) == self.all_names def test_delete_non_existing(self): """Test delete() of a non-existing cache entry.""" # Populate the cache. self.cache.get(self.resource1_name) - self.assertEqual(self.manager._list_called, 1) - self.assertDatetimeNear(self.cache._invalidated, self.created) + assert self.manager._list_called == 1 + self.assert_datetime_near(self.cache._invalidated, self.created) # Delete a non-existing cache entry and check that no exception is # raised and that the cache still contains the same entries. self.cache.delete('non-existing') - self.assertEqual(self.manager._list_called, 1) - self.assertEqual(set(self.cache._uris.keys()), self.all_names) + assert self.manager._list_called == 1 + assert set(self.cache._uris.keys()) == self.all_names def test_delete_none(self): """Test delete() of `None`.""" # Populate the cache. 
self.cache.get(self.resource1_name) - self.assertEqual(self.manager._list_called, 1) - self.assertDatetimeNear(self.cache._invalidated, self.created) + assert self.manager._list_called == 1 + self.assert_datetime_near(self.cache._invalidated, self.created) # Delete `None` and check that no exception is raised and that the # cache still contains the same entries. self.cache.delete(None) - self.assertEqual(self.manager._list_called, 1) - self.assertEqual(set(self.cache._uris.keys()), self.all_names) + assert self.manager._list_called == 1 + assert set(self.cache._uris.keys()) == self.all_names def test_update_from_empty(self): """Test update_from() on an empty cache.""" @@ -603,9 +601,8 @@ class NameUriCacheTests(unittest.TestCase): # Update the cache from these two resources check that they are now in # the cache (and that list() has not been called) self.cache.update_from([resource3, resource4]) - self.assertEqual(self.manager._list_called, 0) - self.assertEqual(set(self.cache._uris.keys()), - {resource3_name, resource4_name}) + assert self.manager._list_called == 0 + assert set(self.cache._uris.keys()) == {resource3_name, resource4_name} def test_update_from_populated_modify_name(self): """Test update_from() on a populated cache and modify the URI of one @@ -627,23 +624,22 @@ class NameUriCacheTests(unittest.TestCase): # Populate the cache. self.cache.get(self.resource1_name) - self.assertEqual(self.manager._list_called, 1) - self.assertEqual(set(self.cache._uris.keys()), - {self.resource1_name, self.resource2_name}) + assert self.manager._list_called == 1 + assert set(self.cache._uris.keys()) == \ + {self.resource1_name, self.resource2_name} # Update the cache from these two resources check that they are now in # the cache (and that list() has not been called again). 
self.cache.update_from([resource3, resource2_new]) - self.assertEqual(self.manager._list_called, 1) - self.assertEqual( - set(self.cache._uris.keys()), - {self.resource1_name, self.resource2_name, resource3_name}) + assert self.manager._list_called == 1 + assert set(self.cache._uris.keys()) == \ + {self.resource1_name, self.resource2_name, resource3_name} # Access the modified entry, and check that the entry has changed # (and that list() has not been called again). resource2_uri = self.cache.get(self.resource2_name) - self.assertEqual(self.manager._list_called, 1) - self.assertEqual(resource2_uri, resource2_new_uri) + assert self.manager._list_called == 1 + assert resource2_uri == resource2_new_uri def test_update_empty(self): """Test update() on an empty cache.""" @@ -653,13 +649,13 @@ class NameUriCacheTests(unittest.TestCase): # Update the cache, to get the entry added. self.cache.update(resource3_name, resource3_uri) - self.assertEqual(self.manager._list_called, 0) + assert self.manager._list_called == 0 # Access the new entry, and check the entry (and that list() has not # been called). act_resource3_uri = self.cache.get(resource3_name) - self.assertEqual(self.manager._list_called, 0) - self.assertEqual(act_resource3_uri, resource3_uri) + assert self.manager._list_called == 0 + assert act_resource3_uri == resource3_uri def test_update_empty_empty(self): """Test update() on an empty cache with an empty resource name.""" @@ -670,8 +666,8 @@ class NameUriCacheTests(unittest.TestCase): # Update the cache with the empty resource name, and check that no # exception is raised and that the cache is still empty. 
self.cache.update(resource3_name, resource3_uri) - self.assertEqual(self.cache._uris, {}) - self.assertEqual(self.manager._list_called, 0) + assert self.cache._uris == {} + assert self.manager._list_called == 0 def test_update_empty_none(self): """Test update() on an empty cache with a `None` resource name.""" @@ -682,8 +678,8 @@ class NameUriCacheTests(unittest.TestCase): # Update the cache with the empty resource name, and check that no # exception is raised and that the cache is still empty. self.cache.update(resource3_name, resource3_uri) - self.assertEqual(self.cache._uris, {}) - self.assertEqual(self.manager._list_called, 0) + assert self.cache._uris == {} + assert self.manager._list_called == 0 def test_update_populated_new(self): """Test update() on a populated cache with a new entry.""" @@ -693,19 +689,19 @@ class NameUriCacheTests(unittest.TestCase): # Populate the cache. self.cache.get(self.resource1_name) - self.assertEqual(self.manager._list_called, 1) - self.assertEqual(set(self.cache._uris.keys()), - {self.resource1_name, self.resource2_name}) + assert self.manager._list_called == 1 + assert set(self.cache._uris.keys()) == \ + {self.resource1_name, self.resource2_name} # Update the cache, to get the new entry added. self.cache.update(resource3_name, resource3_uri) - self.assertEqual(self.manager._list_called, 1) + assert self.manager._list_called == 1 # Access the new entry, and check the entry (and that list() has not # been called). act_resource3_uri = self.cache.get(resource3_name) - self.assertEqual(self.manager._list_called, 1) - self.assertEqual(act_resource3_uri, resource3_uri) + assert self.manager._list_called == 1 + assert act_resource3_uri == resource3_uri def test_update_populated_modify(self): """Test update() on a populated cache by modifying an existing @@ -715,16 +711,16 @@ class NameUriCacheTests(unittest.TestCase): # Populate the cache. 
self.cache.get(self.resource1_name) - self.assertEqual(self.manager._list_called, 1) - self.assertEqual(set(self.cache._uris.keys()), - {self.resource1_name, self.resource2_name}) + assert self.manager._list_called == 1 + assert set(self.cache._uris.keys()) == \ + {self.resource1_name, self.resource2_name} # Update the cache, to get the existing entry modified. self.cache.update(self.resource2_name, resource2_new_uri) - self.assertEqual(self.manager._list_called, 1) + assert self.manager._list_called == 1 # Access the new entry, and check the entry (and that list() has not # been called again). act_resource2_uri = self.cache.get(self.resource2_name) - self.assertEqual(self.manager._list_called, 1) - self.assertEqual(act_resource2_uri, resource2_new_uri) + assert self.manager._list_called == 1 + assert act_resource2_uri == resource2_new_uri diff --git a/tests/unit/test_nic.py b/tests/unit/test_nic.py old mode 100755 new mode 100644 index 98e6009..3100017 --- a/tests/unit/test_nic.py +++ b/tests/unit/test_nic.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016-2017 IBM Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/unit/test_notification.py b/tests/unit/test_notification.py index dfe650c..5b20666 100644 --- a/tests/unit/test_notification.py +++ b/tests/unit/test_notification.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016-2017 IBM Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,9 +21,9 @@ requests_mock package. from __future__ import absolute_import, print_function -import unittest import json import threading +# FIXME: Migrate mock to zhmcclient_mock. 
from mock import patch from zhmcclient._notification import NotificationReceiver @@ -125,9 +124,9 @@ def receive_notifications(receiver): return msg_items -class NotificationTests(unittest.TestCase): +class TestNotification(object): - def setUp(self): + def setup_method(self): self.topic = 'fake-topic' self.hmc = 'fake-hmc' self.userid = 'fake-userid' @@ -148,7 +147,7 @@ class NotificationTests(unittest.TestCase): conn.mock_start() msg_items = receive_notifications(receiver) - self.assertEqual(msg_items, []) + assert msg_items == [] @patch(target='stomp.Connection', new=MockedStompConnection) def test_one_message(self): @@ -163,8 +162,8 @@ class NotificationTests(unittest.TestCase): conn.mock_start() msg_items = receive_notifications(receiver) - self.assertEqual(len(msg_items), 1) + assert len(msg_items) == 1 msg0 = msg_items[0] - self.assertEqual(msg0[0], self.std_headers) - self.assertEqual(msg0[1], message_obj) + assert msg0[0] == self.std_headers + assert msg0[1] == message_obj diff --git a/tests/unit/test_port.py b/tests/unit/test_port.py old mode 100755 new mode 100644 index acc2e55..10c0f16 --- a/tests/unit/test_port.py +++ b/tests/unit/test_port.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016-2017 IBM Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,16 +18,16 @@ Unit tests for _port module. from __future__ import absolute_import, print_function -import unittest +# FIXME: Migrate requests_mock to zhmcclient_mock. 
import requests_mock from zhmcclient import Session, Client -class PortTests(unittest.TestCase): +class TestPort(object): """All tests for Port and PortManager classes.""" - def setUp(self): + def setup_method(self): self.session = Session('port-dpm-host', 'port-user', 'port-pwd') self.client = Client(self.session) @@ -110,7 +109,7 @@ class PortTests(unittest.TestCase): adapters = adapter_mgr.list(full_properties=False) self.adapters = adapters - def tearDown(self): + def teardown_method(self): with requests_mock.mock() as m: m.delete('/api/sessions/this-session', status_code=204) self.session.logoff() @@ -118,7 +117,7 @@ class PortTests(unittest.TestCase): def test_init(self): """Test __init__() on PortManager instance in Adapter.""" port_mgr = self.adapters[0].ports - self.assertEqual(port_mgr.adapter, self.adapters[0]) + assert port_mgr.adapter == self.adapters[0] def test_list_short_ok(self): """ @@ -139,15 +138,15 @@ class PortTests(unittest.TestCase): else: network_uris = result_adapter['network-port-uris'] uris = network_uris - self.assertEqual(adapter.properties['port-count'], len(uris)) + assert adapter.properties['port-count'] == len(uris) else: uris = [] - self.assertEqual(len(ports), len(uris)) + assert len(ports) == len(uris) for idx, port in enumerate(ports): - self.assertTrue(port.properties['element-uri'] in uris) - self.assertFalse(port.full_properties) - self.assertEqual(port.manager, port_mgr) + assert port.properties['element-uri'] in uris + assert not port.full_properties + assert port.manager == port_mgr def test_list_full_ok(self): """ @@ -177,18 +176,15 @@ class PortTests(unittest.TestCase): ports = port_mgr.list(full_properties=True) if len(ports) != 0: storage_uris = self.result['adapters'][0]['storage-port-uris'] - self.assertEqual(adapter.properties['port-count'], - len(storage_uris)) + assert adapter.properties['port-count'] == len(storage_uris) else: storage_uris = [] - self.assertEqual(len(ports), len(storage_uris)) + assert 
len(ports) == len(storage_uris) for idx, port in enumerate(ports): - self.assertEqual( - port.properties['element-uri'], - storage_uris[idx]) - self.assertTrue(port.full_properties) - self.assertEqual(port.manager, port_mgr) + assert port.properties['element-uri'] == storage_uris[idx] + assert port.full_properties + assert port.manager == port_mgr def test_list_filter_name_ok(self): """ @@ -218,15 +214,14 @@ class PortTests(unittest.TestCase): filter_args = {'name': 'Port 0'} ports = port_mgr.list(filter_args=filter_args) - self.assertEqual(len(ports), 1) + assert len(ports) == 1 port = ports[0] - self.assertEqual(port.name, 'Port 0') - self.assertEqual( - port.uri, - '/api/adapters/fake-adapter-id-1/storage-ports/0') - self.assertEqual(port.properties['name'], 'Port 0') - self.assertEqual(port.properties['element-id'], '0') - self.assertEqual(port.manager, port_mgr) + assert port.name == 'Port 0' + assert port.uri == \ + '/api/adapters/fake-adapter-id-1/storage-ports/0' + assert port.properties['name'] == 'Port 0' + assert port.properties['element-id'] == '0' + assert port.manager == port_mgr def test_list_filter_elementid_ok(self): """ @@ -256,15 +251,14 @@ class PortTests(unittest.TestCase): filter_args = {'element-id': '0'} ports = port_mgr.list(filter_args=filter_args) - self.assertEqual(len(ports), 1) + assert len(ports) == 1 port = ports[0] - self.assertEqual(port.name, 'Port 0') - self.assertEqual( - port.uri, - '/api/adapters/fake-adapter-id-1/storage-ports/0') - self.assertEqual(port.properties['name'], 'Port 0') - self.assertEqual(port.properties['element-id'], '0') - self.assertEqual(port.manager, port_mgr) + assert port.name == 'Port 0' + assert port.uri == \ + '/api/adapters/fake-adapter-id-1/storage-ports/0' + assert port.properties['name'] == 'Port 0' + assert port.properties['element-id'] == '0' + assert port.manager == port_mgr def test_update_properties(self): """ @@ -277,7 +271,3 @@ class PortTests(unittest.TestCase): 
m.post('/api/adapters/fake-adapter-id-1/storage-ports/0', status_code=204) port.update_properties(properties={}) - - -if __name__ == '__main__': - unittest.main() diff --git a/tests/unit/test_resource.py b/tests/unit/test_resource.py old mode 100755 new mode 100644 index 496aebe..7149568 --- a/tests/unit/test_resource.py +++ b/tests/unit/test_resource.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016-2017 IBM Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,8 +18,8 @@ Unit tests for _resource module. from __future__ import absolute_import, print_function -import unittest import time +import re from collections import OrderedDict from zhmcclient import BaseResource, BaseManager, Session @@ -66,12 +65,12 @@ class MyManager(BaseManager): raise NotImplemented -class ResourceTestCase(unittest.TestCase): +class ResourceTestCase(object): """ Base class for all tests in this file. """ - def setUp(self): + def setup_method(self): self.session = Session(host='fake-host') self.mgr = MyManager(self.session) self.uri = self.mgr._base_uri + '/deadbeef-beef-beef-beef-deadbeefbeef' @@ -85,23 +84,21 @@ class ResourceTestCase(unittest.TestCase): """ # Check that the properties member is a dict - self.assertTrue(isinstance(resource.properties, dict)) + assert isinstance(resource.properties, dict) # Verify that the resource properties are as expected - self.assertEqual( - len(resource.properties), len(exp_props), - "Set of properties does not match. Expected {!r}, got {!r}". - format(resource.properties.keys(), exp_props.keys())) + assert len(resource.properties) == len(exp_props), \ + "Set of properties does not match. Expected {!r}, got {!r}". \ + format(resource.properties.keys(), exp_props.keys()) for name, exp_value in exp_props.items(): act_value = resource.properties[name] - self.assertEqual( - act_value, exp_value, - "Property {!r} does not match. Expected {!r}, got {!r}". 
- format(name, exp_value, act_value)) + assert act_value == exp_value, \ + "Property {!r} does not match. Expected {!r}, got {!r}". \ + format(name, exp_value, act_value) -class InitTests(ResourceTestCase): +class TestInit(ResourceTestCase): """Test BaseResource initialization.""" def test_empty_name(self): @@ -114,12 +111,12 @@ class InitTests(ResourceTestCase): res = MyResource(self.mgr, self.uri, self.name, init_props) - self.assertTrue(res.manager is self.mgr) - self.assertEqual(res.uri, self.uri) - self.assertEqual(res.name, self.name) + assert res.manager is self.mgr + assert res.uri == self.uri + assert res.name == self.name self.assert_properties(res, res_props) - self.assertTrue(int(time.time()) - res.properties_timestamp <= 1) - self.assertEqual(res.full_properties, False) + assert int(time.time()) - res.properties_timestamp <= 1 + assert res.full_properties is False def test_empty_no_name(self): """Test with an empty set of input properties, without 'name'.""" @@ -130,11 +127,11 @@ class InitTests(ResourceTestCase): res = MyResource(self.mgr, self.uri, None, init_props) - self.assertTrue(res.manager is self.mgr) - self.assertEqual(res.uri, self.uri) + assert res.manager is self.mgr + assert res.uri == self.uri self.assert_properties(res, res_props) - self.assertTrue(int(time.time()) - res.properties_timestamp <= 1) - self.assertEqual(res.full_properties, False) + assert int(time.time()) - res.properties_timestamp <= 1 + assert res.full_properties is False def test_simple(self): """Test with a simple set of input properties.""" @@ -150,11 +147,11 @@ class InitTests(ResourceTestCase): res = MyResource(self.mgr, self.uri, None, init_props) - self.assertTrue(res.manager is self.mgr) - self.assertEqual(res.uri, self.uri) + assert res.manager is self.mgr + assert res.uri == self.uri self.assert_properties(res, res_props) - self.assertTrue(int(time.time()) - res.properties_timestamp <= 1) - self.assertEqual(res.full_properties, False) + assert int(time.time()) 
- res.properties_timestamp <= 1 + assert res.full_properties is False def test_prop_case(self): """Test case sensitivity for the input properties.""" @@ -170,11 +167,11 @@ class InitTests(ResourceTestCase): res = MyResource(self.mgr, self.uri, None, init_props) - self.assertTrue(res.manager is self.mgr) - self.assertEqual(res.uri, self.uri) + assert res.manager is self.mgr + assert res.uri == self.uri self.assert_properties(res, res_props) - self.assertTrue(int(time.time()) - res.properties_timestamp <= 1) - self.assertEqual(res.full_properties, False) + assert int(time.time()) - res.properties_timestamp <= 1 + assert res.full_properties is False def test_invalid_type(self): """Test that input properties with an invalid type fail.""" @@ -201,10 +198,9 @@ class InitTests(ResourceTestCase): str_str = str_str.replace('\n', '\\n') # We check just the begin of the string: - self.assertRegexpMatches( - str_str, - r'^{classname}\s*\(.*'.format( - classname=resource.__class__.__name__)) + assert re.match(r'^{classname}\s*\(.*'. + format(classname=resource.__class__.__name__), + str_str) def test_repr(self): """Test BaseResource.__repr__().""" @@ -218,14 +214,13 @@ class InitTests(ResourceTestCase): repr_str = repr_str.replace('\n', '\\n') # We check just the begin of the string: - self.assertRegexpMatches( - repr_str, - r'^{classname}\s+at\s+0x{id:08x}\s+\(\\n.*'.format( - classname=resource.__class__.__name__, - id=id(resource))) + assert re.match(r'^{classname}\s+at\s+0x{id:08x}\s+\(\\n.*'. 
+ format(classname=resource.__class__.__name__, + id=id(resource)), + repr_str) -class PropertySetTests(ResourceTestCase): +class TestPropertySet(ResourceTestCase): """Test BaseResource by setting properties.""" def test_add_to_empty(self): @@ -271,7 +266,7 @@ class PropertySetTests(ResourceTestCase): self.assert_properties(res, res_props) -class PropertyDelTests(ResourceTestCase): +class TestPropertyDel(ResourceTestCase): """Test BaseResource by deleting properties.""" def test_del_one(self): @@ -344,10 +339,10 @@ class PropertyDelTests(ResourceTestCase): res.properties.clear() - self.assertEqual(len(res.properties), 0) + assert len(res.properties) == 0 -class ManagerDivideFilterTests(ResourceTestCase): +class TestManagerDivideFilter(ResourceTestCase): """Test the _divide_filter_args() method of BaseManager.""" # Reserved chars are defined in RFC 3986 as gen-delims and sub-delims. @@ -369,8 +364,8 @@ class ManagerDivideFilterTests(ResourceTestCase): parm_str, cf_args = self.mgr._divide_filter_args(filter_args) - self.assertEqual(parm_str, '') - self.assertEqual(cf_args, {}) + assert parm_str == '' + assert cf_args == {} def test_empty(self): """Test with an empty set of filter arguments.""" @@ -378,8 +373,8 @@ class ManagerDivideFilterTests(ResourceTestCase): parm_str, cf_args = self.mgr._divide_filter_args(filter_args) - self.assertEqual(parm_str, '') - self.assertEqual(cf_args, {}) + assert parm_str == '' + assert cf_args == {} def test_one_string_qp(self): """Test with one string filter argument that is a query parm.""" @@ -387,8 +382,8 @@ class ManagerDivideFilterTests(ResourceTestCase): parm_str, cf_args = self.mgr._divide_filter_args(filter_args) - self.assertEqual(parm_str, '?qp1=bar') - self.assertEqual(cf_args, {}) + assert parm_str == '?qp1=bar' + assert cf_args == {} def test_one_string_cf(self): """Test with one string filter argument that is a client filter.""" @@ -396,8 +391,8 @@ class ManagerDivideFilterTests(ResourceTestCase): parm_str, cf_args = 
self.mgr._divide_filter_args(filter_args) - self.assertEqual(parm_str, '') - self.assertEqual(cf_args, {'foo': 'bar'}) + assert parm_str == '' + assert cf_args == {'foo': 'bar'} def test_one_integer_qp(self): """Test with one integer filter argument that is a query parm.""" @@ -405,8 +400,8 @@ class ManagerDivideFilterTests(ResourceTestCase): parm_str, cf_args = self.mgr._divide_filter_args(filter_args) - self.assertEqual(parm_str, '?qp2=42') - self.assertEqual(cf_args, {}) + assert parm_str == '?qp2=42' + assert cf_args == {} def test_one_integer_cf(self): """Test with one integer filter argument that is a client filter.""" @@ -414,8 +409,8 @@ class ManagerDivideFilterTests(ResourceTestCase): parm_str, cf_args = self.mgr._divide_filter_args(filter_args) - self.assertEqual(parm_str, '') - self.assertEqual(cf_args, {'foo': 42}) + assert parm_str == '' + assert cf_args == {'foo': 42} def test_one_str_reserved_val_qp(self): """Test with one string filter argument with reserved URI chars in @@ -426,8 +421,8 @@ class ManagerDivideFilterTests(ResourceTestCase): parm_str, cf_args = self.mgr._divide_filter_args(filter_args) - self.assertEqual(parm_str, '?qp1={}'.format(escape_str)) - self.assertEqual(cf_args, {}) + assert parm_str == '?qp1={}'.format(escape_str) + assert cf_args == {} def test_one_str_reserved_val_cf(self): """Test with one string filter argument with reserved URI chars in @@ -437,8 +432,8 @@ class ManagerDivideFilterTests(ResourceTestCase): parm_str, cf_args = self.mgr._divide_filter_args(filter_args) - self.assertEqual(parm_str, '') - self.assertEqual(cf_args, {'foo': char_str}) + assert parm_str == '' + assert cf_args == {'foo': char_str} def test_one_str_dash_name_qp(self): """Test with one string filter argument with a dash in its name that is @@ -448,8 +443,8 @@ class ManagerDivideFilterTests(ResourceTestCase): parm_str, cf_args = self.mgr._divide_filter_args(filter_args) - self.assertEqual(parm_str, '?foo-boo=bar') - self.assertEqual(cf_args, {}) + 
assert parm_str == '?foo-boo=bar' + assert cf_args == {} def test_one_str_reserved_name_qp(self): """Test with one string filter argument with reserved URI chars in @@ -461,8 +456,8 @@ class ManagerDivideFilterTests(ResourceTestCase): parm_str, cf_args = self.mgr._divide_filter_args(filter_args) - self.assertEqual(parm_str, '?{}=bar'.format(escape_str)) - self.assertEqual(cf_args, {}) + assert parm_str == '?{}=bar'.format(escape_str) + assert cf_args == {} def test_two_qp(self): """Test with two filter arguments that are query parms.""" @@ -470,8 +465,8 @@ class ManagerDivideFilterTests(ResourceTestCase): parm_str, cf_args = self.mgr._divide_filter_args(filter_args) - self.assertEqual(parm_str, '?qp1=bar&qp2=42') - self.assertEqual(cf_args, {}) + assert parm_str == '?qp1=bar&qp2=42' + assert cf_args == {} def test_two_qp_cf(self): """Test with two filter arguments where one is a query parm and one is @@ -480,8 +475,8 @@ class ManagerDivideFilterTests(ResourceTestCase): parm_str, cf_args = self.mgr._divide_filter_args(filter_args) - self.assertEqual(parm_str, '?qp1=bar') - self.assertEqual(cf_args, {'foo': 42}) + assert parm_str == '?qp1=bar' + assert cf_args == {'foo': 42} def test_two_cf_qp(self): """Test with two filter arguments where one is a client filter and one @@ -490,8 +485,8 @@ class ManagerDivideFilterTests(ResourceTestCase): parm_str, cf_args = self.mgr._divide_filter_args(filter_args) - self.assertEqual(parm_str, '?qp1=42') - self.assertEqual(cf_args, {'foo': 'bar'}) + assert parm_str == '?qp1=42' + assert cf_args == {'foo': 'bar'} def test_two_two_qp(self): """Test with two filter arguments, one of which is a list of two, and @@ -500,8 +495,8 @@ class ManagerDivideFilterTests(ResourceTestCase): parm_str, cf_args = self.mgr._divide_filter_args(filter_args) - self.assertEqual(parm_str, '?qp1=bar&qp2=42&qp2=7') - self.assertEqual(cf_args, {}) + assert parm_str == '?qp1=bar&qp2=42&qp2=7' + assert cf_args == {} def test_two_str_reserved_val_qp(self): 
"""Test with two filter arguments, one of which is a list of two, and @@ -512,9 +507,5 @@ class ManagerDivideFilterTests(ResourceTestCase): parm_str, cf_args = self.mgr._divide_filter_args(filter_args) - self.assertEqual(parm_str, '?qp1=bar&qp2=42&qp2={}'.format(escape_str)) - self.assertEqual(cf_args, {}) - - -if __name__ == '__main__': - unittest.main() + assert parm_str == '?qp1=bar&qp2=42&qp2={}'.format(escape_str) + assert cf_args == {} diff --git a/tests/unit/test_session.py b/tests/unit/test_session.py old mode 100755 new mode 100644 index 604c23d..3a3538f --- a/tests/unit/test_session.py +++ b/tests/unit/test_session.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016-2017 IBM Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,18 +18,21 @@ Unit tests for _session module. from __future__ import absolute_import, print_function -import unittest -import requests -import requests_mock import time import json +import re +import requests +import requests_mock +import mock +import pytest -from zhmcclient import Session, ParseError, Job, HTTPError, OperationTimeout +from zhmcclient import Session, ParseError, Job, HTTPError, OperationTimeout, \ + ClientAuthError -class SessionTests(unittest.TestCase): +class TestSession(object): """ - Test the ``Session`` class. + All tests for the Session class. 
""" # TODO: Test Session.get() in all variations (including errors) @@ -50,22 +52,49 @@ class SessionTests(unittest.TestCase): headers={'X-Request-Id': 'fake-request-id'}, status_code=204) - def test_init(self): + @pytest.mark.parametrize( + "host, userid, password, use_get_password, session_id", [ + ('fake-host', None, None, False, None), + ('fake-host', 'fake-userid', None, False, None), + ('fake-host', 'fake-userid', 'fake-pw', False, None), + ('fake-host', 'fake-userid', 'fake-pw', True, None), + ] + ) + def test_init(self, host, userid, password, use_get_password, session_id): """Test initialization of Session object.""" - session = Session('fake-host', 'fake-user', 'fake-pw') + # TODO: Add support for input parameter: retry_timeout_config + # TODO: Add support for input parameter: time_stats_keeper + + if use_get_password: + def get_password(host, userid): + pw = 'fake-pw-{}-{}'.format(host, userid) + return pw + else: + get_password = None + + session = Session(host, userid, password, session_id, get_password) - self.assertEqual(session.host, 'fake-host') - self.assertEqual(session.userid, 'fake-user') - self.assertEqual(session._password, 'fake-pw') - base_url = 'https://' + session.host + ':6794' - self.assertEqual(session.base_url, base_url) - self.assertTrue('Content-type' in session.headers) - self.assertTrue('Accept' in session.headers) - self.assertEqual(len(session.headers), 2) - self.assertIsNone(session.session_id) - self.assertTrue('X-API-Session' not in session.headers) - self.assertIsNone(session.session) + assert session.host == host + assert session.userid == userid + assert session._password == password + assert session.session_id == session_id + assert session.get_password == get_password + + base_url = 'https://{}:6794'.format(session.host) + assert session.base_url == base_url + + assert session.headers['Content-type'] == 'application/json' + assert session.headers['Accept'] == '*/*' + + if session_id is None: + assert session.session is 
None + assert 'X-API-Session' not in session.headers + assert len(session.headers) == 2 + else: + assert isinstance(session.session, requests.Session) + assert session.headers['X-API-Session'] == session_id + assert len(session.headers) == 3 def test_repr(self): """Test Session.__repr__().""" @@ -76,47 +105,100 @@ class SessionTests(unittest.TestCase): repr_str = repr_str.replace('\n', '\\n') # We check just the begin of the string: - self.assertRegexpMatches( - repr_str, - r'^{classname}\s+at\s+0x{id:08x}\s+\(\\n.*'.format( - classname=session.__class__.__name__, - id=id(session))) + assert re.match(r'^{classname}\s+at\s+0x{id:08x}\s+\(\\n.*'. + format(classname=session.__class__.__name__, + id=id(session)), + repr_str) + + @pytest.mark.parametrize( + "host, userid, password, use_get_password, exp_exc", [ + ('fake-host', None, None, False, ClientAuthError), + ('fake-host', 'fake-userid', None, False, ClientAuthError), + ('fake-host', 'fake-userid', 'fake-pw', False, None), + ('fake-host', 'fake-userid', 'fake-pw', True, None), + ('fake-host', 'fake-userid', None, True, None), + ] + ) + def test_logon( + self, host, userid, password, use_get_password, exp_exc): + """Test Session.logon() (and also Session.is_logon()).""" + + with requests_mock.Mocker() as m: - @requests_mock.mock() - def test_logon_logoff(self, m): - """Test logon and logoff; this uses post() and delete().""" + self.mock_server_1(m) - self.mock_server_1(m) + if use_get_password: + get_password = mock.MagicMock() + get_password.return_value = \ + 'fake-pw-{}-{}'.format(host, userid) + else: + get_password = None - session = Session('fake-host', 'fake-user', 'fake-pw') + # Create a session in logged-off state + session = Session(host, userid, password, None, get_password) + + assert session.session_id is None + assert 'X-API-Session' not in session.headers + assert session.session is None + + logged_on = session.is_logon() + assert not logged_on - self.assertIsNone(session.session_id) - 
self.assertTrue('X-API-Session' not in session.headers) - self.assertIsNone(session.session) + if exp_exc: + try: - logged_on = session.is_logon() + # The code to be tested: + session.logon() - self.assertFalse(logged_on) + except exp_exc: + pass - session.logon() + logged_on = session.is_logon() + assert not logged_on + else: - self.assertEqual(session.session_id, 'fake-session-id') - self.assertTrue('X-API-Session' in session.headers) - self.assertTrue(isinstance(session.session, requests.Session)) + # The code to be tested: + session.logon() - logged_on = session.is_logon() + assert session.session_id == 'fake-session-id' + assert 'X-API-Session' in session.headers + assert isinstance(session.session, requests.Session) - self.assertTrue(logged_on) + if get_password: + if password is None: + get_password.assert_called_with(host, userid) + assert session._password == get_password.return_value + else: + get_password.assert_not_called() - session.logoff() + logged_on = session.is_logon() + assert logged_on - self.assertIsNone(session.session_id) - self.assertTrue('X-API-Session' not in session.headers) - self.assertIsNone(session.session) + def test_logoff(self): + """Test Session.logoff() (and also Session.is_logon()).""" - logged_on = session.is_logon() + with requests_mock.Mocker() as m: - self.assertFalse(logged_on) + self.mock_server_1(m) + + # Create a session in logged-off state + session = Session('fake-host', 'fake-userid', 'fake-pw') + + session.logon() + + logged_on = session.is_logon() + assert logged_on + + # The code to be tested: + session.logoff() + + assert session.session_id is None + assert session.session is None + assert 'X-API-Session' not in session.headers + assert len(session.headers) == 2 + + logged_on = session.is_logon() + assert not logged_on def _do_parse_error_logon(self, m, json_content, exp_msg_pattern, exp_line, exp_col): @@ -140,16 +222,22 @@ class SessionTests(unittest.TestCase): r"Response status .*" % \ exp_msg_pattern - with 
self.assertRaisesRegexp(ParseError, exp_pe_pattern) as cm: + with pytest.raises(ParseError) as exc_info: session.logon() - self.assertEqual(cm.exception.line, exp_line) - self.assertEqual(cm.exception.column, exp_col) + exc = exc_info.value + + assert re.match(exp_pe_pattern, str(exc)) + assert exc.line == exp_line + assert exc.column == exp_col + + # TODO: Merge the next 3 test functions into one that is parametrized @requests_mock.mock() - def test_logon_error_invalid_delim(self, m): + def test_logon_error_invalid_delim(self, *args): """ Logon with invalid JSON response that has an invalid delimiter. """ + m = args[0] json_content = b'{\n"api-session"; "fake-session-id"\n}' exp_msg_pattern = r"Expecting ':' delimiter: .*" exp_line = 2 @@ -158,10 +246,11 @@ class SessionTests(unittest.TestCase): exp_col) @requests_mock.mock() - def test_logon_error_invalid_quotes(self, m): + def test_logon_error_invalid_quotes(self, *args): """ Logon with invalid JSON response that incorrectly uses single quotes. """ + m = args[0] json_content = b'{\'api-session\': \'fake-session-id\'}' exp_msg_pattern = r"Expecting property name enclosed in double " \ "quotes: .*" @@ -171,10 +260,11 @@ class SessionTests(unittest.TestCase): exp_col) @requests_mock.mock() - def test_logon_error_extra_closing(self, m): + def test_logon_error_extra_closing(self, *args): """ Logon with invalid JSON response that has an extra closing brace. """ + m = args[0] json_content = b'{"api-session": "fake-session-id"}}' exp_msg_pattern = r"Extra data: .*" exp_line = 1 @@ -217,7 +307,7 @@ class SessionTests(unittest.TestCase): result = session.get_notification_topics() - self.assertEqual(result, gnt_result['topics']) + assert result == gnt_result['topics'] m.delete('/api/sessions/this-session', status_code=204) @@ -268,18 +358,18 @@ class SessionTests(unittest.TestCase): "Console Configuration Error: " \ "Web Services API is not enabled on the HMC." 
- with self.assertRaises(HTTPError) as cm: + with pytest.raises(HTTPError) as exc_info: session.get(get_uri, logon_required=False) - exc = cm.exception + exc = exc_info.value - self.assertEqual(exc.http_status, get_resp_status) - self.assertEqual(exc.reason, exp_reason) - self.assertEqual(exc.message, exp_message) - self.assertTrue(exc.request_uri.endswith(get_uri)) - self.assertEqual(exc.request_method, 'GET') + assert exc.http_status == get_resp_status + assert exc.reason == exp_reason + assert exc.message == exp_message + assert exc.request_uri.endswith(get_uri) + assert exc.request_method == 'GET' -class JobTests(unittest.TestCase): +class TestJob(object): """ Test the ``Job`` class. """ @@ -299,6 +389,8 @@ class JobTests(unittest.TestCase): headers={'X-Request-Id': 'fake-request-id'}, status_code=204) + # TODO: Add parametrization to the next test function. + def test_init(self): """Test initialization of Job object.""" session = Session('fake-host', 'fake-user', 'fake-pw') @@ -311,10 +403,12 @@ class JobTests(unittest.TestCase): job = Job(session, self.job_uri, op_method, op_uri) - self.assertEqual(job.uri, self.job_uri) - self.assertEqual(job.session, session) - self.assertEqual(job.op_method, op_method) - self.assertEqual(job.op_uri, op_uri) + assert job.uri == self.job_uri + assert job.session == session + assert job.op_method == op_method + assert job.op_uri == op_uri + + # TODO: Merge the next 7 test functions into one that is parametrized def test_check_incomplete(self): """Test check_for_completion() with incomplete job.""" @@ -332,8 +426,8 @@ class JobTests(unittest.TestCase): job_status, op_result = job.check_for_completion() - self.assertEqual(job_status, 'running') - self.assertIsNone(op_result) + assert job_status == 'running' + assert op_result is None def test_check_complete_success_noresult(self): """Test check_for_completion() with successful complete job without @@ -355,8 +449,8 @@ class JobTests(unittest.TestCase): job_status, op_result = 
job.check_for_completion() - self.assertEqual(job_status, 'complete') - self.assertIsNone(op_result) + assert job_status == 'complete' + assert op_result is None def test_check_complete_success_result(self): """Test check_for_completion() with successful complete job with a @@ -381,8 +475,8 @@ class JobTests(unittest.TestCase): job_status, op_result = job.check_for_completion() - self.assertEqual(job_status, 'complete') - self.assertEqual(op_result, exp_op_result) + assert job_status == 'complete' + assert op_result == exp_op_result def test_check_complete_error1(self): """Test check_for_completion() with complete job in error (1).""" @@ -402,12 +496,13 @@ class JobTests(unittest.TestCase): m.get(self.job_uri, json=query_job_status_result) m.delete(self.job_uri, status_code=204) - with self.assertRaises(HTTPError) as cm: + with pytest.raises(HTTPError) as exc_info: job_status, op_result = job.check_for_completion() + exc = exc_info.value - self.assertEqual(cm.exception.http_status, 500) - self.assertEqual(cm.exception.reason, 42) - self.assertEqual(cm.exception.message, None) + assert exc.http_status == 500 + assert exc.reason == 42 + assert exc.message is None def test_check_complete_error2(self): """Test check_for_completion() with complete job in error (2).""" @@ -427,12 +522,13 @@ class JobTests(unittest.TestCase): m.get(self.job_uri, json=query_job_status_result) m.delete(self.job_uri, status_code=204) - with self.assertRaises(HTTPError) as cm: + with pytest.raises(HTTPError) as exc_info: job_status, op_result = job.check_for_completion() + exc = exc_info.value - self.assertEqual(cm.exception.http_status, 500) - self.assertEqual(cm.exception.reason, 42) - self.assertEqual(cm.exception.message, None) + assert exc.http_status == 500 + assert exc.reason == 42 + assert exc.message is None def test_check_complete_error3(self): """Test check_for_completion() with complete job in error (3).""" @@ -456,12 +552,13 @@ class JobTests(unittest.TestCase): 
m.get(self.job_uri, json=query_job_status_result) m.delete(self.job_uri, status_code=204) - with self.assertRaises(HTTPError) as cm: + with pytest.raises(HTTPError) as exc_info: job_status, op_result = job.check_for_completion() + exc = exc_info.value - self.assertEqual(cm.exception.http_status, 500) - self.assertEqual(cm.exception.reason, 42) - self.assertEqual(cm.exception.message, 'bla message') + assert exc.http_status == 500 + assert exc.reason == 42 + assert exc.message == 'bla message' def test_check_complete_error4(self): """Test check_for_completion() with complete job in error (4).""" @@ -485,12 +582,15 @@ class JobTests(unittest.TestCase): m.get(self.job_uri, json=query_job_status_result) m.delete(self.job_uri, status_code=204) - with self.assertRaises(HTTPError) as cm: + with pytest.raises(HTTPError) as exc_info: job_status, op_result = job.check_for_completion() + exc = exc_info.value + + assert exc.http_status == 500 + assert exc.reason == 42 + assert exc.message == 'bla message' - self.assertEqual(cm.exception.http_status, 500) - self.assertEqual(cm.exception.reason, 42) - self.assertEqual(cm.exception.message, 'bla message') + # TODO: Merge the next 3 test functions into one that is parametrized def test_wait_complete1_success_result(self): """Test wait_for_completion() with successful complete job with a @@ -515,7 +615,7 @@ class JobTests(unittest.TestCase): op_result = job.wait_for_completion() - self.assertEqual(op_result, exp_op_result) + assert op_result == exp_op_result def test_wait_complete3_success_result(self): """Test wait_for_completion() with successful complete job with a @@ -538,7 +638,7 @@ class JobTests(unittest.TestCase): op_result = job.wait_for_completion() - self.assertEqual(op_result, exp_op_result) + assert op_result == exp_op_result def test_wait_complete3_timeout(self): """Test wait_for_completion() with timeout.""" @@ -577,8 +677,7 @@ class JobTests(unittest.TestCase): "timeout: %s s" % (duration, operation_timeout)) except 
OperationTimeout as exc: msg = exc.args[0] - self.assertTrue(msg.startswith( - "Waiting for completion of job")) + assert msg.startswith("Waiting for completion of job") def result_running_callback(request, context): @@ -601,7 +700,3 @@ def result_complete_callback(request, context): } time.sleep(1) return json.dumps(job_result_complete) - - -if __name__ == '__main__': - unittest.main() diff --git a/tests/unit/test_timestats.py b/tests/unit/test_timestats.py old mode 100755 new mode 100644 index abbc74e..284540a --- a/tests/unit/test_timestats.py +++ b/tests/unit/test_timestats.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016-2017 IBM Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,7 +19,7 @@ Tests for time statistics (`_timestats` module). from __future__ import absolute_import, print_function import time -import unittest +import pytest from zhmcclient import TimeStatsKeeper, TimeStats @@ -56,7 +55,7 @@ def measure(stats, duration): return end - begin -class TimeStatsTests(unittest.TestCase): +class TestTimeStats(object): """All tests for TimeStatsKeeper and TimeStats.""" def test_enabling(self): @@ -64,70 +63,67 @@ class TimeStatsTests(unittest.TestCase): keeper = TimeStatsKeeper() - self.assertFalse(keeper.enabled, - "Verify that initial state is disabled") + assert not keeper.enabled, \ + "Verify that initial state is disabled" keeper.disable() - self.assertFalse(keeper.enabled, - "Verify that disabling a disabled keeper works") + assert not keeper.enabled, \ + "Verify that disabling a disabled keeper works" keeper.enable() - self.assertTrue(keeper.enabled, - "Verify that enabling a disabled keeper works") + assert keeper.enabled, \ + "Verify that enabling a disabled keeper works" keeper.enable() - self.assertTrue(keeper.enabled, - "Verify that enabling an enabled keeper works") + assert keeper.enabled, \ + "Verify that enabling an enabled keeper works" keeper.disable() - 
self.assertFalse(keeper.enabled, - "Verify that disabling an enabled keeper works") + assert not keeper.enabled, \ + "Verify that disabling an enabled keeper works" def test_get(self): """Test getting time statistics.""" keeper = TimeStatsKeeper() snapshot_length = len(keeper.snapshot()) - self.assertEqual(snapshot_length, 0, - "Verify that initial state has no time statistics. " - "Actual number = %d" % snapshot_length) + assert snapshot_length == 0, \ + "Verify that initial state has no time statistics. " \ + "Actual number = %d" % snapshot_length stats = keeper.get_stats('foo') snapshot_length = len(keeper.snapshot()) - self.assertEqual(snapshot_length, 0, - "Verify that getting a new stats with a disabled " - "keeper results in no time statistics. " - "Actual number = %d" % snapshot_length) - self.assertEqual(stats.keeper, keeper) - self.assertEqual(stats.name, "disabled") # stats for disabled keeper - self.assertEqual(stats.count, 0) - self.assertEqual(stats.avg_time, 0) - self.assertEqual(stats.min_time, float('inf')) - self.assertEqual(stats.max_time, 0) + assert snapshot_length == 0, \ + "Verify that getting a new stats with a disabled keeper results " \ + "in no time statistics. Actual number = %d" % snapshot_length + assert stats.keeper == keeper + assert stats.name == "disabled" # stats for disabled keeper + assert stats.count == 0 + assert stats.avg_time == 0 + assert stats.min_time == float('inf') + assert stats.max_time == 0 keeper.enable() stats = keeper.get_stats('foo') snapshot_length = len(keeper.snapshot()) - self.assertEqual(snapshot_length, 1, - "Verify that getting a new stats with an enabled " - "keeper results in one time statistics. 
" - "Actual number = %d" % snapshot_length) - - self.assertEqual(stats.keeper, keeper) - self.assertEqual(stats.name, 'foo') - self.assertEqual(stats.count, 0) - self.assertEqual(stats.avg_time, 0) - self.assertEqual(stats.min_time, float('inf')) - self.assertEqual(stats.max_time, 0) + assert snapshot_length == 1, \ + "Verify that getting a new stats with an enabled keeper results " \ + "in one time statistics. Actual number = %d" % snapshot_length + + assert stats.keeper == keeper + assert stats.name == 'foo' + assert stats.count == 0 + assert stats.avg_time == 0 + assert stats.min_time == float('inf') + assert stats.max_time == 0 keeper.get_stats('foo') snapshot_length = len(keeper.snapshot()) - self.assertEqual(snapshot_length, 1, - "Verify that getting an existing stats with an " - "enabled keeper results in the same number of time " - "statistics. " - "Actual number = %d" % snapshot_length) + assert snapshot_length == 1, \ + "Verify that getting an existing stats with an enabled keeper " \ + "results in the same number of time statistics. 
" \ + "Actual number = %d" % snapshot_length def test_measure_enabled(self): """Test measuring time with enabled keeper.""" @@ -145,22 +141,22 @@ class TimeStatsTests(unittest.TestCase): stats_dict = keeper.snapshot() for op_name in stats_dict: stats = stats_dict[op_name] - self.assertEqual(stats.count, 1) - self.assertLess(time_abs_delta(stats.avg_time, dur), delta, - "avg time: actual: %f, expected: %f, delta: %f" % - (stats.avg_time, dur, delta)) - self.assertLess(time_abs_delta(stats.min_time, dur), delta, - "min time: actual: %f, expected: %f, delta: %f" % - (stats.min_time, dur, delta)) - self.assertLess(time_abs_delta(stats.max_time, dur), delta, - "max time: actual: %f, expected: %f, delta: %f" % - (stats.max_time, dur, delta)) + assert stats.count == 1 + assert time_abs_delta(stats.avg_time, dur) < delta, \ + "avg time: actual: %f, expected: %f, delta: %f" % \ + (stats.avg_time, dur, delta) + assert time_abs_delta(stats.min_time, dur) < delta, \ + "min time: actual: %f, expected: %f, delta: %f" % \ + (stats.min_time, dur, delta) + assert time_abs_delta(stats.max_time, dur) < delta, \ + "max time: actual: %f, expected: %f, delta: %f" % \ + (stats.max_time, dur, delta) stats.reset() - self.assertEqual(stats.count, 0) - self.assertEqual(stats.avg_time, 0) - self.assertEqual(stats.min_time, float('inf')) - self.assertEqual(stats.max_time, 0) + assert stats.count == 0 + assert stats.avg_time == 0 + assert stats.min_time == float('inf') + assert stats.max_time == 0 def test_measure_disabled(self): """Test measuring time with disabled keeper.""" @@ -170,7 +166,7 @@ class TimeStatsTests(unittest.TestCase): duration = 0.2 stats = keeper.get_stats('foo') - self.assertEqual(stats.name, 'disabled') + assert stats.name == 'disabled' stats.begin() time.sleep(duration) @@ -179,10 +175,10 @@ class TimeStatsTests(unittest.TestCase): stats_dict = keeper.snapshot() for op_name in stats_dict: stats = stats_dict[op_name] - self.assertEqual(stats.count, 0) - 
self.assertEqual(stats.avg_time, 0) - self.assertEqual(stats.min_time, float('inf')) - self.assertEqual(stats.max_time, 0) + assert stats.count == 0 + assert stats.avg_time == 0 + assert stats.min_time == float('inf') + assert stats.max_time == 0 def test_snapshot(self): """Test that snapshot() takes a stable snapshot.""" @@ -210,10 +206,10 @@ class TimeStatsTests(unittest.TestCase): # verify that only the first data item is in the snapshot for op_name in snap_stats_dict: snap_stats = snap_stats_dict[op_name] - self.assertEqual(snap_stats.count, 1) + assert snap_stats.count == 1 # verify that both data items are in the original stats object - self.assertEqual(stats.count, 2) + assert stats.count == 2 def test_measure_avg_min_max(self): """Test measuring avg min max values.""" @@ -238,16 +234,16 @@ class TimeStatsTests(unittest.TestCase): stats_dict = keeper.snapshot() for op_name in stats_dict: stats = stats_dict[op_name] - self.assertEqual(stats.count, 3) - self.assertLess(time_abs_delta(stats.avg_time, avg_dur), delta, - "avg time: actual: %f, expected: %f, delta: %f" % - (stats.avg_time, avg_dur, delta)) - self.assertLess(time_abs_delta(stats.min_time, min_dur), delta, - "min time: actual: %f, expected: %f, delta: %f" % - (stats.min_time, min_dur, delta)) - self.assertLess(time_abs_delta(stats.max_time, max_dur), delta, - "max time: actual: %f, expected: %f, delta: %f" % - (stats.max_time, max_dur, delta)) + assert stats.count == 3 + assert time_abs_delta(stats.avg_time, avg_dur) < delta, \ + "avg time: actual: %f, expected: %f, delta: %f" % \ + (stats.avg_time, avg_dur, delta) + assert time_abs_delta(stats.min_time, min_dur) < delta, \ + "min time: actual: %f, expected: %f, delta: %f" % \ + (stats.min_time, min_dur, delta) + assert time_abs_delta(stats.max_time, max_dur) < delta, \ + "max time: actual: %f, expected: %f, delta: %f" % \ + (stats.max_time, max_dur, delta) def test_only_end(self): """Test that invoking end() before begin() has ever been called 
raises @@ -257,7 +253,7 @@ class TimeStatsTests(unittest.TestCase): keeper.enable() stats = keeper.get_stats('foo') - with self.assertRaises(RuntimeError): + with pytest.raises(RuntimeError): stats.end() def test_end_after_end(self): @@ -272,7 +268,7 @@ class TimeStatsTests(unittest.TestCase): time.sleep(0.01) stats.end() - with self.assertRaises(RuntimeError): + with pytest.raises(RuntimeError): stats.end() def test_str_empty(self): @@ -281,14 +277,14 @@ class TimeStatsTests(unittest.TestCase): keeper = TimeStatsKeeper() keeper.enable() s = str(keeper) - self.assertEqual(s, PRINT_HEADER) + assert s == PRINT_HEADER def test_str_disabled(self): """Test TimestatsKeeper.__str__() for a disabled keeper.""" keeper = TimeStatsKeeper() s = str(keeper) - self.assertEqual(s, PRINT_HEADER_DISABLED) + assert s == PRINT_HEADER_DISABLED def test_str_one(self): """Test TimestatsKeeper.__str__() for an enabled keeper with one data @@ -307,11 +303,11 @@ class TimeStatsTests(unittest.TestCase): stats.end() s = str(keeper) - self.assertTrue(s.startswith(PRINT_HEADER), - "Unexpected str(keeper): %r" % s) + assert s.startswith(PRINT_HEADER), \ + "Unexpected str(keeper): %r" % s num_lines = len(s.split('\n')) - self.assertEqual(num_lines, 3, - "Unexpected str(keeper): %r" % s) + assert num_lines == 3, \ + "Unexpected str(keeper): %r" % s def test_ts_str(self): """Test Timestats.__str__().""" @@ -320,12 +316,8 @@ class TimeStatsTests(unittest.TestCase): timestats = TimeStats(keeper, "foo") s = str(timestats) - self.assertTrue(s.startswith("TimeStats:"), - "Unexpected str(timestats): %r" % s) + assert s.startswith("TimeStats:"), \ + "Unexpected str(timestats): %r" % s num_lines = len(s.split('\n')) - self.assertEqual(num_lines, 1, - "Unexpected str(timestats): %r" % s) - - -if __name__ == '__main__': - unittest.main() + assert num_lines == 1, \ + "Unexpected str(timestats): %r" % s diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index b30173c..79de959 100644 --- 
a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -162,7 +162,7 @@ class TestPythonDatetime(object): st = time.gmtime(0) assert st == epoch_st - def test_print_gmtime_max(self): + def x_test_print_gmtime_max(self): """Print the maximum for time.gmtime().""" max_ts = find_max_value(time.gmtime, 1) max_st = time.gmtime(max_ts) @@ -170,7 +170,7 @@ class TestPythonDatetime(object): format(max_ts, max_st)) sys.stdout.flush() - def test_print_fromtimestamp_max(self): + def x_test_print_fromtimestamp_max(self): """Print the maximum for datetime.fromtimestamp(utc).""" def datetime_fromtimestamp_utc(ts): @@ -183,7 +183,7 @@ class TestPythonDatetime(object): format(max_ts, max_dt)) sys.stdout.flush() - def test_print_datetime_max(self): + def x_test_print_datetime_max(self): """Print datetime.max.""" print("\nMax value for Python datetime (datetime.max): {!r}". format(datetime.max)) @@ -233,7 +233,7 @@ class TestDatetimeFromTimestamp(object): # Verify the result assert isinstance(exc_info.value, exc_type) - def test_print_max_datetime_from_timestamp(self): + def x_test_print_max_datetime_from_timestamp(self): """Print the maximum for datetime_from_timestamp().""" max_ts = find_max_value(datetime_from_timestamp, 1) max_dt = datetime_from_timestamp(max_ts) diff --git a/tests/unit/test_virtual_function.py b/tests/unit/test_virtual_function.py old mode 100755 new mode 100644 index a4606e0..0d5f3b1 --- a/tests/unit/test_virtual_function.py +++ b/tests/unit/test_virtual_function.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016-2017 IBM Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,18 +18,18 @@ Unit tests for _virtual_function module. from __future__ import absolute_import, print_function -import unittest +# FIXME: Migrate requests_mock to zhmcclient_mock. 
import requests_mock from zhmcclient import Session, Client, VirtualFunction -class VirtualFunctionTests(unittest.TestCase): +class TestVirtualFunction(object): """ All tests for VirtualFunction and VirtualFunctionManager classes. """ - def setUp(self): + def setup_method(self): self.session = Session('test-dpm-host', 'test-user', 'test-id') self.client = Client(self.session) with requests_mock.mock() as m: @@ -108,7 +107,7 @@ class VirtualFunctionTests(unittest.TestCase): partitions = partition_mgr.list(full_properties=True) self.partition = partitions[0] - def tearDown(self): + def teardown_method(self): with requests_mock.mock() as m: m.delete('/api/sessions/this-session', status_code=204) self.session.logoff() @@ -116,7 +115,7 @@ class VirtualFunctionTests(unittest.TestCase): def test_init(self): """Test __init__() on VirtualFunctionManager instance in Partition.""" vf_mgr = self.partition.virtual_functions - self.assertEqual(vf_mgr.partition, self.partition) + assert vf_mgr.partition == self.partition def test_list_short_ok(self): """ @@ -126,18 +125,15 @@ class VirtualFunctionTests(unittest.TestCase): vf_mgr = self.partition.virtual_functions vfs = vf_mgr.list(full_properties=False) - self.assertEqual( - len(vfs), - len(self.partition.properties['virtual-function-uris'])) + assert len(vfs) == \ + len(self.partition.properties['virtual-function-uris']) for idx, vf in enumerate(vfs): - self.assertEqual( - vf.properties['element-uri'], - self.partition.properties['virtual-function-uris'][idx]) - self.assertEqual( - vf.uri, - self.partition.properties['virtual-function-uris'][idx]) - self.assertFalse(vf.full_properties) - self.assertEqual(vf.manager, vf_mgr) + assert vf.properties['element-uri'] == \ + self.partition.properties['virtual-function-uris'][idx] + assert vf.uri == \ + self.partition.properties['virtual-function-uris'][idx] + assert not vf.full_properties + assert vf.manager == vf_mgr def test_list_full_ok(self): """ @@ -179,18 +175,15 @@ class 
VirtualFunctionTests(unittest.TestCase): vfs = vf_mgr.list(full_properties=True) - self.assertEqual( - len(vfs), - len(self.partition.properties['virtual-function-uris'])) + assert len(vfs) == \ + len(self.partition.properties['virtual-function-uris']) for idx, vf in enumerate(vfs): - self.assertEqual( - vf.properties['element-uri'], - self.partition.properties['virtual-function-uris'][idx]) - self.assertEqual( - vf.uri, - self.partition.properties['virtual-function-uris'][idx]) - self.assertTrue(vf.full_properties) - self.assertEqual(vf.manager, vf_mgr) + assert vf.properties['element-uri'] == \ + self.partition.properties['virtual-function-uris'][idx] + assert vf.uri == \ + self.partition.properties['virtual-function-uris'][idx] + assert vf.full_properties + assert vf.manager == vf_mgr def test_list_filter_name_ok(self): """ @@ -233,16 +226,15 @@ class VirtualFunctionTests(unittest.TestCase): filter_args = {'name': 'vf2'} vfs = vf_mgr.list(filter_args=filter_args) - self.assertEqual(len(vfs), 1) + assert len(vfs) == 1 vf = vfs[0] - self.assertEqual(vf.name, 'vf2') - self.assertEqual( - vf.uri, - '/api/partitions/fake-part-id-1/virtual-functions/' - 'fake-vf-id-2') - self.assertEqual(vf.properties['name'], 'vf2') - self.assertEqual(vf.properties['element-id'], 'fake-vf-id-2') - self.assertEqual(vf.manager, vf_mgr) + assert vf.name == 'vf2' + assert vf.uri == \ + '/api/partitions/fake-part-id-1/virtual-functions/' \ + 'fake-vf-id-2' + assert vf.properties['name'] == 'vf2' + assert vf.properties['element-id'] == 'fake-vf-id-2' + assert vf.manager == vf_mgr def test_list_filter_elementid_ok(self): """ @@ -285,16 +277,15 @@ class VirtualFunctionTests(unittest.TestCase): filter_args = {'element-id': 'fake-vf-id-2'} vfs = vf_mgr.list(filter_args=filter_args) - self.assertEqual(len(vfs), 1) + assert len(vfs) == 1 vf = vfs[0] - self.assertEqual(vf.name, 'vf2') - self.assertEqual( - vf.uri, - '/api/partitions/fake-part-id-1/virtual-functions/' - 'fake-vf-id-2') - 
self.assertEqual(vf.properties['name'], 'vf2') - self.assertEqual(vf.properties['element-id'], 'fake-vf-id-2') - self.assertEqual(vf.manager, vf_mgr) + assert vf.name == 'vf2' + assert vf.uri == \ + '/api/partitions/fake-part-id-1/virtual-functions/' \ + 'fake-vf-id-2' + assert vf.properties['name'] == 'vf2' + assert vf.properties['element-id'] == 'fake-vf-id-2' + assert vf.manager == vf_mgr def test_create(self): """ @@ -312,9 +303,9 @@ class VirtualFunctionTests(unittest.TestCase): vf = vf_mgr.create(properties={}) - self.assertTrue(isinstance(vf, VirtualFunction)) - self.assertEqual(vf.properties, result) - self.assertEqual(vf.uri, result['element-uri']) + assert isinstance(vf, VirtualFunction) + assert vf.properties == result + assert vf.uri == result['element-uri'] def test_delete(self): """ @@ -339,7 +330,3 @@ class VirtualFunctionTests(unittest.TestCase): m.post('/api/partitions/fake-part-id-1/virtual-functions/' 'fake-vf-id-1', status_code=204) vf.update_properties(properties={}) - - -if __name__ == '__main__': - unittest.main() diff --git a/tests/unit/test_virtual_switch.py b/tests/unit/test_virtual_switch.py old mode 100755 new mode 100644 index 971cb83..c39ba91 --- a/tests/unit/test_virtual_switch.py +++ b/tests/unit/test_virtual_switch.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016-2017 IBM Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,17 +18,17 @@ Unit tests for _virtual_switch module. from __future__ import absolute_import, print_function -import unittest import re +# FIXME: Migrate requests_mock to zhmcclient_mock. 
import requests_mock from zhmcclient import Session, Client, Nic -class VirtualSwitchTests(unittest.TestCase): +class TestVirtualSwitch(object): """All tests for VirtualSwitch and VirtualSwitchManager classes.""" - def setUp(self): + def setup_method(self): self.session = Session('vswitch-dpm-host', 'vswitch-user', 'vswitch-pwd') self.client = Client(self.session) @@ -55,7 +54,7 @@ class VirtualSwitchTests(unittest.TestCase): cpcs = self.cpc_mgr.list() self.cpc = cpcs[0] - def tearDown(self): + def teardown_method(self): with requests_mock.mock() as m: m.delete('/api/sessions/this-session', status_code=204) self.session.logoff() @@ -63,7 +62,7 @@ class VirtualSwitchTests(unittest.TestCase): def test_init(self): """Test __init__() on VirtualSwitchManager instance in CPC.""" vswitch_mgr = self.cpc.virtual_switches - self.assertEqual(vswitch_mgr.cpc, self.cpc) + assert vswitch_mgr.cpc == self.cpc def test_list_short_ok(self): """ @@ -91,16 +90,14 @@ class VirtualSwitchTests(unittest.TestCase): vswitches = vswitch_mgr.list(full_properties=False) - self.assertEqual(len(vswitches), len(result['virtual-switches'])) + assert len(vswitches) == len(result['virtual-switches']) for idx, vswitch in enumerate(vswitches): - self.assertEqual( - vswitch.properties, - result['virtual-switches'][idx]) - self.assertEqual( - vswitch.uri, - result['virtual-switches'][idx]['object-uri']) - self.assertFalse(vswitch.full_properties) - self.assertEqual(vswitch.manager, vswitch_mgr) + assert vswitch.properties == \ + result['virtual-switches'][idx] + assert vswitch.uri == \ + result['virtual-switches'][idx]['object-uri'] + assert not vswitch.full_properties + assert vswitch.manager == vswitch_mgr def test_list_full_ok(self): """ @@ -149,15 +146,14 @@ class VirtualSwitchTests(unittest.TestCase): vswitches = vswitch_mgr.list(full_properties=True) - self.assertEqual(len(vswitches), len(result['virtual-switches'])) + assert len(vswitches) == len(result['virtual-switches']) for idx, vswitch in 
enumerate(vswitches): - self.assertEqual(vswitch.properties['name'], - result['virtual-switches'][idx]['name']) - self.assertEqual( - vswitch.uri, - result['virtual-switches'][idx]['object-uri']) - self.assertTrue(vswitch.full_properties) - self.assertEqual(vswitch.manager, vswitch_mgr) + assert vswitch.properties['name'] == \ + result['virtual-switches'][idx]['name'] + assert vswitch.uri == \ + result['virtual-switches'][idx]['object-uri'] + assert vswitch.full_properties + assert vswitch.manager == vswitch_mgr def test_update_properties(self): """ @@ -225,17 +221,13 @@ class VirtualSwitchTests(unittest.TestCase): nics = vswitch.get_connected_nics() - self.assertTrue(isinstance(nics, list)) + assert isinstance(nics, list) for i, nic in enumerate(nics): - self.assertTrue(isinstance(nic, Nic)) + assert isinstance(nic, Nic) nic_uri = result['connected-vnic-uris'][i] - self.assertEqual(nic.uri, nic_uri) - self.assertEqual(nic.properties['element-uri'], nic_uri) + assert nic.uri == nic_uri + assert nic.properties['element-uri'] == nic_uri m = re.match(r"^/api/partitions/([^/]+)/nics/([^/]+)/?$", nic_uri) nic_id = m.group(2) - self.assertEqual(nic.properties['element-id'], nic_id) - - -if __name__ == '__main__': - unittest.main() + assert nic.properties['element-id'] == nic_id diff --git a/tests/unit/zhmcclient_mock/test_example.py b/tests/unit/zhmcclient_mock/test_example.py old mode 100755 new mode 100644 index 92f695e..7324a21 --- a/tests/unit/zhmcclient_mock/test_example.py +++ b/tests/unit/zhmcclient_mock/test_example.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016-2017 IBM Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,12 +19,14 @@ Example unit test for a user of the zhmcclient package. 
from __future__ import absolute_import, print_function import requests.packages.urllib3 -import unittest + import zhmcclient import zhmcclient_mock +requests.packages.urllib3.disable_warnings() + -class MyTests(unittest.TestCase): +class TestMy(object): @staticmethod def create_session_1(): @@ -153,40 +154,40 @@ class MyTests(unittest.TestCase): Check the faked session and its faked HMC. """ - self.assertEqual(self.session.host, 'fake-host') + assert self.session.host == 'fake-host' - self.assertEqual(self.client.version_info(), (1, 8)) + assert self.client.version_info() == (1, 8) cpcs = self.client.cpcs.list() - self.assertEqual(len(cpcs), 2) + assert len(cpcs) == 2 cpc1 = cpcs[0] # a CPC in classic mode - self.assertEqual(cpc1.get_property('name'), 'cpc_1') - self.assertFalse(cpc1.dpm_enabled) + assert cpc1.get_property('name') == 'cpc_1' + assert not cpc1.dpm_enabled lpars = cpc1.lpars.list() - self.assertEqual(len(lpars), 1) + assert len(lpars) == 1 lpar1 = lpars[0] - self.assertEqual(lpar1.get_property('name'), 'lpar_1') + assert lpar1.get_property('name') == 'lpar_1' cpc2 = cpcs[1] # a CPC in DPM mode - self.assertEqual(cpc2.get_property('name'), 'cpc_2') - self.assertTrue(cpc2.dpm_enabled) + assert cpc2.get_property('name') == 'cpc_2' + assert cpc2.dpm_enabled partitions = cpc2.partitions.list() - self.assertEqual(len(partitions), 1) + assert len(partitions) == 1 partition1 = partitions[0] - self.assertEqual(partition1.get_property('name'), 'partition_1') + assert partition1.get_property('name') == 'partition_1' adapters = cpc2.adapters.list() - self.assertEqual(len(adapters), 1) + assert len(adapters) == 1 adapter1 = adapters[0] - self.assertEqual(adapter1.get_property('name'), 'osa_1') + assert adapter1.get_property('name') == 'osa_1' ports = adapter1.ports.list() - self.assertEqual(len(ports), 1) + assert len(ports) == 1 port1 = ports[0] - self.assertEqual(port1.get_property('name'), 'osa_1_port_1') + assert port1.get_property('name') == 'osa_1_port_1' 
def test_session_1(self): self.session = self.create_session_1() @@ -197,8 +198,3 @@ class MyTests(unittest.TestCase): self.session = self.create_session_2() self.client = zhmcclient.Client(self.session) self.check() - - -if __name__ == '__main__': - requests.packages.urllib3.disable_warnings() - unittest.main() diff --git a/tests/unit/zhmcclient_mock/test_hmc.py b/tests/unit/zhmcclient_mock/test_hmc.py old mode 100755 new mode 100644 index dcba1f0..7499a04 --- a/tests/unit/zhmcclient_mock/test_hmc.py +++ b/tests/unit/zhmcclient_mock/test_hmc.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016-2017 IBM Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,8 +18,9 @@ Unit tests for _hmc module of the zhmcclient_mock package. from __future__ import absolute_import, print_function -import unittest +import re from datetime import datetime +import pytest from zhmcclient_mock._hmc import FakedHmc, \ FakedBaseManager, FakedBaseResource, \ @@ -38,10 +38,10 @@ from zhmcclient_mock._hmc import FakedHmc, \ FakedMetricGroupDefinition, FakedMetricObjectValues -class FakedHmcTests(unittest.TestCase): +class TestFakedHmc(object): """All tests for the zhmcclient_mock.FakedHmc class.""" - def setUp(self): + def setup_method(self): self.hmc = FakedHmc('fake-hmc', '2.13.1', '1.8') def test_repr(self): @@ -52,22 +52,20 @@ class FakedHmcTests(unittest.TestCase): repr_str = repr_str.replace('\n', '\\n') # We check just the begin of the string: - self.assertRegexpMatches( - repr_str, - r'^{classname}\s+at\s+0x{id:08x}\s+\(\\n.*'.format( - classname=hmc.__class__.__name__, - id=id(hmc))) + assert re.match(r'^{classname}\s+at\s+0x{id:08x}\s+\(\\n.*'. 
+ format(classname=hmc.__class__.__name__, id=id(hmc)), + repr_str) def test_hmc(self): - self.assertEqual(self.hmc.hmc_name, 'fake-hmc') - self.assertEqual(self.hmc.hmc_version, '2.13.1') - self.assertEqual(self.hmc.api_version, '1.8') - self.assertIsInstance(self.hmc.cpcs, FakedCpcManager) + assert self.hmc.hmc_name == 'fake-hmc' + assert self.hmc.hmc_version == '2.13.1' + assert self.hmc.api_version == '1.8' + assert isinstance(self.hmc.cpcs, FakedCpcManager) # the function to be tested: cpcs = self.hmc.cpcs.list() - self.assertEqual(len(cpcs), 0) + assert len(cpcs) == 0 def test_hmc_1_cpc(self): cpc1_in_props = {'name': 'cpc1'} @@ -87,12 +85,12 @@ class FakedHmcTests(unittest.TestCase): # the function to be tested: cpcs = self.hmc.cpcs.list() - self.assertEqual(len(cpcs), 1) - self.assertEqual(cpcs[0], cpc1) + assert len(cpcs) == 1 + assert cpcs[0] == cpc1 - self.assertIsInstance(cpc1, FakedCpc) - self.assertEqual(cpc1.properties, cpc1_out_props) - self.assertEqual(cpc1.manager, self.hmc.cpcs) + assert isinstance(cpc1, FakedCpc) + assert cpc1.properties == cpc1_out_props + assert cpc1.manager == self.hmc.cpcs def test_hmc_2_cpcs(self): cpc1_in_props = {'name': 'cpc1'} @@ -126,18 +124,18 @@ class FakedHmcTests(unittest.TestCase): # the function to be tested: cpcs = self.hmc.cpcs.list() - self.assertEqual(len(cpcs), 2) + assert len(cpcs) == 2 # We expect the order of addition to be maintained: - self.assertEqual(cpcs[0], cpc1) - self.assertEqual(cpcs[1], cpc2) + assert cpcs[0] == cpc1 + assert cpcs[1] == cpc2 - self.assertIsInstance(cpc1, FakedCpc) - self.assertEqual(cpc1.properties, cpc1_out_props) - self.assertEqual(cpc1.manager, self.hmc.cpcs) + assert isinstance(cpc1, FakedCpc) + assert cpc1.properties == cpc1_out_props + assert cpc1.manager == self.hmc.cpcs - self.assertIsInstance(cpc2, FakedCpc) - self.assertEqual(cpc2.properties, cpc2_out_props) - self.assertEqual(cpc2.manager, self.hmc.cpcs) + assert isinstance(cpc2, FakedCpc) + assert cpc2.properties == 
cpc2_out_props + assert cpc2.manager == self.hmc.cpcs def test_res_dict(self): cpc1_in_props = {'name': 'cpc1'} @@ -165,7 +163,7 @@ class FakedHmcTests(unittest.TestCase): cpcs = self.hmc.cpcs.list() - self.assertEqual(len(cpcs), 1) + assert len(cpcs) == 1 cpc1 = cpcs[0] cpc1_out_props = cpc1_in_props.copy() @@ -176,18 +174,18 @@ class FakedHmcTests(unittest.TestCase): 'is-ensemble-member': False, 'status': 'operating', }) - self.assertIsInstance(cpc1, FakedCpc) - self.assertEqual(cpc1.properties, cpc1_out_props) - self.assertEqual(cpc1.manager, self.hmc.cpcs) + assert isinstance(cpc1, FakedCpc) + assert cpc1.properties == cpc1_out_props + assert cpc1.manager == self.hmc.cpcs cpc1_adapters = cpc1.adapters.list() - self.assertEqual(len(cpc1_adapters), 1) + assert len(cpc1_adapters) == 1 adapter1 = cpc1_adapters[0] adapter1_ports = adapter1.ports.list() - self.assertEqual(len(adapter1_ports), 1) + assert len(adapter1_ports) == 1 port1 = adapter1_ports[0] adapter1_out_props = adapter1_in_props.copy() @@ -197,24 +195,24 @@ class FakedHmcTests(unittest.TestCase): 'status': 'active', 'network-port-uris': [port1.uri], }) - self.assertIsInstance(adapter1, FakedAdapter) - self.assertEqual(adapter1.properties, adapter1_out_props) - self.assertEqual(adapter1.manager, cpc1.adapters) + assert isinstance(adapter1, FakedAdapter) + assert adapter1.properties == adapter1_out_props + assert adapter1.manager == cpc1.adapters port1_out_props = port1_in_props.copy() port1_out_props.update({ 'element-id': port1.oid, 'element-uri': port1.uri, }) - self.assertIsInstance(port1, FakedPort) - self.assertEqual(port1.properties, port1_out_props) - self.assertEqual(port1.manager, adapter1.ports) + assert isinstance(port1, FakedPort) + assert port1.properties == port1_out_props + assert port1.manager == adapter1.ports -class FakedBaseTests(unittest.TestCase): +class TestFakedBase(object): """All tests for the FakedBaseManager and FakedBaseResource classes.""" - def setUp(self): + def 
setup_method(self): self.hmc = FakedHmc('fake-hmc', '2.13.1', '1.8') self.cpc1_oid = '42-abc-543' @@ -250,11 +248,10 @@ class FakedBaseTests(unittest.TestCase): repr_str = repr_str.replace('\n', '\\n') # We check just the begin of the string: - self.assertRegexpMatches( - repr_str, - r'^{classname}\s+at\s+0x{id:08x}\s+\(\\n.*'.format( - classname=resource.__class__.__name__, - id=id(resource))) + assert re.match(r'^{classname}\s+at\s+0x{id:08x}\s+\(\\n.*'. + format(classname=resource.__class__.__name__, + id=id(resource)), + repr_str) def test_manager_repr(self): """Test FakedBaseManager.__repr__().""" @@ -264,42 +261,39 @@ class FakedBaseTests(unittest.TestCase): repr_str = repr_str.replace('\n', '\\n') # We check just the begin of the string: - self.assertRegexpMatches( - repr_str, - r'^{classname}\s+at\s+0x{id:08x}\s+\(\\n.*'.format( - classname=manager.__class__.__name__, - id=id(manager))) + assert re.match(r'^{classname}\s+at\s+0x{id:08x}\s+\(\\n.*'. + format(classname=manager.__class__.__name__, + id=id(manager)), + repr_str) def test_manager_attr(self): """Test FakedBaseManager attributes.""" - self.assertIsInstance(self.cpc_manager, FakedBaseManager) + assert isinstance(self.cpc_manager, FakedBaseManager) - self.assertEqual(self.cpc_manager.hmc, self.hmc) - self.assertEqual(self.cpc_manager.parent, self.hmc) - self.assertEqual(self.cpc_manager.resource_class, FakedCpc) - self.assertEqual(self.cpc_manager.base_uri, '/api/cpcs') - self.assertEqual(self.cpc_manager.oid_prop, 'object-id') - self.assertEqual(self.cpc_manager.uri_prop, 'object-uri') + assert self.cpc_manager.hmc == self.hmc + assert self.cpc_manager.parent == self.hmc + assert self.cpc_manager.resource_class == FakedCpc + assert self.cpc_manager.base_uri == '/api/cpcs' + assert self.cpc_manager.oid_prop == 'object-id' + assert self.cpc_manager.uri_prop == 'object-uri' def test_resource_attr(self): """Test FakedBaseResource attributes.""" - self.assertIsInstance(self.cpc_resource, 
FakedBaseResource) + assert isinstance(self.cpc_resource, FakedBaseResource) - self.assertEqual(self.cpc_resource.manager, self.cpc_manager) - self.assertEqual(self.cpc_resource.properties, self.cpc1_out_props) - self.assertEqual(self.cpc_resource.oid, - self.cpc1_out_props['object-id']) - self.assertEqual(self.cpc_resource.uri, - self.cpc1_out_props['object-uri']) + assert self.cpc_resource.manager == self.cpc_manager + assert self.cpc_resource.properties == self.cpc1_out_props + assert self.cpc_resource.oid == self.cpc1_out_props['object-id'] + assert self.cpc_resource.uri == self.cpc1_out_props['object-uri'] -class FakedActivationProfileTests(unittest.TestCase): +class TestFakedActivationProfile(object): """All tests for the FakedActivationProfileManager and FakedActivationProfile classes.""" - def setUp(self): + def setup_method(self): self.hmc = FakedHmc('fake-hmc', '2.13.1', '1.8') self.cpc1_in_props = {'name': 'cpc1'} self.resetprofile1_in_props = {'name': 'resetprofile1'} @@ -331,27 +325,27 @@ class FakedActivationProfileTests(unittest.TestCase): # Test reset activation profiles - self.assertIsInstance(cpc1.reset_activation_profiles, - FakedActivationProfileManager) - self.assertEqual(cpc1.reset_activation_profiles.profile_type, 'reset') - self.assertRegexpMatches(cpc1.reset_activation_profiles.base_uri, - r'/api/cpcs/[^/]+/reset-activation-profiles') + assert isinstance(cpc1.reset_activation_profiles, + FakedActivationProfileManager) + assert cpc1.reset_activation_profiles.profile_type == 'reset' + assert re.match(r'/api/cpcs/[^/]+/reset-activation-profiles', + cpc1.reset_activation_profiles.base_uri) # Test image activation profiles - self.assertIsInstance(cpc1.image_activation_profiles, - FakedActivationProfileManager) - self.assertEqual(cpc1.image_activation_profiles.profile_type, 'image') - self.assertRegexpMatches(cpc1.image_activation_profiles.base_uri, - r'/api/cpcs/[^/]+/image-activation-profiles') + assert 
isinstance(cpc1.image_activation_profiles, + FakedActivationProfileManager) + assert cpc1.image_activation_profiles.profile_type == 'image' + assert re.match(r'/api/cpcs/[^/]+/image-activation-profiles', + cpc1.image_activation_profiles.base_uri) # Test load activation profiles - self.assertIsInstance(cpc1.load_activation_profiles, - FakedActivationProfileManager) - self.assertEqual(cpc1.load_activation_profiles.profile_type, 'load') - self.assertRegexpMatches(cpc1.load_activation_profiles.base_uri, - r'/api/cpcs/[^/]+/load-activation-profiles') + assert isinstance(cpc1.load_activation_profiles, + FakedActivationProfileManager) + assert cpc1.load_activation_profiles.profile_type == 'load' + assert re.match(r'/api/cpcs/[^/]+/load-activation-profiles', + cpc1.load_activation_profiles.base_uri) def test_profiles_list(self): """Test list() of FakedActivationProfileManager.""" @@ -362,53 +356,53 @@ class FakedActivationProfileTests(unittest.TestCase): resetprofiles = cpc1.reset_activation_profiles.list() - self.assertEqual(len(resetprofiles), 1) + assert len(resetprofiles) == 1 resetprofile1 = resetprofiles[0] resetprofile1_out_props = self.resetprofile1_in_props.copy() resetprofile1_out_props.update({ 'name': resetprofile1.oid, 'element-uri': resetprofile1.uri, }) - self.assertIsInstance(resetprofile1, FakedActivationProfile) - self.assertEqual(resetprofile1.properties, resetprofile1_out_props) - self.assertEqual(resetprofile1.manager, cpc1.reset_activation_profiles) + assert isinstance(resetprofile1, FakedActivationProfile) + assert resetprofile1.properties == resetprofile1_out_props + assert resetprofile1.manager == cpc1.reset_activation_profiles # Test image activation profiles imageprofiles = cpc1.image_activation_profiles.list() - self.assertEqual(len(imageprofiles), 1) + assert len(imageprofiles) == 1 imageprofile1 = imageprofiles[0] imageprofile1_out_props = self.imageprofile1_in_props.copy() imageprofile1_out_props.update({ 'name': imageprofile1.oid, 
'element-uri': imageprofile1.uri, }) - self.assertIsInstance(imageprofile1, FakedActivationProfile) - self.assertEqual(imageprofile1.properties, imageprofile1_out_props) - self.assertEqual(imageprofile1.manager, cpc1.image_activation_profiles) + assert isinstance(imageprofile1, FakedActivationProfile) + assert imageprofile1.properties == imageprofile1_out_props + assert imageprofile1.manager == cpc1.image_activation_profiles # Test load activation profiles loadprofiles = cpc1.load_activation_profiles.list() - self.assertEqual(len(loadprofiles), 1) + assert len(loadprofiles) == 1 loadprofile1 = loadprofiles[0] loadprofile1_out_props = self.loadprofile1_in_props.copy() loadprofile1_out_props.update({ 'name': loadprofile1.oid, 'element-uri': loadprofile1.uri, }) - self.assertIsInstance(loadprofile1, FakedActivationProfile) - self.assertEqual(loadprofile1.properties, loadprofile1_out_props) - self.assertEqual(loadprofile1.manager, cpc1.load_activation_profiles) + assert isinstance(loadprofile1, FakedActivationProfile) + assert loadprofile1.properties == loadprofile1_out_props + assert loadprofile1.manager == cpc1.load_activation_profiles def test_profiles_add(self): """Test add() of FakedActivationProfileManager.""" cpcs = self.hmc.cpcs.list() cpc1 = cpcs[0] resetprofiles = cpc1.reset_activation_profiles.list() - self.assertEqual(len(resetprofiles), 1) + assert len(resetprofiles) == 1 resetprofile2_in_props = {'name': 'resetprofile2'} @@ -417,23 +411,23 @@ class FakedActivationProfileTests(unittest.TestCase): resetprofile2_in_props) resetprofiles = cpc1.reset_activation_profiles.list() - self.assertEqual(len(resetprofiles), 2) + assert len(resetprofiles) == 2 resetprofile2 = [p for p in resetprofiles if p.properties['name'] == resetprofile2_in_props['name']][0] - self.assertEqual(new_resetprofile.properties, resetprofile2.properties) - self.assertEqual(new_resetprofile.manager, resetprofile2.manager) + assert new_resetprofile.properties == resetprofile2.properties + 
assert new_resetprofile.manager == resetprofile2.manager resetprofile2_out_props = resetprofile2_in_props.copy() resetprofile2_out_props.update({ 'name': resetprofile2.oid, 'element-uri': resetprofile2.uri, }) - self.assertIsInstance(resetprofile2, FakedActivationProfile) - self.assertEqual(resetprofile2.properties, resetprofile2_out_props) - self.assertEqual(resetprofile2.manager, cpc1.reset_activation_profiles) + assert isinstance(resetprofile2, FakedActivationProfile) + assert resetprofile2.properties == resetprofile2_out_props + assert resetprofile2.manager == cpc1.reset_activation_profiles # Because we know that the image and load profile managers are of the # same class, we don't need to test them. @@ -444,22 +438,22 @@ class FakedActivationProfileTests(unittest.TestCase): cpc1 = cpcs[0] resetprofiles = cpc1.reset_activation_profiles.list() resetprofile1 = resetprofiles[0] - self.assertEqual(len(resetprofiles), 1) + assert len(resetprofiles) == 1 # the function to be tested: cpc1.reset_activation_profiles.remove(resetprofile1.oid) resetprofiles = cpc1.reset_activation_profiles.list() - self.assertEqual(len(resetprofiles), 0) + assert len(resetprofiles) == 0 # Because we know that the image and load profile managers are of the # same class, we don't need to test them. -class FakedAdapterTests(unittest.TestCase): +class TestFakedAdapter(object): """All tests for the FakedAdapterManager and FakedAdapter classes.""" - def setUp(self): + def setup_method(self): self.hmc = FakedHmc('fake-hmc', '2.13.1', '1.8') self.cpc1_in_props = {'name': 'cpc1'} self.adapter1_in_props = {'name': 'adapter1', 'type': 'roce'} @@ -487,19 +481,18 @@ class FakedAdapterTests(unittest.TestCase): repr_str = repr_str.replace('\n', '\\n') # We check just the begin of the string: - self.assertRegexpMatches( - repr_str, - r'^{classname}\s+at\s+0x{id:08x}\s+\(\\n.*'.format( - classname=adapter.__class__.__name__, - id=id(adapter))) + assert re.match(r'^{classname}\s+at\s+0x{id:08x}\s+\(\\n.*'. 
+ format(classname=adapter.__class__.__name__, + id=id(adapter)), + repr_str) def test_adapters_attr(self): """Test CPC 'adapters' attribute.""" cpcs = self.hmc.cpcs.list() cpc1 = cpcs[0] - self.assertIsInstance(cpc1.adapters, FakedAdapterManager) - self.assertRegexpMatches(cpc1.adapters.base_uri, r'/api/adapters') + assert isinstance(cpc1.adapters, FakedAdapterManager) + assert re.match(r'/api/adapters', cpc1.adapters.base_uri) def test_adapters_list(self): """Test list() of FakedAdapterManager.""" @@ -509,7 +502,7 @@ class FakedAdapterTests(unittest.TestCase): # the function to be tested: adapters = cpc1.adapters.list() - self.assertEqual(len(adapters), 1) + assert len(adapters) == 1 adapter1 = adapters[0] adapter1_out_props = self.adapter1_in_props.copy() adapter1_out_props.update({ @@ -519,19 +512,19 @@ class FakedAdapterTests(unittest.TestCase): 'adapter-family': 'roce', 'network-port-uris': [], }) - self.assertIsInstance(adapter1, FakedAdapter) - self.assertEqual(adapter1.properties, adapter1_out_props) - self.assertEqual(adapter1.manager, cpc1.adapters) + assert isinstance(adapter1, FakedAdapter) + assert adapter1.properties == adapter1_out_props + assert adapter1.manager == cpc1.adapters # Quick check of child resources: - self.assertIsInstance(adapter1.ports, FakedPortManager) + assert isinstance(adapter1.ports, FakedPortManager) def test_adapters_add(self): """Test add() of FakedAdapterManager.""" cpcs = self.hmc.cpcs.list() cpc1 = cpcs[0] adapters = cpc1.adapters.list() - self.assertEqual(len(adapters), 1) + assert len(adapters) == 1 adapter2_in_props = {'name': 'adapter2', 'adapter-family': 'ficon'} @@ -540,13 +533,13 @@ class FakedAdapterTests(unittest.TestCase): adapter2_in_props) adapters = cpc1.adapters.list() - self.assertEqual(len(adapters), 2) + assert len(adapters) == 2 adapter2 = [a for a in adapters if a.properties['name'] == adapter2_in_props['name']][0] - self.assertEqual(new_adapter.properties, adapter2.properties) - 
self.assertEqual(new_adapter.manager, adapter2.manager) + assert new_adapter.properties == adapter2.properties + assert new_adapter.manager == adapter2.manager adapter2_out_props = adapter2_in_props.copy() adapter2_out_props.update({ @@ -555,9 +548,9 @@ class FakedAdapterTests(unittest.TestCase): 'status': 'active', 'storage-port-uris': [], }) - self.assertIsInstance(adapter2, FakedAdapter) - self.assertEqual(adapter2.properties, adapter2_out_props) - self.assertEqual(adapter2.manager, cpc1.adapters) + assert isinstance(adapter2, FakedAdapter) + assert adapter2.properties == adapter2_out_props + assert adapter2.manager == cpc1.adapters def test_adapters_remove(self): """Test remove() of FakedAdapterManager.""" @@ -565,19 +558,19 @@ class FakedAdapterTests(unittest.TestCase): cpc1 = cpcs[0] adapters = cpc1.adapters.list() adapter1 = adapters[0] - self.assertEqual(len(adapters), 1) + assert len(adapters) == 1 # the function to be tested: cpc1.adapters.remove(adapter1.oid) adapters = cpc1.adapters.list() - self.assertEqual(len(adapters), 0) + assert len(adapters) == 0 -class FakedCpcTests(unittest.TestCase): +class TestFakedCpc(object): """All tests for the FakedCpcManager and FakedCpc classes.""" - def setUp(self): + def setup_method(self): self.hmc = FakedHmc('fake-hmc', '2.13.1', '1.8') self.cpc1_in_props = {'name': 'cpc1'} rd = { @@ -599,16 +592,14 @@ class FakedCpcTests(unittest.TestCase): repr_str = repr_str.replace('\n', '\\n') # We check just the begin of the string: - self.assertRegexpMatches( - repr_str, - r'^{classname}\s+at\s+0x{id:08x}\s+\(\\n.*'.format( - classname=cpc.__class__.__name__, - id=id(cpc))) + assert re.match(r'^{classname}\s+at\s+0x{id:08x}\s+\(\\n.*'. 
+ format(classname=cpc.__class__.__name__, id=id(cpc)), + repr_str) def test_cpcs_attr(self): """Test HMC 'cpcs' attribute.""" - self.assertIsInstance(self.hmc.cpcs, FakedCpcManager) - self.assertRegexpMatches(self.hmc.cpcs.base_uri, r'/api/cpcs') + assert isinstance(self.hmc.cpcs, FakedCpcManager) + assert re.match(r'/api/cpcs', self.hmc.cpcs.base_uri) def test_cpcs_list(self): """Test list() of FakedCpcManager.""" @@ -625,26 +616,26 @@ class FakedCpcTests(unittest.TestCase): 'is-ensemble-member': False, 'status': 'operating', }) - self.assertIsInstance(cpc1, FakedCpc) - self.assertEqual(cpc1.properties, cpc1_out_props) - self.assertEqual(cpc1.manager, self.hmc.cpcs) + assert isinstance(cpc1, FakedCpc) + assert cpc1.properties == cpc1_out_props + assert cpc1.manager == self.hmc.cpcs # Quick check of child resources: - self.assertIsInstance(cpc1.lpars, FakedLparManager) - self.assertIsInstance(cpc1.partitions, FakedPartitionManager) - self.assertIsInstance(cpc1.adapters, FakedAdapterManager) - self.assertIsInstance(cpc1.virtual_switches, FakedVirtualSwitchManager) - self.assertIsInstance(cpc1.reset_activation_profiles, - FakedActivationProfileManager) - self.assertIsInstance(cpc1.image_activation_profiles, - FakedActivationProfileManager) - self.assertIsInstance(cpc1.load_activation_profiles, - FakedActivationProfileManager) + assert isinstance(cpc1.lpars, FakedLparManager) + assert isinstance(cpc1.partitions, FakedPartitionManager) + assert isinstance(cpc1.adapters, FakedAdapterManager) + assert isinstance(cpc1.virtual_switches, FakedVirtualSwitchManager) + assert isinstance(cpc1.reset_activation_profiles, + FakedActivationProfileManager) + assert isinstance(cpc1.image_activation_profiles, + FakedActivationProfileManager) + assert isinstance(cpc1.load_activation_profiles, + FakedActivationProfileManager) def test_cpcs_add(self): """Test add() of FakedCpcManager.""" cpcs = self.hmc.cpcs.list() - self.assertEqual(len(cpcs), 1) + assert len(cpcs) == 1 cpc2_in_props = 
{'name': 'cpc2'} @@ -652,13 +643,13 @@ class FakedCpcTests(unittest.TestCase): new_cpc = self.hmc.cpcs.add(cpc2_in_props) cpcs = self.hmc.cpcs.list() - self.assertEqual(len(cpcs), 2) + assert len(cpcs) == 2 cpc2 = [cpc for cpc in cpcs if cpc.properties['name'] == cpc2_in_props['name']][0] - self.assertEqual(new_cpc.properties, cpc2.properties) - self.assertEqual(new_cpc.manager, cpc2.manager) + assert new_cpc.properties == cpc2.properties + assert new_cpc.manager == cpc2.manager cpc2_out_props = cpc2_in_props.copy() cpc2_out_props.update({ @@ -668,27 +659,27 @@ class FakedCpcTests(unittest.TestCase): 'is-ensemble-member': False, 'status': 'operating', }) - self.assertIsInstance(cpc2, FakedCpc) - self.assertEqual(cpc2.properties, cpc2_out_props) - self.assertEqual(cpc2.manager, self.hmc.cpcs) + assert isinstance(cpc2, FakedCpc) + assert cpc2.properties == cpc2_out_props + assert cpc2.manager == self.hmc.cpcs def test_cpcs_remove(self): """Test remove() of FakedCpcManager.""" cpcs = self.hmc.cpcs.list() cpc1 = cpcs[0] - self.assertEqual(len(cpcs), 1) + assert len(cpcs) == 1 # the function to be tested: self.hmc.cpcs.remove(cpc1.oid) cpcs = self.hmc.cpcs.list() - self.assertEqual(len(cpcs), 0) + assert len(cpcs) == 0 -class FakedHbaTests(unittest.TestCase): +class TestFakedHba(object): """All tests for the FakedHbaManager and FakedHba classes.""" - def setUp(self): + def setup_method(self): self.hmc = FakedHmc('fake-hmc', '2.13.1', '1.8') self.adapter1_oid = '747-abc-12345' @@ -747,9 +738,9 @@ class FakedHbaTests(unittest.TestCase): partitions = cpc1.partitions.list() partition1 = partitions[0] - self.assertIsInstance(partition1.hbas, FakedHbaManager) - self.assertRegexpMatches(partition1.hbas.base_uri, - r'/api/partitions/[^/]+/hbas') + assert isinstance(partition1.hbas, FakedHbaManager) + assert re.match(r'/api/partitions/[^/]+/hbas', + partition1.hbas.base_uri) def test_hbas_list(self): """Test list() of FakedHbaManager.""" @@ -761,7 +752,7 @@ class 
FakedHbaTests(unittest.TestCase): # the function to be tested: hbas = partition1.hbas.list() - self.assertEqual(len(hbas), 1) + assert len(hbas) == 1 hba1 = hbas[0] hba1_out_props = self.hba1_in_props.copy() hba1_out_props.update({ @@ -770,9 +761,9 @@ class FakedHbaTests(unittest.TestCase): 'device-number': hba1.properties['device-number'], 'wwpn': hba1.properties['wwpn'], }) - self.assertIsInstance(hba1, FakedHba) - self.assertEqual(hba1.properties, hba1_out_props) - self.assertEqual(hba1.manager, partition1.hbas) + assert isinstance(hba1, FakedHba) + assert hba1.properties == hba1_out_props + assert hba1.manager == partition1.hbas def test_hbas_add(self): """Test add() of FakedHbaManager.""" @@ -781,7 +772,7 @@ class FakedHbaTests(unittest.TestCase): partitions = cpc1.partitions.list() partition1 = partitions[0] hbas = partition1.hbas.list() - self.assertEqual(len(hbas), 1) + assert len(hbas) == 1 hba2_oid = '22-55-xy' port_uri = '/api/adapters/abc-123/storage-ports/42' @@ -799,22 +790,22 @@ class FakedHbaTests(unittest.TestCase): hba2_in_props) hbas = partition1.hbas.list() - self.assertEqual(len(hbas), 2) + assert len(hbas) == 2 hba2 = [hba for hba in hbas if hba.properties['name'] == hba2_in_props['name']][0] - self.assertEqual(new_hba.properties, hba2.properties) - self.assertEqual(new_hba.manager, hba2.manager) + assert new_hba.properties == hba2.properties + assert new_hba.manager == hba2.manager hba2_out_props = hba2_in_props.copy() hba2_out_props.update({ 'element-id': hba2_oid, 'element-uri': hba2.uri, }) - self.assertIsInstance(hba2, FakedHba) - self.assertEqual(hba2.properties, hba2_out_props) - self.assertEqual(hba2.manager, partition1.hbas) + assert isinstance(hba2, FakedHba) + assert hba2.properties == hba2_out_props + assert hba2.manager == partition1.hbas def test_hbas_remove(self): """Test remove() of FakedHbaManager.""" @@ -824,21 +815,21 @@ class FakedHbaTests(unittest.TestCase): partition1 = partitions[0] hbas = partition1.hbas.list() hba1 = 
hbas[0] - self.assertEqual(len(hbas), 1) + assert len(hbas) == 1 # the function to be tested: partition1.hbas.remove(hba1.oid) hbas = partition1.hbas.list() - self.assertEqual(len(hbas), 0) + assert len(hbas) == 0 # TODO: Add testcases for updating 'hba-uris' parent property -class FakedLparTests(unittest.TestCase): +class TestFakedLpar(object): """All tests for the FakedLparManager and FakedLpar classes.""" - def setUp(self): + def setup_method(self): self.hmc = FakedHmc('fake-hmc', '2.13.1', '1.8') self.cpc1_in_props = {'name': 'cpc1'} self.lpar1_in_props = {'name': 'lpar1'} @@ -860,9 +851,8 @@ class FakedLparTests(unittest.TestCase): cpcs = self.hmc.cpcs.list() cpc1 = cpcs[0] - self.assertIsInstance(cpc1.lpars, FakedLparManager) - self.assertRegexpMatches(cpc1.lpars.base_uri, - r'/api/logical-partitions') + assert isinstance(cpc1.lpars, FakedLparManager) + assert re.match(r'/api/logical-partitions', cpc1.lpars.base_uri) def test_lpars_list(self): """Test list() of FakedLparManager.""" @@ -872,7 +862,7 @@ class FakedLparTests(unittest.TestCase): # the function to be tested: lpars = cpc1.lpars.list() - self.assertEqual(len(lpars), 1) + assert len(lpars) == 1 lpar1 = lpars[0] lpar1_out_props = self.lpar1_in_props.copy() lpar1_out_props.update({ @@ -880,16 +870,16 @@ class FakedLparTests(unittest.TestCase): 'object-uri': lpar1.uri, 'status': 'not-activated', }) - self.assertIsInstance(lpar1, FakedLpar) - self.assertEqual(lpar1.properties, lpar1_out_props) - self.assertEqual(lpar1.manager, cpc1.lpars) + assert isinstance(lpar1, FakedLpar) + assert lpar1.properties == lpar1_out_props + assert lpar1.manager == cpc1.lpars def test_lpars_add(self): """Test add() of FakedLparManager.""" cpcs = self.hmc.cpcs.list() cpc1 = cpcs[0] lpars = cpc1.lpars.list() - self.assertEqual(len(lpars), 1) + assert len(lpars) == 1 lpar2_in_props = {'name': 'lpar2'} @@ -898,13 +888,13 @@ class FakedLparTests(unittest.TestCase): lpar2_in_props) lpars = cpc1.lpars.list() - 
self.assertEqual(len(lpars), 2) + assert len(lpars) == 2 lpar2 = [p for p in lpars if p.properties['name'] == lpar2_in_props['name']][0] - self.assertEqual(new_lpar.properties, lpar2.properties) - self.assertEqual(new_lpar.manager, lpar2.manager) + assert new_lpar.properties == lpar2.properties + assert new_lpar.manager == lpar2.manager lpar2_out_props = lpar2_in_props.copy() lpar2_out_props.update({ @@ -912,9 +902,9 @@ class FakedLparTests(unittest.TestCase): 'object-uri': lpar2.uri, 'status': 'not-activated', }) - self.assertIsInstance(lpar2, FakedLpar) - self.assertEqual(lpar2.properties, lpar2_out_props) - self.assertEqual(lpar2.manager, cpc1.lpars) + assert isinstance(lpar2, FakedLpar) + assert lpar2.properties == lpar2_out_props + assert lpar2.manager == cpc1.lpars def test_lpars_remove(self): """Test remove() of FakedLparManager.""" @@ -922,19 +912,19 @@ class FakedLparTests(unittest.TestCase): cpc1 = cpcs[0] lpars = cpc1.lpars.list() lpar1 = lpars[0] - self.assertEqual(len(lpars), 1) + assert len(lpars) == 1 # the function to be tested: cpc1.lpars.remove(lpar1.oid) lpars = cpc1.lpars.list() - self.assertEqual(len(lpars), 0) + assert len(lpars) == 0 -class FakedNicTests(unittest.TestCase): +class TestFakedNic(object): """All tests for the FakedNicManager and FakedNic classes.""" - def setUp(self): + def setup_method(self): self.hmc = FakedHmc('fake-hmc', '2.13.1', '1.8') self.adapter1_oid = '380-xyz-12345' @@ -993,9 +983,9 @@ class FakedNicTests(unittest.TestCase): partitions = cpc1.partitions.list() partition1 = partitions[0] - self.assertIsInstance(partition1.nics, FakedNicManager) - self.assertRegexpMatches(partition1.nics.base_uri, - r'/api/partitions/[^/]+/nics') + assert isinstance(partition1.nics, FakedNicManager) + assert re.match(r'/api/partitions/[^/]+/nics', + partition1.nics.base_uri) def test_nics_list(self): """Test list() of FakedNicManager.""" @@ -1007,7 +997,7 @@ class FakedNicTests(unittest.TestCase): # the function to be tested: nics = 
partition1.nics.list() - self.assertEqual(len(nics), 1) + assert len(nics) == 1 nic1 = nics[0] nic1_out_props = self.nic1_in_props.copy() nic1_out_props.update({ @@ -1015,9 +1005,9 @@ class FakedNicTests(unittest.TestCase): 'element-uri': nic1.uri, 'device-number': nic1.properties['device-number'], }) - self.assertIsInstance(nic1, FakedNic) - self.assertEqual(nic1.properties, nic1_out_props) - self.assertEqual(nic1.manager, partition1.nics) + assert isinstance(nic1, FakedNic) + assert nic1.properties == nic1_out_props + assert nic1.manager == partition1.nics def test_nics_add(self): """Test add() of FakedNicManager.""" @@ -1026,7 +1016,7 @@ class FakedNicTests(unittest.TestCase): partitions = cpc1.partitions.list() partition1 = partitions[0] nics = partition1.nics.list() - self.assertEqual(len(nics), 1) + assert len(nics) == 1 nic2_oid = '77-55-ab' port_uri = '/api/adapters/abc-123/network-ports/42' @@ -1042,13 +1032,13 @@ class FakedNicTests(unittest.TestCase): nic2_in_props) nics = partition1.nics.list() - self.assertEqual(len(nics), 2) + assert len(nics) == 2 nic2 = [nic for nic in nics if nic.properties['name'] == nic2_in_props['name']][0] - self.assertEqual(new_nic.properties, nic2.properties) - self.assertEqual(new_nic.manager, nic2.manager) + assert new_nic.properties == nic2.properties + assert new_nic.manager == nic2.manager nic2_out_props = nic2_in_props.copy() nic2_out_props.update({ @@ -1056,9 +1046,9 @@ class FakedNicTests(unittest.TestCase): 'element-uri': nic2.uri, 'device-number': nic2.properties['device-number'], }) - self.assertIsInstance(nic2, FakedNic) - self.assertEqual(nic2.properties, nic2_out_props) - self.assertEqual(nic2.manager, partition1.nics) + assert isinstance(nic2, FakedNic) + assert nic2.properties == nic2_out_props + assert nic2.manager == partition1.nics def test_nics_remove(self): """Test remove() of FakedNicManager.""" @@ -1068,21 +1058,21 @@ class FakedNicTests(unittest.TestCase): partition1 = partitions[0] nics = 
partition1.nics.list() nic1 = nics[0] - self.assertEqual(len(nics), 1) + assert len(nics) == 1 # the function to be tested: partition1.nics.remove(nic1.oid) nics = partition1.nics.list() - self.assertEqual(len(nics), 0) + assert len(nics) == 0 # TODO: Add testcases for updating 'nic-uris' parent property -class FakedPartitionTests(unittest.TestCase): +class TestFakedPartition(object): """All tests for the FakedPartitionManager and FakedPartition classes.""" - def setUp(self): + def setup_method(self): self.hmc = FakedHmc('fake-hmc', '2.13.1', '1.8') self.cpc1_in_props = {'name': 'cpc1'} self.partition1_in_props = {'name': 'partition1'} @@ -1110,19 +1100,18 @@ class FakedPartitionTests(unittest.TestCase): repr_str = repr_str.replace('\n', '\\n') # We check just the begin of the string: - self.assertRegexpMatches( - repr_str, - r'^{classname}\s+at\s+0x{id:08x}\s+\(\\n.*'.format( - classname=partition.__class__.__name__, - id=id(partition))) + assert re.match(r'^{classname}\s+at\s+0x{id:08x}\s+\(\\n.*'. 
+ format(classname=partition.__class__.__name__, + id=id(partition)), + repr_str) def test_partitions_attr(self): """Test CPC 'partitions' attribute.""" cpcs = self.hmc.cpcs.list() cpc1 = cpcs[0] - self.assertIsInstance(cpc1.partitions, FakedPartitionManager) - self.assertRegexpMatches(cpc1.partitions.base_uri, r'/api/partitions') + assert isinstance(cpc1.partitions, FakedPartitionManager) + assert re.match(r'/api/partitions', cpc1.partitions.base_uri) def test_partitions_list(self): """Test list() of FakedPartitionManager.""" @@ -1132,7 +1121,7 @@ class FakedPartitionTests(unittest.TestCase): # the function to be tested: partitions = cpc1.partitions.list() - self.assertEqual(len(partitions), 1) + assert len(partitions) == 1 partition1 = partitions[0] partition1_out_props = self.partition1_in_props.copy() partition1_out_props.update({ @@ -1143,16 +1132,16 @@ class FakedPartitionTests(unittest.TestCase): 'nic-uris': [], 'virtual-function-uris': [], }) - self.assertIsInstance(partition1, FakedPartition) - self.assertEqual(partition1.properties, partition1_out_props) - self.assertEqual(partition1.manager, cpc1.partitions) + assert isinstance(partition1, FakedPartition) + assert partition1.properties == partition1_out_props + assert partition1.manager == cpc1.partitions def test_partitions_add(self): """Test add() of FakedPartitionManager.""" cpcs = self.hmc.cpcs.list() cpc1 = cpcs[0] partitions = cpc1.partitions.list() - self.assertEqual(len(partitions), 1) + assert len(partitions) == 1 partition2_in_props = {'name': 'partition2'} @@ -1161,14 +1150,14 @@ class FakedPartitionTests(unittest.TestCase): partition2_in_props) partitions = cpc1.partitions.list() - self.assertEqual(len(partitions), 2) + assert len(partitions) == 2 partition2 = [p for p in partitions if p.properties['name'] == partition2_in_props['name']][0] - self.assertEqual(new_partition.properties, partition2.properties) - self.assertEqual(new_partition.manager, partition2.manager) + assert 
new_partition.properties == partition2.properties + assert new_partition.manager == partition2.manager partition2_out_props = partition2_in_props.copy() partition2_out_props.update({ @@ -1179,9 +1168,9 @@ class FakedPartitionTests(unittest.TestCase): 'nic-uris': [], 'virtual-function-uris': [], }) - self.assertIsInstance(partition2, FakedPartition) - self.assertEqual(partition2.properties, partition2_out_props) - self.assertEqual(partition2.manager, cpc1.partitions) + assert isinstance(partition2, FakedPartition) + assert partition2.properties == partition2_out_props + assert partition2.manager == cpc1.partitions def test_partitions_remove(self): """Test remove() of FakedPartitionManager.""" @@ -1189,19 +1178,19 @@ class FakedPartitionTests(unittest.TestCase): cpc1 = cpcs[0] partitions = cpc1.partitions.list() partition1 = partitions[0] - self.assertEqual(len(partitions), 1) + assert len(partitions) == 1 # the function to be tested: cpc1.partitions.remove(partition1.oid) partitions = cpc1.partitions.list() - self.assertEqual(len(partitions), 0) + assert len(partitions) == 0 -class FakedPortTests(unittest.TestCase): +class TestFakedPort(object): """All tests for the FakedPortManager and FakedPort classes.""" - def setUp(self): + def setup_method(self): self.hmc = FakedHmc('fake-hmc', '2.13.1', '1.8') self.cpc1_in_props = {'name': 'cpc1'} self.adapter1_in_props = {'name': 'adapter1', 'adapter-family': 'osa'} @@ -1231,9 +1220,9 @@ class FakedPortTests(unittest.TestCase): adapters = cpc1.adapters.list() adapter1 = adapters[0] - self.assertIsInstance(adapter1.ports, FakedPortManager) - self.assertRegexpMatches(adapter1.ports.base_uri, - r'/api/adapters/[^/]+/network-ports') + assert isinstance(adapter1.ports, FakedPortManager) + assert re.match(r'/api/adapters/[^/]+/network-ports', + adapter1.ports.base_uri) def test_ports_list(self): """Test list() of FakedPortManager.""" @@ -1245,16 +1234,16 @@ class FakedPortTests(unittest.TestCase): # the function to be tested: 
ports = adapter1.ports.list() - self.assertEqual(len(ports), 1) + assert len(ports) == 1 port1 = ports[0] port1_out_props = self.port1_in_props.copy() port1_out_props.update({ 'element-id': port1.oid, 'element-uri': port1.uri, }) - self.assertIsInstance(port1, FakedPort) - self.assertEqual(port1.properties, port1_out_props) - self.assertEqual(port1.manager, adapter1.ports) + assert isinstance(port1, FakedPort) + assert port1.properties == port1_out_props + assert port1.manager == adapter1.ports def test_ports_add(self): """Test add() of FakedPortManager.""" @@ -1263,7 +1252,7 @@ class FakedPortTests(unittest.TestCase): adapters = cpc1.adapters.list() adapter1 = adapters[0] ports = adapter1.ports.list() - self.assertEqual(len(ports), 1) + assert len(ports) == 1 port2_in_props = {'name': 'port2'} @@ -1272,22 +1261,22 @@ class FakedPortTests(unittest.TestCase): port2_in_props) ports = adapter1.ports.list() - self.assertEqual(len(ports), 2) + assert len(ports) == 2 port2 = [p for p in ports if p.properties['name'] == port2_in_props['name']][0] - self.assertEqual(new_port.properties, port2.properties) - self.assertEqual(new_port.manager, port2.manager) + assert new_port.properties == port2.properties + assert new_port.manager == port2.manager port2_out_props = port2_in_props.copy() port2_out_props.update({ 'element-id': port2.oid, 'element-uri': port2.uri, }) - self.assertIsInstance(port2, FakedPort) - self.assertEqual(port2.properties, port2_out_props) - self.assertEqual(port2.manager, adapter1.ports) + assert isinstance(port2, FakedPort) + assert port2.properties == port2_out_props + assert port2.manager == adapter1.ports def test_ports_remove(self): """Test remove() of FakedPortManager.""" @@ -1297,23 +1286,23 @@ class FakedPortTests(unittest.TestCase): adapter1 = adapters[0] ports = adapter1.ports.list() port1 = ports[0] - self.assertEqual(len(ports), 1) + assert len(ports) == 1 # the function to be tested: adapter1.ports.remove(port1.oid) ports = 
adapter1.ports.list() - self.assertEqual(len(ports), 0) + assert len(ports) == 0 # TODO: Add testcases for updating 'network-port-uris' and # 'storage-port-uris' parent properties -class FakedVirtualFunctionTests(unittest.TestCase): +class TestFakedVirtualFunction(object): """All tests for the FakedVirtualFunctionManager and FakedVirtualFunction classes.""" - def setUp(self): + def setup_method(self): self.hmc = FakedHmc('fake-hmc', '2.13.1', '1.8') self.cpc1_in_props = {'name': 'cpc1'} self.partition1_in_props = {'name': 'partition1'} @@ -1344,10 +1333,10 @@ class FakedVirtualFunctionTests(unittest.TestCase): partitions = cpc1.partitions.list() partition1 = partitions[0] - self.assertIsInstance(partition1.virtual_functions, - FakedVirtualFunctionManager) - self.assertRegexpMatches(partition1.virtual_functions.base_uri, - r'/api/partitions/[^/]+/virtual-functions') + assert isinstance(partition1.virtual_functions, + FakedVirtualFunctionManager) + assert re.match(r'/api/partitions/[^/]+/virtual-functions', + partition1.virtual_functions.base_uri) def test_virtual_functions_list(self): """Test list() of FakedVirtualFunctionManager.""" @@ -1359,7 +1348,7 @@ class FakedVirtualFunctionTests(unittest.TestCase): # the function to be tested: virtual_functions = partition1.virtual_functions.list() - self.assertEqual(len(virtual_functions), 1) + assert len(virtual_functions) == 1 virtual_function1 = virtual_functions[0] virtual_function1_out_props = self.virtual_function1_in_props.copy() virtual_function1_out_props.update({ @@ -1367,11 +1356,9 @@ class FakedVirtualFunctionTests(unittest.TestCase): 'element-uri': virtual_function1.uri, 'device-number': virtual_function1.properties['device-number'], }) - self.assertIsInstance(virtual_function1, FakedVirtualFunction) - self.assertEqual(virtual_function1.properties, - virtual_function1_out_props) - self.assertEqual(virtual_function1.manager, - partition1.virtual_functions) + assert isinstance(virtual_function1, 
FakedVirtualFunction) + assert virtual_function1.properties == virtual_function1_out_props + assert virtual_function1.manager == partition1.virtual_functions def test_virtual_functions_add(self): """Test add() of FakedVirtualFunctionManager.""" @@ -1380,7 +1367,7 @@ class FakedVirtualFunctionTests(unittest.TestCase): partitions = cpc1.partitions.list() partition1 = partitions[0] virtual_functions = partition1.virtual_functions.list() - self.assertEqual(len(virtual_functions), 1) + assert len(virtual_functions) == 1 virtual_function2_in_props = {'name': 'virtual_function2'} @@ -1389,16 +1376,14 @@ class FakedVirtualFunctionTests(unittest.TestCase): virtual_function2_in_props) virtual_functions = partition1.virtual_functions.list() - self.assertEqual(len(virtual_functions), 2) + assert len(virtual_functions) == 2 virtual_function2 = [vf for vf in virtual_functions if vf.properties['name'] == virtual_function2_in_props['name']][0] - self.assertEqual(new_virtual_function.properties, - virtual_function2.properties) - self.assertEqual(new_virtual_function.manager, - virtual_function2.manager) + assert new_virtual_function.properties == virtual_function2.properties + assert new_virtual_function.manager == virtual_function2.manager virtual_function2_out_props = virtual_function2_in_props.copy() virtual_function2_out_props.update({ @@ -1406,11 +1391,9 @@ class FakedVirtualFunctionTests(unittest.TestCase): 'element-uri': virtual_function2.uri, 'device-number': virtual_function2.properties['device-number'], }) - self.assertIsInstance(virtual_function2, FakedVirtualFunction) - self.assertEqual(virtual_function2.properties, - virtual_function2_out_props) - self.assertEqual(virtual_function2.manager, - partition1.virtual_functions) + assert isinstance(virtual_function2, FakedVirtualFunction) + assert virtual_function2.properties == virtual_function2_out_props + assert virtual_function2.manager == partition1.virtual_functions def test_virtual_functions_remove(self): """Test 
remove() of FakedVirtualFunctionManager.""" @@ -1420,22 +1403,22 @@ class FakedVirtualFunctionTests(unittest.TestCase): partition1 = partitions[0] virtual_functions = partition1.virtual_functions.list() virtual_function1 = virtual_functions[0] - self.assertEqual(len(virtual_functions), 1) + assert len(virtual_functions) == 1 # the function to be tested: partition1.virtual_functions.remove(virtual_function1.oid) virtual_functions = partition1.virtual_functions.list() - self.assertEqual(len(virtual_functions), 0) + assert len(virtual_functions) == 0 # TODO: Add testcases for updating 'virtual-function-uris' parent property -class FakedVirtualSwitchTests(unittest.TestCase): +class TestFakedVirtualSwitch(object): """All tests for the FakedVirtualSwitchManager and FakedVirtualSwitch classes.""" - def setUp(self): + def setup_method(self): self.hmc = FakedHmc('fake-hmc', '2.13.1', '1.8') self.cpc1_in_props = {'name': 'cpc1'} self.virtual_switch1_in_props = {'name': 'virtual_switch1'} @@ -1457,9 +1440,9 @@ class FakedVirtualSwitchTests(unittest.TestCase): cpcs = self.hmc.cpcs.list() cpc1 = cpcs[0] - self.assertIsInstance(cpc1.virtual_switches, FakedVirtualSwitchManager) - self.assertRegexpMatches(cpc1.virtual_switches.base_uri, - r'/api/virtual-switches') + assert isinstance(cpc1.virtual_switches, FakedVirtualSwitchManager) + assert re.match(r'/api/virtual-switches', + cpc1.virtual_switches.base_uri) def test_virtual_switches_list(self): """Test list() of FakedVirtualSwitchManager.""" @@ -1469,7 +1452,7 @@ class FakedVirtualSwitchTests(unittest.TestCase): # the function to be tested: virtual_switches = cpc1.virtual_switches.list() - self.assertEqual(len(virtual_switches), 1) + assert len(virtual_switches) == 1 virtual_switch1 = virtual_switches[0] virtual_switch1_out_props = self.virtual_switch1_in_props.copy() virtual_switch1_out_props.update({ @@ -1477,16 +1460,16 @@ class FakedVirtualSwitchTests(unittest.TestCase): 'object-uri': virtual_switch1.uri, 
'connected-vnic-uris': [], }) - self.assertIsInstance(virtual_switch1, FakedVirtualSwitch) - self.assertEqual(virtual_switch1.properties, virtual_switch1_out_props) - self.assertEqual(virtual_switch1.manager, cpc1.virtual_switches) + assert isinstance(virtual_switch1, FakedVirtualSwitch) + assert virtual_switch1.properties == virtual_switch1_out_props + assert virtual_switch1.manager == cpc1.virtual_switches def test_virtual_switches_add(self): """Test add() of FakedVirtualSwitchManager.""" cpcs = self.hmc.cpcs.list() cpc1 = cpcs[0] virtual_switches = cpc1.virtual_switches.list() - self.assertEqual(len(virtual_switches), 1) + assert len(virtual_switches) == 1 virtual_switch2_in_props = {'name': 'virtual_switch2'} @@ -1495,16 +1478,14 @@ class FakedVirtualSwitchTests(unittest.TestCase): virtual_switch2_in_props) virtual_switches = cpc1.virtual_switches.list() - self.assertEqual(len(virtual_switches), 2) + assert len(virtual_switches) == 2 virtual_switch2 = [p for p in virtual_switches if p.properties['name'] == virtual_switch2_in_props['name']][0] - self.assertEqual(new_virtual_switch.properties, - virtual_switch2.properties) - self.assertEqual(new_virtual_switch.manager, - virtual_switch2.manager) + assert new_virtual_switch.properties == virtual_switch2.properties + assert new_virtual_switch.manager == virtual_switch2.manager virtual_switch2_out_props = virtual_switch2_in_props.copy() virtual_switch2_out_props.update({ @@ -1512,9 +1493,9 @@ class FakedVirtualSwitchTests(unittest.TestCase): 'object-uri': virtual_switch2.uri, 'connected-vnic-uris': [], }) - self.assertIsInstance(virtual_switch2, FakedVirtualSwitch) - self.assertEqual(virtual_switch2.properties, virtual_switch2_out_props) - self.assertEqual(virtual_switch2.manager, cpc1.virtual_switches) + assert isinstance(virtual_switch2, FakedVirtualSwitch) + assert virtual_switch2.properties == virtual_switch2_out_props + assert virtual_switch2.manager == cpc1.virtual_switches def 
test_virtual_switches_remove(self): """Test remove() of FakedVirtualSwitchManager.""" @@ -1522,20 +1503,20 @@ class FakedVirtualSwitchTests(unittest.TestCase): cpc1 = cpcs[0] virtual_switches = cpc1.virtual_switches.list() virtual_switch1 = virtual_switches[0] - self.assertEqual(len(virtual_switches), 1) + assert len(virtual_switches) == 1 # the function to be tested: cpc1.virtual_switches.remove(virtual_switch1.oid) virtual_switches = cpc1.virtual_switches.list() - self.assertEqual(len(virtual_switches), 0) + assert len(virtual_switches) == 0 -class FakedMetricsContextTests(unittest.TestCase): +class TestFakedMetricsContext(object): """All tests for the FakedMetricsContextManager and FakedMetricsContext classes.""" - def setUp(self): + def setup_method(self): self.hmc = FakedHmc('fake-hmc', '2.13.1', '1.8') self.cpc1_in_props = {'name': 'cpc1'} self.partition1_in_props = {'name': 'partition1'} @@ -1557,10 +1538,10 @@ class FakedMetricsContextTests(unittest.TestCase): """Test faked HMC 'metrics_contexts' attribute.""" faked_hmc = self.hmc - self.assertIsInstance(faked_hmc.metrics_contexts, - FakedMetricsContextManager) - self.assertRegexpMatches(faked_hmc.metrics_contexts.base_uri, - r'api/services/metrics/context') + assert isinstance(faked_hmc.metrics_contexts, + FakedMetricsContextManager) + assert re.match(r'/api/services/metrics/context', + faked_hmc.metrics_contexts.base_uri) def test_metrics_contexts_add(self): """Test add() of FakedMetricsContextManager.""" @@ -1574,16 +1555,15 @@ class FakedMetricsContextTests(unittest.TestCase): # the function to be tested: mc = faked_hmc.metrics_contexts.add(mc_in_props) - self.assertIsInstance(mc, FakedMetricsContext) - self.assertRegexpMatches(mc.uri, - r'api/services/metrics/context/[^/]+') - self.assertIs(mc.manager, faked_hmc.metrics_contexts) + assert isinstance(mc, FakedMetricsContext) + assert re.match(r'/api/services/metrics/context/[^/]+', mc.uri) + assert mc.manager is faked_hmc.metrics_contexts mc_props = 
mc_in_props.copy() mc_props.update({ 'fake-id': mc.oid, 'fake-uri': mc.uri, }) - self.assertEqual(mc.properties, mc_props) + assert mc.properties == mc_props def test_metrics_contexts_add_get_mg_def(self): """Test add_metric_group_definition(), get_metric_group_definition(), @@ -1603,38 +1583,36 @@ class FakedMetricsContextTests(unittest.TestCase): # Verify the initial M.G.Def names mg_def_names = mc_mgr.get_metric_group_definition_names() - self.assertEqual(list(mg_def_names), []) + assert list(mg_def_names) == [] # Verify that a M.G.Def can be added mc_mgr.add_metric_group_definition(mg_def_input) # Verify the M.G.Def names after having added one mg_def_names = mc_mgr.get_metric_group_definition_names() - self.assertEqual(list(mg_def_names), [mg_name]) + assert list(mg_def_names) == [mg_name] # Verify that it can be retrieved mg_def = mc_mgr.get_metric_group_definition(mg_name) - self.assertEqual(mg_def, mg_def_input) + assert mg_def == mg_def_input # Verify that retrieving a non-existing M.G.Def fails - with self.assertRaises(ValueError) as cm: + with pytest.raises(ValueError) as exc_info: mc_mgr.get_metric_group_definition('foo') - exc = cm.exception - self.assertRegexpMatches( - str(exc), - r"^A metric group definition with this name does not exist:.*") + exc = exc_info.value + assert re.match(r"^A metric group definition with this name does " + r"not exist:.*", str(exc)) # Verify that adding an M.G.Def with an existing name fails - with self.assertRaises(ValueError) as cm: + with pytest.raises(ValueError) as exc_info: mc_mgr.add_metric_group_definition(mg_def_input) - exc = cm.exception - self.assertRegexpMatches( - str(exc), - r"^A metric group definition with this name already exists:.*") + exc = exc_info.value + assert re.match(r"^A metric group definition with this name already " + r"exists:.*", str(exc)) # Verify that the M.G.Def names have not changed in these fails mg_def_names = mc_mgr.get_metric_group_definition_names() - 
self.assertEqual(list(mg_def_names), [mg_name]) + assert list(mg_def_names) == [mg_name] def test_metrics_contexts_add_get_metric_values(self): """Test add_metric_values(), get_metric_values(), and @@ -1663,26 +1641,25 @@ class FakedMetricsContextTests(unittest.TestCase): # Verify the initial M.O.Val group names mo_val_group_names = mc_mgr.get_metric_values_group_names() - self.assertEqual(list(mo_val_group_names), []) + assert list(mo_val_group_names) == [] # Verify that a first M.O.Val can be added mc_mgr.add_metric_values(mo_val_input) # Verify the M.O.Val group names after having added one mo_val_group_names = mc_mgr.get_metric_values_group_names() - self.assertEqual(list(mo_val_group_names), [mg_name]) + assert list(mo_val_group_names) == [mg_name] # Verify that the M.O.Vals can be retrieved and contain the first one mo_vals = mc_mgr.get_metric_values(mg_name) - self.assertEqual(list(mo_vals), [mo_val_input]) + assert list(mo_vals) == [mo_val_input] # Verify that retrieving a non-existing M.O.Val fails - with self.assertRaises(ValueError) as cm: + with pytest.raises(ValueError) as exc_info: mc_mgr.get_metric_values('foo') - exc = cm.exception - self.assertRegexpMatches( - str(exc), - r"^Metric values for this group name do not exist:.*") + exc = exc_info.value + assert re.match(r"^Metric values for this group name do not " + r"exist:.*", str(exc)) # Verify that a second M.O.Val can be added for the same group name mc_mgr.add_metric_values(mo_val2_input) @@ -1690,11 +1667,11 @@ class FakedMetricsContextTests(unittest.TestCase): # Verify the M.O.Val group names after having added a second M.O.Val # for the same group name -> still just one group name mo_val_group_names = mc_mgr.get_metric_values_group_names() - self.assertEqual(list(mo_val_group_names), [mg_name]) + assert list(mo_val_group_names) == [mg_name] # Verify that the M.O.Vals can be retrieved and contain both mo_vals = mc_mgr.get_metric_values(mg_name) - self.assertEqual(list(mo_vals), [mo_val_input, 
mo_val2_input]) + assert list(mo_vals) == [mo_val_input, mo_val2_input] def test_metrics_context_get_mg_defs(self): """Test get_metric_group_definitions() of FakedMetricsContext.""" @@ -1732,7 +1709,7 @@ class FakedMetricsContextTests(unittest.TestCase): mg_defs = mc.get_metric_group_definitions() # Verify the returned M.G.Defs - self.assertEqual(list(mg_defs), exp_mg_defs) + assert list(mg_defs) == exp_mg_defs # Test case where the default for M.G.Defs is tested mc_in_props = { @@ -1746,7 +1723,7 @@ class FakedMetricsContextTests(unittest.TestCase): mg_defs = mc.get_metric_group_definitions() # Verify the returned M.G.Defs - self.assertEqual(list(mg_defs), exp_mg_defs) + assert list(mg_defs) == exp_mg_defs def test_metrics_context_get_mg_infos(self): """Test get_metric_group_infos() of FakedMetricsContext.""" @@ -1810,7 +1787,7 @@ class FakedMetricsContextTests(unittest.TestCase): mg_infos = mc.get_metric_group_infos() # Verify the returned M.G.Defs - self.assertEqual(list(mg_infos), exp_mg_infos) + assert list(mg_infos) == exp_mg_infos # Test case where the default for M.G.Defs is tested mc_in_props = { @@ -1824,7 +1801,7 @@ class FakedMetricsContextTests(unittest.TestCase): mg_infos = mc.get_metric_group_infos() # Verify the returned M.G.Defs - self.assertEqual(list(mg_infos), exp_mg_infos) + assert list(mg_infos) == exp_mg_infos def test_metrics_context_get_m_values(self): """Test get_metric_values() of FakedMetricsContext.""" @@ -1871,10 +1848,10 @@ class FakedMetricsContextTests(unittest.TestCase): # the function to be tested: mv_list = mc.get_metric_values() - self.assertEqual(len(mv_list), 1) + assert len(mv_list) == 1 mv = mv_list[0] - self.assertEqual(mv[0], mg_name) - self.assertEqual(mv[1], exp_mo_vals) + assert mv[0] == mg_name + assert mv[1] == exp_mo_vals def test_metrics_context_get_m_values_response(self): """Test get_metric_values_response() of FakedMetricsContext.""" @@ -1959,14 +1936,13 @@ class FakedMetricsContextTests(unittest.TestCase): # the 
function to be tested: mv_resp = mc.get_metric_values_response() - self.assertEqual( - mv_resp, exp_mv_resp, - "Actual response string:\n{!r}\n" - "Expected response string:\n{!r}\n". - format(mv_resp, exp_mv_resp)) + assert mv_resp == exp_mv_resp, \ + "Actual response string:\n{!r}\n" \ + "Expected response string:\n{!r}\n". \ + format(mv_resp, exp_mv_resp) -class FakedMetricGroupDefinitionTests(unittest.TestCase): +class TestFakedMetricGroupDefinition(object): """All tests for the FakedMetricGroupDefinition class.""" def test_metric_group_definition_attr(self): @@ -1983,12 +1959,12 @@ class FakedMetricGroupDefinitionTests(unittest.TestCase): # the function to be tested: new_mgd = FakedMetricGroupDefinition(**in_kwargs) - self.assertEqual(new_mgd.name, in_kwargs['name']) - self.assertEqual(new_mgd.types, in_kwargs['types']) - self.assertIsNot(new_mgd.types, in_kwargs['types']) # was copied + assert new_mgd.name == in_kwargs['name'] + assert new_mgd.types == in_kwargs['types'] + assert new_mgd.types is not in_kwargs['types'] # was copied -class FakedMetricObjectValuesTests(unittest.TestCase): +class TestFakedMetricObjectValues(object): """All tests for the FakedMetricObjectValues class.""" def test_metric_object_values_attr(self): @@ -2007,12 +1983,8 @@ class FakedMetricObjectValuesTests(unittest.TestCase): # the function to be tested: new_mov = FakedMetricObjectValues(**in_kwargs) - self.assertEqual(new_mov.group_name, in_kwargs['group_name']) - self.assertEqual(new_mov.resource_uri, in_kwargs['resource_uri']) - self.assertEqual(new_mov.timestamp, in_kwargs['timestamp']) - self.assertEqual(new_mov.values, in_kwargs['values']) - self.assertIsNot(new_mov.values, in_kwargs['values']) # was copied - - -if __name__ == '__main__': - unittest.main() + assert new_mov.group_name == in_kwargs['group_name'] + assert new_mov.resource_uri == in_kwargs['resource_uri'] + assert new_mov.timestamp == in_kwargs['timestamp'] + assert new_mov.values == in_kwargs['values'] + assert 
new_mov.values is not in_kwargs['values'] # was copied diff --git a/tests/unit/zhmcclient_mock/test_idpool.py b/tests/unit/zhmcclient_mock/test_idpool.py old mode 100755 new mode 100644 index 9c84f2e..36b8917 --- a/tests/unit/zhmcclient_mock/test_idpool.py +++ b/tests/unit/zhmcclient_mock/test_idpool.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2017 IBM Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,30 +19,32 @@ Unit test cases for the _idpool module of the zhmcclient_mock package. from __future__ import absolute_import, print_function import requests.packages.urllib3 -import unittest +import pytest from zhmcclient_mock._idpool import IdPool +requests.packages.urllib3.disable_warnings() -class IdPoolTests(unittest.TestCase): + +class TestIdPool(object): """All tests for class IdPool.""" def test_init_error_1(self): - with self.assertRaises(ValueError): + with pytest.raises(ValueError): IdPool(7, 6) def test_invalid_free_error_1(self): pool = IdPool(5, 5) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): pool.free(4) # not in range - with self.assertRaises(ValueError): + with pytest.raises(ValueError): pool.free(5) # in range but not allocated - with self.assertRaises(ValueError): + with pytest.raises(ValueError): pool.free(6) # not in range def test_invalid_free_error_2(self): @@ -70,10 +71,10 @@ class IdPoolTests(unittest.TestCase): # Verify uniqueness of the ID values id_set = set(id_list) - self.assertEqual(len(id_set), len(id_list)) + assert len(id_set) == len(id_list) # Verify that the pool is exhausted - with self.assertRaises(ValueError): + with pytest.raises(ValueError): pool.alloc() def _test_free_for_lo_hi(self, lowest, highest): @@ -93,7 +94,7 @@ class IdPoolTests(unittest.TestCase): pool.free(id) # Verify that nothing is used in the pool - self.assertEqual(len(pool._used), 0) + assert len(pool._used) == 0 # Exhaust the pool id_list2 = [] @@ -102,10 +103,10 @@ class 
IdPoolTests(unittest.TestCase): id_list2.append(id) # Verify that the same ID values came back as last time - self.assertEqual(set(id_list1), set(id_list2)) + assert set(id_list1) == set(id_list2) # Verify that the pool is exhausted - with self.assertRaises(ValueError): + with pytest.raises(ValueError): pool.alloc() def _test_all_for_lo_hi(self, lowest, highest): @@ -147,8 +148,3 @@ class IdPoolTests(unittest.TestCase): self._test_all_for_lo_hi(11, 20) self._test_all_for_lo_hi(11, 21) self._test_all_for_lo_hi(11, 22) - - -if __name__ == '__main__': - requests.packages.urllib3.disable_warnings() - unittest.main() diff --git a/tests/unit/zhmcclient_mock/test_urihandler.py b/tests/unit/zhmcclient_mock/test_urihandler.py old mode 100755 new mode 100644 index 5903556..5631d3f --- a/tests/unit/zhmcclient_mock/test_urihandler.py +++ b/tests/unit/zhmcclient_mock/test_urihandler.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016-2017 IBM Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,9 +19,10 @@ Unit tests for _urihandler module of the zhmcclient_mock package. 
from __future__ import absolute_import, print_function import requests.packages.urllib3 -import unittest from datetime import datetime +# FIXME: Migrate mock to zhmcclient_mock from mock import MagicMock +import pytest from zhmcclient_mock._hmc import FakedHmc, FakedMetricGroupDefinition, \ FakedMetricObjectValues @@ -69,8 +69,10 @@ from zhmcclient_mock._urihandler import HTTPError, InvalidResourceError, \ ImageActProfilesHandler, ImageActProfileHandler, \ LoadActProfilesHandler, LoadActProfileHandler +requests.packages.urllib3.disable_warnings() -class HTTPErrorTests(unittest.TestCase): + +class TestHTTPError(object): """All tests for class HTTPError.""" def test_attributes(self): @@ -82,11 +84,11 @@ class HTTPErrorTests(unittest.TestCase): exc = HTTPError(method, uri, http_status, reason, message) - self.assertEqual(exc.method, method) - self.assertEqual(exc.uri, uri) - self.assertEqual(exc.http_status, http_status) - self.assertEqual(exc.reason, reason) - self.assertEqual(exc.message, message) + assert exc.method == method + assert exc.uri == uri + assert exc.http_status == http_status + assert exc.reason == reason + assert exc.message == message def test_response(self): method = 'GET' @@ -105,13 +107,13 @@ class HTTPErrorTests(unittest.TestCase): response = exc.response() - self.assertEqual(response, expected_response) + assert response == expected_response -class ConnectionErrorTests(unittest.TestCase): +class TestConnectionError(object): """All tests for class ConnectionError.""" - def setUp(self): + def setup_method(self): self.hmc = FakedHmc('fake-hmc', '2.13.1', '1.8') self.cpc1 = self.hmc.cpcs.add({'name': 'cpc1'}) @@ -120,7 +122,7 @@ class ConnectionErrorTests(unittest.TestCase): exc = ConnectionError(msg) - self.assertEqual(exc.message, msg) + assert exc.message == msg class DummyHandler1(object): @@ -135,7 +137,7 @@ class DummyHandler3(object): pass -class InvalidResourceErrorTests(unittest.TestCase): +class TestInvalidResourceError(object): """All 
tests for class InvalidResourceError.""" def test_attributes_with_handler(self): @@ -146,11 +148,11 @@ class InvalidResourceErrorTests(unittest.TestCase): exc = InvalidResourceError(method, uri, DummyHandler1) - self.assertEqual(exc.method, method) - self.assertEqual(exc.uri, uri) - self.assertEqual(exc.http_status, exp_http_status) - self.assertEqual(exc.reason, exp_reason) - self.assertIn(uri, exc.message) + assert exc.method == method + assert exc.uri == uri + assert exc.http_status == exp_http_status + assert exc.reason == exp_reason + assert uri in exc.message # next test case exp_reason = 2 @@ -158,7 +160,7 @@ class InvalidResourceErrorTests(unittest.TestCase): exc = InvalidResourceError(method, uri, DummyHandler1, reason=exp_reason) - self.assertEqual(exc.reason, exp_reason) + assert exc.reason == exp_reason # next test case exp_resource_uri = '/api/resource' @@ -166,7 +168,7 @@ class InvalidResourceErrorTests(unittest.TestCase): exc = InvalidResourceError(method, uri, DummyHandler1, resource_uri=exp_resource_uri) - self.assertIn(exp_resource_uri, exc.message) + assert exp_resource_uri in exc.message def test_attributes_no_handler(self): method = 'GET' @@ -176,13 +178,13 @@ class InvalidResourceErrorTests(unittest.TestCase): exc = InvalidResourceError(method, uri, None) - self.assertEqual(exc.method, method) - self.assertEqual(exc.uri, uri) - self.assertEqual(exc.http_status, exp_http_status) - self.assertEqual(exc.reason, exp_reason) + assert exc.method == method + assert exc.uri == uri + assert exc.http_status == exp_http_status + assert exc.reason == exp_reason -class InvalidMethodErrorTests(unittest.TestCase): +class TestInvalidMethodError(object): """All tests for class InvalidMethodError.""" def test_attributes_with_handler(self): @@ -193,10 +195,10 @@ class InvalidMethodErrorTests(unittest.TestCase): exc = InvalidMethodError(method, uri, DummyHandler1) - self.assertEqual(exc.method, method) - self.assertEqual(exc.uri, uri) - 
self.assertEqual(exc.http_status, exp_http_status) - self.assertEqual(exc.reason, exp_reason) + assert exc.method == method + assert exc.uri == uri + assert exc.http_status == exp_http_status + assert exc.reason == exp_reason def test_attributes_no_handler(self): method = 'DELETE' @@ -206,16 +208,16 @@ class InvalidMethodErrorTests(unittest.TestCase): exc = InvalidMethodError(method, uri, None) - self.assertEqual(exc.method, method) - self.assertEqual(exc.uri, uri) - self.assertEqual(exc.http_status, exp_http_status) - self.assertEqual(exc.reason, exp_reason) + assert exc.method == method + assert exc.uri == uri + assert exc.http_status == exp_http_status + assert exc.reason == exp_reason -class CpcNotInDpmErrorTests(unittest.TestCase): +class TestCpcNotInDpmError(object): """All tests for class CpcNotInDpmError.""" - def setUp(self): + def setup_method(self): self.hmc = FakedHmc('fake-hmc', '2.13.1', '1.8') self.cpc1 = self.hmc.cpcs.add({'name': 'cpc1'}) @@ -227,16 +229,16 @@ class CpcNotInDpmErrorTests(unittest.TestCase): exc = CpcNotInDpmError(method, uri, self.cpc1) - self.assertEqual(exc.method, method) - self.assertEqual(exc.uri, uri) - self.assertEqual(exc.http_status, exp_http_status) - self.assertEqual(exc.reason, exp_reason) + assert exc.method == method + assert exc.uri == uri + assert exc.http_status == exp_http_status + assert exc.reason == exp_reason -class CpcInDpmErrorTests(unittest.TestCase): +class TestCpcInDpmError(object): """All tests for class CpcInDpmError.""" - def setUp(self): + def setup_method(self): self.hmc = FakedHmc('fake-hmc', '2.13.1', '1.8') self.cpc1 = self.hmc.cpcs.add({'name': 'cpc1'}) @@ -248,129 +250,129 @@ class CpcInDpmErrorTests(unittest.TestCase): exc = CpcInDpmError(method, uri, self.cpc1) - self.assertEqual(exc.method, method) - self.assertEqual(exc.uri, uri) - self.assertEqual(exc.http_status, exp_http_status) - self.assertEqual(exc.reason, exp_reason) + assert exc.method == method + assert exc.uri == uri + assert 
exc.http_status == exp_http_status + assert exc.reason == exp_reason -class ParseQueryParmsTests(unittest.TestCase): +class TestParseQueryParms(object): """All tests for parse_query_parms().""" def test_none(self): filter_args = parse_query_parms('fake-meth', 'fake-uri', None) - self.assertIsNone(filter_args) + assert filter_args is None def test_empty(self): filter_args = parse_query_parms('fake-meth', 'fake-uri', '') - self.assertIsNone(filter_args) + assert filter_args is None def test_one_normal(self): filter_args = parse_query_parms('fake-meth', 'fake-uri', 'a=b') - self.assertEqual(filter_args, {'a': 'b'}) + assert filter_args == {'a': 'b'} def test_two_normal(self): filter_args = parse_query_parms('fake-meth', 'fake-uri', 'a=b&c=d') - self.assertEqual(filter_args, {'a': 'b', 'c': 'd'}) + assert filter_args == {'a': 'b', 'c': 'd'} def test_one_trailing_amp(self): filter_args = parse_query_parms('fake-meth', 'fake-uri', 'a=b&') - self.assertEqual(filter_args, {'a': 'b'}) + assert filter_args == {'a': 'b'} def test_one_leading_amp(self): filter_args = parse_query_parms('fake-meth', 'fake-uri', '&a=b') - self.assertEqual(filter_args, {'a': 'b'}) + assert filter_args == {'a': 'b'} def test_one_missing_value(self): filter_args = parse_query_parms('fake-meth', 'fake-uri', 'a=') - self.assertEqual(filter_args, {'a': ''}) + assert filter_args == {'a': ''} def test_one_missing_name(self): filter_args = parse_query_parms('fake-meth', 'fake-uri', '=b') - self.assertEqual(filter_args, {'': 'b'}) + assert filter_args == {'': 'b'} def test_two_same_normal(self): filter_args = parse_query_parms('fake-meth', 'fake-uri', 'a=b&a=c') - self.assertEqual(filter_args, {'a': ['b', 'c']}) + assert filter_args == {'a': ['b', 'c']} def test_two_same_one_normal(self): filter_args = parse_query_parms('fake-meth', 'fake-uri', 'a=b&d=e&a=c') - self.assertEqual(filter_args, {'a': ['b', 'c'], 'd': 'e'}) + assert filter_args == {'a': ['b', 'c'], 'd': 'e'} def test_space_value_1(self): 
filter_args = parse_query_parms('fake-meth', 'fake-uri', 'a=b%20c') - self.assertEqual(filter_args, {'a': 'b c'}) + assert filter_args == {'a': 'b c'} def test_space_value_2(self): filter_args = parse_query_parms('fake-meth', 'fake-uri', 'a=%20c') - self.assertEqual(filter_args, {'a': ' c'}) + assert filter_args == {'a': ' c'} def test_space_value_3(self): filter_args = parse_query_parms('fake-meth', 'fake-uri', 'a=b%20') - self.assertEqual(filter_args, {'a': 'b '}) + assert filter_args == {'a': 'b '} def test_space_value_4(self): filter_args = parse_query_parms('fake-meth', 'fake-uri', 'a=%20') - self.assertEqual(filter_args, {'a': ' '}) + assert filter_args == {'a': ' '} def test_space_name_1(self): filter_args = parse_query_parms('fake-meth', 'fake-uri', 'a%20b=c') - self.assertEqual(filter_args, {'a b': 'c'}) + assert filter_args == {'a b': 'c'} def test_space_name_2(self): filter_args = parse_query_parms('fake-meth', 'fake-uri', '%20b=c') - self.assertEqual(filter_args, {' b': 'c'}) + assert filter_args == {' b': 'c'} def test_space_name_3(self): filter_args = parse_query_parms('fake-meth', 'fake-uri', 'a%20=c') - self.assertEqual(filter_args, {'a ': 'c'}) + assert filter_args == {'a ': 'c'} def test_space_name_4(self): filter_args = parse_query_parms('fake-meth', 'fake-uri', '%20=c') - self.assertEqual(filter_args, {' ': 'c'}) + assert filter_args == {' ': 'c'} def test_invalid_format_1(self): - with self.assertRaises(HTTPError) as cm: + with pytest.raises(HTTPError) as exc_info: parse_query_parms('fake-meth', 'fake-uri', 'a==b') - exc = cm.exception - self.assertEqual(exc.http_status, 400) - self.assertEqual(exc.reason, 1) + exc = exc_info.value + assert exc.http_status == 400 + assert exc.reason == 1 def test_invalid_format_2(self): - with self.assertRaises(HTTPError) as cm: + with pytest.raises(HTTPError) as exc_info: parse_query_parms('fake-meth', 'fake-uri', 'a=b=c') - exc = cm.exception - self.assertEqual(exc.http_status, 400) - 
self.assertEqual(exc.reason, 1) + exc = exc_info.value + assert exc.http_status == 400 + assert exc.reason == 1 def test_invalid_format_3(self): - with self.assertRaises(HTTPError) as cm: + with pytest.raises(HTTPError) as exc_info: parse_query_parms('fake-meth', 'fake-uri', 'a') - exc = cm.exception - self.assertEqual(exc.http_status, 400) - self.assertEqual(exc.reason, 1) + exc = exc_info.value + assert exc.http_status == 400 + assert exc.reason == 1 -class UriHandlerHandlerEmptyTests(unittest.TestCase): +class TestUriHandlerHandlerEmpty(object): """All tests for UriHandler.handler() with empty URIs.""" - def setUp(self): + def setup_method(self): self.uris = () self.urihandler = UriHandler(self.uris) def test_uris_empty_1(self): - with self.assertRaises(InvalidResourceError): + with pytest.raises(InvalidResourceError): self.urihandler.handler('/api/cpcs', 'GET') def test_uris_empty_2(self): - with self.assertRaises(InvalidResourceError): + with pytest.raises(InvalidResourceError): self.urihandler.handler('', 'GET') -class UriHandlerHandlerSimpleTests(unittest.TestCase): +class TestUriHandlerHandlerSimple(object): """All tests for UriHandler.handler() with a simple set of URIs.""" - def setUp(self): + def setup_method(self): self.uris = ( (r'/api/cpcs', DummyHandler1), (r'/api/cpcs/([^/]+)', DummyHandler2), @@ -381,60 +383,60 @@ class UriHandlerHandlerSimpleTests(unittest.TestCase): def test_ok1(self): handler_class, uri_parms = self.urihandler.handler( '/api/cpcs', 'GET') - self.assertEqual(handler_class, DummyHandler1) - self.assertEqual(len(uri_parms), 0) + assert handler_class == DummyHandler1 + assert len(uri_parms) == 0 def test_ok2(self): handler_class, uri_parms = self.urihandler.handler( '/api/cpcs/fake-id1', 'GET') - self.assertEqual(handler_class, DummyHandler2) - self.assertEqual(len(uri_parms), 1) - self.assertEqual(uri_parms[0], 'fake-id1') + assert handler_class == DummyHandler2 + assert len(uri_parms) == 1 + assert uri_parms[0] == 'fake-id1' def 
test_ok3(self): handler_class, uri_parms = self.urihandler.handler( '/api/cpcs/fake-id1/child', 'GET') - self.assertEqual(handler_class, DummyHandler3) - self.assertEqual(len(uri_parms), 1) - self.assertEqual(uri_parms[0], 'fake-id1') + assert handler_class == DummyHandler3 + assert len(uri_parms) == 1 + assert uri_parms[0] == 'fake-id1' def test_err_begin_missing(self): - with self.assertRaises(InvalidResourceError): + with pytest.raises(InvalidResourceError): self.urihandler.handler('api/cpcs', 'GET') def test_err_begin_extra(self): - with self.assertRaises(InvalidResourceError): + with pytest.raises(InvalidResourceError): self.urihandler.handler('x/api/cpcs', 'GET') def test_err_end_missing(self): - with self.assertRaises(InvalidResourceError): + with pytest.raises(InvalidResourceError): self.urihandler.handler('/api/cpc', 'GET') def test_err_end_extra(self): - with self.assertRaises(InvalidResourceError): + with pytest.raises(InvalidResourceError): self.urihandler.handler('/api/cpcs_x', 'GET') def test_err_end_slash(self): - with self.assertRaises(InvalidResourceError): + with pytest.raises(InvalidResourceError): self.urihandler.handler('/api/cpcs/', 'GET') def test_err_end2_slash(self): - with self.assertRaises(InvalidResourceError): + with pytest.raises(InvalidResourceError): self.urihandler.handler('/api/cpcs/fake-id1/', 'GET') def test_err_end2_missing(self): - with self.assertRaises(InvalidResourceError): + with pytest.raises(InvalidResourceError): self.urihandler.handler('/api/cpcs/fake-id1/chil', 'GET') def test_err_end2_extra(self): - with self.assertRaises(InvalidResourceError): + with pytest.raises(InvalidResourceError): self.urihandler.handler('/api/cpcs/fake-id1/child_x', 'GET') -class UriHandlerMethodTests(unittest.TestCase): +class TestUriHandlerMethod(object): """All tests for get(), post(), delete() methods of class UriHandler.""" - def setUp(self): + def setup_method(self): self.uris = ( (r'/api/cpcs', DummyHandler1), (r'/api/cpcs/([^/]+)', 
DummyHandler2), @@ -468,7 +470,7 @@ class UriHandlerMethodTests(unittest.TestCase): self.urihandler = UriHandler(self.uris) self.hmc = FakedHmc('fake-hmc', '2.13.1', '1.8') - def tearDown(self): + def teardown_method(self): delattr(DummyHandler1, 'get') delattr(DummyHandler1, 'post') delattr(DummyHandler2, 'get') @@ -479,48 +481,48 @@ class UriHandlerMethodTests(unittest.TestCase): # the function to be tested result = self.urihandler.get(self.hmc, '/api/cpcs', True) - self.assertEqual(result, self.cpcs) + assert result == self.cpcs DummyHandler1.get.assert_called_with( 'GET', self.hmc, '/api/cpcs', tuple(), True) - self.assertEqual(DummyHandler1.post.called, 0) - self.assertEqual(DummyHandler2.get.called, 0) - self.assertEqual(DummyHandler2.delete.called, 0) + assert DummyHandler1.post.called == 0 + assert DummyHandler2.get.called == 0 + assert DummyHandler2.delete.called == 0 def test_get_cpc1(self): # the function to be tested result = self.urihandler.get(self.hmc, '/api/cpcs/1', True) - self.assertEqual(result, self.cpc1) + assert result == self.cpc1 - self.assertEqual(DummyHandler1.get.called, 0) - self.assertEqual(DummyHandler1.post.called, 0) + assert DummyHandler1.get.called == 0 + assert DummyHandler1.post.called == 0 DummyHandler2.get.assert_called_with( 'GET', self.hmc, '/api/cpcs/1', tuple('1'), True) - self.assertEqual(DummyHandler2.delete.called, 0) + assert DummyHandler2.delete.called == 0 def test_post_cpcs(self): # the function to be tested result = self.urihandler.post(self.hmc, '/api/cpcs', {}, True, True) - self.assertEqual(result, self.new_cpc) + assert result == self.new_cpc - self.assertEqual(DummyHandler1.get.called, 0) + assert DummyHandler1.get.called == 0 DummyHandler1.post.assert_called_with( 'POST', self.hmc, '/api/cpcs', tuple(), {}, True, True) - self.assertEqual(DummyHandler2.get.called, 0) - self.assertEqual(DummyHandler2.delete.called, 0) + assert DummyHandler2.get.called == 0 + assert DummyHandler2.delete.called == 0 def 
test_delete_cpc2(self): # the function to be tested self.urihandler.delete(self.hmc, '/api/cpcs/2', True) - self.assertEqual(DummyHandler1.get.called, 0) - self.assertEqual(DummyHandler1.post.called, 0) - self.assertEqual(DummyHandler2.get.called, 0) + assert DummyHandler1.get.called == 0 + assert DummyHandler1.post.called == 0 + assert DummyHandler2.get.called == 0 DummyHandler2.delete.assert_called_with( 'DELETE', self.hmc, '/api/cpcs/2', tuple('2'), True) @@ -815,10 +817,10 @@ def standard_test_hmc(): return hmc, hmc_resources -class GenericGetPropertiesHandlerTests(unittest.TestCase): +class TestGenericGetPropertiesHandler(object): """All tests for class GenericGetPropertiesHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/cpcs/([^/]+)', GenericGetPropertiesHandler), @@ -839,13 +841,13 @@ class GenericGetPropertiesHandlerTests(unittest.TestCase): 'description': 'CPC #1 (classic mode)', 'status': 'operating', } - self.assertEqual(cpc1, exp_cpc1) + assert cpc1 == exp_cpc1 def test_get_error_offline(self): self.hmc.disable() - with self.assertRaises(ConnectionError): + with pytest.raises(ConnectionError): # the function to be tested: self.urihandler.get(self.hmc, '/api/cpcs/1', True) @@ -855,10 +857,10 @@ class _GenericGetUpdatePropertiesHandler(GenericGetPropertiesHandler, pass -class GenericUpdatePropertiesHandlerTests(unittest.TestCase): +class TestGenericUpdatePropertiesHandler(object): """All tests for class GenericUpdatePropertiesHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/cpcs/([^/]+)', _GenericGetUpdatePropertiesHandler), @@ -874,9 +876,9 @@ class GenericUpdatePropertiesHandlerTests(unittest.TestCase): resp = self.urihandler.post(self.hmc, '/api/cpcs/1', update_cpc1, True, True) - self.assertEqual(resp, None) + assert resp is None cpc1 = self.urihandler.get(self.hmc, '/api/cpcs/1', True) - 
self.assertEqual(cpc1['description'], 'CPC #1 (updated)') + assert cpc1['description'] == 'CPC #1 (updated)' def test_post_error_offline(self): @@ -886,16 +888,16 @@ class GenericUpdatePropertiesHandlerTests(unittest.TestCase): 'description': 'CPC #1 (updated)', } - with self.assertRaises(ConnectionError): + with pytest.raises(ConnectionError): # the function to be tested: self.urihandler.post(self.hmc, '/api/cpcs/1', update_cpc1, True, True) -class GenericDeleteHandlerTests(unittest.TestCase): +class TestGenericDeleteHandler(object): """All tests for class GenericDeleteHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( ('/api/console/ldap-server-definitions/([^/]+)', @@ -910,10 +912,10 @@ class GenericDeleteHandlerTests(unittest.TestCase): # the function to be tested: ret = self.urihandler.delete(self.hmc, uri, True) - self.assertIsNone(ret) + assert ret is None # Verify it no longer exists: - with self.assertRaises(KeyError): + with pytest.raises(KeyError): self.hmc.lookup_by_uri(uri) def test_delete_error_offline(self): @@ -922,15 +924,15 @@ class GenericDeleteHandlerTests(unittest.TestCase): uri = '/api/console/ldap-server-definitions/fake-ldap-srv-def-oid-1' - with self.assertRaises(ConnectionError): + with pytest.raises(ConnectionError): # the function to be tested: self.urihandler.delete(self.hmc, uri, True) -class VersionHandlerTests(unittest.TestCase): +class TestVersionHandler(object): """All tests for class VersionHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/version', VersionHandler), @@ -949,13 +951,13 @@ class VersionHandlerTests(unittest.TestCase): 'api-major-version': int(api_major), 'api-minor-version': int(api_minor), } - self.assertEqual(resp, exp_resp) + assert resp == exp_resp -class ConsoleHandlerTests(unittest.TestCase): +class TestConsoleHandler(object): """All tests for class 
ConsoleHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( @@ -975,13 +977,13 @@ class ConsoleHandlerTests(unittest.TestCase): 'object-uri': '/api/console', 'name': 'fake_console_name', } - self.assertEqual(console, exp_console) + assert console == exp_console -class ConsoleRestartHandlerTests(unittest.TestCase): +class TestConsoleRestartHandler(object): """All tests for class ConsoleRestartHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( @@ -999,8 +1001,8 @@ class ConsoleRestartHandlerTests(unittest.TestCase): resp = self.urihandler.post( self.hmc, '/api/console/operations/restart', body, True, True) - self.assertTrue(self.hmc.enabled) - self.assertIsNone(resp) + assert self.hmc.enabled + assert resp is None def test_restart_error_not_found(self): @@ -1010,20 +1012,20 @@ class ConsoleRestartHandlerTests(unittest.TestCase): body = { 'force': False, } - with self.assertRaises(InvalidResourceError) as cm: + with pytest.raises(InvalidResourceError) as exc_info: # the function to be tested: self.urihandler.post( self.hmc, '/api/console/operations/restart', body, True, True) - exc = cm.exception - self.assertEqual(exc.reason, 1) + exc = exc_info.value + assert exc.reason == 1 -class ConsoleShutdownHandlerTests(unittest.TestCase): +class TestConsoleShutdownHandler(object): """All tests for class ConsoleShutdownHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( @@ -1041,8 +1043,8 @@ class ConsoleShutdownHandlerTests(unittest.TestCase): resp = self.urihandler.post( self.hmc, '/api/console/operations/shutdown', body, True, True) - self.assertFalse(self.hmc.enabled) - self.assertIsNone(resp) + assert not self.hmc.enabled + assert resp is None def test_shutdown_error_not_found(self): @@ -1052,20 +1054,20 @@ class ConsoleShutdownHandlerTests(unittest.TestCase): body 
= { 'force': False, } - with self.assertRaises(InvalidResourceError) as cm: + with pytest.raises(InvalidResourceError) as exc_info: # the function to be tested: self.urihandler.post( self.hmc, '/api/console/operations/shutdown', body, True, True) - exc = cm.exception - self.assertEqual(exc.reason, 1) + exc = exc_info.value + assert exc.reason == 1 -class ConsoleMakePrimaryHandlerTests(unittest.TestCase): +class TestConsoleMakePrimaryHandler(object): """All tests for class ConsoleMakePrimaryHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( @@ -1081,8 +1083,8 @@ class ConsoleMakePrimaryHandlerTests(unittest.TestCase): resp = self.urihandler.post( self.hmc, '/api/console/operations/make-primary', None, True, True) - self.assertTrue(self.hmc.enabled) - self.assertIsNone(resp) + assert self.hmc.enabled + assert resp is None def test_make_primary_error_not_found(self): @@ -1092,21 +1094,21 @@ class ConsoleMakePrimaryHandlerTests(unittest.TestCase): body = { 'force': False, } - with self.assertRaises(InvalidResourceError) as cm: + with pytest.raises(InvalidResourceError) as exc_info: # the function to be tested: self.urihandler.post( self.hmc, '/api/console/operations/make-primary', body, True, True) - exc = cm.exception - self.assertEqual(exc.reason, 1) + exc = exc_info.value + assert exc.reason == 1 -class ConsoleReorderUserPatternsHandlerTests(unittest.TestCase): +class TestConsoleReorderUserPatternsHandler(object): """All tests for class ConsoleReorderUserPatternsHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() # Remove the standard User Pattern objects for this test @@ -1172,7 +1174,7 @@ class ConsoleReorderUserPatternsHandlerTests(unittest.TestCase): act_uris = [up['element-uri'] for up in act_user_patterns] # Verify that the actual order is the new (expected) order: - self.assertEqual(act_uris, new_uris) + assert act_uris == new_uris 
def test_reorder_error_not_found(self): @@ -1182,7 +1184,7 @@ class ConsoleReorderUserPatternsHandlerTests(unittest.TestCase): body = { 'user-pattern-uris': [] } - with self.assertRaises(InvalidResourceError) as cm: + with pytest.raises(InvalidResourceError) as exc_info: # the function to be tested: @@ -1190,14 +1192,14 @@ class ConsoleReorderUserPatternsHandlerTests(unittest.TestCase): self.hmc, '/api/console/operations/reorder-user-patterns', body, True, True) - exc = cm.exception - self.assertEqual(exc.reason, 1) + exc = exc_info.value + assert exc.reason == 1 -class ConsoleGetAuditLogHandlerTests(unittest.TestCase): +class TestConsoleGetAuditLogHandler(object): """All tests for class ConsoleGetAuditLogHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( @@ -1214,7 +1216,7 @@ class ConsoleGetAuditLogHandlerTests(unittest.TestCase): self.hmc, '/api/console/operations/get-audit-log', None, True, True) - self.assertEqual(resp, []) + assert resp == [] # TODO: Add testcases with non-empty audit log (once supported in mock) @@ -1223,21 +1225,21 @@ class ConsoleGetAuditLogHandlerTests(unittest.TestCase): # Remove the faked Console object self.hmc.consoles.remove(None) - with self.assertRaises(InvalidResourceError) as cm: + with pytest.raises(InvalidResourceError) as exc_info: # the function to be tested: self.urihandler.post( self.hmc, '/api/console/operations/get-audit-log', None, True, True) - exc = cm.exception - self.assertEqual(exc.reason, 1) + exc = exc_info.value + assert exc.reason == 1 -class ConsoleGetSecurityLogHandlerTests(unittest.TestCase): +class TestConsoleGetSecurityLogHandler(object): """All tests for class ConsoleGetSecurityLogHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( @@ -1254,7 +1256,7 @@ class ConsoleGetSecurityLogHandlerTests(unittest.TestCase): self.hmc, '/api/console/operations/get-security-log', 
None, True, True) - self.assertEqual(resp, []) + assert resp == [] # TODO: Add testcases with non-empty security log (once supported in mock) @@ -1263,21 +1265,21 @@ class ConsoleGetSecurityLogHandlerTests(unittest.TestCase): # Remove the faked Console object self.hmc.consoles.remove(None) - with self.assertRaises(InvalidResourceError) as cm: + with pytest.raises(InvalidResourceError) as exc_info: # the function to be tested: self.urihandler.post( self.hmc, '/api/console/operations/get-security-log', None, True, True) - exc = cm.exception - self.assertEqual(exc.reason, 1) + exc = exc_info.value + assert exc.reason == 1 -class ConsoleListUnmanagedCpcsHandlerTests(unittest.TestCase): +class TestConsoleListUnmanagedCpcsHandler(object): """All tests for class ConsoleListUnmanagedCpcsHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( @@ -1294,7 +1296,7 @@ class ConsoleListUnmanagedCpcsHandlerTests(unittest.TestCase): self.hmc, '/api/console/operations/list-unmanaged-cpcs', True) cpcs = resp['cpcs'] - self.assertEqual(cpcs, []) + assert cpcs == [] # TODO: Add testcases for non-empty list of unmanaged CPCs @@ -1303,20 +1305,20 @@ class ConsoleListUnmanagedCpcsHandlerTests(unittest.TestCase): # Remove the faked Console object self.hmc.consoles.remove(None) - with self.assertRaises(InvalidResourceError) as cm: + with pytest.raises(InvalidResourceError) as exc_info: # the function to be tested: self.urihandler.get( self.hmc, '/api/console/operations/list-unmanaged-cpcs', True) - exc = cm.exception - self.assertEqual(exc.reason, 1) + exc = exc_info.value + assert exc.reason == 1 -class UserHandlersTests(unittest.TestCase): +class TestUserHandlers(object): """All tests for classes UsersHandler and UserHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( @@ -1339,20 +1341,20 @@ class UserHandlersTests(unittest.TestCase): }, ] } - 
self.assertEqual(users, exp_users) + assert users == exp_users def test_list_error_console_not_found(self): # Remove the faked Console object self.hmc.consoles.remove(None) - with self.assertRaises(InvalidResourceError) as cm: + with pytest.raises(InvalidResourceError) as exc_info: # the function to be tested: self.urihandler.get(self.hmc, '/api/console/users', True) - exc = cm.exception - self.assertEqual(exc.reason, 1) + exc = exc_info.value + assert exc.reason == 1 def test_get(self): @@ -1367,7 +1369,7 @@ class UserHandlersTests(unittest.TestCase): 'description': 'User #1', 'type': 'system-defined', } - self.assertEqual(user1, exp_user1) + assert user1 == exp_user1 def test_create_verify(self): new_user2 = { @@ -1382,10 +1384,10 @@ class UserHandlersTests(unittest.TestCase): resp = self.urihandler.post(self.hmc, '/api/console/users', new_user2, True, True) - self.assertEqual(len(resp), 1) - self.assertIn('object-uri', resp) + assert len(resp) == 1 + assert 'object-uri' in resp new_user2_uri = resp['object-uri'] - self.assertEqual(new_user2_uri, '/api/users/2') + assert new_user2_uri == '/api/users/2' exp_user2 = { 'object-id': '2', @@ -1399,7 +1401,7 @@ class UserHandlersTests(unittest.TestCase): # the function to be tested: user2 = self.urihandler.get(self.hmc, '/api/users/2', True) - self.assertEqual(user2, exp_user2) + assert user2 == exp_user2 def test_create_error_console_not_found(self): @@ -1414,14 +1416,14 @@ class UserHandlersTests(unittest.TestCase): 'authentication-type': 'local', } - with self.assertRaises(InvalidResourceError) as cm: + with pytest.raises(InvalidResourceError) as exc_info: # the function to be tested: self.urihandler.post(self.hmc, '/api/console/users', new_user2, True, True) - exc = cm.exception - self.assertEqual(exc.reason, 1) + exc = exc_info.value + assert exc.reason == 1 def test_update_verify(self): update_user1 = { @@ -1434,7 +1436,7 @@ class UserHandlersTests(unittest.TestCase): user1 = self.urihandler.get(self.hmc, 
'/api/users/fake-user-oid-1', True) - self.assertEqual(user1['description'], 'updated user #1') + assert user1['description'] == 'updated user #1' def test_delete_verify_all(self): testcases = [ @@ -1478,12 +1480,12 @@ class UserHandlersTests(unittest.TestCase): if exp_exc_tuple is not None: - with self.assertRaises(HTTPError) as cm: + with pytest.raises(HTTPError) as exc_info: # Execute the code to be tested self.urihandler.delete(self.hmc, user_uri, True) - exc = cm.exception + exc = exc_info.value assert exc.http_status == exp_exc_tuple[0] assert exc.reason == exp_exc_tuple[1] @@ -1496,14 +1498,14 @@ class UserHandlersTests(unittest.TestCase): self.urihandler.delete(self.hmc, user_uri, True) # Verify that it has been deleted - with self.assertRaises(InvalidResourceError): + with pytest.raises(InvalidResourceError): self.urihandler.get(self.hmc, user_uri, True) -class UserAddUserRoleHandlerTests(unittest.TestCase): +class TestUserAddUserRoleHandler(object): """All tests for class UserAddUserRoleHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() # Has a system-defined User (oid=fake-user-oid-1) # Has a system-defined User Role (oid=fake-user-role-oid-1) @@ -1553,13 +1555,13 @@ class UserAddUserRoleHandlerTests(unittest.TestCase): # the function to be tested: resp = self.urihandler.post(self.hmc, uri, input_parms, True, True) - self.assertIsNone(resp) + assert resp is None user2_props = self.urihandler.get(self.hmc, self.user2_uri, True) - self.assertTrue('user-roles' in user2_props) + assert 'user-roles' in user2_props user_roles = user2_props['user-roles'] - self.assertEqual(len(user_roles), 1) + assert len(user_roles) == 1 user_role_uri = user_roles[0] - self.assertEqual(user_role_uri, self.user_role2_uri) + assert user_role_uri == self.user_role2_uri def test_add_error_bad_user(self): """Test failed addition of a user role to a bad user.""" @@ -1581,13 +1583,13 @@ class 
UserAddUserRoleHandlerTests(unittest.TestCase): input_parms = { 'user-role-uri': self.user_role2_uri } - with self.assertRaises(InvalidResourceError) as cm: + with pytest.raises(InvalidResourceError) as exc_info: # the function to be tested: self.urihandler.post(self.hmc, uri, input_parms, True, True) - exc = cm.exception - self.assertEqual(exc.reason, 1) + exc = exc_info.value + assert exc.reason == 1 # TODO: Add testcase for adding to system-defined or pattern-based user @@ -1613,19 +1615,19 @@ class UserAddUserRoleHandlerTests(unittest.TestCase): input_parms = { 'user-role-uri': bad_user_role_uri } - with self.assertRaises(InvalidResourceError) as cm: + with pytest.raises(InvalidResourceError) as exc_info: # the function to be tested: self.urihandler.post(self.hmc, uri, input_parms, True, True) - exc = cm.exception - self.assertEqual(exc.reason, 2) + exc = exc_info.value + assert exc.reason == 2 -class UserRemoveUserRoleHandlerTests(unittest.TestCase): +class TestUserRemoveUserRoleHandler(object): """All tests for class UserRemoveUserRoleHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() # Has a system-defined User (oid=fake-user-oid-1) # Has a system-defined User Role (oid=fake-user-role-oid-1) @@ -1684,11 +1686,11 @@ class UserRemoveUserRoleHandlerTests(unittest.TestCase): # the function to be tested: resp = self.urihandler.post(self.hmc, uri, input_parms, True, True) - self.assertIsNone(resp) + assert resp is None user2_props = self.urihandler.get(self.hmc, self.user2_uri, True) - self.assertTrue('user-roles' in user2_props) + assert 'user-roles' in user2_props user_roles = user2_props['user-roles'] - self.assertEqual(len(user_roles), 0) + assert len(user_roles) == 0 def test_remove_error_bad_user(self): """Test failed removal of a user role from a bad user.""" @@ -1710,13 +1712,13 @@ class UserRemoveUserRoleHandlerTests(unittest.TestCase): input_parms = { 'user-role-uri': self.user_role2_uri } - with 
self.assertRaises(InvalidResourceError) as cm: + with pytest.raises(InvalidResourceError) as exc_info: # the function to be tested: self.urihandler.post(self.hmc, uri, input_parms, True, True) - exc = cm.exception - self.assertEqual(exc.reason, 1) + exc = exc_info.value + assert exc.reason == 1 # TODO: Add testcase for removing from system-defined or pattern-based user @@ -1742,13 +1744,13 @@ class UserRemoveUserRoleHandlerTests(unittest.TestCase): input_parms = { 'user-role-uri': bad_user_role_uri } - with self.assertRaises(InvalidResourceError) as cm: + with pytest.raises(InvalidResourceError) as exc_info: # the function to be tested: self.urihandler.post(self.hmc, uri, input_parms, True, True) - exc = cm.exception - self.assertEqual(exc.reason, 2) + exc = exc_info.value + assert exc.reason == 2 def test_remove_error_no_user_role(self): """Test failed removal of a user role that a user does not have.""" @@ -1783,19 +1785,19 @@ class UserRemoveUserRoleHandlerTests(unittest.TestCase): input_parms = { 'user-role-uri': self.user_role2_uri } - with self.assertRaises(ConflictError) as cm: + with pytest.raises(ConflictError) as exc_info: # the function to be tested: self.urihandler.post(self.hmc, uri, input_parms, True, True) - exc = cm.exception - self.assertEqual(exc.reason, 316) + exc = exc_info.value + assert exc.reason == 316 -class UserRoleHandlersTests(unittest.TestCase): +class TestUserRoleHandlers(object): """All tests for classes UserRolesHandler and UserRoleHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( @@ -1819,20 +1821,20 @@ class UserRoleHandlersTests(unittest.TestCase): }, ] } - self.assertEqual(user_roles, exp_user_roles) + assert user_roles == exp_user_roles def test_list_error_console_not_found(self): # Remove the faked Console object self.hmc.consoles.remove(None) - with self.assertRaises(InvalidResourceError) as cm: + with pytest.raises(InvalidResourceError) as exc_info: # the 
function to be tested: self.urihandler.get(self.hmc, '/api/console/user-roles', True) - exc = cm.exception - self.assertEqual(exc.reason, 1) + exc = exc_info.value + assert exc.reason == 1 def test_get(self): @@ -1847,7 +1849,7 @@ class UserRoleHandlersTests(unittest.TestCase): 'description': 'User Role #1', 'type': 'system-defined', } - self.assertEqual(user_role1, exp_user_role1) + assert user_role1 == exp_user_role1 def test_create_verify(self): new_user_role2 = { @@ -1859,14 +1861,14 @@ class UserRoleHandlersTests(unittest.TestCase): resp = self.urihandler.post( self.hmc, '/api/console/user-roles', new_user_role2, True, True) - self.assertEqual(len(resp), 1) - self.assertIn('object-uri', resp) + assert len(resp) == 1 + assert 'object-uri' in resp new_user_role2_uri = resp['object-uri'] # the function to be tested: user_role2 = self.urihandler.get(self.hmc, new_user_role2_uri, True) - self.assertEqual(user_role2['type'], 'user-defined') + assert user_role2['type'] == 'user-defined' def test_create_error_console_not_found(self): @@ -1878,14 +1880,14 @@ class UserRoleHandlersTests(unittest.TestCase): 'description': 'User Role #2', } - with self.assertRaises(InvalidResourceError) as cm: + with pytest.raises(InvalidResourceError) as exc_info: # the function to be tested: self.urihandler.post(self.hmc, '/api/console/user-roles', new_user_role2, True, True) - exc = cm.exception - self.assertEqual(exc.reason, 1) + exc = exc_info.value + assert exc.reason == 1 def test_create_error_type(self): @@ -1895,14 +1897,14 @@ class UserRoleHandlersTests(unittest.TestCase): 'type': 'user-defined', # error: type is implied } - with self.assertRaises(BadRequestError) as cm: + with pytest.raises(BadRequestError) as exc_info: # the function to be tested: self.urihandler.post(self.hmc, '/api/console/user-roles', new_user_role2, True, True) - exc = cm.exception - self.assertEqual(exc.reason, 6) + exc = exc_info.value + assert exc.reason == 6 def test_update_verify(self): 
update_user_role1 = { @@ -1916,7 +1918,7 @@ class UserRoleHandlersTests(unittest.TestCase): user_role1 = self.urihandler.get( self.hmc, '/api/user-roles/fake-user-role-oid-1', True) - self.assertEqual(user_role1['description'], 'updated user #1') + assert user_role1['description'] == 'updated user #1' def test_delete_verify(self): @@ -1938,14 +1940,14 @@ class UserRoleHandlersTests(unittest.TestCase): self.urihandler.delete(self.hmc, new_user_role2_uri, True) # Verify that it has been deleted - with self.assertRaises(InvalidResourceError): + with pytest.raises(InvalidResourceError): self.urihandler.get(self.hmc, new_user_role2_uri, True) -class UserRoleAddPermissionHandlerTests(unittest.TestCase): +class TestUserRoleAddPermissionHandler(object): """All tests for class UserRoleAddPermissionHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() # Has a system-defined User Role (oid=fake-user-role-oid-1) @@ -2010,13 +2012,13 @@ class UserRoleAddPermissionHandlerTests(unittest.TestCase): resp = self.urihandler.post( self.hmc, uri, input_permission, True, True) - self.assertIsNone(resp) + assert resp is None props = self.urihandler.get(self.hmc, self.user_role2_uri, True) - self.assertTrue('permissions' in props) + assert 'permissions' in props permissions = props['permissions'] - self.assertEqual(len(permissions), 1) + assert len(permissions) == 1 perm = permissions[0] - self.assertEqual(perm, exp_permission) + assert perm == exp_permission def test_add_error_bad_user_role(self): """Test failed addition of a permission to a bad User Role.""" @@ -2030,13 +2032,13 @@ class UserRoleAddPermissionHandlerTests(unittest.TestCase): 'include-members': True, 'view-only-mode': False, } - with self.assertRaises(InvalidResourceError) as cm: + with pytest.raises(InvalidResourceError) as exc_info: # the function to be tested: self.urihandler.post(self.hmc, uri, input_parms, True, True) - exc = cm.exception - 
self.assertEqual(exc.reason, 1) + exc = exc_info.value + assert exc.reason == 1 def test_add_error_system_user_role(self): """Test failed addition of a permission to a system-defined User @@ -2051,19 +2053,19 @@ class UserRoleAddPermissionHandlerTests(unittest.TestCase): 'include-members': True, 'view-only-mode': False, } - with self.assertRaises(BadRequestError) as cm: + with pytest.raises(BadRequestError) as exc_info: # the function to be tested: self.urihandler.post(self.hmc, uri, input_parms, True, True) - exc = cm.exception - self.assertEqual(exc.reason, 314) + exc = exc_info.value + assert exc.reason == 314 -class UserRoleRemovePermissionHandlerTests(unittest.TestCase): +class TestUserRoleRemovePermissionHandler(object): """All tests for class UserRoleRemovePermissionHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() # Has a system-defined User Role (oid=fake-user-role-oid-1) @@ -2135,11 +2137,11 @@ class UserRoleRemovePermissionHandlerTests(unittest.TestCase): resp = self.urihandler.post( self.hmc, uri, input_permission, True, True) - self.assertIsNone(resp) + assert resp is None props = self.urihandler.get(self.hmc, self.user_role2_uri, True) - self.assertTrue('permissions' in props) + assert 'permissions' in props permissions = props['permissions'] - self.assertEqual(len(permissions), 0) + assert len(permissions) == 0 def test_remove_error_bad_user_role(self): """Test failed removal of a permission from a bad User Role.""" @@ -2153,13 +2155,13 @@ class UserRoleRemovePermissionHandlerTests(unittest.TestCase): 'include-members': True, 'view-only-mode': False, } - with self.assertRaises(InvalidResourceError) as cm: + with pytest.raises(InvalidResourceError) as exc_info: # the function to be tested: self.urihandler.post(self.hmc, uri, input_parms, True, True) - exc = cm.exception - self.assertEqual(exc.reason, 1) + exc = exc_info.value + assert exc.reason == 1 def 
test_remove_error_system_user_role(self): """Test failed removal of a permission from a system-defined User @@ -2174,19 +2176,19 @@ class UserRoleRemovePermissionHandlerTests(unittest.TestCase): 'include-members': True, 'view-only-mode': False, } - with self.assertRaises(BadRequestError) as cm: + with pytest.raises(BadRequestError) as exc_info: # the function to be tested: self.urihandler.post(self.hmc, uri, input_parms, True, True) - exc = cm.exception - self.assertEqual(exc.reason, 314) + exc = exc_info.value + assert exc.reason == 314 -class TaskHandlersTests(unittest.TestCase): +class TestTaskHandlers(object): """All tests for classes TasksHandler and TaskHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( @@ -2212,20 +2214,20 @@ class TaskHandlersTests(unittest.TestCase): }, ] } - self.assertEqual(tasks, exp_tasks) + assert tasks == exp_tasks def test_list_error_console_not_found(self): # Remove the faked Console object self.hmc.consoles.remove(None) - with self.assertRaises(InvalidResourceError) as cm: + with pytest.raises(InvalidResourceError) as exc_info: # the function to be tested: self.urihandler.get(self.hmc, '/api/console/tasks', True) - exc = cm.exception - self.assertEqual(exc.reason, 1) + exc = exc_info.value + assert exc.reason == 1 def test_get(self): @@ -2239,13 +2241,13 @@ class TaskHandlersTests(unittest.TestCase): 'name': 'fake_task_name_1', 'description': 'Task #1', } - self.assertEqual(task1, exp_task1) + assert task1 == exp_task1 -class UserPatternHandlersTests(unittest.TestCase): +class TestUserPatternHandlers(object): """All tests for classes UserPatternsHandler and UserPatternHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( @@ -2270,20 +2272,20 @@ class UserPatternHandlersTests(unittest.TestCase): }, ] } - self.assertEqual(user_patterns, exp_user_patterns) + assert user_patterns == 
exp_user_patterns def test_list_error_console_not_found(self): # Remove the faked Console object self.hmc.consoles.remove(None) - with self.assertRaises(InvalidResourceError) as cm: + with pytest.raises(InvalidResourceError) as exc_info: # the function to be tested: self.urihandler.get(self.hmc, '/api/console/user-patterns', True) - exc = cm.exception - self.assertEqual(exc.reason, 1) + exc = exc_info.value + assert exc.reason == 1 def test_get(self): @@ -2303,7 +2305,7 @@ class UserPatternHandlersTests(unittest.TestCase): 'retention-time': 0, 'user-template-uri': '/api/users/fake-user-oid-1', } - self.assertEqual(user_pattern1, exp_user_pattern1) + assert user_pattern1 == exp_user_pattern1 def test_create_verify(self): new_user_pattern_input = { @@ -2320,8 +2322,8 @@ class UserPatternHandlersTests(unittest.TestCase): self.hmc, '/api/console/user-patterns', new_user_pattern_input, True, True) - self.assertEqual(len(resp), 1) - self.assertIn('element-uri', resp) + assert len(resp) == 1 + assert 'element-uri' in resp new_user_pattern_uri = resp['element-uri'] # the function to be tested: @@ -2330,7 +2332,7 @@ class UserPatternHandlersTests(unittest.TestCase): new_name = new_user_pattern['name'] input_name = new_user_pattern_input['name'] - self.assertEqual(new_name, input_name) + assert new_name == input_name def test_create_error_console_not_found(self): @@ -2342,14 +2344,14 @@ class UserPatternHandlersTests(unittest.TestCase): 'description': 'User Pattern #X', } - with self.assertRaises(InvalidResourceError) as cm: + with pytest.raises(InvalidResourceError) as exc_info: # the function to be tested: self.urihandler.post(self.hmc, '/api/console/user-patterns', new_user_pattern_input, True, True) - exc = cm.exception - self.assertEqual(exc.reason, 1) + exc = exc_info.value + assert exc.reason == 1 def test_update_verify(self): update_user_pattern1 = { @@ -2364,8 +2366,7 @@ class UserPatternHandlersTests(unittest.TestCase): user_pattern1 = self.urihandler.get( 
self.hmc, '/api/console/user-patterns/fake-user-pattern-oid-1', True) - self.assertEqual(user_pattern1['description'], - 'updated user pattern #1') + assert user_pattern1['description'] == 'updated user pattern #1' def test_delete_verify(self): @@ -2392,14 +2393,14 @@ class UserPatternHandlersTests(unittest.TestCase): self.urihandler.delete(self.hmc, new_user_pattern_uri, True) # Verify that it has been deleted - with self.assertRaises(InvalidResourceError): + with pytest.raises(InvalidResourceError): self.urihandler.get(self.hmc, new_user_pattern_uri, True) -class PasswordRuleHandlersTests(unittest.TestCase): +class TestPasswordRuleHandlers(object): """All tests for classes PasswordRulesHandler and PasswordRuleHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( @@ -2424,20 +2425,20 @@ class PasswordRuleHandlersTests(unittest.TestCase): }, ] } - self.assertEqual(password_rules, exp_password_rules) + assert password_rules == exp_password_rules def test_list_error_console_not_found(self): # Remove the faked Console object self.hmc.consoles.remove(None) - with self.assertRaises(InvalidResourceError) as cm: + with pytest.raises(InvalidResourceError) as exc_info: # the function to be tested: self.urihandler.get(self.hmc, '/api/console/password-rules', True) - exc = cm.exception - self.assertEqual(exc.reason, 1) + exc = exc_info.value + assert exc.reason == 1 def test_get(self): @@ -2454,7 +2455,7 @@ class PasswordRuleHandlersTests(unittest.TestCase): 'description': 'Password Rule #1', 'type': 'system-defined', } - self.assertEqual(password_rule1, exp_password_rule1) + assert password_rule1 == exp_password_rule1 def test_create_verify(self): new_password_rule_input = { @@ -2467,8 +2468,8 @@ class PasswordRuleHandlersTests(unittest.TestCase): self.hmc, '/api/console/password-rules', new_password_rule_input, True, True) - self.assertEqual(len(resp), 1) - self.assertIn('element-uri', resp) + assert 
len(resp) == 1 + assert 'element-uri' in resp new_password_rule_uri = resp['element-uri'] # the function to be tested: @@ -2477,7 +2478,7 @@ class PasswordRuleHandlersTests(unittest.TestCase): new_name = new_password_rule['name'] input_name = new_password_rule_input['name'] - self.assertEqual(new_name, input_name) + assert new_name == input_name def test_create_error_console_not_found(self): @@ -2489,14 +2490,14 @@ class PasswordRuleHandlersTests(unittest.TestCase): 'description': 'Password Rule #X', } - with self.assertRaises(InvalidResourceError) as cm: + with pytest.raises(InvalidResourceError) as exc_info: # the function to be tested: self.urihandler.post(self.hmc, '/api/console/password-rules', new_password_rule_input, True, True) - exc = cm.exception - self.assertEqual(exc.reason, 1) + exc = exc_info.value + assert exc.reason == 1 def test_update_verify(self): update_password_rule1 = { @@ -2511,8 +2512,7 @@ class PasswordRuleHandlersTests(unittest.TestCase): password_rule1 = self.urihandler.get( self.hmc, '/api/console/password-rules/fake-password-rule-oid-1', True) - self.assertEqual(password_rule1['description'], - 'updated password rule #1') + assert password_rule1['description'] == 'updated password rule #1' def test_delete_verify(self): @@ -2535,15 +2535,15 @@ class PasswordRuleHandlersTests(unittest.TestCase): self.urihandler.delete(self.hmc, new_password_rule_uri, True) # Verify that it has been deleted - with self.assertRaises(InvalidResourceError): + with pytest.raises(InvalidResourceError): self.urihandler.get(self.hmc, new_password_rule_uri, True) -class LdapServerDefinitionHandlersTests(unittest.TestCase): +class TestLdapServerDefinitionHandlers(object): """All tests for classes LdapServerDefinitionsHandler and LdapServerDefinitionHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( @@ -2570,21 +2570,21 @@ class LdapServerDefinitionHandlersTests(unittest.TestCase): }, ] } - 
self.assertEqual(ldap_srv_defs, exp_ldap_srv_defs) + assert ldap_srv_defs == exp_ldap_srv_defs def test_list_error_console_not_found(self): # Remove the faked Console object self.hmc.consoles.remove(None) - with self.assertRaises(InvalidResourceError) as cm: + with pytest.raises(InvalidResourceError) as exc_info: # the function to be tested: self.urihandler.get( self.hmc, '/api/console/ldap-server-definitions', True) - exc = cm.exception - self.assertEqual(exc.reason, 1) + exc = exc_info.value + assert exc.reason == 1 def test_get(self): @@ -2603,7 +2603,7 @@ class LdapServerDefinitionHandlersTests(unittest.TestCase): 'description': 'LDAP Srv Def #1', 'primary-hostname-ipaddr': '10.11.12.13', } - self.assertEqual(ldap_srv_def1, exp_ldap_srv_def1) + assert ldap_srv_def1 == exp_ldap_srv_def1 def test_create_verify(self): new_ldap_srv_def_input = { @@ -2616,8 +2616,8 @@ class LdapServerDefinitionHandlersTests(unittest.TestCase): self.hmc, '/api/console/ldap-server-definitions', new_ldap_srv_def_input, True, True) - self.assertEqual(len(resp), 1) - self.assertIn('element-uri', resp) + assert len(resp) == 1 + assert 'element-uri' in resp new_ldap_srv_def_uri = resp['element-uri'] # the function to be tested: @@ -2626,7 +2626,7 @@ class LdapServerDefinitionHandlersTests(unittest.TestCase): new_name = new_ldap_srv_def['name'] input_name = new_ldap_srv_def_input['name'] - self.assertEqual(new_name, input_name) + assert new_name == input_name def test_create_error_console_not_found(self): @@ -2638,15 +2638,15 @@ class LdapServerDefinitionHandlersTests(unittest.TestCase): 'description': 'LDAP Srv Def #X', } - with self.assertRaises(InvalidResourceError) as cm: + with pytest.raises(InvalidResourceError) as exc_info: # the function to be tested: self.urihandler.post( self.hmc, '/api/console/ldap-server-definitions', new_ldap_srv_def_input, True, True) - exc = cm.exception - self.assertEqual(exc.reason, 1) + exc = exc_info.value + assert exc.reason == 1 def 
test_update_verify(self): update_ldap_srv_def1 = { @@ -2663,8 +2663,7 @@ class LdapServerDefinitionHandlersTests(unittest.TestCase): self.hmc, '/api/console/ldap-server-definitions/fake-ldap-srv-def-oid-1', True) - self.assertEqual(ldap_srv_def1['description'], - 'updated LDAP Srv Def #1') + assert ldap_srv_def1['description'] == 'updated LDAP Srv Def #1' def test_delete_verify(self): @@ -2687,14 +2686,14 @@ class LdapServerDefinitionHandlersTests(unittest.TestCase): self.urihandler.delete(self.hmc, new_ldap_srv_def_uri, True) # Verify that it has been deleted - with self.assertRaises(InvalidResourceError): + with pytest.raises(InvalidResourceError): self.urihandler.get(self.hmc, new_ldap_srv_def_uri, True) -class CpcHandlersTests(unittest.TestCase): +class TestCpcHandlers(object): """All tests for classes CpcsHandler and CpcHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/cpcs(?:\?(.*))?', CpcsHandler), @@ -2721,7 +2720,7 @@ class CpcHandlersTests(unittest.TestCase): }, ] } - self.assertEqual(cpcs, exp_cpcs) + assert cpcs == exp_cpcs def test_get(self): @@ -2737,7 +2736,7 @@ class CpcHandlersTests(unittest.TestCase): 'description': 'CPC #1 (classic mode)', 'status': 'operating', } - self.assertEqual(cpc1, exp_cpc1) + assert cpc1 == exp_cpc1 def test_update_verify(self): update_cpc1 = { @@ -2749,13 +2748,13 @@ class CpcHandlersTests(unittest.TestCase): update_cpc1, True, True) cpc1 = self.urihandler.get(self.hmc, '/api/cpcs/1', True) - self.assertEqual(cpc1['description'], 'updated cpc #1') + assert cpc1['description'] == 'updated cpc #1' -class CpcStartStopHandlerTests(unittest.TestCase): +class TestCpcStartStopHandler(object): """All tests for classes CpcStartHandler and CpcStopHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/cpcs/([^/]+)', CpcHandler), @@ -2767,53 +2766,53 @@ class 
CpcStartStopHandlerTests(unittest.TestCase): def test_stop_classic(self): # CPC1 is in classic mode cpc1 = self.urihandler.get(self.hmc, '/api/cpcs/1', True) - self.assertEqual(cpc1['status'], 'operating') + assert cpc1['status'] == 'operating' # the function to be tested: - with self.assertRaises(CpcNotInDpmError): + with pytest.raises(CpcNotInDpmError): self.urihandler.post(self.hmc, '/api/cpcs/1/operations/stop', None, True, True) cpc1 = self.urihandler.get(self.hmc, '/api/cpcs/1', True) - self.assertEqual(cpc1['status'], 'operating') + assert cpc1['status'] == 'operating' def test_start_classic(self): # CPC1 is in classic mode cpc1 = self.urihandler.get(self.hmc, '/api/cpcs/1', True) - self.assertEqual(cpc1['status'], 'operating') + assert cpc1['status'] == 'operating' # the function to be tested: - with self.assertRaises(CpcNotInDpmError): + with pytest.raises(CpcNotInDpmError): self.urihandler.post(self.hmc, '/api/cpcs/1/operations/start', None, True, True) cpc1 = self.urihandler.get(self.hmc, '/api/cpcs/1', True) - self.assertEqual(cpc1['status'], 'operating') + assert cpc1['status'] == 'operating' def test_stop_start_dpm(self): # CPC2 is in DPM mode cpc2 = self.urihandler.get(self.hmc, '/api/cpcs/2', True) - self.assertEqual(cpc2['status'], 'active') + assert cpc2['status'] == 'active' # the function to be tested: self.urihandler.post(self.hmc, '/api/cpcs/2/operations/stop', None, True, True) cpc2 = self.urihandler.get(self.hmc, '/api/cpcs/2', True) - self.assertEqual(cpc2['status'], 'not-operating') + assert cpc2['status'] == 'not-operating' # the function to be tested: self.urihandler.post(self.hmc, '/api/cpcs/2/operations/start', None, True, True) cpc2 = self.urihandler.get(self.hmc, '/api/cpcs/2', True) - self.assertEqual(cpc2['status'], 'active') + assert cpc2['status'] == 'active' -class CpcExportPortNamesListHandlerTests(unittest.TestCase): +class TestCpcExportPortNamesListHandler(object): """All tests for class CpcExportPortNamesListHandler.""" - 
def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/cpcs(?:\?(.*))?', CpcsHandler), @@ -2826,7 +2825,7 @@ class CpcExportPortNamesListHandlerTests(unittest.TestCase): def test_invoke_err_no_input(self): # the function to be tested: - with self.assertRaises(HTTPError): + with pytest.raises(HTTPError): self.urihandler.post( self.hmc, '/api/cpcs/2/operations/export-port-names-list', None, True, True) @@ -2846,16 +2845,16 @@ class CpcExportPortNamesListHandlerTests(unittest.TestCase): self.hmc, '/api/cpcs/2/operations/export-port-names-list', operation_body, True, True) - self.assertEqual(len(resp), 1) - self.assertIn('wwpn-list', resp) + assert len(resp) == 1 + assert 'wwpn-list' in resp wwpn_list = resp['wwpn-list'] - self.assertEqual(wwpn_list, exp_wwpn_list) + assert wwpn_list == exp_wwpn_list -class CpcImportProfilesHandlerTests(unittest.TestCase): +class TestCpcImportProfilesHandler(object): """All tests for class CpcImportProfilesHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/cpcs(?:\?(.*))?', CpcsHandler), @@ -2868,7 +2867,7 @@ class CpcImportProfilesHandlerTests(unittest.TestCase): def test_invoke_err_no_input(self): # the function to be tested: - with self.assertRaises(HTTPError): + with pytest.raises(HTTPError): self.urihandler.post( self.hmc, '/api/cpcs/1/operations/import-profiles', None, True, True) @@ -2883,13 +2882,13 @@ class CpcImportProfilesHandlerTests(unittest.TestCase): self.hmc, '/api/cpcs/1/operations/import-profiles', operation_body, True, True) - self.assertIsNone(resp) + assert resp is None -class CpcExportProfilesHandlerTests(unittest.TestCase): +class TestCpcExportProfilesHandler(object): """All tests for class CpcExportProfilesHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/cpcs(?:\?(.*))?', CpcsHandler), @@ 
-2902,7 +2901,7 @@ class CpcExportProfilesHandlerTests(unittest.TestCase): def test_invoke_err_no_input(self): # the function to be tested: - with self.assertRaises(HTTPError): + with pytest.raises(HTTPError): self.urihandler.post( self.hmc, '/api/cpcs/1/operations/export-profiles', None, True, True) @@ -2917,14 +2916,14 @@ class CpcExportProfilesHandlerTests(unittest.TestCase): self.hmc, '/api/cpcs/1/operations/export-profiles', operation_body, True, True) - self.assertIsNone(resp) + assert resp is None -class MetricsContextHandlersTests(unittest.TestCase): +class TestMetricsContextHandlers(object): """All tests for classes MetricsContextsHandler and MetricsContextHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/services/metrics/context', MetricsContextsHandler), @@ -3023,13 +3022,13 @@ class MetricsContextHandlersTests(unittest.TestCase): resp = self.urihandler.post(self.hmc, '/api/services/metrics/context', body, True, True) - self.assertIsInstance(resp, dict) - self.assertIn('metrics-context-uri', resp) + assert isinstance(resp, dict) + assert 'metrics-context-uri' in resp uri = resp['metrics-context-uri'] - self.assertTrue(uri.startswith('/api/services/metrics/context/')) - self.assertIn('metric-group-infos', resp) + assert uri.startswith('/api/services/metrics/context/') + assert 'metric-group-infos' in resp mg_infos = resp['metric-group-infos'] - self.assertEqual(mg_infos, [mg_info, mg_info2]) + assert mg_infos == [mg_info, mg_info2] # the get function to be tested: mv_resp = self.urihandler.get(self.hmc, uri, True) @@ -3052,20 +3051,19 @@ class MetricsContextHandlersTests(unittest.TestCase): ''' - self.assertEqual( - mv_resp, exp_mv_resp, - "Actual response string:\n{!r}\n" - "Expected response string:\n{!r}\n". - format(mv_resp, exp_mv_resp)) + assert mv_resp == exp_mv_resp, \ + "Actual response string:\n{!r}\n" \ + "Expected response string:\n{!r}\n". 
\ + format(mv_resp, exp_mv_resp) # the delete function to be tested: self.urihandler.delete(self.hmc, uri, True) -class AdapterHandlersTests(unittest.TestCase): +class TestAdapterHandlers(object): """All tests for classes AdaptersHandler and AdapterHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/cpcs/([^/]+)/adapters(?:\?(.*))?', AdaptersHandler), @@ -3107,7 +3105,7 @@ class AdapterHandlersTests(unittest.TestCase): }, ] } - self.assertEqual(adapters, exp_adapters) + assert adapters == exp_adapters def test_get(self): @@ -3124,7 +3122,7 @@ class AdapterHandlersTests(unittest.TestCase): 'network-port-uris': ['/api/adapters/1/network-ports/1'], 'adapter-id': 'BEF', } - self.assertEqual(adapter1, exp_adapter1) + assert adapter1 == exp_adapter1 def test_update_verify(self): update_adapter1 = { @@ -3136,13 +3134,13 @@ class AdapterHandlersTests(unittest.TestCase): update_adapter1, True, True) adapter1 = self.urihandler.get(self.hmc, '/api/adapters/1', True) - self.assertEqual(adapter1['description'], 'updated adapter #1') + assert adapter1['description'] == 'updated adapter #1' -class AdapterChangeCryptoTypeHandlerTests(unittest.TestCase): +class TestAdapterChangeCryptoTypeHandler(object): """All tests for class AdapterChangeCryptoTypeHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/cpcs/([^/]+)/adapters(?:\?(.*))?', AdaptersHandler), @@ -3155,7 +3153,7 @@ class AdapterChangeCryptoTypeHandlerTests(unittest.TestCase): def test_invoke_err_no_body(self): # the function to be tested: - with self.assertRaises(HTTPError): + with pytest.raises(HTTPError): self.urihandler.post( self.hmc, '/api/adapters/4/operations/change-crypto-type', @@ -3168,7 +3166,7 @@ class AdapterChangeCryptoTypeHandlerTests(unittest.TestCase): } # the function to be tested: - with self.assertRaises(HTTPError): + with pytest.raises(HTTPError): 
self.urihandler.post( self.hmc, '/api/adapters/4/operations/change-crypto-type', @@ -3185,13 +3183,13 @@ class AdapterChangeCryptoTypeHandlerTests(unittest.TestCase): '/api/adapters/4/operations/change-crypto-type', operation_body, True, True) - self.assertIsNone(resp) + assert resp is None -class NetworkPortHandlersTests(unittest.TestCase): +class TestNetworkPortHandlers(object): """All tests for class NetworkPortHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/adapters/([^/]+)/network-ports/([^/]+)', @@ -3211,7 +3209,7 @@ class NetworkPortHandlersTests(unittest.TestCase): 'name': 'osa_1_port_1', 'description': 'Port #1 of OSA #1', } - self.assertEqual(port1, exp_port1) + assert port1 == exp_port1 def test_update_verify(self): update_port1 = { @@ -3224,13 +3222,13 @@ class NetworkPortHandlersTests(unittest.TestCase): port1 = self.urihandler.get(self.hmc, '/api/adapters/1/network-ports/1', True) - self.assertEqual(port1['description'], 'updated port #1') + assert port1['description'] == 'updated port #1' -class StoragePortHandlersTests(unittest.TestCase): +class TestStoragePortHandlers(object): """All tests for class StoragePortHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/adapters/([^/]+)/storage-ports/([^/]+)', @@ -3250,7 +3248,7 @@ class StoragePortHandlersTests(unittest.TestCase): 'name': 'fcp_2_port_1', 'description': 'Port #1 of FCP #2', } - self.assertEqual(port1, exp_port1) + assert port1 == exp_port1 def test_update_verify(self): update_port1 = { @@ -3263,13 +3261,13 @@ class StoragePortHandlersTests(unittest.TestCase): port1 = self.urihandler.get(self.hmc, '/api/adapters/2/storage-ports/1', True) - self.assertEqual(port1['description'], 'updated port #1') + assert port1['description'] == 'updated port #1' -class PartitionHandlersTests(unittest.TestCase): +class TestPartitionHandlers(object): 
"""All tests for classes PartitionsHandler and PartitionHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/cpcs/([^/]+)/partitions(?:\?(.*))?', PartitionsHandler), @@ -3292,7 +3290,7 @@ class PartitionHandlersTests(unittest.TestCase): }, ] } - self.assertEqual(partitions, exp_partitions) + assert partitions == exp_partitions def test_get(self): @@ -3309,7 +3307,7 @@ class PartitionHandlersTests(unittest.TestCase): 'nic-uris': ['/api/partitions/1/nics/1'], 'virtual-function-uris': ['/api/partitions/1/virtual-functions/1'], } - self.assertEqual(partition1, exp_partition1) + assert partition1 == exp_partition1 def test_create_verify(self): new_partition2 = { @@ -3323,10 +3321,10 @@ class PartitionHandlersTests(unittest.TestCase): resp = self.urihandler.post(self.hmc, '/api/cpcs/2/partitions', new_partition2, True, True) - self.assertEqual(len(resp), 1) - self.assertIn('object-uri', resp) + assert len(resp) == 1 + assert 'object-uri' in resp new_partition2_uri = resp['object-uri'] - self.assertEqual(new_partition2_uri, '/api/partitions/2') + assert new_partition2_uri == '/api/partitions/2' exp_partition2 = { 'object-id': '2', @@ -3343,7 +3341,7 @@ class PartitionHandlersTests(unittest.TestCase): # the function to be tested: partition2 = self.urihandler.get(self.hmc, '/api/partitions/2', True) - self.assertEqual(partition2, exp_partition2) + assert partition2 == exp_partition2 def test_update_verify(self): update_partition1 = { @@ -3355,7 +3353,7 @@ class PartitionHandlersTests(unittest.TestCase): update_partition1, True, True) partition1 = self.urihandler.get(self.hmc, '/api/partitions/1', True) - self.assertEqual(partition1['description'], 'updated partition #1') + assert partition1['description'] == 'updated partition #1' def test_delete_verify(self): @@ -3364,14 +3362,14 @@ class PartitionHandlersTests(unittest.TestCase): # the function to be tested: self.urihandler.delete(self.hmc, 
'/api/partitions/1', True) - with self.assertRaises(InvalidResourceError): + with pytest.raises(InvalidResourceError): self.urihandler.get(self.hmc, '/api/partitions/1', True) -class PartitionStartStopHandlerTests(unittest.TestCase): +class TestPartitionStartStopHandler(object): """All tests for classes PartitionStartHandler and PartitionStopHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/partitions/([^/]+)', PartitionHandler), @@ -3384,38 +3382,38 @@ class PartitionStartStopHandlerTests(unittest.TestCase): def test_start_stop(self): # CPC2 is in DPM mode partition1 = self.urihandler.get(self.hmc, '/api/partitions/1', True) - self.assertEqual(partition1['status'], 'stopped') + assert partition1['status'] == 'stopped' # the start() function to be tested, with a valid initial status: self.urihandler.post(self.hmc, '/api/partitions/1/operations/start', None, True, True) partition1 = self.urihandler.get(self.hmc, '/api/partitions/1', True) - self.assertEqual(partition1['status'], 'active') + assert partition1['status'] == 'active' # the start() function to be tested, with an invalid initial status: - with self.assertRaises(HTTPError): + with pytest.raises(HTTPError): self.urihandler.post(self.hmc, '/api/partitions/1/operations/start', None, True, True) # the stop() function to be tested, with a valid initial status: - self.assertEqual(partition1['status'], 'active') + assert partition1['status'] == 'active' self.urihandler.post(self.hmc, '/api/partitions/1/operations/stop', None, True, True) partition1 = self.urihandler.get(self.hmc, '/api/partitions/1', True) - self.assertEqual(partition1['status'], 'stopped') + assert partition1['status'] == 'stopped' # the stop() function to be tested, with an invalid initial status: - with self.assertRaises(HTTPError): + with pytest.raises(HTTPError): self.urihandler.post(self.hmc, '/api/partitions/1/operations/stop', None, True, True) -class 
PartitionScsiDumpHandlerTests(unittest.TestCase): +class TestPartitionScsiDumpHandler(object): """All tests for class PartitionScsiDumpHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/partitions/([^/]+)', PartitionHandler), @@ -3427,7 +3425,7 @@ class PartitionScsiDumpHandlerTests(unittest.TestCase): def test_invoke_err_no_body(self): # the function to be tested: - with self.assertRaises(HTTPError): + with pytest.raises(HTTPError): self.urihandler.post( self.hmc, '/api/partitions/1/operations/scsi-dump', None, True, True) @@ -3440,7 +3438,7 @@ class PartitionScsiDumpHandlerTests(unittest.TestCase): } # the function to be tested: - with self.assertRaises(HTTPError): + with pytest.raises(HTTPError): self.urihandler.post( self.hmc, '/api/partitions/1/operations/scsi-dump', operation_body, True, True) @@ -3453,7 +3451,7 @@ class PartitionScsiDumpHandlerTests(unittest.TestCase): } # the function to be tested: - with self.assertRaises(HTTPError): + with pytest.raises(HTTPError): self.urihandler.post( self.hmc, '/api/partitions/1/operations/scsi-dump', operation_body, True, True) @@ -3466,7 +3464,7 @@ class PartitionScsiDumpHandlerTests(unittest.TestCase): } # the function to be tested: - with self.assertRaises(HTTPError): + with pytest.raises(HTTPError): self.urihandler.post( self.hmc, '/api/partitions/1/operations/scsi-dump', operation_body, True, True) @@ -3483,7 +3481,7 @@ class PartitionScsiDumpHandlerTests(unittest.TestCase): partition1['status'] = 'stopped' # the function to be tested: - with self.assertRaises(HTTPError): + with pytest.raises(HTTPError): self.urihandler.post( self.hmc, '/api/partitions/1/operations/scsi-dump', operation_body, True, True) @@ -3504,13 +3502,13 @@ class PartitionScsiDumpHandlerTests(unittest.TestCase): self.hmc, '/api/partitions/1/operations/scsi-dump', operation_body, True, True) - self.assertEqual(resp, {}) + assert resp == {} -class 
PartitionPswRestartHandlerTests(unittest.TestCase): +class TestPartitionPswRestartHandler(object): """All tests for class PartitionPswRestartHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/partitions/([^/]+)', PartitionHandler), @@ -3526,7 +3524,7 @@ class PartitionPswRestartHandlerTests(unittest.TestCase): partition1['status'] = 'stopped' # the function to be tested: - with self.assertRaises(HTTPError): + with pytest.raises(HTTPError): self.urihandler.post( self.hmc, '/api/partitions/1/operations/psw-restart', None, True, True) @@ -3542,13 +3540,13 @@ class PartitionPswRestartHandlerTests(unittest.TestCase): self.hmc, '/api/partitions/1/operations/psw-restart', None, True, True) - self.assertEqual(resp, {}) + assert resp == {} -class PartitionMountIsoImageHandlerTests(unittest.TestCase): +class TestPartitionMountIsoImageHandler(object): """All tests for class PartitionMountIsoImageHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/partitions/([^/]+)', PartitionHandler), @@ -3560,7 +3558,7 @@ class PartitionMountIsoImageHandlerTests(unittest.TestCase): def test_invoke_err_queryparm_1(self): # the function to be tested: - with self.assertRaises(HTTPError): + with pytest.raises(HTTPError): self.urihandler.post( self.hmc, '/api/partitions/1/operations/mount-iso-image?' 'image-namex=fake-image&ins-file-name=fake-ins', @@ -3569,7 +3567,7 @@ class PartitionMountIsoImageHandlerTests(unittest.TestCase): def test_invoke_err_queryparm_2(self): # the function to be tested: - with self.assertRaises(HTTPError): + with pytest.raises(HTTPError): self.urihandler.post( self.hmc, '/api/partitions/1/operations/mount-iso-image?' 
'image-name=fake-image&ins-file-namex=fake-ins', @@ -3582,7 +3580,7 @@ class PartitionMountIsoImageHandlerTests(unittest.TestCase): partition1['status'] = 'starting' # the function to be tested: - with self.assertRaises(HTTPError): + with pytest.raises(HTTPError): self.urihandler.post( self.hmc, '/api/partitions/1/operations/mount-iso-image?' 'image-name=fake-image&ins-file-name=fake-ins', @@ -3600,19 +3598,19 @@ class PartitionMountIsoImageHandlerTests(unittest.TestCase): 'image-name=fake-image&ins-file-name=fake-ins', None, True, True) - self.assertEqual(resp, {}) + assert resp == {} boot_iso_image_name = partition1['boot-iso-image-name'] - self.assertEqual(boot_iso_image_name, 'fake-image') + assert boot_iso_image_name == 'fake-image' boot_iso_ins_file = partition1['boot-iso-ins-file'] - self.assertEqual(boot_iso_ins_file, 'fake-ins') + assert boot_iso_ins_file == 'fake-ins' -class PartitionUnmountIsoImageHandlerTests(unittest.TestCase): +class TestPartitionUnmountIsoImageHandler(object): """All tests for class PartitionUnmountIsoImageHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/partitions/([^/]+)', PartitionHandler), @@ -3628,7 +3626,7 @@ class PartitionUnmountIsoImageHandlerTests(unittest.TestCase): partition1['status'] = 'starting' # the function to be tested: - with self.assertRaises(HTTPError): + with pytest.raises(HTTPError): self.urihandler.post( self.hmc, '/api/partitions/1/operations/unmount-iso-image', None, True, True) @@ -3644,19 +3642,19 @@ class PartitionUnmountIsoImageHandlerTests(unittest.TestCase): self.hmc, '/api/partitions/1/operations/unmount-iso-image', None, True, True) - self.assertEqual(resp, {}) + assert resp == {} boot_iso_image_name = partition1['boot-iso-image-name'] - self.assertIsNone(boot_iso_image_name) + assert boot_iso_image_name is None boot_iso_ins_file = partition1['boot-iso-ins-file'] - self.assertIsNone(boot_iso_ins_file) + assert 
boot_iso_ins_file is None -class PartitionIncreaseCryptoConfigHandlerTests(unittest.TestCase): +class TestPartitionIncreaseCryptoConfigHandler(object): """All tests for class PartitionIncreaseCryptoConfigHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/partitions/([^/]+)', PartitionHandler), @@ -3669,7 +3667,7 @@ class PartitionIncreaseCryptoConfigHandlerTests(unittest.TestCase): def test_invoke_err_missing_body(self): # the function to be tested: - with self.assertRaises(HTTPError): + with pytest.raises(HTTPError): self.urihandler.post( self.hmc, '/api/partitions/1/operations/increase-crypto-configuration', @@ -3682,7 +3680,7 @@ class PartitionIncreaseCryptoConfigHandlerTests(unittest.TestCase): partition1['status'] = 'starting' # the function to be tested: - with self.assertRaises(HTTPError): + with pytest.raises(HTTPError): self.urihandler.post( self.hmc, '/api/partitions/1/operations/increase-crypto-configuration', @@ -3738,28 +3736,28 @@ class PartitionIncreaseCryptoConfigHandlerTests(unittest.TestCase): '/api/partitions/1/operations/increase-crypto-configuration', operation_body, True, True) - self.assertIsNone(resp) + assert resp is None crypto_config = partition1['crypto-configuration'] - self.assertTrue(isinstance(crypto_config, dict)) + assert isinstance(crypto_config, dict) adapter_uris = crypto_config['crypto-adapter-uris'] - self.assertTrue(isinstance(adapter_uris, list)) + assert isinstance(adapter_uris, list) exp_adapter_uris = input_adapter_uris \ if input_adapter_uris is not None else [] - self.assertEqual(adapter_uris, exp_adapter_uris) + assert adapter_uris == exp_adapter_uris domain_configs = crypto_config['crypto-domain-configurations'] - self.assertTrue(isinstance(domain_configs, list)) + assert isinstance(domain_configs, list) exp_domain_configs = input_domain_configs \ if input_domain_configs is not None else [] - self.assertEqual(domain_configs, 
exp_domain_configs) + assert domain_configs == exp_domain_configs -class PartitionDecreaseCryptoConfigHandlerTests(unittest.TestCase): +class TestPartitionDecreaseCryptoConfigHandler(object): """All tests for class PartitionDecreaseCryptoConfigHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/partitions/([^/]+)', PartitionHandler), @@ -3772,7 +3770,7 @@ class PartitionDecreaseCryptoConfigHandlerTests(unittest.TestCase): def test_invoke_err_missing_body(self): # the function to be tested: - with self.assertRaises(HTTPError): + with pytest.raises(HTTPError): self.urihandler.post( self.hmc, '/api/partitions/1/operations/decrease-crypto-configuration', @@ -3785,7 +3783,7 @@ class PartitionDecreaseCryptoConfigHandlerTests(unittest.TestCase): partition1['status'] = 'starting' # the function to be tested: - with self.assertRaises(HTTPError): + with pytest.raises(HTTPError): self.urihandler.post( self.hmc, '/api/partitions/1/operations/decrease-crypto-configuration', @@ -3847,22 +3845,22 @@ class PartitionDecreaseCryptoConfigHandlerTests(unittest.TestCase): '/api/partitions/1/operations/decrease-crypto-configuration', operation_body, True, True) - self.assertIsNone(resp) + assert resp is None crypto_config = partition1['crypto-configuration'] - self.assertTrue(isinstance(crypto_config, dict)) + assert isinstance(crypto_config, dict) adapter_uris = crypto_config['crypto-adapter-uris'] - self.assertTrue(isinstance(adapter_uris, list)) + assert isinstance(adapter_uris, list) domain_configs = crypto_config['crypto-domain-configurations'] - self.assertTrue(isinstance(domain_configs, list)) + assert isinstance(domain_configs, list) -class PartitionChangeCryptoConfigHandlerTests(unittest.TestCase): +class TestPartitionChangeCryptoConfigHandler(object): """All tests for class PartitionChangeCryptoConfigHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = 
standard_test_hmc() self.uris = ( (r'/api/partitions/([^/]+)', PartitionHandler), @@ -3875,7 +3873,7 @@ class PartitionChangeCryptoConfigHandlerTests(unittest.TestCase): def test_invoke_err_missing_body(self): # the function to be tested: - with self.assertRaises(HTTPError): + with pytest.raises(HTTPError): self.urihandler.post( self.hmc, '/api/partitions/1/operations/' @@ -3890,7 +3888,7 @@ class PartitionChangeCryptoConfigHandlerTests(unittest.TestCase): } # the function to be tested: - with self.assertRaises(HTTPError): + with pytest.raises(HTTPError): self.urihandler.post( self.hmc, '/api/partitions/1/operations/' @@ -3905,7 +3903,7 @@ class PartitionChangeCryptoConfigHandlerTests(unittest.TestCase): } # the function to be tested: - with self.assertRaises(HTTPError): + with pytest.raises(HTTPError): self.urihandler.post( self.hmc, '/api/partitions/1/operations/' @@ -3919,7 +3917,7 @@ class PartitionChangeCryptoConfigHandlerTests(unittest.TestCase): partition1['status'] = 'starting' # the function to be tested: - with self.assertRaises(HTTPError): + with pytest.raises(HTTPError): self.urihandler.post( self.hmc, '/api/partitions/1/operations/' @@ -3969,22 +3967,22 @@ class PartitionChangeCryptoConfigHandlerTests(unittest.TestCase): 'change-crypto-domain-configuration', operation_body, True, True) - self.assertIsNone(resp) + assert resp is None crypto_config = partition1['crypto-configuration'] - self.assertTrue(isinstance(crypto_config, dict)) + assert isinstance(crypto_config, dict) adapter_uris = crypto_config['crypto-adapter-uris'] - self.assertTrue(isinstance(adapter_uris, list)) + assert isinstance(adapter_uris, list) domain_configs = crypto_config['crypto-domain-configurations'] - self.assertTrue(isinstance(domain_configs, list)) + assert isinstance(domain_configs, list) -class HbaHandlerTests(unittest.TestCase): +class TestHbaHandler(object): """All tests for classes HbasHandler and HbaHandler.""" - def setUp(self): + def setup_method(self): self.hmc, 
self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/partitions/([^/]+)', PartitionHandler), @@ -4003,7 +4001,7 @@ class HbaHandlerTests(unittest.TestCase): exp_hba_uris = [ '/api/partitions/1/hbas/1', ] - self.assertEqual(hba_uris, exp_hba_uris) + assert hba_uris == exp_hba_uris def test_get(self): @@ -4022,7 +4020,7 @@ class HbaHandlerTests(unittest.TestCase): 'wwpn': 'CFFEAFFE00008001', 'device-number': '1001', } - self.assertEqual(hba1, exp_hba1) + assert hba1 == exp_hba1 def test_create_verify(self): new_hba2 = { @@ -4035,10 +4033,10 @@ class HbaHandlerTests(unittest.TestCase): resp = self.urihandler.post(self.hmc, '/api/partitions/1/hbas', new_hba2, True, True) - self.assertEqual(len(resp), 1) - self.assertIn('element-uri', resp) + assert len(resp) == 1 + assert 'element-uri' in resp new_hba2_uri = resp['element-uri'] - self.assertEqual(new_hba2_uri, '/api/partitions/1/hbas/2') + assert new_hba2_uri == '/api/partitions/1/hbas/2' # the function to be tested: hba2 = self.urihandler.get(self.hmc, '/api/partitions/1/hbas/2', True) @@ -4052,7 +4050,7 @@ class HbaHandlerTests(unittest.TestCase): 'wwpn': hba2['wwpn'], # auto-generated } - self.assertEqual(hba2, exp_hba2) + assert hba2 == exp_hba2 def test_update_verify(self): update_hba1 = { @@ -4064,7 +4062,7 @@ class HbaHandlerTests(unittest.TestCase): update_hba1, True, True) hba1 = self.urihandler.get(self.hmc, '/api/partitions/1/hbas/1', True) - self.assertEqual(hba1['description'], 'updated hba #1') + assert hba1['description'] == 'updated hba #1' def test_delete_verify(self): @@ -4073,14 +4071,14 @@ class HbaHandlerTests(unittest.TestCase): # the function to be tested: self.urihandler.delete(self.hmc, '/api/partitions/1/hbas/1', True) - with self.assertRaises(InvalidResourceError): + with pytest.raises(InvalidResourceError): self.urihandler.get(self.hmc, '/api/partitions/1/hbas/1', True) -class HbaReassignPortHandlerTests(unittest.TestCase): +class TestHbaReassignPortHandler(object): """All tests 
for class HbaReassignPortHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/partitions/([^/]+)', PartitionHandler), @@ -4094,7 +4092,7 @@ class HbaReassignPortHandlerTests(unittest.TestCase): def test_invoke_err_missing_body(self): # the function to be tested: - with self.assertRaises(HTTPError): + with pytest.raises(HTTPError): self.urihandler.post( self.hmc, '/api/partitions/1/hbas/1/operations/' @@ -4107,7 +4105,7 @@ class HbaReassignPortHandlerTests(unittest.TestCase): } # the function to be tested: - with self.assertRaises(HTTPError): + with pytest.raises(HTTPError): self.urihandler.post( self.hmc, '/api/partitions/1/hbas/1/operations/' @@ -4127,17 +4125,17 @@ class HbaReassignPortHandlerTests(unittest.TestCase): 'reassign-storage-adapter-port', operation_body, True, True) - self.assertIsNone(resp) + assert resp is None hba = self.urihandler.get(self.hmc, '/api/partitions/1/hbas/1', True) adapter_port_uri = hba['adapter-port-uri'] - self.assertEqual(adapter_port_uri, new_adapter_port_uri) + assert adapter_port_uri == new_adapter_port_uri -class NicHandlerTests(unittest.TestCase): +class TestNicHandler(object): """All tests for classes NicsHandler and NicHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/partitions/([^/]+)', PartitionHandler), @@ -4156,7 +4154,7 @@ class NicHandlerTests(unittest.TestCase): exp_nic_uris = [ '/api/partitions/1/nics/1', ] - self.assertEqual(nic_uris, exp_nic_uris) + assert nic_uris == exp_nic_uris def test_get(self): @@ -4174,7 +4172,7 @@ class NicHandlerTests(unittest.TestCase): 'network-adapter-port-uri': '/api/adapters/3/network-ports/1', 'device-number': '2001', } - self.assertEqual(nic1, exp_nic1) + assert nic1 == exp_nic1 def test_create_verify(self): new_nic2 = { @@ -4187,10 +4185,10 @@ class NicHandlerTests(unittest.TestCase): resp = self.urihandler.post(self.hmc, 
'/api/partitions/1/nics', new_nic2, True, True) - self.assertEqual(len(resp), 1) - self.assertIn('element-uri', resp) + assert len(resp) == 1 + assert 'element-uri' in resp new_nic2_uri = resp['element-uri'] - self.assertEqual(new_nic2_uri, '/api/partitions/1/nics/2') + assert new_nic2_uri == '/api/partitions/1/nics/2' # the function to be tested: nic2 = self.urihandler.get(self.hmc, '/api/partitions/1/nics/2', True) @@ -4203,7 +4201,7 @@ class NicHandlerTests(unittest.TestCase): 'device-number': nic2['device-number'], # auto-generated } - self.assertEqual(nic2, exp_nic2) + assert nic2 == exp_nic2 def test_update_verify(self): update_nic1 = { @@ -4215,7 +4213,7 @@ class NicHandlerTests(unittest.TestCase): update_nic1, True, True) nic1 = self.urihandler.get(self.hmc, '/api/partitions/1/nics/1', True) - self.assertEqual(nic1['description'], 'updated nic #1') + assert nic1['description'] == 'updated nic #1' def test_delete_verify(self): @@ -4224,15 +4222,15 @@ class NicHandlerTests(unittest.TestCase): # the function to be tested: self.urihandler.delete(self.hmc, '/api/partitions/1/nics/1', True) - with self.assertRaises(InvalidResourceError): + with pytest.raises(InvalidResourceError): self.urihandler.get(self.hmc, '/api/partitions/1/nics/1', True) -class VirtualFunctionHandlerTests(unittest.TestCase): +class TestVirtualFunctionHandler(object): """All tests for classes VirtualFunctionsHandler and VirtualFunctionHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/partitions/([^/]+)', PartitionHandler), @@ -4253,7 +4251,7 @@ class VirtualFunctionHandlerTests(unittest.TestCase): exp_vf_uris = [ '/api/partitions/1/virtual-functions/1', ] - self.assertEqual(vf_uris, exp_vf_uris) + assert vf_uris == exp_vf_uris def test_get(self): @@ -4270,7 +4268,7 @@ class VirtualFunctionHandlerTests(unittest.TestCase): 'description': 'VF #1 in Partition #1', 'device-number': '3001', } - self.assertEqual(vf1, 
exp_vf1) + assert vf1 == exp_vf1 def test_create_verify(self): new_vf2 = { @@ -4283,10 +4281,10 @@ class VirtualFunctionHandlerTests(unittest.TestCase): '/api/partitions/1/virtual-functions', new_vf2, True, True) - self.assertEqual(len(resp), 1) - self.assertIn('element-uri', resp) + assert len(resp) == 1 + assert 'element-uri' in resp new_vf2_uri = resp['element-uri'] - self.assertEqual(new_vf2_uri, '/api/partitions/1/virtual-functions/2') + assert new_vf2_uri == '/api/partitions/1/virtual-functions/2' # the function to be tested: vf2 = self.urihandler.get(self.hmc, @@ -4300,7 +4298,7 @@ class VirtualFunctionHandlerTests(unittest.TestCase): 'device-number': vf2['device-number'], # auto-generated } - self.assertEqual(vf2, exp_vf2) + assert vf2 == exp_vf2 def test_update_verify(self): update_vf1 = { @@ -4314,7 +4312,7 @@ class VirtualFunctionHandlerTests(unittest.TestCase): vf1 = self.urihandler.get(self.hmc, '/api/partitions/1/virtual-functions/1', True) - self.assertEqual(vf1['description'], 'updated vf #1') + assert vf1['description'] == 'updated vf #1' def test_delete_verify(self): @@ -4325,16 +4323,16 @@ class VirtualFunctionHandlerTests(unittest.TestCase): self.urihandler.delete(self.hmc, '/api/partitions/1/virtual-functions/1', True) - with self.assertRaises(InvalidResourceError): + with pytest.raises(InvalidResourceError): self.urihandler.get(self.hmc, '/api/partitions/1/virtual-functions/1', True) -class VirtualSwitchHandlersTests(unittest.TestCase): +class TestVirtualSwitchHandlers(object): """All tests for classes VirtualSwitchesHandler and VirtualSwitchHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/cpcs/([^/]+)/virtual-switches(?:\?(.*))?', @@ -4358,7 +4356,7 @@ class VirtualSwitchHandlersTests(unittest.TestCase): }, ] } - self.assertEqual(vswitches, exp_vswitches) + assert vswitches == exp_vswitches def test_get(self): @@ -4373,13 +4371,13 @@ class 
VirtualSwitchHandlersTests(unittest.TestCase): 'description': 'Vswitch for OSA #1 in CPC #2', 'connected-vnic-uris': [], # auto-generated } - self.assertEqual(vswitch1, exp_vswitch1) + assert vswitch1 == exp_vswitch1 -class VirtualSwitchGetVnicsHandlerTests(unittest.TestCase): +class TestVirtualSwitchGetVnicsHandler(object): """All tests for class VirtualSwitchGetVnicsHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/virtual-switches/([^/]+)', VirtualSwitchHandler), @@ -4406,13 +4404,13 @@ class VirtualSwitchGetVnicsHandlerTests(unittest.TestCase): exp_resp = { 'connected-vnic-uris': connected_nic_uris, } - self.assertEqual(resp, exp_resp) + assert resp == exp_resp -class LparHandlersTests(unittest.TestCase): +class TestLparHandlers(object): """All tests for classes LparsHandler and LparHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/cpcs/([^/]+)/logical-partitions(?:\?(.*))?', LparsHandler), @@ -4435,7 +4433,7 @@ class LparHandlersTests(unittest.TestCase): }, ] } - self.assertEqual(lpars, exp_lpars) + assert lpars == exp_lpars def test_get(self): @@ -4450,14 +4448,14 @@ class LparHandlersTests(unittest.TestCase): 'status': 'not-activated', 'description': 'LPAR #1 in CPC #1', } - self.assertEqual(lpar1, exp_lpar1) + assert lpar1 == exp_lpar1 -class LparActLoadDeactHandlerTests(unittest.TestCase): +class TestLparActLoadDeactHandler(object): """All tests for classes LparActivateHandler, LparLoadHandler, and LparDeactivateHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/logical-partitions/([^/]+)', @@ -4475,7 +4473,7 @@ class LparActLoadDeactHandlerTests(unittest.TestCase): # CPC1 is in classic mode lpar1 = self.urihandler.get(self.hmc, '/api/logical-partitions/1', True) - self.assertEqual(lpar1['status'], 'not-activated') + 
assert lpar1['status'] == 'not-activated' # the function to be tested: self.urihandler.post(self.hmc, @@ -4484,7 +4482,7 @@ class LparActLoadDeactHandlerTests(unittest.TestCase): lpar1 = self.urihandler.get(self.hmc, '/api/logical-partitions/1', True) - self.assertEqual(lpar1['status'], 'not-operating') + assert lpar1['status'] == 'not-operating' # the function to be tested: self.urihandler.post(self.hmc, @@ -4493,7 +4491,7 @@ class LparActLoadDeactHandlerTests(unittest.TestCase): lpar1 = self.urihandler.get(self.hmc, '/api/logical-partitions/1', True) - self.assertEqual(lpar1['status'], 'operating') + assert lpar1['status'] == 'operating' # the function to be tested: self.urihandler.post(self.hmc, @@ -4502,14 +4500,14 @@ class LparActLoadDeactHandlerTests(unittest.TestCase): lpar1 = self.urihandler.get(self.hmc, '/api/logical-partitions/1', True) - self.assertEqual(lpar1['status'], 'not-activated') + assert lpar1['status'] == 'not-activated' -class ResetActProfileHandlersTests(unittest.TestCase): +class TestResetActProfileHandlers(object): """All tests for classes ResetActProfilesHandler and ResetActProfileHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/cpcs/([^/]+)/reset-activation-profiles(?:\?(.*))?', @@ -4534,7 +4532,7 @@ class ResetActProfileHandlersTests(unittest.TestCase): }, ] } - self.assertEqual(raps, exp_raps) + assert raps == exp_raps def test_get(self): @@ -4548,14 +4546,14 @@ class ResetActProfileHandlersTests(unittest.TestCase): 'element-uri': '/api/cpcs/1/reset-activation-profiles/r1', 'description': 'Reset profile #1 in CPC #1', } - self.assertEqual(rap1, exp_rap1) + assert rap1 == exp_rap1 -class ImageActProfileHandlersTests(unittest.TestCase): +class TestImageActProfileHandlers(object): """All tests for classes ImageActProfilesHandler and ImageActProfileHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() 
self.uris = ( (r'/api/cpcs/([^/]+)/image-activation-profiles/([^/]+)', @@ -4580,7 +4578,7 @@ class ImageActProfileHandlersTests(unittest.TestCase): }, ] } - self.assertEqual(iaps, exp_iaps) + assert iaps == exp_iaps def test_get(self): @@ -4594,14 +4592,14 @@ class ImageActProfileHandlersTests(unittest.TestCase): 'element-uri': '/api/cpcs/1/image-activation-profiles/i1', 'description': 'Image profile #1 in CPC #1', } - self.assertEqual(iap1, exp_iap1) + assert iap1 == exp_iap1 -class LoadActProfileHandlersTests(unittest.TestCase): +class TestLoadActProfileHandlers(object): """All tests for classes LoadActProfilesHandler and LoadActProfileHandler.""" - def setUp(self): + def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/cpcs/([^/]+)/load-activation-profiles/([^/]+)', @@ -4626,7 +4624,7 @@ class LoadActProfileHandlersTests(unittest.TestCase): }, ] } - self.assertEqual(laps, exp_laps) + assert laps == exp_laps def test_get(self): @@ -4640,9 +4638,4 @@ class LoadActProfileHandlersTests(unittest.TestCase): 'element-uri': '/api/cpcs/1/load-activation-profiles/L1', 'description': 'Load profile #1 in CPC #1', } - self.assertEqual(lap1, exp_lap1) - - -if __name__ == '__main__': - requests.packages.urllib3.disable_warnings() - unittest.main() + assert lap1 == exp_lap1
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": -1, "issue_text_score": 0, "test_score": -1 }, "num_modified_files": 5 }
0.17
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements.txt", "dev-requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 anyio==3.6.2 argon2-cffi==21.3.0 argon2-cffi-bindings==21.2.0 async-generator==1.10 attrs==22.2.0 Babel==2.11.0 backcall==0.2.0 bleach==4.1.0 certifi==2021.5.30 cffi==1.15.1 charset-normalizer==2.0.12 colorama==0.4.5 comm==0.1.4 contextvars==2.4 coverage==6.2 cryptography==40.0.2 dataclasses==0.8 decorator==5.1.1 defusedxml==0.7.1 docopt==0.6.2 docutils==0.18.1 entrypoints==0.4 flake8==3.9.2 gitdb==4.0.9 GitPython==3.1.18 idna==3.10 imagesize==1.4.1 immutables==0.19 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 ipykernel==5.5.6 ipython==7.16.3 ipython-genutils==0.2.0 ipywidgets==7.8.5 jedi==0.17.2 jeepney==0.7.1 Jinja2==3.0.3 json5==0.9.16 jsonschema==3.2.0 jupyter==1.1.1 jupyter-client==7.1.2 jupyter-console==6.4.3 jupyter-core==4.9.2 jupyter-server==1.13.1 jupyterlab==3.2.9 jupyterlab-pygments==0.1.2 jupyterlab-server==2.10.3 jupyterlab_widgets==1.1.11 keyring==23.4.1 MarkupSafe==2.0.1 mccabe==0.6.1 mistune==0.8.4 mock==5.2.0 nbclassic==0.3.5 nbclient==0.5.9 nbconvert==6.0.7 nbformat==5.1.3 nest-asyncio==1.6.0 notebook==6.4.10 packaging==21.3 pandocfilters==1.5.1 parso==0.7.1 pbr==6.1.1 pexpect==4.9.0 pickleshare==0.7.5 pkginfo==1.10.0 pluggy==1.0.0 prometheus-client==0.17.1 prompt-toolkit==3.0.36 ptyprocess==0.7.0 py==1.11.0 pycodestyle==2.7.0 pycparser==2.21 pyflakes==2.3.1 Pygments==2.14.0 pyparsing==3.1.4 pyrsistent==0.18.0 pytest==7.0.1 pytest-cov==4.0.0 python-coveralls==2.9.3 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==6.0.1 pyzmq==25.1.2 readme-renderer==34.0 requests==2.27.1 requests-mock==1.12.1 requests-toolbelt==1.0.0 rfc3986==1.5.0 SecretStorage==3.3.3 Send2Trash==1.8.3 six==1.17.0 smmap==5.0.0 sniffio==1.2.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinx-git==11.0.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 stomp.py==8.1.0 terminado==0.12.1 testfixtures==7.2.2 
testpath==0.6.0 tomli==1.2.3 tornado==6.1 tqdm==4.64.1 traitlets==4.3.3 twine==3.8.0 typing_extensions==4.1.1 urllib3==1.26.20 wcwidth==0.2.13 webencodings==0.5.1 websocket-client==1.3.1 widgetsnbextension==3.6.10 -e git+https://github.com/zhmcclient/python-zhmcclient.git@f67d075e5f00522a32d60f5f5966956d3c66a5b2#egg=zhmcclient zipp==3.6.0
name: python-zhmcclient channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - anyio==3.6.2 - argon2-cffi==21.3.0 - argon2-cffi-bindings==21.2.0 - async-generator==1.10 - attrs==22.2.0 - babel==2.11.0 - backcall==0.2.0 - bleach==4.1.0 - cffi==1.15.1 - charset-normalizer==2.0.12 - colorama==0.4.5 - comm==0.1.4 - contextvars==2.4 - coverage==6.2 - cryptography==40.0.2 - dataclasses==0.8 - decorator==5.1.1 - defusedxml==0.7.1 - docopt==0.6.2 - docutils==0.18.1 - entrypoints==0.4 - flake8==3.9.2 - gitdb==4.0.9 - gitpython==3.1.18 - idna==3.10 - imagesize==1.4.1 - immutables==0.19 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - ipykernel==5.5.6 - ipython==7.16.3 - ipython-genutils==0.2.0 - ipywidgets==7.8.5 - jedi==0.17.2 - jeepney==0.7.1 - jinja2==3.0.3 - json5==0.9.16 - jsonschema==3.2.0 - jupyter==1.1.1 - jupyter-client==7.1.2 - jupyter-console==6.4.3 - jupyter-core==4.9.2 - jupyter-server==1.13.1 - jupyterlab==3.2.9 - jupyterlab-pygments==0.1.2 - jupyterlab-server==2.10.3 - jupyterlab-widgets==1.1.11 - keyring==23.4.1 - markupsafe==2.0.1 - mccabe==0.6.1 - mistune==0.8.4 - mock==5.2.0 - nbclassic==0.3.5 - nbclient==0.5.9 - nbconvert==6.0.7 - nbformat==5.1.3 - nest-asyncio==1.6.0 - notebook==6.4.10 - packaging==21.3 - pandocfilters==1.5.1 - parso==0.7.1 - pbr==6.1.1 - pexpect==4.9.0 - 
pickleshare==0.7.5 - pkginfo==1.10.0 - pluggy==1.0.0 - prometheus-client==0.17.1 - prompt-toolkit==3.0.36 - ptyprocess==0.7.0 - py==1.11.0 - pycodestyle==2.7.0 - pycparser==2.21 - pyflakes==2.3.1 - pygments==2.14.0 - pyparsing==3.1.4 - pyrsistent==0.18.0 - pytest==7.0.1 - pytest-cov==4.0.0 - python-coveralls==2.9.3 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==6.0.1 - pyzmq==25.1.2 - readme-renderer==34.0 - requests==2.27.1 - requests-mock==1.12.1 - requests-toolbelt==1.0.0 - rfc3986==1.5.0 - secretstorage==3.3.3 - send2trash==1.8.3 - six==1.17.0 - smmap==5.0.0 - sniffio==1.2.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinx-git==11.0.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - stomp-py==8.1.0 - terminado==0.12.1 - testfixtures==7.2.2 - testpath==0.6.0 - tomli==1.2.3 - tornado==6.1 - tqdm==4.64.1 - traitlets==4.3.3 - twine==3.8.0 - typing-extensions==4.1.1 - urllib3==1.26.20 - wcwidth==0.2.13 - webencodings==0.5.1 - websocket-client==1.3.1 - widgetsnbextension==3.6.10 - zipp==3.6.0 prefix: /opt/conda/envs/python-zhmcclient
[ "tests/unit/test_session.py::TestSession::test_logon[fake-host-fake-userid-fake-pw-True-None]", "tests/unit/test_session.py::TestSession::test_logon[fake-host-fake-userid-None-True-None]" ]
[ "tests/unit/test_session.py::TestSession::test_get_error_html_1" ]
[ "tests/unit/common/test_utils.py::TestUtilsAssertResources::test_assert_resources_success[all-False]", "tests/unit/common/test_utils.py::TestUtilsAssertResources::test_assert_resources_success[all-True]", "tests/unit/common/test_utils.py::TestUtilsAssertResources::test_assert_resources_success[some-False]", "tests/unit/common/test_utils.py::TestUtilsAssertResources::test_assert_resources_success[some-True]", "tests/unit/common/test_utils.py::TestUtilsAssertResources::test_assert_resources_success[empty-False]", "tests/unit/common/test_utils.py::TestUtilsAssertResources::test_assert_resources_success[empty-True]", "tests/unit/common/test_utils.py::TestUtilsAssertResources::test_assert_resources_success[none-False]", "tests/unit/common/test_utils.py::TestUtilsAssertResources::test_assert_resources_success[none-True]", "tests/unit/common/test_utils.py::TestUtilsAssertResources::test_assert_resources_error_props[all-False]", "tests/unit/common/test_utils.py::TestUtilsAssertResources::test_assert_resources_error_props[all-True]", "tests/unit/common/test_utils.py::TestUtilsAssertResources::test_assert_resources_error_props[some-False]", "tests/unit/common/test_utils.py::TestUtilsAssertResources::test_assert_resources_error_props[some-True]", "tests/unit/common/test_utils.py::TestUtilsAssertResources::test_assert_resources_error_props[none-False]", "tests/unit/common/test_utils.py::TestUtilsAssertResources::test_assert_resources_error_props[none-True]", "tests/unit/common/test_utils.py::TestUtilsAssertResources::test_assert_resources_error_res[False]", "tests/unit/common/test_utils.py::TestUtilsAssertResources::test_assert_resources_error_res[True]", "tests/unit/test_client.py::TestClient::test_client_initial_attrs[fake-hmc-2.13.1-1.8]", "tests/unit/test_client.py::TestClient::test_version_info[fake-hmc1-2.13.1-1.8]", "tests/unit/test_client.py::TestClient::test_version_info[fake-hmc2-2.14.0-2.20]", 
"tests/unit/test_client.py::TestClient::test_query_api_version[fake-hmc1-2.13.1-1.8]", "tests/unit/test_client.py::TestClient::test_query_api_version[fake-hmc2-2.14.0-2.20]", "tests/unit/test_exceptions.py::TestError::test_error_initial_attrs[args0]", "tests/unit/test_exceptions.py::TestError::test_error_initial_attrs[args1]", "tests/unit/test_exceptions.py::TestError::test_error_initial_attrs[args2]", "tests/unit/test_exceptions.py::TestError::test_error_initial_attrs[args3]", "tests/unit/test_exceptions.py::TestConnectionError::test_connectionerror_initial_attrs[arg_names0-args0]", "tests/unit/test_exceptions.py::TestConnectionError::test_connectionerror_initial_attrs[arg_names0-args1]", "tests/unit/test_exceptions.py::TestConnectionError::test_connectionerror_initial_attrs[arg_names0-args2]", "tests/unit/test_exceptions.py::TestConnectionError::test_connectionerror_initial_attrs[arg_names1-args0]", "tests/unit/test_exceptions.py::TestConnectionError::test_connectionerror_initial_attrs[arg_names1-args1]", "tests/unit/test_exceptions.py::TestConnectionError::test_connectionerror_initial_attrs[arg_names1-args2]", "tests/unit/test_exceptions.py::TestConnectionError::test_connectionerror_initial_attrs[arg_names2-args0]", "tests/unit/test_exceptions.py::TestConnectionError::test_connectionerror_initial_attrs[arg_names2-args1]", "tests/unit/test_exceptions.py::TestConnectionError::test_connectionerror_initial_attrs[arg_names2-args2]", "tests/unit/test_exceptions.py::TestConnectionError::test_connectionerror_repr[fake", "tests/unit/test_exceptions.py::TestConnectionError::test_connectionerror_repr[-None]", "tests/unit/test_exceptions.py::TestConnectionError::test_connectionerror_repr[None-None]", "tests/unit/test_exceptions.py::TestConnectionError::test_connectionerror_str[fake", "tests/unit/test_exceptions.py::TestConnectionError::test_connectionerror_str[-None]", "tests/unit/test_exceptions.py::TestConnectionError::test_connectionerror_str[None-None]", 
"tests/unit/test_exceptions.py::TestConnectionError::test_connectionerror_str_def[fake", "tests/unit/test_exceptions.py::TestConnectionError::test_connectionerror_str_def[-None]", "tests/unit/test_exceptions.py::TestConnectionError::test_connectionerror_str_def[None-None]", "tests/unit/test_exceptions.py::TestConnectTimeout::test_connecttimeout_initial_attrs[arg_names0-args0]", "tests/unit/test_exceptions.py::TestConnectTimeout::test_connecttimeout_initial_attrs[arg_names0-args1]", "tests/unit/test_exceptions.py::TestConnectTimeout::test_connecttimeout_initial_attrs[arg_names0-args2]", "tests/unit/test_exceptions.py::TestConnectTimeout::test_connecttimeout_initial_attrs[arg_names1-args0]", "tests/unit/test_exceptions.py::TestConnectTimeout::test_connecttimeout_initial_attrs[arg_names1-args1]", "tests/unit/test_exceptions.py::TestConnectTimeout::test_connecttimeout_initial_attrs[arg_names1-args2]", "tests/unit/test_exceptions.py::TestConnectTimeout::test_connecttimeout_repr[fake", "tests/unit/test_exceptions.py::TestConnectTimeout::test_connecttimeout_repr[-None-30-3]", "tests/unit/test_exceptions.py::TestConnectTimeout::test_connecttimeout_repr[None-None-0-0]", "tests/unit/test_exceptions.py::TestConnectTimeout::test_connecttimeout_str[fake", "tests/unit/test_exceptions.py::TestConnectTimeout::test_connecttimeout_str[-None-30-3]", "tests/unit/test_exceptions.py::TestConnectTimeout::test_connecttimeout_str[None-None-0-0]", "tests/unit/test_exceptions.py::TestConnectTimeout::test_connecttimeout_str_def[fake", "tests/unit/test_exceptions.py::TestConnectTimeout::test_connecttimeout_str_def[-None-30-3]", "tests/unit/test_exceptions.py::TestConnectTimeout::test_connecttimeout_str_def[None-None-0-0]", "tests/unit/test_exceptions.py::TestReadTimeout::test_readtimeout_initial_attrs[arg_names0-args0]", "tests/unit/test_exceptions.py::TestReadTimeout::test_readtimeout_initial_attrs[arg_names0-args1]", 
"tests/unit/test_exceptions.py::TestReadTimeout::test_readtimeout_initial_attrs[arg_names0-args2]", "tests/unit/test_exceptions.py::TestReadTimeout::test_readtimeout_initial_attrs[arg_names1-args0]", "tests/unit/test_exceptions.py::TestReadTimeout::test_readtimeout_initial_attrs[arg_names1-args1]", "tests/unit/test_exceptions.py::TestReadTimeout::test_readtimeout_initial_attrs[arg_names1-args2]", "tests/unit/test_exceptions.py::TestReadTimeout::test_readtimeout_repr[fake", "tests/unit/test_exceptions.py::TestReadTimeout::test_readtimeout_repr[-None-30-3]", "tests/unit/test_exceptions.py::TestReadTimeout::test_readtimeout_repr[None-None-0-0]", "tests/unit/test_exceptions.py::TestReadTimeout::test_readtimeout_str[fake", "tests/unit/test_exceptions.py::TestReadTimeout::test_readtimeout_str[-None-30-3]", "tests/unit/test_exceptions.py::TestReadTimeout::test_readtimeout_str[None-None-0-0]", "tests/unit/test_exceptions.py::TestReadTimeout::test_readtimeout_str_def[fake", "tests/unit/test_exceptions.py::TestReadTimeout::test_readtimeout_str_def[-None-30-3]", "tests/unit/test_exceptions.py::TestReadTimeout::test_readtimeout_str_def[None-None-0-0]", "tests/unit/test_exceptions.py::TestRetriesExceeded::test_retriesexceeded_initial_attrs[arg_names0-args0]", "tests/unit/test_exceptions.py::TestRetriesExceeded::test_retriesexceeded_initial_attrs[arg_names0-args1]", "tests/unit/test_exceptions.py::TestRetriesExceeded::test_retriesexceeded_initial_attrs[arg_names0-args2]", "tests/unit/test_exceptions.py::TestRetriesExceeded::test_retriesexceeded_initial_attrs[arg_names1-args0]", "tests/unit/test_exceptions.py::TestRetriesExceeded::test_retriesexceeded_initial_attrs[arg_names1-args1]", "tests/unit/test_exceptions.py::TestRetriesExceeded::test_retriesexceeded_initial_attrs[arg_names1-args2]", "tests/unit/test_exceptions.py::TestRetriesExceeded::test_retriesexceeded_repr[fake", "tests/unit/test_exceptions.py::TestRetriesExceeded::test_retriesexceeded_repr[-None-3]", 
"tests/unit/test_exceptions.py::TestRetriesExceeded::test_retriesexceeded_repr[None-None-0]", "tests/unit/test_exceptions.py::TestRetriesExceeded::test_retriesexceeded_str[fake", "tests/unit/test_exceptions.py::TestRetriesExceeded::test_retriesexceeded_str[-None-3]", "tests/unit/test_exceptions.py::TestRetriesExceeded::test_retriesexceeded_str[None-None-0]", "tests/unit/test_exceptions.py::TestRetriesExceeded::test_retriesexceeded_str_def[fake", "tests/unit/test_exceptions.py::TestRetriesExceeded::test_retriesexceeded_str_def[-None-3]", "tests/unit/test_exceptions.py::TestRetriesExceeded::test_retriesexceeded_str_def[None-None-0]", "tests/unit/test_exceptions.py::TestClientAuthError::test_clientautherror_initial_attrs[arg_names0-args0]", "tests/unit/test_exceptions.py::TestClientAuthError::test_clientautherror_initial_attrs[arg_names0-args1]", "tests/unit/test_exceptions.py::TestClientAuthError::test_clientautherror_initial_attrs[arg_names1-args0]", "tests/unit/test_exceptions.py::TestClientAuthError::test_clientautherror_initial_attrs[arg_names1-args1]", "tests/unit/test_exceptions.py::TestClientAuthError::test_clientautherror_repr[fake", "tests/unit/test_exceptions.py::TestClientAuthError::test_clientautherror_repr[]", "tests/unit/test_exceptions.py::TestClientAuthError::test_clientautherror_repr[None]", "tests/unit/test_exceptions.py::TestClientAuthError::test_clientautherror_str[fake", "tests/unit/test_exceptions.py::TestClientAuthError::test_clientautherror_str[]", "tests/unit/test_exceptions.py::TestClientAuthError::test_clientautherror_str[None]", "tests/unit/test_exceptions.py::TestClientAuthError::test_clientautherror_str_def[fake", "tests/unit/test_exceptions.py::TestClientAuthError::test_clientautherror_str_def[]", "tests/unit/test_exceptions.py::TestClientAuthError::test_clientautherror_str_def[None]", "tests/unit/test_exceptions.py::TestServerAuthError::test_serverautherror_initial_attrs[arg_names0-args0]", 
"tests/unit/test_exceptions.py::TestServerAuthError::test_serverautherror_initial_attrs[arg_names0-args1]", "tests/unit/test_exceptions.py::TestServerAuthError::test_serverautherror_initial_attrs[arg_names0-args2]", "tests/unit/test_exceptions.py::TestServerAuthError::test_serverautherror_initial_attrs[arg_names1-args0]", "tests/unit/test_exceptions.py::TestServerAuthError::test_serverautherror_initial_attrs[arg_names1-args1]", "tests/unit/test_exceptions.py::TestServerAuthError::test_serverautherror_initial_attrs[arg_names1-args2]", "tests/unit/test_exceptions.py::TestServerAuthError::test_serverautherror_initial_attrs[arg_names2-args0]", "tests/unit/test_exceptions.py::TestServerAuthError::test_serverautherror_initial_attrs[arg_names2-args1]", "tests/unit/test_exceptions.py::TestServerAuthError::test_serverautherror_initial_attrs[arg_names2-args2]", "tests/unit/test_exceptions.py::TestServerAuthError::test_serverautherror_repr[fake", "tests/unit/test_exceptions.py::TestServerAuthError::test_serverautherror_repr[-details1]", "tests/unit/test_exceptions.py::TestServerAuthError::test_serverautherror_repr[None-details2]", "tests/unit/test_exceptions.py::TestServerAuthError::test_serverautherror_str[fake", "tests/unit/test_exceptions.py::TestServerAuthError::test_serverautherror_str[-details1]", "tests/unit/test_exceptions.py::TestServerAuthError::test_serverautherror_str[None-details2]", "tests/unit/test_exceptions.py::TestServerAuthError::test_serverautherror_str_def[fake", "tests/unit/test_exceptions.py::TestServerAuthError::test_serverautherror_str_def[-details1]", "tests/unit/test_exceptions.py::TestServerAuthError::test_serverautherror_str_def[None-details2]", "tests/unit/test_exceptions.py::TestParseError::test_parseerror_initial_attrs[arg_names0-args0-42-7]", "tests/unit/test_exceptions.py::TestParseError::test_parseerror_initial_attrs[arg_names0-args1-None-None]", 
"tests/unit/test_exceptions.py::TestParseError::test_parseerror_initial_attrs[arg_names0-args2-None-None]", "tests/unit/test_exceptions.py::TestParseError::test_parseerror_initial_attrs[arg_names0-args3-None-None]", "tests/unit/test_exceptions.py::TestParseError::test_parseerror_initial_attrs[arg_names0-args4-None-None]", "tests/unit/test_exceptions.py::TestParseError::test_parseerror_initial_attrs[arg_names0-args5-None-None]", "tests/unit/test_exceptions.py::TestParseError::test_parseerror_initial_attrs[arg_names1-args0-42-7]", "tests/unit/test_exceptions.py::TestParseError::test_parseerror_initial_attrs[arg_names1-args1-None-None]", "tests/unit/test_exceptions.py::TestParseError::test_parseerror_initial_attrs[arg_names1-args2-None-None]", "tests/unit/test_exceptions.py::TestParseError::test_parseerror_initial_attrs[arg_names1-args3-None-None]", "tests/unit/test_exceptions.py::TestParseError::test_parseerror_initial_attrs[arg_names1-args4-None-None]", "tests/unit/test_exceptions.py::TestParseError::test_parseerror_initial_attrs[arg_names1-args5-None-None]", "tests/unit/test_exceptions.py::TestParseError::test_parseerror_repr[Bla:", "tests/unit/test_exceptions.py::TestParseError::test_parseerror_repr[fake", "tests/unit/test_exceptions.py::TestParseError::test_parseerror_repr[]", "tests/unit/test_exceptions.py::TestParseError::test_parseerror_repr[None]", "tests/unit/test_exceptions.py::TestParseError::test_parseerror_str[Bla:", "tests/unit/test_exceptions.py::TestParseError::test_parseerror_str[fake", "tests/unit/test_exceptions.py::TestParseError::test_parseerror_str[]", "tests/unit/test_exceptions.py::TestParseError::test_parseerror_str[None]", "tests/unit/test_exceptions.py::TestParseError::test_parseerror_str_def[Bla:", "tests/unit/test_exceptions.py::TestParseError::test_parseerror_str_def[fake", "tests/unit/test_exceptions.py::TestParseError::test_parseerror_str_def[]", "tests/unit/test_exceptions.py::TestParseError::test_parseerror_str_def[None]", 
"tests/unit/test_exceptions.py::TestVersionError::test_versionerror_initial_attrs[arg_names0-args0]", "tests/unit/test_exceptions.py::TestVersionError::test_versionerror_initial_attrs[arg_names0-args1]", "tests/unit/test_exceptions.py::TestVersionError::test_versionerror_initial_attrs[arg_names0-args2]", "tests/unit/test_exceptions.py::TestVersionError::test_versionerror_initial_attrs[arg_names1-args0]", "tests/unit/test_exceptions.py::TestVersionError::test_versionerror_initial_attrs[arg_names1-args1]", "tests/unit/test_exceptions.py::TestVersionError::test_versionerror_initial_attrs[arg_names1-args2]", "tests/unit/test_exceptions.py::TestVersionError::test_versionerror_repr[fake", "tests/unit/test_exceptions.py::TestVersionError::test_versionerror_repr[-min_api_version1-api_version1]", "tests/unit/test_exceptions.py::TestVersionError::test_versionerror_repr[None-min_api_version2-api_version2]", "tests/unit/test_exceptions.py::TestVersionError::test_versionerror_str[fake", "tests/unit/test_exceptions.py::TestVersionError::test_versionerror_str[-min_api_version1-api_version1]", "tests/unit/test_exceptions.py::TestVersionError::test_versionerror_str[None-min_api_version2-api_version2]", "tests/unit/test_exceptions.py::TestVersionError::test_versionerror_str_def[fake", "tests/unit/test_exceptions.py::TestVersionError::test_versionerror_str_def[-min_api_version1-api_version1]", "tests/unit/test_exceptions.py::TestVersionError::test_versionerror_str_def[None-min_api_version2-api_version2]", "tests/unit/test_exceptions.py::TestHTTPError::test_httperror_initial_attrs[arg_names0-args0]", "tests/unit/test_exceptions.py::TestHTTPError::test_httperror_initial_attrs[arg_names0-args1]", "tests/unit/test_exceptions.py::TestHTTPError::test_httperror_initial_attrs[arg_names0-args2]", "tests/unit/test_exceptions.py::TestHTTPError::test_httperror_initial_attrs[arg_names0-args3]", "tests/unit/test_exceptions.py::TestHTTPError::test_httperror_initial_attrs[arg_names1-args0]", 
"tests/unit/test_exceptions.py::TestHTTPError::test_httperror_initial_attrs[arg_names1-args1]", "tests/unit/test_exceptions.py::TestHTTPError::test_httperror_initial_attrs[arg_names1-args2]", "tests/unit/test_exceptions.py::TestHTTPError::test_httperror_initial_attrs[arg_names1-args3]", "tests/unit/test_exceptions.py::TestHTTPError::test_httperror_repr[body0]", "tests/unit/test_exceptions.py::TestHTTPError::test_httperror_str[body0]", "tests/unit/test_exceptions.py::TestHTTPError::test_httperror_str_def[body0]", "tests/unit/test_exceptions.py::TestOperationTimeout::test_operationtimeout_initial_attrs[arg_names0-args0]", "tests/unit/test_exceptions.py::TestOperationTimeout::test_operationtimeout_initial_attrs[arg_names0-args1]", "tests/unit/test_exceptions.py::TestOperationTimeout::test_operationtimeout_initial_attrs[arg_names0-args2]", "tests/unit/test_exceptions.py::TestOperationTimeout::test_operationtimeout_initial_attrs[arg_names1-args0]", "tests/unit/test_exceptions.py::TestOperationTimeout::test_operationtimeout_initial_attrs[arg_names1-args1]", "tests/unit/test_exceptions.py::TestOperationTimeout::test_operationtimeout_initial_attrs[arg_names1-args2]", "tests/unit/test_exceptions.py::TestOperationTimeout::test_operationtimeout_repr[fake", "tests/unit/test_exceptions.py::TestOperationTimeout::test_operationtimeout_repr[-3]", "tests/unit/test_exceptions.py::TestOperationTimeout::test_operationtimeout_repr[None-0]", "tests/unit/test_exceptions.py::TestOperationTimeout::test_operationtimeout_str[fake", "tests/unit/test_exceptions.py::TestOperationTimeout::test_operationtimeout_str[-3]", "tests/unit/test_exceptions.py::TestOperationTimeout::test_operationtimeout_str[None-0]", "tests/unit/test_exceptions.py::TestOperationTimeout::test_operationtimeout_str_def[fake", "tests/unit/test_exceptions.py::TestOperationTimeout::test_operationtimeout_str_def[-3]", "tests/unit/test_exceptions.py::TestOperationTimeout::test_operationtimeout_str_def[None-0]", 
"tests/unit/test_exceptions.py::TestStatusTimeout::test_statustimeout_initial_attrs[arg_names0-args0]", "tests/unit/test_exceptions.py::TestStatusTimeout::test_statustimeout_initial_attrs[arg_names0-args1]", "tests/unit/test_exceptions.py::TestStatusTimeout::test_statustimeout_initial_attrs[arg_names0-args2]", "tests/unit/test_exceptions.py::TestStatusTimeout::test_statustimeout_initial_attrs[arg_names1-args0]", "tests/unit/test_exceptions.py::TestStatusTimeout::test_statustimeout_initial_attrs[arg_names1-args1]", "tests/unit/test_exceptions.py::TestStatusTimeout::test_statustimeout_initial_attrs[arg_names1-args2]", "tests/unit/test_exceptions.py::TestStatusTimeout::test_statustimeout_repr[fake", "tests/unit/test_exceptions.py::TestStatusTimeout::test_statustimeout_repr[--desired_statuses1-3]", "tests/unit/test_exceptions.py::TestStatusTimeout::test_statustimeout_repr[None-None-desired_statuses2-0]", "tests/unit/test_exceptions.py::TestStatusTimeout::test_statustimeout_str[fake", "tests/unit/test_exceptions.py::TestStatusTimeout::test_statustimeout_str[--desired_statuses1-3]", "tests/unit/test_exceptions.py::TestStatusTimeout::test_statustimeout_str[None-None-desired_statuses2-0]", "tests/unit/test_exceptions.py::TestStatusTimeout::test_statustimeout_str_def[fake", "tests/unit/test_exceptions.py::TestStatusTimeout::test_statustimeout_str_def[--desired_statuses1-3]", "tests/unit/test_exceptions.py::TestStatusTimeout::test_statustimeout_str_def[None-None-desired_statuses2-0]", "tests/unit/test_exceptions.py::TestNoUniqueMatch::test_nouniquematch_initial_attrs[arg_names0-args0]", "tests/unit/test_exceptions.py::TestNoUniqueMatch::test_nouniquematch_initial_attrs[arg_names0-args1]", "tests/unit/test_exceptions.py::TestNoUniqueMatch::test_nouniquematch_initial_attrs[arg_names0-args2]", "tests/unit/test_exceptions.py::TestNoUniqueMatch::test_nouniquematch_initial_attrs[arg_names1-args0]", 
"tests/unit/test_exceptions.py::TestNoUniqueMatch::test_nouniquematch_initial_attrs[arg_names1-args1]", "tests/unit/test_exceptions.py::TestNoUniqueMatch::test_nouniquematch_initial_attrs[arg_names1-args2]", "tests/unit/test_exceptions.py::TestNoUniqueMatch::test_nouniquematch_repr[filter_args0]", "tests/unit/test_exceptions.py::TestNoUniqueMatch::test_nouniquematch_str[filter_args0]", "tests/unit/test_exceptions.py::TestNoUniqueMatch::test_nouniquematch_str_def[filter_args0]", "tests/unit/test_exceptions.py::TestNotFound::test_notfound_initial_attrs[arg_names0-args0]", "tests/unit/test_exceptions.py::TestNotFound::test_notfound_initial_attrs[arg_names0-args1]", "tests/unit/test_exceptions.py::TestNotFound::test_notfound_initial_attrs[arg_names0-args2]", "tests/unit/test_exceptions.py::TestNotFound::test_notfound_initial_attrs[arg_names1-args0]", "tests/unit/test_exceptions.py::TestNotFound::test_notfound_initial_attrs[arg_names1-args1]", "tests/unit/test_exceptions.py::TestNotFound::test_notfound_initial_attrs[arg_names1-args2]", "tests/unit/test_exceptions.py::TestNotFound::test_notfound_repr[filter_args0]", "tests/unit/test_exceptions.py::TestNotFound::test_notfound_str[filter_args0]", "tests/unit/test_exceptions.py::TestNotFound::test_notfound_str_def[filter_args0]", "tests/unit/test_hba.py::TestHba::test_hbamanager_initial_attrs", "tests/unit/test_hba.py::TestHba::test_hbamanager_list_full_properties[full_properties_kwargs0-prop_names0]", "tests/unit/test_hba.py::TestHba::test_hbamanager_list_full_properties[full_properties_kwargs1-prop_names1]", "tests/unit/test_hba.py::TestHba::test_hbamanager_list_full_properties[full_properties_kwargs2-None]", "tests/unit/test_hba.py::TestHba::test_hbamanager_list_filter_args[filter_args0-exp_oids0]", "tests/unit/test_hba.py::TestHba::test_hbamanager_list_filter_args[filter_args1-exp_oids1]", "tests/unit/test_hba.py::TestHba::test_hbamanager_list_filter_args[filter_args2-exp_oids2]", 
"tests/unit/test_hba.py::TestHba::test_hbamanager_list_filter_args[filter_args3-exp_oids3]", "tests/unit/test_hba.py::TestHba::test_hbamanager_list_filter_args[filter_args4-exp_oids4]", "tests/unit/test_hba.py::TestHba::test_hbamanager_list_filter_args[filter_args5-exp_oids5]", "tests/unit/test_hba.py::TestHba::test_hbamanager_list_filter_args[filter_args6-exp_oids6]", "tests/unit/test_hba.py::TestHba::test_hbamanager_list_filter_args[filter_args7-exp_oids7]", "tests/unit/test_hba.py::TestHba::test_hbamanager_list_filter_args[filter_args8-exp_oids8]", "tests/unit/test_hba.py::TestHba::test_hbamanager_list_filter_args[filter_args9-exp_oids9]", "tests/unit/test_hba.py::TestHba::test_hbamanager_list_filter_args[filter_args10-exp_oids10]", "tests/unit/test_hba.py::TestHba::test_hbamanager_list_filter_args[filter_args11-exp_oids11]", "tests/unit/test_hba.py::TestHba::test_hbamanager_list_filter_args[filter_args12-exp_oids12]", "tests/unit/test_hba.py::TestHba::test_hbamanager_list_filter_args[filter_args13-exp_oids13]", "tests/unit/test_hba.py::TestHba::test_hbamanager_list_filter_args[filter_args14-exp_oids14]", "tests/unit/test_hba.py::TestHba::test_hbamanager_list_filter_args[filter_args15-exp_oids15]", "tests/unit/test_hba.py::TestHba::test_hbamanager_list_filter_args[filter_args16-exp_oids16]", "tests/unit/test_hba.py::TestHba::test_hbamanager_list_filter_args[filter_args17-exp_oids17]", "tests/unit/test_hba.py::TestHba::test_hbamanager_list_filter_args[filter_args18-exp_oids18]", "tests/unit/test_hba.py::TestHba::test_hbamanager_list_filter_args[filter_args19-exp_oids19]", "tests/unit/test_hba.py::TestHba::test_hbamanager_list_filter_args[filter_args20-exp_oids20]", "tests/unit/test_hba.py::TestHba::test_hbamanager_list_filter_args[filter_args21-exp_oids21]", "tests/unit/test_hba.py::TestHba::test_hbamanager_list_filter_args[filter_args22-exp_oids22]", "tests/unit/test_hba.py::TestHba::test_hbamanager_list_filter_args[filter_args23-exp_oids23]", 
"tests/unit/test_hba.py::TestHba::test_hbamanager_list_filter_args[filter_args24-exp_oids24]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props0-None-exp_prop_exc0-stopped-None]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props0-None-exp_prop_exc0-terminated-None]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props0-None-exp_prop_exc0-starting-exp_status_exc2]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props0-None-exp_prop_exc0-active-None]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props0-None-exp_prop_exc0-stopping-exp_status_exc4]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props0-None-exp_prop_exc0-degraded-None]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props0-None-exp_prop_exc0-reservation-error-None]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props0-None-exp_prop_exc0-paused-None]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props1-None-exp_prop_exc1-stopped-None]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props1-None-exp_prop_exc1-terminated-None]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props1-None-exp_prop_exc1-starting-exp_status_exc2]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props1-None-exp_prop_exc1-active-None]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props1-None-exp_prop_exc1-stopping-exp_status_exc4]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props1-None-exp_prop_exc1-degraded-None]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props1-None-exp_prop_exc1-reservation-error-None]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props1-None-exp_prop_exc1-paused-None]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props2-None-exp_prop_exc2-stopped-None]", 
"tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props2-None-exp_prop_exc2-terminated-None]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props2-None-exp_prop_exc2-starting-exp_status_exc2]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props2-None-exp_prop_exc2-active-None]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props2-None-exp_prop_exc2-stopping-exp_status_exc4]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props2-None-exp_prop_exc2-degraded-None]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props2-None-exp_prop_exc2-reservation-error-None]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props2-None-exp_prop_exc2-paused-None]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props3-exp_prop_names3-None-stopped-None]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props3-exp_prop_names3-None-terminated-None]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props3-exp_prop_names3-None-starting-exp_status_exc2]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props3-exp_prop_names3-None-active-None]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props3-exp_prop_names3-None-stopping-exp_status_exc4]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props3-exp_prop_names3-None-degraded-None]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props3-exp_prop_names3-None-reservation-error-None]", "tests/unit/test_hba.py::TestHba::test_hbamanager_create[input_props3-exp_prop_names3-None-paused-None]", "tests/unit/test_hba.py::TestHba::test_hba_repr", "tests/unit/test_hba.py::TestHba::test_hba_delete[stopped-None]", "tests/unit/test_hba.py::TestHba::test_hba_delete[terminated-None]", "tests/unit/test_hba.py::TestHba::test_hba_delete[starting-exp_exc2]", "tests/unit/test_hba.py::TestHba::test_hba_delete[active-None]", 
"tests/unit/test_hba.py::TestHba::test_hba_delete[stopping-exp_exc4]", "tests/unit/test_hba.py::TestHba::test_hba_delete[degraded-None]", "tests/unit/test_hba.py::TestHba::test_hba_delete[reservation-error-None]", "tests/unit/test_hba.py::TestHba::test_hba_delete[paused-None]", "tests/unit/test_hba.py::TestHba::test_hba_delete_create_same_name", "tests/unit/test_hba.py::TestHba::test_hba_update_properties[input_props0]", "tests/unit/test_hba.py::TestHba::test_hba_update_properties[input_props1]", "tests/unit/test_hba.py::TestHba::test_hba_update_properties[input_props2]", "tests/unit/test_hba.py::TestHba::test_hba_update_name", "tests/unit/test_hba.py::TestHba::test_hba_reassign_port[stopped-None]", "tests/unit/test_hba.py::TestHba::test_hba_reassign_port[terminated-None]", "tests/unit/test_hba.py::TestHba::test_hba_reassign_port[starting-exp_exc2]", "tests/unit/test_hba.py::TestHba::test_hba_reassign_port[active-None]", "tests/unit/test_hba.py::TestHba::test_hba_reassign_port[stopping-exp_exc4]", "tests/unit/test_hba.py::TestHba::test_hba_reassign_port[degraded-None]", "tests/unit/test_hba.py::TestHba::test_hba_reassign_port[reservation-error-None]", "tests/unit/test_hba.py::TestHba::test_hba_reassign_port[paused-None]", "tests/unit/test_logging.py::TestLoggingDecorator::test_1a_global_from_global", "tests/unit/test_logging.py::TestLoggingDecorator::test_1b_global_from_method", "tests/unit/test_logging.py::TestLoggingDecorator::test_2a_global_inner1_from_global", "tests/unit/test_logging.py::TestLoggingDecorator::test_2b_global_inner1_from_method", "tests/unit/test_logging.py::TestLoggingDecorator::test_3a_global_inner2_from_global", "tests/unit/test_logging.py::TestLoggingDecorator::test_3b_global_inner1_from_method", "tests/unit/test_logging.py::TestLoggingDecorator::test_4a_method_from_global", "tests/unit/test_logging.py::TestLoggingDecorator::test_4b_method_from_method", "tests/unit/test_logging.py::TestLoggingDecorator::test_5a_method_from_global", 
"tests/unit/test_logging.py::TestLoggingDecorator::test_5b_method_from_method", "tests/unit/test_logging.py::TestLoggingDecorator::test_decorated_class", "tests/unit/test_logging.py::TestLoggingDecorator::test_decorated_property", "tests/unit/test_logging.py::TestGetLogger::test_root_logger", "tests/unit/test_logging.py::TestGetLogger::test_foo_logger", "tests/unit/test_lpar.py::TestLpar::test_init", "tests/unit/test_lpar.py::TestLpar::test_list_short_ok", "tests/unit/test_lpar.py::TestLpar::test_list_full_ok", "tests/unit/test_lpar.py::TestLpar::test_activate", "tests/unit/test_lpar.py::TestLpar::test_deactivate", "tests/unit/test_lpar.py::TestLpar::test_load", "tests/unit/test_manager.py::TestManager1::test_repr", "tests/unit/test_manager.py::TestManager1::test_init_properties", "tests/unit/test_manager.py::TestManager1::test_invalidate_cache", "tests/unit/test_manager.py::TestManager1::test_flush", "tests/unit/test_manager.py::TestManager1::test_list_not_implemented", "tests/unit/test_manager.py::TestManager2::test_findall_name_none", "tests/unit/test_manager.py::TestManager2::test_findall_name_one", "tests/unit/test_manager.py::TestManager2::test_findall_str_none", "tests/unit/test_manager.py::TestManager2::test_findall_str_one", "tests/unit/test_manager.py::TestManager2::test_findall_str_one_and", "tests/unit/test_manager.py::TestManager2::test_findall_str_two", "tests/unit/test_manager.py::TestManager2::test_findall_str_two_or", "tests/unit/test_manager.py::TestManager2::test_findall_int_none", "tests/unit/test_manager.py::TestManager2::test_findall_int_one", "tests/unit/test_manager.py::TestManager2::test_findall_int_two", "tests/unit/test_manager.py::TestManager2::test_find_name_none", "tests/unit/test_manager.py::TestManager2::test_find_name_one", "tests/unit/test_manager.py::TestManager2::test_find_str_none", "tests/unit/test_manager.py::TestManager2::test_find_str_one", "tests/unit/test_manager.py::TestManager2::test_find_str_two", 
"tests/unit/test_manager.py::TestManager2::test_find_int_none", "tests/unit/test_manager.py::TestManager2::test_find_int_one", "tests/unit/test_manager.py::TestManager2::test_find_int_two", "tests/unit/test_manager.py::TestManager2::test_find_by_name_none", "tests/unit/test_manager.py::TestManager2::test_find_by_name_one", "tests/unit/test_manager.py::TestNameUriCache::test_initial", "tests/unit/test_manager.py::TestNameUriCache::test_get_no_invalidate", "tests/unit/test_manager.py::TestNameUriCache::test_get_non_existing", "tests/unit/test_manager.py::TestNameUriCache::test_get_auto_invalidate", "tests/unit/test_manager.py::TestNameUriCache::test_get_manual_invalidate", "tests/unit/test_manager.py::TestNameUriCache::test_refresh_empty", "tests/unit/test_manager.py::TestNameUriCache::test_refresh_populated", "tests/unit/test_manager.py::TestNameUriCache::test_delete_existing", "tests/unit/test_manager.py::TestNameUriCache::test_delete_non_existing", "tests/unit/test_manager.py::TestNameUriCache::test_delete_none", "tests/unit/test_manager.py::TestNameUriCache::test_update_from_empty", "tests/unit/test_manager.py::TestNameUriCache::test_update_from_populated_modify_name", "tests/unit/test_manager.py::TestNameUriCache::test_update_empty", "tests/unit/test_manager.py::TestNameUriCache::test_update_empty_empty", "tests/unit/test_manager.py::TestNameUriCache::test_update_empty_none", "tests/unit/test_manager.py::TestNameUriCache::test_update_populated_new", "tests/unit/test_manager.py::TestNameUriCache::test_update_populated_modify", "tests/unit/test_nic.py::TestNic::test_nicmanager_initial_attrs", "tests/unit/test_nic.py::TestNic::test_nicmanager_list_full_properties[full_properties_kwargs0-prop_names0]", "tests/unit/test_nic.py::TestNic::test_nicmanager_list_full_properties[full_properties_kwargs1-prop_names1]", "tests/unit/test_nic.py::TestNic::test_nicmanager_list_full_properties[full_properties_kwargs2-None]", 
"tests/unit/test_nic.py::TestNic::test_nicmanager_list_filter_args[filter_args0-exp_oids0]", "tests/unit/test_nic.py::TestNic::test_nicmanager_list_filter_args[filter_args1-exp_oids1]", "tests/unit/test_nic.py::TestNic::test_nicmanager_list_filter_args[filter_args2-exp_oids2]", "tests/unit/test_nic.py::TestNic::test_nicmanager_list_filter_args[filter_args3-exp_oids3]", "tests/unit/test_nic.py::TestNic::test_nicmanager_list_filter_args[filter_args4-exp_oids4]", "tests/unit/test_nic.py::TestNic::test_nicmanager_list_filter_args[filter_args5-exp_oids5]", "tests/unit/test_nic.py::TestNic::test_nicmanager_list_filter_args[filter_args6-exp_oids6]", "tests/unit/test_nic.py::TestNic::test_nicmanager_list_filter_args[filter_args7-exp_oids7]", "tests/unit/test_nic.py::TestNic::test_nicmanager_list_filter_args[filter_args8-exp_oids8]", "tests/unit/test_nic.py::TestNic::test_nicmanager_list_filter_args[filter_args9-exp_oids9]", "tests/unit/test_nic.py::TestNic::test_nicmanager_list_filter_args[filter_args10-exp_oids10]", "tests/unit/test_nic.py::TestNic::test_nicmanager_list_filter_args[filter_args11-exp_oids11]", "tests/unit/test_nic.py::TestNic::test_nicmanager_list_filter_args[filter_args12-exp_oids12]", "tests/unit/test_nic.py::TestNic::test_nicmanager_list_filter_args[filter_args13-exp_oids13]", "tests/unit/test_nic.py::TestNic::test_nicmanager_list_filter_args[filter_args14-exp_oids14]", "tests/unit/test_nic.py::TestNic::test_nicmanager_list_filter_args[filter_args15-exp_oids15]", "tests/unit/test_nic.py::TestNic::test_nicmanager_list_filter_args[filter_args16-exp_oids16]", "tests/unit/test_nic.py::TestNic::test_nicmanager_list_filter_args[filter_args17-exp_oids17]", "tests/unit/test_nic.py::TestNic::test_nicmanager_list_filter_args[filter_args18-exp_oids18]", "tests/unit/test_nic.py::TestNic::test_nicmanager_list_filter_args[filter_args19-exp_oids19]", "tests/unit/test_nic.py::TestNic::test_nicmanager_list_filter_args[filter_args20-exp_oids20]", 
"tests/unit/test_nic.py::TestNic::test_nicmanager_list_filter_args[filter_args21-exp_oids21]", "tests/unit/test_nic.py::TestNic::test_nicmanager_list_filter_args[filter_args22-exp_oids22]", "tests/unit/test_nic.py::TestNic::test_nicmanager_list_filter_args[filter_args23-exp_oids23]", "tests/unit/test_nic.py::TestNic::test_nicmanager_list_filter_args[filter_args24-exp_oids24]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props0-None-exp_prop_exc0-stopped-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props0-None-exp_prop_exc0-terminated-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props0-None-exp_prop_exc0-starting-exp_status_exc2]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props0-None-exp_prop_exc0-active-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props0-None-exp_prop_exc0-stopping-exp_status_exc4]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props0-None-exp_prop_exc0-degraded-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props0-None-exp_prop_exc0-reservation-error-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props0-None-exp_prop_exc0-paused-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props1-None-exp_prop_exc1-stopped-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props1-None-exp_prop_exc1-terminated-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props1-None-exp_prop_exc1-starting-exp_status_exc2]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props1-None-exp_prop_exc1-active-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props1-None-exp_prop_exc1-stopping-exp_status_exc4]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props1-None-exp_prop_exc1-degraded-None]", 
"tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props1-None-exp_prop_exc1-reservation-error-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props1-None-exp_prop_exc1-paused-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props2-None-exp_prop_exc2-stopped-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props2-None-exp_prop_exc2-terminated-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props2-None-exp_prop_exc2-starting-exp_status_exc2]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props2-None-exp_prop_exc2-active-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props2-None-exp_prop_exc2-stopping-exp_status_exc4]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props2-None-exp_prop_exc2-degraded-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props2-None-exp_prop_exc2-reservation-error-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props2-None-exp_prop_exc2-paused-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props3-None-exp_prop_exc3-stopped-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props3-None-exp_prop_exc3-terminated-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props3-None-exp_prop_exc3-starting-exp_status_exc2]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props3-None-exp_prop_exc3-active-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props3-None-exp_prop_exc3-stopping-exp_status_exc4]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props3-None-exp_prop_exc3-degraded-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props3-None-exp_prop_exc3-reservation-error-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props3-None-exp_prop_exc3-paused-None]", 
"tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props4-exp_prop_names4-None-stopped-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props4-exp_prop_names4-None-terminated-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props4-exp_prop_names4-None-starting-exp_status_exc2]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props4-exp_prop_names4-None-active-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props4-exp_prop_names4-None-stopping-exp_status_exc4]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props4-exp_prop_names4-None-degraded-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props4-exp_prop_names4-None-reservation-error-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props4-exp_prop_names4-None-paused-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props5-exp_prop_names5-None-stopped-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props5-exp_prop_names5-None-terminated-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props5-exp_prop_names5-None-starting-exp_status_exc2]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props5-exp_prop_names5-None-active-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props5-exp_prop_names5-None-stopping-exp_status_exc4]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props5-exp_prop_names5-None-degraded-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props5-exp_prop_names5-None-reservation-error-None]", "tests/unit/test_nic.py::TestNic::test_nicmanager_create[input_props5-exp_prop_names5-None-paused-None]", "tests/unit/test_nic.py::TestNic::test_nic_repr", "tests/unit/test_nic.py::TestNic::test_nic_delete[stopped-None]", "tests/unit/test_nic.py::TestNic::test_nic_delete[terminated-None]", 
"tests/unit/test_nic.py::TestNic::test_nic_delete[starting-exp_exc2]", "tests/unit/test_nic.py::TestNic::test_nic_delete[active-None]", "tests/unit/test_nic.py::TestNic::test_nic_delete[stopping-exp_exc4]", "tests/unit/test_nic.py::TestNic::test_nic_delete[degraded-None]", "tests/unit/test_nic.py::TestNic::test_nic_delete[reservation-error-None]", "tests/unit/test_nic.py::TestNic::test_nic_delete[paused-None]", "tests/unit/test_nic.py::TestNic::test_nic_delete_create_same_name", "tests/unit/test_nic.py::TestNic::test_nic_update_properties[input_props0]", "tests/unit/test_nic.py::TestNic::test_nic_update_properties[input_props1]", "tests/unit/test_nic.py::TestNic::test_nic_update_properties[input_props2]", "tests/unit/test_nic.py::TestNic::test_nic_update_name", "tests/unit/test_nic.py::TestNic::test_nic_object", "tests/unit/test_notification.py::TestNotification::test_no_messages", "tests/unit/test_notification.py::TestNotification::test_one_message", "tests/unit/test_port.py::TestPort::test_init", "tests/unit/test_port.py::TestPort::test_list_short_ok", "tests/unit/test_port.py::TestPort::test_list_full_ok", "tests/unit/test_port.py::TestPort::test_list_filter_name_ok", "tests/unit/test_port.py::TestPort::test_list_filter_elementid_ok", "tests/unit/test_port.py::TestPort::test_update_properties", "tests/unit/test_resource.py::TestInit::test_empty_name", "tests/unit/test_resource.py::TestInit::test_empty_no_name", "tests/unit/test_resource.py::TestInit::test_simple", "tests/unit/test_resource.py::TestInit::test_prop_case", "tests/unit/test_resource.py::TestInit::test_invalid_type", "tests/unit/test_resource.py::TestInit::test_str", "tests/unit/test_resource.py::TestInit::test_repr", "tests/unit/test_resource.py::TestPropertySet::test_add_to_empty", "tests/unit/test_resource.py::TestPropertySet::test_replace_one_add_one", "tests/unit/test_resource.py::TestPropertyDel::test_del_one", "tests/unit/test_resource.py::TestPropertyDel::test_del_all_input", 
"tests/unit/test_resource.py::TestPropertyDel::test_del_invalid", "tests/unit/test_resource.py::TestPropertyDel::test_clear", "tests/unit/test_resource.py::TestManagerDivideFilter::test_none", "tests/unit/test_resource.py::TestManagerDivideFilter::test_empty", "tests/unit/test_resource.py::TestManagerDivideFilter::test_one_string_qp", "tests/unit/test_resource.py::TestManagerDivideFilter::test_one_string_cf", "tests/unit/test_resource.py::TestManagerDivideFilter::test_one_integer_qp", "tests/unit/test_resource.py::TestManagerDivideFilter::test_one_integer_cf", "tests/unit/test_resource.py::TestManagerDivideFilter::test_one_str_reserved_val_qp", "tests/unit/test_resource.py::TestManagerDivideFilter::test_one_str_reserved_val_cf", "tests/unit/test_resource.py::TestManagerDivideFilter::test_one_str_dash_name_qp", "tests/unit/test_resource.py::TestManagerDivideFilter::test_one_str_reserved_name_qp", "tests/unit/test_resource.py::TestManagerDivideFilter::test_two_qp", "tests/unit/test_resource.py::TestManagerDivideFilter::test_two_qp_cf", "tests/unit/test_resource.py::TestManagerDivideFilter::test_two_cf_qp", "tests/unit/test_resource.py::TestManagerDivideFilter::test_two_two_qp", "tests/unit/test_resource.py::TestManagerDivideFilter::test_two_str_reserved_val_qp", "tests/unit/test_session.py::TestSession::test_init[fake-host-None-None-False-None]", "tests/unit/test_session.py::TestSession::test_init[fake-host-fake-userid-None-False-None]", "tests/unit/test_session.py::TestSession::test_init[fake-host-fake-userid-fake-pw-False-None]", "tests/unit/test_session.py::TestSession::test_init[fake-host-fake-userid-fake-pw-True-None]", "tests/unit/test_session.py::TestSession::test_repr", "tests/unit/test_session.py::TestSession::test_logon[fake-host-None-None-False-ClientAuthError]", "tests/unit/test_session.py::TestSession::test_logon[fake-host-fake-userid-None-False-ClientAuthError]", 
"tests/unit/test_session.py::TestSession::test_logon[fake-host-fake-userid-fake-pw-False-None]", "tests/unit/test_session.py::TestSession::test_logoff", "tests/unit/test_session.py::TestSession::test_logon_error_invalid_delim", "tests/unit/test_session.py::TestSession::test_logon_error_invalid_quotes", "tests/unit/test_session.py::TestSession::test_logon_error_extra_closing", "tests/unit/test_session.py::TestSession::test_get_notification_topics", "tests/unit/test_session.py::TestJob::test_init", "tests/unit/test_session.py::TestJob::test_check_incomplete", "tests/unit/test_session.py::TestJob::test_check_complete_success_noresult", "tests/unit/test_session.py::TestJob::test_check_complete_success_result", "tests/unit/test_session.py::TestJob::test_check_complete_error1", "tests/unit/test_session.py::TestJob::test_check_complete_error2", "tests/unit/test_session.py::TestJob::test_check_complete_error3", "tests/unit/test_session.py::TestJob::test_check_complete_error4", "tests/unit/test_session.py::TestJob::test_wait_complete1_success_result", "tests/unit/test_session.py::TestJob::test_wait_complete3_success_result", "tests/unit/test_session.py::TestJob::test_wait_complete3_timeout", "tests/unit/test_timestats.py::TestTimeStats::test_enabling", "tests/unit/test_timestats.py::TestTimeStats::test_get", "tests/unit/test_timestats.py::TestTimeStats::test_measure_enabled", "tests/unit/test_timestats.py::TestTimeStats::test_measure_disabled", "tests/unit/test_timestats.py::TestTimeStats::test_snapshot", "tests/unit/test_timestats.py::TestTimeStats::test_measure_avg_min_max", "tests/unit/test_timestats.py::TestTimeStats::test_only_end", "tests/unit/test_timestats.py::TestTimeStats::test_end_after_end", "tests/unit/test_timestats.py::TestTimeStats::test_str_empty", "tests/unit/test_timestats.py::TestTimeStats::test_str_disabled", "tests/unit/test_timestats.py::TestTimeStats::test_str_one", "tests/unit/test_timestats.py::TestTimeStats::test_ts_str", 
"tests/unit/test_utils.py::TestPythonDatetime::test_gmtime_epoch", "tests/unit/test_utils.py::TestDatetimeFromTimestamp::test_success_datetime_from_timestamp[datetime_tuple0-0]", "tests/unit/test_utils.py::TestDatetimeFromTimestamp::test_success_datetime_from_timestamp[datetime_tuple1-123]", "tests/unit/test_utils.py::TestDatetimeFromTimestamp::test_success_datetime_from_timestamp[datetime_tuple2-123456]", "tests/unit/test_utils.py::TestDatetimeFromTimestamp::test_success_datetime_from_timestamp[datetime_tuple3-123456789]", "tests/unit/test_utils.py::TestDatetimeFromTimestamp::test_success_datetime_from_timestamp[datetime_tuple4-123456789123]", "tests/unit/test_utils.py::TestDatetimeFromTimestamp::test_success_datetime_from_timestamp[datetime_tuple5-951782400000]", "tests/unit/test_utils.py::TestDatetimeFromTimestamp::test_success_datetime_from_timestamp[datetime_tuple6-951868800000]", "tests/unit/test_utils.py::TestDatetimeFromTimestamp::test_success_datetime_from_timestamp[datetime_tuple7-983318400000]", "tests/unit/test_utils.py::TestDatetimeFromTimestamp::test_success_datetime_from_timestamp[datetime_tuple8-983404800000]", "tests/unit/test_utils.py::TestDatetimeFromTimestamp::test_success_datetime_from_timestamp[datetime_tuple9-1502755200000]", "tests/unit/test_utils.py::TestDatetimeFromTimestamp::test_success_datetime_from_timestamp[datetime_tuple10-2147483647000]", "tests/unit/test_utils.py::TestDatetimeFromTimestamp::test_success_datetime_from_timestamp[datetime_tuple11-2147483647001]", "tests/unit/test_utils.py::TestDatetimeFromTimestamp::test_success_datetime_from_timestamp[datetime_tuple12-32535215999999]", "tests/unit/test_utils.py::TestDatetimeFromTimestamp::test_success_datetime_from_timestamp[datetime_tuple13-32535244799000]", "tests/unit/test_utils.py::TestDatetimeFromTimestamp::test_success_datetime_from_timestamp[datetime_tuple14-32535244800000]", 
"tests/unit/test_utils.py::TestDatetimeFromTimestamp::test_success_datetime_from_timestamp[datetime_tuple15-253370678400000]", "tests/unit/test_utils.py::TestDatetimeFromTimestamp::test_success_datetime_from_timestamp[datetime_tuple16-253402128000000]", "tests/unit/test_utils.py::TestDatetimeFromTimestamp::test_success_datetime_from_timestamp[datetime_tuple17-253402214399000]", "tests/unit/test_utils.py::TestDatetimeFromTimestamp::test_error_datetime_from_timestamp[None-ValueError]", "tests/unit/test_utils.py::TestDatetimeFromTimestamp::test_error_datetime_from_timestamp[-1-ValueError]", "tests/unit/test_utils.py::TestDatetimeFromTimestamp::test_error_datetime_from_timestamp[253402300800000-ValueError]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple0-0-None]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple0-0-UTC]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple0-0-US/Eastern]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple0-0-Europe/Berlin]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple1-123-None]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple1-123-UTC]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple1-123-US/Eastern]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple1-123-Europe/Berlin]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple2-123456-None]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple2-123456-UTC]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple2-123456-US/Eastern]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple2-123456-Europe/Berlin]", 
"tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple3-123456789-None]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple3-123456789-UTC]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple3-123456789-US/Eastern]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple3-123456789-Europe/Berlin]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple4-123456789123-None]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple4-123456789123-UTC]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple4-123456789123-US/Eastern]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple4-123456789123-Europe/Berlin]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple5-951782400000-None]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple5-951782400000-UTC]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple5-951782400000-US/Eastern]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple5-951782400000-Europe/Berlin]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple6-951868800000-None]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple6-951868800000-UTC]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple6-951868800000-US/Eastern]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple6-951868800000-Europe/Berlin]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple7-983318400000-None]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple7-983318400000-UTC]", 
"tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple7-983318400000-US/Eastern]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple7-983318400000-Europe/Berlin]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple8-983404800000-None]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple8-983404800000-UTC]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple8-983404800000-US/Eastern]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple8-983404800000-Europe/Berlin]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple9-1502755200000-None]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple9-1502755200000-UTC]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple9-1502755200000-US/Eastern]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple9-1502755200000-Europe/Berlin]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple10-2147483647000-None]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple10-2147483647000-UTC]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple10-2147483647000-US/Eastern]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple10-2147483647000-Europe/Berlin]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple11-2147483647001-None]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple11-2147483647001-UTC]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple11-2147483647001-US/Eastern]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple11-2147483647001-Europe/Berlin]", 
"tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple12-32535215999999-None]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple12-32535215999999-UTC]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple12-32535215999999-US/Eastern]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple12-32535215999999-Europe/Berlin]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple13-32535244799000-None]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple13-32535244799000-UTC]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple13-32535244799000-US/Eastern]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple13-32535244799000-Europe/Berlin]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple14-32535244800000-None]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple14-32535244800000-UTC]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple14-32535244800000-US/Eastern]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple14-32535244800000-Europe/Berlin]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple15-253370678400000-None]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple15-253370678400000-UTC]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple15-253370678400000-US/Eastern]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple15-253370678400000-Europe/Berlin]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple16-253402128000000-None]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple16-253402128000000-UTC]", 
"tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple16-253402128000000-US/Eastern]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple16-253402128000000-Europe/Berlin]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple17-253402214399000-None]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple17-253402214399000-UTC]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple17-253402214399000-US/Eastern]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_success[datetime_tuple17-253402214399000-Europe/Berlin]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_error[None-ValueError]", "tests/unit/test_utils.py::TestTimestampFromDatetime::test_datetime_max", "tests/unit/test_virtual_function.py::TestVirtualFunction::test_init", "tests/unit/test_virtual_function.py::TestVirtualFunction::test_list_short_ok", "tests/unit/test_virtual_function.py::TestVirtualFunction::test_list_full_ok", "tests/unit/test_virtual_function.py::TestVirtualFunction::test_list_filter_name_ok", "tests/unit/test_virtual_function.py::TestVirtualFunction::test_list_filter_elementid_ok", "tests/unit/test_virtual_function.py::TestVirtualFunction::test_create", "tests/unit/test_virtual_function.py::TestVirtualFunction::test_delete", "tests/unit/test_virtual_function.py::TestVirtualFunction::test_update_properties", "tests/unit/test_virtual_switch.py::TestVirtualSwitch::test_init", "tests/unit/test_virtual_switch.py::TestVirtualSwitch::test_list_short_ok", "tests/unit/test_virtual_switch.py::TestVirtualSwitch::test_list_full_ok", "tests/unit/test_virtual_switch.py::TestVirtualSwitch::test_update_properties", "tests/unit/test_virtual_switch.py::TestVirtualSwitch::test_get_connected_nics", "tests/unit/zhmcclient_mock/test_example.py::TestMy::test_session_1", "tests/unit/zhmcclient_mock/test_example.py::TestMy::test_session_2", 
"tests/unit/zhmcclient_mock/test_hmc.py::TestFakedHmc::test_repr", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedHmc::test_hmc", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedHmc::test_hmc_1_cpc", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedHmc::test_hmc_2_cpcs", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedHmc::test_res_dict", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedBase::test_resource_repr", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedBase::test_manager_repr", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedBase::test_manager_attr", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedBase::test_resource_attr", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedActivationProfile::test_profiles_attr", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedActivationProfile::test_profiles_list", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedActivationProfile::test_profiles_add", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedActivationProfile::test_profiles_remove", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedAdapter::test_adapter_repr", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedAdapter::test_adapters_attr", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedAdapter::test_adapters_list", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedAdapter::test_adapters_add", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedAdapter::test_adapters_remove", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedCpc::test_cpc_repr", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedCpc::test_cpcs_attr", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedCpc::test_cpcs_list", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedCpc::test_cpcs_add", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedCpc::test_cpcs_remove", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedHba::test_hbas_attr", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedHba::test_hbas_list", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedHba::test_hbas_add", 
"tests/unit/zhmcclient_mock/test_hmc.py::TestFakedHba::test_hbas_remove", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedLpar::test_lpars_attr", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedLpar::test_lpars_list", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedLpar::test_lpars_add", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedLpar::test_lpars_remove", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedNic::test_nics_attr", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedNic::test_nics_list", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedNic::test_nics_add", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedNic::test_nics_remove", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedPartition::test_partition_repr", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedPartition::test_partitions_attr", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedPartition::test_partitions_list", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedPartition::test_partitions_add", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedPartition::test_partitions_remove", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedPort::test_ports_attr", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedPort::test_ports_list", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedPort::test_ports_add", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedPort::test_ports_remove", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedVirtualFunction::test_virtual_functions_attr", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedVirtualFunction::test_virtual_functions_list", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedVirtualFunction::test_virtual_functions_add", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedVirtualFunction::test_virtual_functions_remove", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedVirtualSwitch::test_virtual_switches_attr", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedVirtualSwitch::test_virtual_switches_list", 
"tests/unit/zhmcclient_mock/test_hmc.py::TestFakedVirtualSwitch::test_virtual_switches_add", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedVirtualSwitch::test_virtual_switches_remove", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedMetricsContext::test_metrics_contexts_attr", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedMetricsContext::test_metrics_contexts_add", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedMetricsContext::test_metrics_contexts_add_get_mg_def", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedMetricsContext::test_metrics_contexts_add_get_metric_values", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedMetricsContext::test_metrics_context_get_mg_defs", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedMetricsContext::test_metrics_context_get_mg_infos", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedMetricsContext::test_metrics_context_get_m_values", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedMetricsContext::test_metrics_context_get_m_values_response", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedMetricGroupDefinition::test_metric_group_definition_attr", "tests/unit/zhmcclient_mock/test_hmc.py::TestFakedMetricObjectValues::test_metric_object_values_attr", "tests/unit/zhmcclient_mock/test_idpool.py::TestIdPool::test_init_error_1", "tests/unit/zhmcclient_mock/test_idpool.py::TestIdPool::test_invalid_free_error_1", "tests/unit/zhmcclient_mock/test_idpool.py::TestIdPool::test_invalid_free_error_2", "tests/unit/zhmcclient_mock/test_idpool.py::TestIdPool::test_all", "tests/unit/zhmcclient_mock/test_urihandler.py::TestHTTPError::test_attributes", "tests/unit/zhmcclient_mock/test_urihandler.py::TestHTTPError::test_response", "tests/unit/zhmcclient_mock/test_urihandler.py::TestConnectionError::test_attributes", "tests/unit/zhmcclient_mock/test_urihandler.py::TestInvalidResourceError::test_attributes_with_handler", "tests/unit/zhmcclient_mock/test_urihandler.py::TestInvalidResourceError::test_attributes_no_handler", 
"tests/unit/zhmcclient_mock/test_urihandler.py::TestInvalidMethodError::test_attributes_with_handler", "tests/unit/zhmcclient_mock/test_urihandler.py::TestInvalidMethodError::test_attributes_no_handler", "tests/unit/zhmcclient_mock/test_urihandler.py::TestCpcNotInDpmError::test_attributes", "tests/unit/zhmcclient_mock/test_urihandler.py::TestCpcInDpmError::test_attributes", "tests/unit/zhmcclient_mock/test_urihandler.py::TestParseQueryParms::test_none", "tests/unit/zhmcclient_mock/test_urihandler.py::TestParseQueryParms::test_empty", "tests/unit/zhmcclient_mock/test_urihandler.py::TestParseQueryParms::test_one_normal", "tests/unit/zhmcclient_mock/test_urihandler.py::TestParseQueryParms::test_two_normal", "tests/unit/zhmcclient_mock/test_urihandler.py::TestParseQueryParms::test_one_trailing_amp", "tests/unit/zhmcclient_mock/test_urihandler.py::TestParseQueryParms::test_one_leading_amp", "tests/unit/zhmcclient_mock/test_urihandler.py::TestParseQueryParms::test_one_missing_value", "tests/unit/zhmcclient_mock/test_urihandler.py::TestParseQueryParms::test_one_missing_name", "tests/unit/zhmcclient_mock/test_urihandler.py::TestParseQueryParms::test_two_same_normal", "tests/unit/zhmcclient_mock/test_urihandler.py::TestParseQueryParms::test_two_same_one_normal", "tests/unit/zhmcclient_mock/test_urihandler.py::TestParseQueryParms::test_space_value_1", "tests/unit/zhmcclient_mock/test_urihandler.py::TestParseQueryParms::test_space_value_2", "tests/unit/zhmcclient_mock/test_urihandler.py::TestParseQueryParms::test_space_value_3", "tests/unit/zhmcclient_mock/test_urihandler.py::TestParseQueryParms::test_space_value_4", "tests/unit/zhmcclient_mock/test_urihandler.py::TestParseQueryParms::test_space_name_1", "tests/unit/zhmcclient_mock/test_urihandler.py::TestParseQueryParms::test_space_name_2", "tests/unit/zhmcclient_mock/test_urihandler.py::TestParseQueryParms::test_space_name_3", "tests/unit/zhmcclient_mock/test_urihandler.py::TestParseQueryParms::test_space_name_4", 
"tests/unit/zhmcclient_mock/test_urihandler.py::TestParseQueryParms::test_invalid_format_1", "tests/unit/zhmcclient_mock/test_urihandler.py::TestParseQueryParms::test_invalid_format_2", "tests/unit/zhmcclient_mock/test_urihandler.py::TestParseQueryParms::test_invalid_format_3", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUriHandlerHandlerEmpty::test_uris_empty_1", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUriHandlerHandlerEmpty::test_uris_empty_2", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUriHandlerHandlerSimple::test_ok1", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUriHandlerHandlerSimple::test_ok2", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUriHandlerHandlerSimple::test_ok3", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUriHandlerHandlerSimple::test_err_begin_missing", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUriHandlerHandlerSimple::test_err_begin_extra", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUriHandlerHandlerSimple::test_err_end_missing", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUriHandlerHandlerSimple::test_err_end_extra", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUriHandlerHandlerSimple::test_err_end_slash", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUriHandlerHandlerSimple::test_err_end2_slash", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUriHandlerHandlerSimple::test_err_end2_missing", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUriHandlerHandlerSimple::test_err_end2_extra", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUriHandlerMethod::test_get_cpcs", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUriHandlerMethod::test_get_cpc1", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUriHandlerMethod::test_post_cpcs", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUriHandlerMethod::test_delete_cpc2", "tests/unit/zhmcclient_mock/test_urihandler.py::TestGenericGetPropertiesHandler::test_get", 
"tests/unit/zhmcclient_mock/test_urihandler.py::TestGenericGetPropertiesHandler::test_get_error_offline", "tests/unit/zhmcclient_mock/test_urihandler.py::TestGenericUpdatePropertiesHandler::test_update_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestGenericUpdatePropertiesHandler::test_post_error_offline", "tests/unit/zhmcclient_mock/test_urihandler.py::TestGenericDeleteHandler::test_delete", "tests/unit/zhmcclient_mock/test_urihandler.py::TestGenericDeleteHandler::test_delete_error_offline", "tests/unit/zhmcclient_mock/test_urihandler.py::TestVersionHandler::test_get_version", "tests/unit/zhmcclient_mock/test_urihandler.py::TestConsoleHandler::test_get", "tests/unit/zhmcclient_mock/test_urihandler.py::TestConsoleRestartHandler::test_restart_success", "tests/unit/zhmcclient_mock/test_urihandler.py::TestConsoleRestartHandler::test_restart_error_not_found", "tests/unit/zhmcclient_mock/test_urihandler.py::TestConsoleShutdownHandler::test_shutdown_success", "tests/unit/zhmcclient_mock/test_urihandler.py::TestConsoleShutdownHandler::test_shutdown_error_not_found", "tests/unit/zhmcclient_mock/test_urihandler.py::TestConsoleMakePrimaryHandler::test_make_primary_success", "tests/unit/zhmcclient_mock/test_urihandler.py::TestConsoleMakePrimaryHandler::test_make_primary_error_not_found", "tests/unit/zhmcclient_mock/test_urihandler.py::TestConsoleReorderUserPatternsHandler::test_reorder_all", "tests/unit/zhmcclient_mock/test_urihandler.py::TestConsoleReorderUserPatternsHandler::test_reorder_error_not_found", "tests/unit/zhmcclient_mock/test_urihandler.py::TestConsoleGetAuditLogHandler::test_get_audit_log_success", "tests/unit/zhmcclient_mock/test_urihandler.py::TestConsoleGetAuditLogHandler::test_get_audit_log_error_not_found", "tests/unit/zhmcclient_mock/test_urihandler.py::TestConsoleGetSecurityLogHandler::test_get_security_log_success_empty", 
"tests/unit/zhmcclient_mock/test_urihandler.py::TestConsoleGetSecurityLogHandler::test_get_security_log_error_not_found", "tests/unit/zhmcclient_mock/test_urihandler.py::TestConsoleListUnmanagedCpcsHandler::test_list_success_empty", "tests/unit/zhmcclient_mock/test_urihandler.py::TestConsoleListUnmanagedCpcsHandler::test_list_error_not_found", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserHandlers::test_list", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserHandlers::test_list_error_console_not_found", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserHandlers::test_get", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserHandlers::test_create_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserHandlers::test_create_error_console_not_found", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserHandlers::test_update_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserHandlers::test_delete_verify_all", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserAddUserRoleHandler::test_add_success", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserAddUserRoleHandler::test_add_error_bad_user", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserAddUserRoleHandler::test_add_error_bad_user_role", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserRemoveUserRoleHandler::test_remove_success", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserRemoveUserRoleHandler::test_remove_error_bad_user", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserRemoveUserRoleHandler::test_remove_error_bad_user_role", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserRemoveUserRoleHandler::test_remove_error_no_user_role", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserRoleHandlers::test_list", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserRoleHandlers::test_list_error_console_not_found", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserRoleHandlers::test_get", 
"tests/unit/zhmcclient_mock/test_urihandler.py::TestUserRoleHandlers::test_create_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserRoleHandlers::test_create_error_console_not_found", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserRoleHandlers::test_create_error_type", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserRoleHandlers::test_update_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserRoleHandlers::test_delete_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserRoleAddPermissionHandler::test_add_all", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserRoleAddPermissionHandler::test_add_error_bad_user_role", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserRoleAddPermissionHandler::test_add_error_system_user_role", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserRoleRemovePermissionHandler::test_remove_all", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserRoleRemovePermissionHandler::test_remove_error_bad_user_role", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserRoleRemovePermissionHandler::test_remove_error_system_user_role", "tests/unit/zhmcclient_mock/test_urihandler.py::TestTaskHandlers::test_list", "tests/unit/zhmcclient_mock/test_urihandler.py::TestTaskHandlers::test_list_error_console_not_found", "tests/unit/zhmcclient_mock/test_urihandler.py::TestTaskHandlers::test_get", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserPatternHandlers::test_list", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserPatternHandlers::test_list_error_console_not_found", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserPatternHandlers::test_get", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserPatternHandlers::test_create_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserPatternHandlers::test_create_error_console_not_found", "tests/unit/zhmcclient_mock/test_urihandler.py::TestUserPatternHandlers::test_update_verify", 
"tests/unit/zhmcclient_mock/test_urihandler.py::TestUserPatternHandlers::test_delete_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPasswordRuleHandlers::test_list", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPasswordRuleHandlers::test_list_error_console_not_found", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPasswordRuleHandlers::test_get", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPasswordRuleHandlers::test_create_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPasswordRuleHandlers::test_create_error_console_not_found", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPasswordRuleHandlers::test_update_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPasswordRuleHandlers::test_delete_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestLdapServerDefinitionHandlers::test_list", "tests/unit/zhmcclient_mock/test_urihandler.py::TestLdapServerDefinitionHandlers::test_list_error_console_not_found", "tests/unit/zhmcclient_mock/test_urihandler.py::TestLdapServerDefinitionHandlers::test_get", "tests/unit/zhmcclient_mock/test_urihandler.py::TestLdapServerDefinitionHandlers::test_create_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestLdapServerDefinitionHandlers::test_create_error_console_not_found", "tests/unit/zhmcclient_mock/test_urihandler.py::TestLdapServerDefinitionHandlers::test_update_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestLdapServerDefinitionHandlers::test_delete_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestCpcHandlers::test_list", "tests/unit/zhmcclient_mock/test_urihandler.py::TestCpcHandlers::test_get", "tests/unit/zhmcclient_mock/test_urihandler.py::TestCpcHandlers::test_update_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestCpcStartStopHandler::test_stop_classic", "tests/unit/zhmcclient_mock/test_urihandler.py::TestCpcStartStopHandler::test_start_classic", 
"tests/unit/zhmcclient_mock/test_urihandler.py::TestCpcStartStopHandler::test_stop_start_dpm", "tests/unit/zhmcclient_mock/test_urihandler.py::TestCpcExportPortNamesListHandler::test_invoke_err_no_input", "tests/unit/zhmcclient_mock/test_urihandler.py::TestCpcExportPortNamesListHandler::test_invoke_ok", "tests/unit/zhmcclient_mock/test_urihandler.py::TestCpcImportProfilesHandler::test_invoke_err_no_input", "tests/unit/zhmcclient_mock/test_urihandler.py::TestCpcImportProfilesHandler::test_invoke_ok", "tests/unit/zhmcclient_mock/test_urihandler.py::TestCpcExportProfilesHandler::test_invoke_err_no_input", "tests/unit/zhmcclient_mock/test_urihandler.py::TestCpcExportProfilesHandler::test_invoke_ok", "tests/unit/zhmcclient_mock/test_urihandler.py::TestMetricsContextHandlers::test_create_get_delete_context", "tests/unit/zhmcclient_mock/test_urihandler.py::TestAdapterHandlers::test_list", "tests/unit/zhmcclient_mock/test_urihandler.py::TestAdapterHandlers::test_get", "tests/unit/zhmcclient_mock/test_urihandler.py::TestAdapterHandlers::test_update_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestAdapterChangeCryptoTypeHandler::test_invoke_err_no_body", "tests/unit/zhmcclient_mock/test_urihandler.py::TestAdapterChangeCryptoTypeHandler::test_invoke_err_no_crypto_type_field", "tests/unit/zhmcclient_mock/test_urihandler.py::TestAdapterChangeCryptoTypeHandler::test_invoke_ok", "tests/unit/zhmcclient_mock/test_urihandler.py::TestNetworkPortHandlers::test_get", "tests/unit/zhmcclient_mock/test_urihandler.py::TestNetworkPortHandlers::test_update_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestStoragePortHandlers::test_get", "tests/unit/zhmcclient_mock/test_urihandler.py::TestStoragePortHandlers::test_update_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionHandlers::test_list", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionHandlers::test_get", 
"tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionHandlers::test_create_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionHandlers::test_update_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionHandlers::test_delete_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionStartStopHandler::test_start_stop", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionScsiDumpHandler::test_invoke_err_no_body", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionScsiDumpHandler::test_invoke_err_missing_fields_1", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionScsiDumpHandler::test_invoke_err_missing_fields_2", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionScsiDumpHandler::test_invoke_err_missing_fields_3", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionScsiDumpHandler::test_invoke_err_status_1", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionScsiDumpHandler::test_invoke_ok", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionPswRestartHandler::test_invoke_err_status_1", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionPswRestartHandler::test_invoke_ok", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionMountIsoImageHandler::test_invoke_err_queryparm_1", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionMountIsoImageHandler::test_invoke_err_queryparm_2", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionMountIsoImageHandler::test_invoke_err_status_1", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionMountIsoImageHandler::test_invoke_ok", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionUnmountIsoImageHandler::test_invoke_err_status_1", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionUnmountIsoImageHandler::test_invoke_ok", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionIncreaseCryptoConfigHandler::test_invoke_err_missing_body", 
"tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionIncreaseCryptoConfigHandler::test_invoke_err_status_1", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionIncreaseCryptoConfigHandler::test_invoke_ok", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionDecreaseCryptoConfigHandler::test_invoke_err_missing_body", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionDecreaseCryptoConfigHandler::test_invoke_err_status_1", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionDecreaseCryptoConfigHandler::test_invoke_ok", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionChangeCryptoConfigHandler::test_invoke_err_missing_body", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionChangeCryptoConfigHandler::test_invoke_err_missing_field_1", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionChangeCryptoConfigHandler::test_invoke_err_missing_field_2", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionChangeCryptoConfigHandler::test_invoke_err_status_1", "tests/unit/zhmcclient_mock/test_urihandler.py::TestPartitionChangeCryptoConfigHandler::test_invoke_ok", "tests/unit/zhmcclient_mock/test_urihandler.py::TestHbaHandler::test_list", "tests/unit/zhmcclient_mock/test_urihandler.py::TestHbaHandler::test_get", "tests/unit/zhmcclient_mock/test_urihandler.py::TestHbaHandler::test_create_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestHbaHandler::test_update_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestHbaHandler::test_delete_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestHbaReassignPortHandler::test_invoke_err_missing_body", "tests/unit/zhmcclient_mock/test_urihandler.py::TestHbaReassignPortHandler::test_invoke_err_missing_field_1", "tests/unit/zhmcclient_mock/test_urihandler.py::TestHbaReassignPortHandler::test_invoke_ok", "tests/unit/zhmcclient_mock/test_urihandler.py::TestNicHandler::test_list", 
"tests/unit/zhmcclient_mock/test_urihandler.py::TestNicHandler::test_get", "tests/unit/zhmcclient_mock/test_urihandler.py::TestNicHandler::test_create_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestNicHandler::test_update_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestNicHandler::test_delete_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestVirtualFunctionHandler::test_list", "tests/unit/zhmcclient_mock/test_urihandler.py::TestVirtualFunctionHandler::test_get", "tests/unit/zhmcclient_mock/test_urihandler.py::TestVirtualFunctionHandler::test_create_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestVirtualFunctionHandler::test_update_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestVirtualFunctionHandler::test_delete_verify", "tests/unit/zhmcclient_mock/test_urihandler.py::TestVirtualSwitchHandlers::test_list", "tests/unit/zhmcclient_mock/test_urihandler.py::TestVirtualSwitchHandlers::test_get", "tests/unit/zhmcclient_mock/test_urihandler.py::TestVirtualSwitchGetVnicsHandler::test_invoke_ok", "tests/unit/zhmcclient_mock/test_urihandler.py::TestLparHandlers::test_list", "tests/unit/zhmcclient_mock/test_urihandler.py::TestLparHandlers::test_get", "tests/unit/zhmcclient_mock/test_urihandler.py::TestLparActLoadDeactHandler::test_start_stop", "tests/unit/zhmcclient_mock/test_urihandler.py::TestResetActProfileHandlers::test_list", "tests/unit/zhmcclient_mock/test_urihandler.py::TestResetActProfileHandlers::test_get", "tests/unit/zhmcclient_mock/test_urihandler.py::TestImageActProfileHandlers::test_list", "tests/unit/zhmcclient_mock/test_urihandler.py::TestImageActProfileHandlers::test_get", "tests/unit/zhmcclient_mock/test_urihandler.py::TestLoadActProfileHandlers::test_list", "tests/unit/zhmcclient_mock/test_urihandler.py::TestLoadActProfileHandlers::test_get" ]
[]
Apache License 2.0
1,748
[ "Makefile", "zhmcclient/_session.py", "docs/changes.rst", ".travis.yml", "appveyor.yml" ]
[ "Makefile", "zhmcclient/_session.py", "docs/changes.rst", ".travis.yml", "appveyor.yml" ]
CartoDB__cartoframes-241
a694cfa6e9f7ff39954ef5045649eb2518632338
2017-10-10 19:24:12
39c14bf3ca697c536823d53d6179fb2ce3bae4b9
diff --git a/cartoframes/context.py b/cartoframes/context.py index e5885a97..c88bee6f 100644 --- a/cartoframes/context.py +++ b/cartoframes/context.py @@ -18,7 +18,7 @@ from carto.sql import SQLClient, BatchSQLClient from carto.exceptions import CartoException from .credentials import Credentials -from .utils import dict_items, normalize_colnames, norm_colname +from .utils import dict_items, normalize_colnames, norm_colname, join_url from .layer import BaseMap from .maps import non_basemap_layers, get_map_name, get_map_template @@ -217,7 +217,7 @@ class CartoContext(object): 'minutes.\n' '\033[1mNote:\033[0m `CartoContext.map` will not work on ' 'this table until its geometries are created.'.format( - table_url='/'.join((self.creds.base_url(), + table_url=join_url((self.creds.base_url(), 'dataset', final_table_name, )), job_id=status.get('job_id'), @@ -227,7 +227,7 @@ class CartoContext(object): self.sql_client.send(query) tqdm.write('Table successfully written to CARTO: {table_url}'.format( - table_url='/'.join((self.creds.base_url(), + table_url=join_url((self.creds.base_url(), 'dataset', final_table_name, )))) @@ -679,7 +679,7 @@ class CartoContext(object): elif not base_layers: # default basemap is dark with labels in back # labels will be changed if all geoms are non-point - layers.insert(0, BaseMap(source='dark', labels='back')) + layers.insert(0, BaseMap()) geoms = set() # Setup layers @@ -734,7 +734,7 @@ class CartoContext(object): options.update(self._get_bounds(nb_layers)) map_name = self._send_map_template(layers, has_zoom=has_zoom) - api_url = '/'.join((self.creds.base_url(), 'api/v1/map', )) + api_url = join_url((self.creds.base_url(), 'api/v1/map', )) static_url = ('{api_url}/static/named/{map_name}' '/{width}/{height}.png?{params}').format( diff --git a/cartoframes/layer.py b/cartoframes/layer.py index 821390a1..6ae19117 100644 --- a/cartoframes/layer.py +++ b/cartoframes/layer.py @@ -6,7 +6,7 @@ for example usage. 
import pandas as pd import webcolors -from cartoframes.utils import cssify +from cartoframes.utils import cssify, join_url from cartoframes.styling import BinMethod, mint, antique, get_scheme_cartocss # colors map data layers without color specified @@ -53,21 +53,30 @@ class BaseMap(AbstractLayer): """ is_basemap = True - def __init__(self, source='dark', labels='back', only_labels=False): + def __init__(self, source='voyager', labels='back', only_labels=False): if labels not in ('front', 'back', None): raise ValueError("labels must be None, 'front', or 'back'") self.source = source self.labels = labels + stem = 'https://cartodb-basemaps-{s}.global.ssl.fastly.net/' + if source == 'voyager': + stem += 'rastertiles' if self.is_basic(): if only_labels: style = source + '_only_labels' else: - style = source + ('_all' if labels == 'back' else '_nolabels') - - self.url = ('https://cartodb-basemaps-{{s}}.global.ssl.fastly.net/' - '{style}/{{z}}/{{x}}/{{y}}.png').format(style=style) + if source in ('dark', 'light', ): + label_type = '_all' + else: + label_type = '_labels_under' + style = source + (label_type if labels == 'back' + else '_nolabels') + self.url = join_url((stem, + '{style}/{{z}}/{{x}}/{{y}}.png'.format( + style=style) + )) elif self.source.startswith('http'): # TODO: Remove this once baselayer urls can be passed in named # map config @@ -75,16 +84,17 @@ class BaseMap(AbstractLayer): 'moment') # self.url = source else: - raise ValueError("`source` must be one of 'dark' or 'light'") + raise ValueError("`source` must be one of 'dark', 'light', or " + "'voyager'") def is_basic(self): """Does BaseMap pull from CARTO default basemaps? Returns: - bool: `True` if using a CARTO basemap (Dark Matter or Positron), - `False` otherwise. + bool: `True` if using a CARTO basemap (Dark Matter, Positron or + Voyager), `False` otherwise. 
""" - return self.source in ('dark', 'light') + return self.source in ('dark', 'light', 'voyager', ) class QueryLayer(AbstractLayer): diff --git a/cartoframes/maps.py b/cartoframes/maps.py index b5d33ee0..701cdc1f 100644 --- a/cartoframes/maps.py +++ b/cartoframes/maps.py @@ -19,6 +19,7 @@ def get_map_name(layers, has_zoom): num_layers = len(non_basemap_layers(layers)) has_labels = len(layers) > 1 and layers[-1].is_basemap has_time = has_time_layer(layers) + basemap_id = dict(light=0, dark=1, voyager=2)[layers[0].source] return ('cartoframes_ver{version}' '_layers{layers}' @@ -31,7 +32,7 @@ def get_map_name(layers, has_zoom): has_time=('1' if has_time else '0'), # TODO: Remove this once baselayer urls can be passed in named # map config - baseid=('1' if layers[0].source == 'dark' else '0'), + baseid=basemap_id, has_labels=('1' if has_labels else '0'), has_zoom=('1' if has_zoom else '0') ) diff --git a/cartoframes/utils.py b/cartoframes/utils.py index ae5750e4..5f8b95f3 100644 --- a/cartoframes/utils.py +++ b/cartoframes/utils.py @@ -73,3 +73,8 @@ def norm_colname(colname): if final_name[0].isdigit(): return '_' + final_name return final_name + + +def join_url(parts): + """join parts of URL into complete url""" + return '/'.join(s.strip('/') for s in parts)
include voyager as a basemap option e.g., ``` https://cartodb-basemaps-a.global.ssl.fastly.net/rastertiles/voyager_nolabels/{z}/{x}/{y}.png ```
CartoDB/cartoframes
diff --git a/test/test_context.py b/test/test_context.py index b8abcd28..a5654d01 100644 --- a/test/test_context.py +++ b/test/test_context.py @@ -16,6 +16,7 @@ from carto.sql import SQLClient import pandas as pd WILL_SKIP = False +warnings.filterwarnings("ignore") class TestCartoContext(unittest.TestCase): @@ -533,21 +534,21 @@ class TestCartoContext(unittest.TestCase): # baseid1 = dark, labels1 = labels on top in named map name labels_polygon = cc.map(layers=Layer(self.test_read_table)) self.assertRegexpMatches(labels_polygon.__html__(), - '.*baseid1_labels1.*', + '.*baseid2_labels1.*', msg='labels should be on top since only a ' 'polygon layer is present') - # baseid1 = dark, labels0 = labels on bottom + # baseid2 = voyager, labels0 = labels on bottom labels_point = cc.map(layers=Layer(self.test_point_table)) self.assertRegexpMatches(labels_point.__html__(), - '.*baseid1_labels0.*', + '.*baseid2_labels0.*', msg='labels should be on bottom because a ' 'point layer is present') labels_multi = cc.map(layers=[Layer(self.test_point_table), Layer(self.test_read_table)]) self.assertRegexpMatches(labels_multi.__html__(), - '.*baseid1_labels0.*', + '.*baseid2_labels0.*', msg='labels should be on bottom because a ' 'point layer is present') # create a layer with points and polys, but with more polys @@ -566,7 +567,7 @@ class TestCartoContext(unittest.TestCase): points=self.test_point_table)) multi_geom = cc.map(layers=multi_geom_layer) self.assertRegexpMatches(multi_geom.__html__(), - '.*baseid1_labels1.*', + '.*baseid2_labels1.*', msg='layer has more polys than points, so it ' 'should default to polys labels (on top)') diff --git a/test/test_layer.py b/test/test_layer.py index 428c88a5..e13f140e 100644 --- a/test/test_layer.py +++ b/test/test_layer.py @@ -15,18 +15,23 @@ class TestBaseMap(unittest.TestCase): # basemaps with baked-in labels self.dark_map_all = BaseMap(source='dark') self.light_map_all = BaseMap(source='light') + self.voyager_labels_under = 
BaseMap(source='voyager') # basemaps with no labels self.dark_map_no_labels = BaseMap(source='dark', labels=None) self.light_map_no_labels = BaseMap(source='light', labels=None) + self.voyager_map_no_labels = BaseMap(source='voyager', + labels=None) # labels with no basemaps self.dark_only_labels = BaseMap(source='dark', only_labels=True) self.light_only_labels = BaseMap(source='light', only_labels=True) + self.voyager_only_labels = BaseMap(source='voyager', + only_labels=True) def test_basemap_invalid(self): """layer.Basemap exceptions on invalid source""" @@ -53,23 +58,34 @@ class TestBaseMap(unittest.TestCase): self.assertEqual(self.light_map_all.url, 'https://cartodb-basemaps-{s}.global.ssl.fastly.net/' 'light_all/{z}/{x}/{y}.png') + self.assertEqual(self.voyager_labels_under.url, + 'https://cartodb-basemaps-{s}.global.ssl.fastly.net/' + 'rastertiles/voyager_labels_under/{z}/{x}/{y}.png') self.assertEqual(self.dark_map_no_labels.url, 'https://cartodb-basemaps-{s}.global.ssl.fastly.net/' 'dark_nolabels/{z}/{x}/{y}.png') self.assertEqual(self.light_map_no_labels.url, 'https://cartodb-basemaps-{s}.global.ssl.fastly.net/' 'light_nolabels/{z}/{x}/{y}.png') + self.assertEqual(self.voyager_map_no_labels.url, + 'https://cartodb-basemaps-{s}.global.ssl.fastly.net/' + 'rastertiles/voyager_nolabels/{z}/{x}/{y}.png') self.assertEqual(self.light_only_labels.url, 'https://cartodb-basemaps-{s}.global.ssl.fastly.net/' 'light_only_labels/{z}/{x}/{y}.png') self.assertEqual(self.dark_only_labels.url, 'https://cartodb-basemaps-{s}.global.ssl.fastly.net/' 'dark_only_labels/{z}/{x}/{y}.png') + self.assertEqual(self.voyager_only_labels.url, + 'https://cartodb-basemaps-{s}.global.ssl.fastly.net/' + 'rastertiles/voyager_only_labels/{z}/{x}/{y}.png') # ensure self.is_basic() works as intended self.assertTrue(self.light_map_all.is_basic(), msg='is a basic carto basemap') self.assertTrue(self.dark_map_all.is_basic()) + self.assertTrue(self.voyager_labels_under.is_basic(), + msg='is a 
basic carto basemap') class TestQueryLayer(unittest.TestCase):
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 4 }
0.2
{ "env_vars": null, "env_yml_path": [], "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "nose", "nose-cov", "pytest" ], "pre_install": [], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
appdirs==1.4.4 attrs==22.2.0 backcall==0.2.0 carto==1.11.3 -e git+https://github.com/CartoDB/cartoframes.git@a694cfa6e9f7ff39954ef5045649eb2518632338#egg=cartoframes certifi==2021.5.30 charset-normalizer==2.0.12 cov-core==1.15.0 coverage==6.2 decorator==5.1.1 future==1.0.0 idna==3.10 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 ipython==7.16.3 ipython-genutils==0.2.0 jedi==0.17.2 nose==1.3.7 nose-cov==1.6 numpy==1.19.5 packaging==21.3 pandas==1.1.5 parso==0.7.1 pexpect==4.9.0 pickleshare==0.7.5 pluggy==1.0.0 prompt-toolkit==3.0.36 ptyprocess==0.7.0 py==1.11.0 Pygments==2.14.0 pyparsing==3.1.4 pyrestcli==0.6.11 pytest==7.0.1 python-dateutil==2.9.0.post0 pytz==2025.2 requests==2.27.1 Shapely==1.8.5.post1 six==1.17.0 tomli==1.2.3 tqdm==4.64.1 traitlets==4.3.3 typing_extensions==4.1.1 urllib3==1.26.20 wcwidth==0.2.13 webcolors==1.7 zipp==3.6.0
name: cartoframes channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - appdirs==1.4.4 - attrs==22.2.0 - backcall==0.2.0 - carto==1.11.3 - charset-normalizer==2.0.12 - cov-core==1.15.0 - coverage==6.2 - decorator==5.1.1 - future==1.0.0 - idna==3.10 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - ipython==7.16.3 - ipython-genutils==0.2.0 - jedi==0.17.2 - nose==1.3.7 - nose-cov==1.6 - numpy==1.19.5 - packaging==21.3 - pandas==1.1.5 - parso==0.7.1 - pexpect==4.9.0 - pickleshare==0.7.5 - pluggy==1.0.0 - prompt-toolkit==3.0.36 - ptyprocess==0.7.0 - py==1.11.0 - pygments==2.14.0 - pyparsing==3.1.4 - pyrestcli==0.6.11 - pytest==7.0.1 - python-dateutil==2.9.0.post0 - pytz==2025.2 - requests==2.27.1 - shapely==1.8.5.post1 - six==1.17.0 - tomli==1.2.3 - tqdm==4.64.1 - traitlets==4.3.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - wcwidth==0.2.13 - webcolors==1.7 - zipp==3.6.0 prefix: /opt/conda/envs/cartoframes
[ "test/test_layer.py::TestBaseMap::test_basemap_invalid", "test/test_layer.py::TestBaseMap::test_basemap_source" ]
[ "test/test_context.py::TestCartoContext::test_add_encoded_geom", "test/test_context.py::TestCartoContext::test_cartocontext", "test/test_context.py::TestCartoContext::test_cartocontext_check_query", "test/test_context.py::TestCartoContext::test_cartocontext_credentials", "test/test_context.py::TestCartoContext::test_cartocontext_delete", "test/test_context.py::TestCartoContext::test_cartocontext_handle_import", "test/test_context.py::TestCartoContext::test_cartocontext_isorguser", "test/test_context.py::TestCartoContext::test_cartocontext_map", "test/test_context.py::TestCartoContext::test_cartocontext_map_geom_type", "test/test_context.py::TestCartoContext::test_cartocontext_mixed_case", "test/test_context.py::TestCartoContext::test_cartocontext_read", "test/test_context.py::TestCartoContext::test_cartocontext_table_exists", "test/test_context.py::TestCartoContext::test_cartocontext_write", "test/test_context.py::TestCartoContext::test_cartoframes_query", "test/test_context.py::TestCartoContext::test_cartoframes_sync", "test/test_context.py::TestCartoContext::test_data_obs_functions", "test/test_context.py::TestCartoContext::test_debug_print", "test/test_context.py::TestCartoContext::test_get_bounds", "test/test_context.py::TestBatchJobStatus::test_batchjobstatus", "test/test_context.py::TestBatchJobStatus::test_batchjobstatus_methods", "test/test_context.py::TestBatchJobStatus::test_batchjobstatus_repr" ]
[ "test/test_context.py::TestCartoContext::test_cartocontext_send_dataframe", "test/test_context.py::TestCartoContext::test_decode_geom", "test/test_context.py::TestCartoContext::test_df2pg_schema", "test/test_context.py::TestCartoContext::test_dtypes2pg", "test/test_context.py::TestCartoContext::test_encode_geom", "test/test_context.py::TestCartoContext::test_pg2dtypes", "test/test_layer.py::TestAbstractLayer::test_class", "test/test_layer.py::TestQueryLayer::test_querylayer_colors", "test/test_layer.py::TestQueryLayer::test_querylayer_size_and_time", "test/test_layer.py::TestQueryLayer::test_querylayer_size_column_key", "test/test_layer.py::TestQueryLayer::test_querylayer_size_default", "test/test_layer.py::TestQueryLayer::test_querylayer_size_defaults", "test/test_layer.py::TestQueryLayer::test_querylayer_time_default", "test/test_layer.py::TestQueryLayer::test_querylayer_time_errors" ]
[]
BSD 3-Clause "New" or "Revised" License
1,749
[ "cartoframes/context.py", "cartoframes/maps.py", "cartoframes/layer.py", "cartoframes/utils.py" ]
[ "cartoframes/context.py", "cartoframes/maps.py", "cartoframes/layer.py", "cartoframes/utils.py" ]
getsentry__responses-176
92b2d4597e03c8dee096329fca2ed7f28c8fefa5
2017-10-11 08:31:07
92b2d4597e03c8dee096329fca2ed7f28c8fefa5
codecov[bot]: # [Codecov](https://codecov.io/gh/getsentry/responses/pull/176?src=pr&el=h1) Report > Merging [#176](https://codecov.io/gh/getsentry/responses/pull/176?src=pr&el=desc) into [master](https://codecov.io/gh/getsentry/responses/commit/92b2d4597e03c8dee096329fca2ed7f28c8fefa5?src=pr&el=desc) will **increase** coverage by `0.75%`. > The diff coverage is `100%`. [![Impacted file tree graph](https://codecov.io/gh/getsentry/responses/pull/176/graphs/tree.svg?token=x2tAGR7rxE&width=650&height=150&src=pr)](https://codecov.io/gh/getsentry/responses/pull/176?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #176 +/- ## ========================================== + Coverage 95.06% 95.81% +0.75% ========================================== Files 1 1 Lines 304 311 +7 ========================================== + Hits 289 298 +9 + Misses 15 13 -2 ``` | [Impacted Files](https://codecov.io/gh/getsentry/responses/pull/176?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [responses.py](https://codecov.io/gh/getsentry/responses/pull/176?src=pr&el=tree#diff-cmVzcG9uc2VzLnB5) | `95.81% <100%> (+0.75%)` | :arrow_up: | ------ [Continue to review full report at Codecov](https://codecov.io/gh/getsentry/responses/pull/176?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/getsentry/responses/pull/176?src=pr&el=footer). Last update [92b2d45...9a3cfd3](https://codecov.io/gh/getsentry/responses/pull/176?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
diff --git a/responses.py b/responses.py index c2a3280..1c48310 100644 --- a/responses.py +++ b/responses.py @@ -23,8 +23,9 @@ except ImportError: if six.PY2: from urlparse import urlparse, parse_qsl, urlsplit, urlunsplit + from urllib import quote else: - from urllib.parse import urlparse, parse_qsl, urlsplit, urlunsplit + from urllib.parse import urlparse, parse_qsl, urlsplit, urlunsplit, quote if six.PY2: try: @@ -53,6 +54,21 @@ def _is_string(s): return isinstance(s, six.string_types) +def _has_unicode(s): + return any(ord(char) > 128 for char in s) + + +def _clean_unicode(url): + if isinstance(url.encode('utf8'), six.string_types): + url = url.encode('utf8') + chars = list(url) + for i, x in enumerate(chars): + if ord(x) > 128: + chars[i] = quote(x) + + return ''.join(chars) + + def _is_redirect(response): try: # 2.0.0 <= requests <= 2.2 @@ -173,22 +189,18 @@ class BaseResponse(object): return False for (a_k, a_v), (b_k, b_v) in zip(url_qsl, other_qsl): - if not isinstance(a_k, six.text_type): - a_k = a_k.decode('utf-8') - if not isinstance(b_k, six.text_type): - b_k = b_k.decode('utf-8') if a_k != b_k: return False - if not isinstance(a_v, six.text_type): - a_v = a_v.decode('utf-8') - if not isinstance(b_v, six.text_type): - b_v = b_v.decode('utf-8') if a_v != b_v: return False return True def _url_matches(self, url, other, match_querystring=False): if _is_string(url): + if _has_unicode(url): + url = _clean_unicode(url) + if not isinstance(other, six.text_type): + other = other.encode('ascii').decode('utf8') if match_querystring: return self._url_matches_strict(url, other) else:
Issues with Unicode in path I saw that you fixed #153 by handling Unicode characters in the querystring, but there still appear to be issues when said Unicode characters are in the URL path itself. If you pass a mock URL with a Unicode character in it to Responses and then request the same URL via Requests, you get a connection refused error. [This is the line](https://github.com/getsentry/responses/blob/master/responses.py#L166) where it fails, when I stepped through the debugger. I'm using Python 2.7.13 and Responses 0.8.0. Please let me know if I need to clarify this issue. Thanks!
getsentry/responses
diff --git a/test_responses.py b/test_responses.py index 6965a1c..a7c3ed2 100644 --- a/test_responses.py +++ b/test_responses.py @@ -648,7 +648,7 @@ def test_allow_redirects_samehost(): assert_reset() -def test_handles_chinese_url(): +def test_handles_unicode_querystring(): url = u'http://example.com/test?type=2&ie=utf8&query=汉字' @responses.activate @@ -663,6 +663,21 @@ def test_handles_chinese_url(): assert_reset() +def test_handles_unicode_url(): + url = u'https://hi.wikipedia.org/wiki/दिलवाले_दुल्हनिया_ले_जाएंगे' + + @responses.activate + def run(): + responses.add(responses.GET, url, body='test') + + resp = requests.get(url) + + assert_response(resp, 'test') + + run() + assert_reset() + + def test_headers(): @responses.activate def run():
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_issue_reference" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 1 }, "num_modified_files": 1 }
0.8
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[tests]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest pytest-cov pytest-localserver flake8" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
certifi==2025.1.31 charset-normalizer==3.4.1 cookies==2.2.1 coverage==4.5.4 exceptiongroup==1.2.2 flake8==7.2.0 idna==3.10 iniconfig==2.1.0 MarkupSafe==3.0.2 mccabe==0.7.0 packaging==24.2 pluggy==1.5.0 pycodestyle==2.13.0 pyflakes==3.3.2 pytest==8.3.5 pytest-cov==2.10.1 pytest-localserver==0.9.0.post0 requests==2.32.3 -e git+https://github.com/getsentry/responses.git@92b2d4597e03c8dee096329fca2ed7f28c8fefa5#egg=responses six==1.17.0 tomli==2.2.1 urllib3==2.3.0 Werkzeug==3.1.3
name: responses channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - certifi==2025.1.31 - charset-normalizer==3.4.1 - cookies==2.2.1 - coverage==4.5.4 - exceptiongroup==1.2.2 - flake8==7.2.0 - idna==3.10 - iniconfig==2.1.0 - markupsafe==3.0.2 - mccabe==0.7.0 - packaging==24.2 - pluggy==1.5.0 - pycodestyle==2.13.0 - pyflakes==3.3.2 - pytest==8.3.5 - pytest-cov==2.10.1 - pytest-localserver==0.9.0.post0 - requests==2.32.3 - six==1.17.0 - tomli==2.2.1 - urllib3==2.3.0 - werkzeug==3.1.3 prefix: /opt/conda/envs/responses
[ "test_responses.py::test_handles_unicode_url" ]
[ "test_responses.py::test_replace[http://example.com/two-http://example.com/two]", "test_responses.py::test_replace[original1-replacement1]", "test_responses.py::test_replace[http://example\\\\.com/two-http://example\\\\.com/two]", "test_responses.py::test_replace_error[http://example.com/one-http://example\\\\.com/one]", "test_responses.py::test_replace_error[http://example\\\\.com/one-http://example.com/one]", "test_responses.py::test_remove", "test_responses.py::test_response_equality[args10-kwargs10-args20-kwargs20-True]", "test_responses.py::test_response_equality[args11-kwargs11-args21-kwargs21-False]", "test_responses.py::test_response_equality[args13-kwargs13-args23-kwargs23-True]", "test_responses.py::test_match_querystring_regex", "test_responses.py::test_match_querystring_error_regex", "test_responses.py::test_arbitrary_status_code", "test_responses.py::test_regular_expression_url", "test_responses.py::test_allow_redirects_samehost" ]
[ "test_responses.py::test_response", "test_responses.py::test_response_with_instance", "test_responses.py::test_response_equality[args12-kwargs12-args22-kwargs22-False]", "test_responses.py::test_response_equality_different_objects", "test_responses.py::test_connection_error", "test_responses.py::test_match_querystring", "test_responses.py::test_match_empty_querystring", "test_responses.py::test_match_querystring_error", "test_responses.py::test_accept_string_body", "test_responses.py::test_accept_json_body", "test_responses.py::test_no_content_type", "test_responses.py::test_throw_connection_error_explicit", "test_responses.py::test_callback", "test_responses.py::test_callback_no_content_type", "test_responses.py::test_custom_adapter", "test_responses.py::test_responses_as_context_manager", "test_responses.py::test_activate_doesnt_change_signature", "test_responses.py::test_activate_doesnt_change_signature_for_method", "test_responses.py::test_response_cookies", "test_responses.py::test_response_callback", "test_responses.py::test_response_filebody", "test_responses.py::test_assert_all_requests_are_fired", "test_responses.py::test_handles_unicode_querystring", "test_responses.py::test_headers", "test_responses.py::test_legacy_adding_headers", "test_responses.py::test_multiple_responses", "test_responses.py::test_multiple_urls", "test_responses.py::test_passthru", "test_responses.py::test_method_named_param" ]
[]
Apache License 2.0
1,750
[ "responses.py" ]
[ "responses.py" ]
bmcfee__pumpp-91
460d07619ca6d1916150f56b45f333ba500d94a3
2017-10-12 14:21:54
68a14caccc9acdfc280c98fed85f6c1ee2596702
diff --git a/pumpp/task/beat.py b/pumpp/task/beat.py index 2d4b56b..cd5a0f3 100644 --- a/pumpp/task/beat.py +++ b/pumpp/task/beat.py @@ -4,6 +4,7 @@ import numpy as np +from librosa import time_to_frames import jams from mir_eval.util import boundaries_to_intervals, adjust_intervals from sklearn.preprocessing import LabelBinarizer, LabelEncoder @@ -95,7 +96,11 @@ class BeatTransformer(BaseTaskTransformer): ann = jams.Annotation(namespace=self.namespace, duration=duration) - beat_times = [t for t, _ in self.decode_events(encoded) if _] + beat_times = np.asarray([t for t, _ in self.decode_events(encoded) if _]) + beat_frames = time_to_frames(beat_times, + sr=self.sr, + hop_length=self.hop_length) + if downbeat is not None: downbeat_times = set([t for t, _ in self.decode_events(downbeat) if _]) @@ -106,12 +111,16 @@ class BeatTransformer(BaseTaskTransformer): pickup_beats = 0 value = - pickup_beats - 1 - for beat in beat_times: - if beat in downbeat_times: + for beat_t, beat_f in zip(beat_times, beat_frames): + if beat_t in downbeat_times: value = 1 else: value += 1 - ann.append(time=beat, duration=0, value=value) + confidence = encoded[beat_f] + ann.append(time=beat_t, + duration=0, + value=value, + confidence=confidence) return ann diff --git a/pumpp/task/chord.py b/pumpp/task/chord.py index 6396124..e07cce2 100644 --- a/pumpp/task/chord.py +++ b/pumpp/task/chord.py @@ -9,6 +9,7 @@ import numpy as np from sklearn.preprocessing import LabelBinarizer, LabelEncoder from sklearn.preprocessing import MultiLabelBinarizer +from librosa import time_to_frames import mir_eval import jams @@ -492,12 +493,30 @@ class ChordTagTransformer(BaseTaskTransformer): duration=duration, multi=False, sparse=self.sparse): + + # Map start:end to frames + f_start, f_end = time_to_frames([start, end], + sr=self.sr, + hop_length=self.hop_length) + + # Reverse the index if self.sparse: + # Compute the confidence + if encoded.shape[1] == 1: + # This case is for full-confidence prediction (just 
the index) + confidence = 1. + else: + confidence = np.mean(encoded[f_start:f_end+1, value]) + value_dec = self.encoder.inverse_transform(value) else: + confidence = np.mean(encoded[f_start:f_end+1, np.argmax(value)]) value_dec = self.encoder.inverse_transform(np.atleast_2d(value)) for vd in value_dec: - ann.append(time=start, duration=end-start, value=vd) + ann.append(time=start, + duration=end-start, + value=vd, + confidence=float(confidence)) return ann diff --git a/pumpp/task/tags.py b/pumpp/task/tags.py index 3cbf6a9..8b2f14e 100644 --- a/pumpp/task/tags.py +++ b/pumpp/task/tags.py @@ -5,6 +5,8 @@ import numpy as np from sklearn.preprocessing import MultiLabelBinarizer +from librosa import time_to_frames + import jams from .base import BaseTaskTransformer @@ -106,10 +108,20 @@ class DynamicLabelTransformer(BaseTaskTransformer): ann = jams.Annotation(namespace=self.namespace, duration=duration) for start, end, value in self.decode_intervals(encoded, duration=duration): + # Map start:end to frames + f_start, f_end = time_to_frames([start, end], + sr=self.sr, + hop_length=self.hop_length) + + confidence = np.mean(encoded[f_start:f_end+1, value]) + value_dec = self.encoder.inverse_transform(np.atleast_2d(value))[0] for vd in value_dec: - ann.append(time=start, duration=end-start, value=vd) + ann.append(time=start, + duration=end-start, + value=vd, + confidence=confidence) return ann @@ -185,9 +197,14 @@ class StaticLabelTransformer(BaseTaskTransformer): ann = jams.Annotation(namespace=self.namespace, duration=duration) if np.isrealobj(encoded): - encoded = (encoded >= 0.5) - - for vd in self.encoder.inverse_transform(np.atleast_2d(encoded))[0]: - ann.append(time=0, duration=duration, value=vd) - + detected = (encoded >= 0.5) + else: + detected = encoded + + for vd in self.encoder.inverse_transform(np.atleast_2d(detected))[0]: + vid = np.flatnonzero(self.encoder.transform(np.atleast_2d(vd))) + ann.append(time=0, + duration=duration, + value=vd, + 
confidence=encoded[vid]) return ann
Automatic confidence in task inversion #### Description The prediction inverters / jams converters could populate the `confidence` field of the jams annotations. They don't currently, but it would be easy to do so.
bmcfee/pumpp
diff --git a/tests/test_decode.py b/tests/test_decode.py index f0ffe6b..cfe297c 100644 --- a/tests/test_decode.py +++ b/tests/test_decode.py @@ -101,6 +101,8 @@ def test_decode_tags_dynamic_hard(sr, hop_length, ann_tag): data = tc.transform_annotation(ann_tag, ann_tag.duration) inverse = tc.inverse(data['tags'], duration=ann_tag.duration) + for obs in inverse: + assert 0. <= obs.confidence <= 1. data2 = tc.transform_annotation(inverse, ann_tag.duration) assert np.allclose(data['tags'], data2['tags']) @@ -119,6 +121,8 @@ def test_decode_tags_dynamic_soft(sr, hop_length, ann_tag): # Soften the data, but preserve the decisions tags_predict = data['tags'] * 0.51 + 0.1 inverse = tc.inverse(tags_predict, duration=ann_tag.duration) + for obs in inverse: + assert 0. <= obs.confidence <= 1. data2 = tc.transform_annotation(inverse, ann_tag.duration) assert np.allclose(data['tags'], data2['tags']) @@ -130,6 +134,8 @@ def test_decode_tags_static_hard(ann_tag): data = tc.transform_annotation(ann_tag, ann_tag.duration) inverse = tc.inverse(data['tags'], ann_tag.duration) + for obs in inverse: + assert 0. <= obs.confidence <= 1. data2 = tc.transform_annotation(inverse, ann_tag.duration) assert np.allclose(data['tags'], data2['tags']) @@ -143,6 +149,8 @@ def test_decode_tags_static_soft(ann_tag): tags_predict = data['tags'] * 0.51 + 0.1 inverse = tc.inverse(tags_predict, ann_tag.duration) + for obs in inverse: + assert 0. <= obs.confidence <= 1. data2 = tc.transform_annotation(inverse, ann_tag.duration) assert np.allclose(data['tags'], data2['tags']) @@ -154,6 +162,8 @@ def test_decode_beat_hard(sr, hop_length, ann_beat): data = tc.transform_annotation(ann_beat, ann_beat.duration) inverse = tc.inverse(data['beat'], duration=ann_beat.duration) + for obs in inverse: + assert 0. <= obs.confidence <= 1. 
data2 = tc.transform_annotation(inverse, ann_beat.duration) assert np.allclose(data['beat'], data2['beat']) @@ -167,6 +177,8 @@ def test_decode_beat_soft(sr, hop_length, ann_beat): beat_pred = data['beat'] * 0.51 + 0.1 inverse = tc.inverse(beat_pred, duration=ann_beat.duration) + for obs in inverse: + assert 0. <= obs.confidence <= 1. data2 = tc.transform_annotation(inverse, ann_beat.duration) assert np.allclose(data['beat'], data2['beat']) @@ -179,6 +191,8 @@ def test_decode_beat_downbeat_hard(sr, hop_length, ann_beat): data = tc.transform_annotation(ann_beat, ann_beat.duration) inverse = tc.inverse(data['beat'], downbeat=data['downbeat'], duration=ann_beat.duration) + for obs in inverse: + assert 0. <= obs.confidence <= 1. data2 = tc.transform_annotation(inverse, ann_beat.duration) assert np.allclose(data['beat'], data2['beat']) @@ -193,6 +207,8 @@ def test_decode_beat_downbeat_soft(sr, hop_length, ann_beat): dbeat_pred = data['downbeat'] * 0.51 + 0.1 inverse = tc.inverse(beat_pred, downbeat=dbeat_pred, duration=ann_beat.duration) + for obs in inverse: + assert 0. <= obs.confidence <= 1. data2 = tc.transform_annotation(inverse, ann_beat.duration) assert np.allclose(data['beat'], data2['beat']) @@ -250,6 +266,8 @@ def test_decode_chordtag_hard_dense(sr, hop_length, ann_chord): data = tc.transform_annotation(ann_chord, ann_chord.duration) inverse = tc.inverse(data['chord'], duration=ann_chord.duration) + for obs in inverse: + assert 0 <= obs.confidence <= 1. data2 = tc.transform_annotation(inverse, ann_chord.duration) assert np.allclose(data['chord'], data2['chord']) @@ -267,6 +285,10 @@ def test_decode_chordtag_soft_dense(sr, hop_length, ann_chord): chord_predict = data['chord'] * 0.51 + 0.1 inverse = tc.inverse(chord_predict, duration=ann_chord.duration) + + for obs in inverse: + assert 0 <= obs.confidence <= 1. 
+ data2 = tc.transform_annotation(inverse, ann_chord.duration) assert np.allclose(data['chord'], data2['chord']) @@ -283,6 +305,8 @@ def test_decode_chordtag_hard_sparse_sparse(sr, hop_length, ann_chord): data = tc.transform_annotation(ann_chord, ann_chord.duration) inverse = tc.inverse(data['chord'], duration=ann_chord.duration) + for obs in inverse: + assert 0 <= obs.confidence <= 1. data2 = tc.transform_annotation(inverse, ann_chord.duration) assert np.allclose(data['chord'], data2['chord']) @@ -305,6 +329,8 @@ def test_decode_chordtag_hard_dense_sparse(sr, hop_length, ann_chord): # Invert using the sparse encoder inverse = tcs.inverse(data['chord'], duration=ann_chord.duration) + for obs in inverse: + assert 0 <= obs.confidence <= 1. data2 = tcs.transform_annotation(inverse, ann_chord.duration) dense_positions = np.where(data['chord'])[1] @@ -330,6 +356,8 @@ def test_decode_chordtag_soft_dense_sparse(sr, hop_length, ann_chord): chord_predict = data['chord'] * 0.51 + 0.1 # Invert using the sparse encoder inverse = tcs.inverse(chord_predict, duration=ann_chord.duration) + for obs in inverse: + assert 0 <= obs.confidence <= 1. data2 = tcs.transform_annotation(inverse, ann_chord.duration) dense_positions = np.where(data['chord'])[1]
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 3 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[docs,tests]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pyflakes" ], "pre_install": [ "apt-get update", "apt-get install -y ffmpeg" ], "python": "3.5", "reqs_path": [ "docs/requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
absl-py==0.15.0 alabaster==0.7.13 appdirs==1.4.4 astunparse==1.6.3 attrs==22.2.0 audioread==3.0.1 Babel==2.11.0 cached-property==1.5.2 cachetools==4.2.4 certifi==2021.5.30 cffi==1.15.1 charset-normalizer==2.0.12 clang==5.0 coverage==6.2 dataclasses==0.8 decorator==5.1.1 docutils==0.18.1 flatbuffers==1.12 gast==0.4.0 google-auth==1.35.0 google-auth-oauthlib==0.4.6 google-pasta==0.2.0 grpcio==1.48.2 h5py==3.1.0 idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 jams==0.3.4 Jinja2==3.0.3 joblib==1.1.1 jsonschema==3.2.0 keras==2.6.0 Keras-Preprocessing==1.1.2 librosa==0.9.2 llvmlite==0.36.0 Markdown==3.3.7 MarkupSafe==2.0.1 mir_eval==0.8.2 numba==0.53.1 numpy==1.19.5 numpydoc==1.1.0 oauthlib==3.2.2 opt-einsum==3.3.0 packaging==21.3 pandas==1.1.5 pluggy==1.0.0 pooch==1.6.0 protobuf==3.19.6 -e git+https://github.com/bmcfee/pumpp.git@460d07619ca6d1916150f56b45f333ba500d94a3#egg=pumpp py==1.11.0 pyasn1==0.5.1 pyasn1-modules==0.3.0 pycparser==2.21 pyflakes==3.0.1 Pygments==2.14.0 pyparsing==3.1.4 pyrsistent==0.18.0 pytest==7.0.1 pytest-cov==4.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 requests==2.27.1 requests-oauthlib==2.0.0 resampy==0.4.3 rsa==4.9 scikit-learn==0.24.2 scipy==1.5.4 six==1.15.0 snowballstemmer==2.2.0 sortedcontainers==2.4.0 soundfile==0.13.1 Sphinx==5.3.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 tensorboard==2.6.0 tensorboard-data-server==0.6.1 tensorboard-plugin-wit==1.8.1 tensorflow==2.6.2 tensorflow-estimator==2.6.0 termcolor==1.1.0 threadpoolctl==3.1.0 tomli==1.2.3 typing-extensions==3.7.4.3 urllib3==1.26.20 Werkzeug==2.0.3 wrapt==1.12.1 zipp==3.6.0
name: pumpp channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - absl-py==0.15.0 - alabaster==0.7.13 - appdirs==1.4.4 - astunparse==1.6.3 - attrs==22.2.0 - audioread==3.0.1 - babel==2.11.0 - cached-property==1.5.2 - cachetools==4.2.4 - cffi==1.15.1 - charset-normalizer==2.0.12 - clang==5.0 - coverage==6.2 - dataclasses==0.8 - decorator==5.1.1 - docutils==0.18.1 - flatbuffers==1.12 - gast==0.4.0 - google-auth==1.35.0 - google-auth-oauthlib==0.4.6 - google-pasta==0.2.0 - grpcio==1.48.2 - h5py==3.1.0 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - jams==0.3.4 - jinja2==3.0.3 - joblib==1.1.1 - jsonschema==3.2.0 - keras==2.6.0 - keras-preprocessing==1.1.2 - librosa==0.9.2 - llvmlite==0.36.0 - markdown==3.3.7 - markupsafe==2.0.1 - mir-eval==0.8.2 - numba==0.53.1 - numpy==1.19.5 - numpydoc==1.1.0 - oauthlib==3.2.2 - opt-einsum==3.3.0 - packaging==21.3 - pandas==1.1.5 - pluggy==1.0.0 - pooch==1.6.0 - protobuf==3.19.6 - py==1.11.0 - pyasn1==0.5.1 - pyasn1-modules==0.3.0 - pycparser==2.21 - pyflakes==3.0.1 - pygments==2.14.0 - pyparsing==3.1.4 - pyrsistent==0.18.0 - pytest==7.0.1 - pytest-cov==4.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - requests==2.27.1 - requests-oauthlib==2.0.0 - resampy==0.4.3 - rsa==4.9 - scikit-learn==0.24.2 - scipy==1.5.4 - six==1.15.0 - 
snowballstemmer==2.2.0 - sortedcontainers==2.4.0 - soundfile==0.13.1 - sphinx==5.3.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - tensorboard==2.6.0 - tensorboard-data-server==0.6.1 - tensorboard-plugin-wit==1.8.1 - tensorflow==2.6.2 - tensorflow-estimator==2.6.0 - termcolor==1.1.0 - threadpoolctl==3.1.0 - tomli==1.2.3 - typing-extensions==3.7.4.3 - urllib3==1.26.20 - werkzeug==2.0.3 - wrapt==1.12.1 - zipp==3.6.0 prefix: /opt/conda/envs/pumpp
[ "tests/test_decode.py::test_decode_tags_dynamic_hard", "tests/test_decode.py::test_decode_tags_dynamic_soft", "tests/test_decode.py::test_decode_tags_static_hard", "tests/test_decode.py::test_decode_tags_static_soft", "tests/test_decode.py::test_decode_beat_hard", "tests/test_decode.py::test_decode_beat_soft", "tests/test_decode.py::test_decode_beat_downbeat_hard", "tests/test_decode.py::test_decode_beat_downbeat_soft", "tests/test_decode.py::test_decode_chordtag_hard_dense", "tests/test_decode.py::test_decode_chordtag_soft_dense", "tests/test_decode.py::test_decode_chordtag_hard_sparse_sparse", "tests/test_decode.py::test_decode_chordtag_hard_dense_sparse", "tests/test_decode.py::test_decode_chordtag_soft_dense_sparse" ]
[]
[ "tests/test_decode.py::test_decode_vector" ]
[]
ISC License
1,751
[ "pumpp/task/tags.py", "pumpp/task/chord.py", "pumpp/task/beat.py" ]
[ "pumpp/task/tags.py", "pumpp/task/chord.py", "pumpp/task/beat.py" ]
bmcfee__pumpp-92
0d6ef78f21fb7bf4736b2ca383c3377f7e0f8f9d
2017-10-12 15:13:19
68a14caccc9acdfc280c98fed85f6c1ee2596702
diff --git a/pumpp/core.py b/pumpp/core.py index 0db13b0..227cb81 100644 --- a/pumpp/core.py +++ b/pumpp/core.py @@ -11,6 +11,7 @@ Core functionality import librosa import jams +import six from .base import Slicer from .exceptions import ParameterError @@ -225,3 +226,28 @@ class Pump(Slicer): def __call__(self, *args, **kwargs): return self.transform(*args, **kwargs) + + def __str__(self): + rstr = '<Pump [{:d} operators, {:d} fields]>'.format(len(self.ops), + len(self.fields)) + for key in self.opmap: + rstr += "\n - '{}': {}".format(key, type(self.opmap[key])) + for field in self.opmap[key].fields: + rstr += "\n - '{}': {}".format(field, self.opmap[key].fields[field]) + return rstr + + def _repr_html_(self): + + rstr = '<dl class="row">' + for key in self.opmap: + rstr += '\n <dt class="col-sm-3">{:s}</dt>'.format(key) + rstr += '\n <dd class="col-sm-9">{}'.format(self.opmap[key]) + + rstr += '<ul>' + for fkey, field in six.iteritems(self.opmap[key].fields): + rstr += '\n <li>{:s} [shape={}, dtype={}]</li>'.format(fkey, + field.shape, + field.dtype.__name__) + rstr += '</ul></dd>' + rstr += '</dl>' + return rstr
Meaningful repr for pump objects #### Description It would be useful if `repr(Pump)` showed the operator map directly.
bmcfee/pumpp
diff --git a/tests/test_core.py b/tests/test_core.py index b728699..ae5b1fc 100644 --- a/tests/test_core.py +++ b/tests/test_core.py @@ -212,3 +212,45 @@ def test_pump_layers(sr, hop_length): assert L1[k].dtype == L2[k].dtype for d1, d2 in zip(L1[k].shape, L2[k].shape): assert str(d1) == str(d2) + + +def test_pump_str(sr, hop_length): + + ops = [pumpp.feature.STFT(name='stft', sr=sr, + hop_length=hop_length, + n_fft=2*hop_length), + + pumpp.task.BeatTransformer(name='beat', sr=sr, + hop_length=hop_length), + + pumpp.task.ChordTransformer(name='chord', sr=sr, + hop_length=hop_length), + + pumpp.task.StaticLabelTransformer(name='tags', + namespace='tag_open', + labels=['rock', 'jazz'])] + + pump = pumpp.Pump(*ops) + + assert isinstance(str(pump), str) + + +def test_pump_repr_html(sr, hop_length): + + ops = [pumpp.feature.STFT(name='stft', sr=sr, + hop_length=hop_length, + n_fft=2*hop_length), + + pumpp.task.BeatTransformer(name='beat', sr=sr, + hop_length=hop_length), + + pumpp.task.ChordTransformer(name='chord', sr=sr, + hop_length=hop_length), + + pumpp.task.StaticLabelTransformer(name='tags', + namespace='tag_open', + labels=['rock', 'jazz'])] + + pump = pumpp.Pump(*ops) + + assert isinstance(pump._repr_html_(), str)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 1 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[tests]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y ffmpeg" ], "python": "3.6", "reqs_path": [ "docs/requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
absl-py==0.15.0 alabaster==0.7.13 appdirs==1.4.4 astunparse==1.6.3 attrs==22.2.0 audioread==3.0.1 Babel==2.11.0 cached-property==1.5.2 cachetools==4.2.4 certifi==2021.5.30 cffi==1.15.1 charset-normalizer==2.0.12 clang==5.0 coverage==6.2 dataclasses==0.8 decorator==5.1.1 docutils==0.18.1 flatbuffers==1.12 gast==0.4.0 google-auth==1.35.0 google-auth-oauthlib==0.4.6 google-pasta==0.2.0 grpcio==1.48.2 h5py==3.1.0 idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 jams==0.3.4 Jinja2==3.0.3 joblib==1.1.1 jsonschema==3.2.0 keras==2.6.0 Keras-Preprocessing==1.1.2 librosa==0.9.2 llvmlite==0.36.0 Markdown==3.3.7 MarkupSafe==2.0.1 mir_eval==0.8.2 numba==0.53.1 numpy==1.19.5 numpydoc==1.1.0 oauthlib==3.2.2 opt-einsum==3.3.0 packaging==21.3 pandas==1.1.5 pluggy==1.0.0 pooch==1.6.0 protobuf==3.19.6 -e git+https://github.com/bmcfee/pumpp.git@0d6ef78f21fb7bf4736b2ca383c3377f7e0f8f9d#egg=pumpp py==1.11.0 pyasn1==0.5.1 pyasn1-modules==0.3.0 pycparser==2.21 Pygments==2.14.0 pyparsing==3.1.4 pyrsistent==0.18.0 pytest==7.0.1 pytest-cov==4.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 requests==2.27.1 requests-oauthlib==2.0.0 resampy==0.4.3 rsa==4.9 scikit-learn==0.24.2 scipy==1.5.4 six==1.15.0 snowballstemmer==2.2.0 sortedcontainers==2.4.0 soundfile==0.13.1 Sphinx==5.3.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 tensorboard==2.6.0 tensorboard-data-server==0.6.1 tensorboard-plugin-wit==1.8.1 tensorflow==2.6.2 tensorflow-estimator==2.6.0 termcolor==1.1.0 threadpoolctl==3.1.0 tomli==1.2.3 typing-extensions==3.7.4.3 urllib3==1.26.20 Werkzeug==2.0.3 wrapt==1.12.1 zipp==3.6.0
name: pumpp channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - absl-py==0.15.0 - alabaster==0.7.13 - appdirs==1.4.4 - astunparse==1.6.3 - attrs==22.2.0 - audioread==3.0.1 - babel==2.11.0 - cached-property==1.5.2 - cachetools==4.2.4 - cffi==1.15.1 - charset-normalizer==2.0.12 - clang==5.0 - coverage==6.2 - dataclasses==0.8 - decorator==5.1.1 - docutils==0.18.1 - flatbuffers==1.12 - gast==0.4.0 - google-auth==1.35.0 - google-auth-oauthlib==0.4.6 - google-pasta==0.2.0 - grpcio==1.48.2 - h5py==3.1.0 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - jams==0.3.4 - jinja2==3.0.3 - joblib==1.1.1 - jsonschema==3.2.0 - keras==2.6.0 - keras-preprocessing==1.1.2 - librosa==0.9.2 - llvmlite==0.36.0 - markdown==3.3.7 - markupsafe==2.0.1 - mir-eval==0.8.2 - numba==0.53.1 - numpy==1.19.5 - numpydoc==1.1.0 - oauthlib==3.2.2 - opt-einsum==3.3.0 - packaging==21.3 - pandas==1.1.5 - pluggy==1.0.0 - pooch==1.6.0 - protobuf==3.19.6 - py==1.11.0 - pyasn1==0.5.1 - pyasn1-modules==0.3.0 - pycparser==2.21 - pygments==2.14.0 - pyparsing==3.1.4 - pyrsistent==0.18.0 - pytest==7.0.1 - pytest-cov==4.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - requests==2.27.1 - requests-oauthlib==2.0.0 - resampy==0.4.3 - rsa==4.9 - scikit-learn==0.24.2 - scipy==1.5.4 - six==1.15.0 - snowballstemmer==2.2.0 - 
sortedcontainers==2.4.0 - soundfile==0.13.1 - sphinx==5.3.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - tensorboard==2.6.0 - tensorboard-data-server==0.6.1 - tensorboard-plugin-wit==1.8.1 - tensorflow==2.6.2 - tensorflow-estimator==2.6.0 - termcolor==1.1.0 - threadpoolctl==3.1.0 - tomli==1.2.3 - typing-extensions==3.7.4.3 - urllib3==1.26.20 - werkzeug==2.0.3 - wrapt==1.12.1 - zipp==3.6.0 prefix: /opt/conda/envs/pumpp
[ "tests/test_core.py::test_pump_repr_html[11025-128]", "tests/test_core.py::test_pump_repr_html[11025-512]", "tests/test_core.py::test_pump_repr_html[22050-128]", "tests/test_core.py::test_pump_repr_html[22050-512]" ]
[]
[ "tests/test_core.py::test_pump[None-11025-128-False-None-None-None]", "tests/test_core.py::test_pump[None-11025-128-False-None-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-11025-128-False-None-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[None-11025-128-False-None-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-11025-128-False-22050-None-None]", "tests/test_core.py::test_pump[None-11025-128-False-22050-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-11025-128-False-22050-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[None-11025-128-False-22050-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-11025-128-False-44100-None-None]", "tests/test_core.py::test_pump[None-11025-128-False-44100-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-11025-128-False-44100-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[None-11025-128-False-44100-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-11025-128-True-None-None-None]", "tests/test_core.py::test_pump[None-11025-128-True-None-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-11025-128-True-None-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[None-11025-128-True-None-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-11025-128-True-22050-None-None]", "tests/test_core.py::test_pump[None-11025-128-True-22050-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-11025-128-True-22050-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[None-11025-128-True-22050-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-11025-128-True-44100-None-None]", "tests/test_core.py::test_pump[None-11025-128-True-44100-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-11025-128-True-44100-tests/data/test.ogg-None]", 
"tests/test_core.py::test_pump[None-11025-128-True-44100-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-11025-512-False-None-None-None]", "tests/test_core.py::test_pump[None-11025-512-False-None-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-11025-512-False-None-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[None-11025-512-False-None-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-11025-512-False-22050-None-None]", "tests/test_core.py::test_pump[None-11025-512-False-22050-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-11025-512-False-22050-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[None-11025-512-False-22050-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-11025-512-False-44100-None-None]", "tests/test_core.py::test_pump[None-11025-512-False-44100-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-11025-512-False-44100-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[None-11025-512-False-44100-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-11025-512-True-None-None-None]", "tests/test_core.py::test_pump[None-11025-512-True-None-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-11025-512-True-None-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[None-11025-512-True-None-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-11025-512-True-22050-None-None]", "tests/test_core.py::test_pump[None-11025-512-True-22050-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-11025-512-True-22050-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[None-11025-512-True-22050-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-11025-512-True-44100-None-None]", "tests/test_core.py::test_pump[None-11025-512-True-44100-None-tests/data/test.ogg]", 
"tests/test_core.py::test_pump[None-11025-512-True-44100-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[None-11025-512-True-44100-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-22050-128-False-None-None-None]", "tests/test_core.py::test_pump[None-22050-128-False-None-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-22050-128-False-None-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[None-22050-128-False-None-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-22050-128-False-22050-None-None]", "tests/test_core.py::test_pump[None-22050-128-False-22050-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-22050-128-False-22050-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[None-22050-128-False-22050-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-22050-128-False-44100-None-None]", "tests/test_core.py::test_pump[None-22050-128-False-44100-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-22050-128-False-44100-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[None-22050-128-False-44100-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-22050-128-True-None-None-None]", "tests/test_core.py::test_pump[None-22050-128-True-None-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-22050-128-True-None-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[None-22050-128-True-None-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-22050-128-True-22050-None-None]", "tests/test_core.py::test_pump[None-22050-128-True-22050-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-22050-128-True-22050-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[None-22050-128-True-22050-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-22050-128-True-44100-None-None]", 
"tests/test_core.py::test_pump[None-22050-128-True-44100-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-22050-128-True-44100-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[None-22050-128-True-44100-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-22050-512-False-None-None-None]", "tests/test_core.py::test_pump[None-22050-512-False-None-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-22050-512-False-None-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[None-22050-512-False-None-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-22050-512-False-22050-None-None]", "tests/test_core.py::test_pump[None-22050-512-False-22050-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-22050-512-False-22050-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[None-22050-512-False-22050-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-22050-512-False-44100-None-None]", "tests/test_core.py::test_pump[None-22050-512-False-44100-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-22050-512-False-44100-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[None-22050-512-False-44100-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-22050-512-True-None-None-None]", "tests/test_core.py::test_pump[None-22050-512-True-None-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-22050-512-True-None-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[None-22050-512-True-None-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-22050-512-True-22050-None-None]", "tests/test_core.py::test_pump[None-22050-512-True-22050-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-22050-512-True-22050-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[None-22050-512-True-22050-tests/data/test.ogg-tests/data/test.ogg]", 
"tests/test_core.py::test_pump[None-22050-512-True-44100-None-None]", "tests/test_core.py::test_pump[None-22050-512-True-44100-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[None-22050-512-True-44100-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[None-22050-512-True-44100-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-128-False-None-None-None]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-128-False-None-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-128-False-None-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-128-False-None-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-128-False-22050-None-None]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-128-False-22050-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-128-False-22050-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-128-False-22050-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-128-False-44100-None-None]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-128-False-44100-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-128-False-44100-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-128-False-44100-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-128-True-None-None-None]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-128-True-None-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-128-True-None-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-128-True-None-tests/data/test.ogg-tests/data/test.ogg]", 
"tests/test_core.py::test_pump[tests/data/test.jams-11025-128-True-22050-None-None]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-128-True-22050-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-128-True-22050-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-128-True-22050-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-128-True-44100-None-None]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-128-True-44100-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-128-True-44100-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-128-True-44100-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-512-False-None-None-None]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-512-False-None-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-512-False-None-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-512-False-None-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-512-False-22050-None-None]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-512-False-22050-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-512-False-22050-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-512-False-22050-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-512-False-44100-None-None]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-512-False-44100-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-512-False-44100-tests/data/test.ogg-None]", 
"tests/test_core.py::test_pump[tests/data/test.jams-11025-512-False-44100-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-512-True-None-None-None]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-512-True-None-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-512-True-None-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-512-True-None-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-512-True-22050-None-None]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-512-True-22050-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-512-True-22050-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-512-True-22050-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-512-True-44100-None-None]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-512-True-44100-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-512-True-44100-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[tests/data/test.jams-11025-512-True-44100-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-128-False-None-None-None]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-128-False-None-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-128-False-None-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-128-False-None-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-128-False-22050-None-None]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-128-False-22050-None-tests/data/test.ogg]", 
"tests/test_core.py::test_pump[tests/data/test.jams-22050-128-False-22050-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-128-False-22050-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-128-False-44100-None-None]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-128-False-44100-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-128-False-44100-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-128-False-44100-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-128-True-None-None-None]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-128-True-None-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-128-True-None-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-128-True-None-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-128-True-22050-None-None]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-128-True-22050-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-128-True-22050-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-128-True-22050-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-128-True-44100-None-None]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-128-True-44100-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-128-True-44100-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-128-True-44100-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-512-False-None-None-None]", 
"tests/test_core.py::test_pump[tests/data/test.jams-22050-512-False-None-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-512-False-None-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-512-False-None-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-512-False-22050-None-None]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-512-False-22050-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-512-False-22050-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-512-False-22050-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-512-False-44100-None-None]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-512-False-44100-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-512-False-44100-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-512-False-44100-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-512-True-None-None-None]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-512-True-None-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-512-True-None-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-512-True-None-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-512-True-22050-None-None]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-512-True-22050-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-512-True-22050-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-512-True-22050-tests/data/test.ogg-tests/data/test.ogg]", 
"tests/test_core.py::test_pump[tests/data/test.jams-22050-512-True-44100-None-None]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-512-True-44100-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-512-True-44100-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[tests/data/test.jams-22050-512-True-44100-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-11025-128-False-None-None-None]", "tests/test_core.py::test_pump[jam2-11025-128-False-None-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-11025-128-False-None-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[jam2-11025-128-False-None-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-11025-128-False-22050-None-None]", "tests/test_core.py::test_pump[jam2-11025-128-False-22050-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-11025-128-False-22050-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[jam2-11025-128-False-22050-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-11025-128-False-44100-None-None]", "tests/test_core.py::test_pump[jam2-11025-128-False-44100-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-11025-128-False-44100-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[jam2-11025-128-False-44100-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-11025-128-True-None-None-None]", "tests/test_core.py::test_pump[jam2-11025-128-True-None-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-11025-128-True-None-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[jam2-11025-128-True-None-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-11025-128-True-22050-None-None]", "tests/test_core.py::test_pump[jam2-11025-128-True-22050-None-tests/data/test.ogg]", 
"tests/test_core.py::test_pump[jam2-11025-128-True-22050-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[jam2-11025-128-True-22050-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-11025-128-True-44100-None-None]", "tests/test_core.py::test_pump[jam2-11025-128-True-44100-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-11025-128-True-44100-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[jam2-11025-128-True-44100-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-11025-512-False-None-None-None]", "tests/test_core.py::test_pump[jam2-11025-512-False-None-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-11025-512-False-None-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[jam2-11025-512-False-None-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-11025-512-False-22050-None-None]", "tests/test_core.py::test_pump[jam2-11025-512-False-22050-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-11025-512-False-22050-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[jam2-11025-512-False-22050-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-11025-512-False-44100-None-None]", "tests/test_core.py::test_pump[jam2-11025-512-False-44100-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-11025-512-False-44100-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[jam2-11025-512-False-44100-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-11025-512-True-None-None-None]", "tests/test_core.py::test_pump[jam2-11025-512-True-None-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-11025-512-True-None-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[jam2-11025-512-True-None-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-11025-512-True-22050-None-None]", 
"tests/test_core.py::test_pump[jam2-11025-512-True-22050-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-11025-512-True-22050-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[jam2-11025-512-True-22050-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-11025-512-True-44100-None-None]", "tests/test_core.py::test_pump[jam2-11025-512-True-44100-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-11025-512-True-44100-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[jam2-11025-512-True-44100-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-22050-128-False-None-None-None]", "tests/test_core.py::test_pump[jam2-22050-128-False-None-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-22050-128-False-None-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[jam2-22050-128-False-None-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-22050-128-False-22050-None-None]", "tests/test_core.py::test_pump[jam2-22050-128-False-22050-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-22050-128-False-22050-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[jam2-22050-128-False-22050-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-22050-128-False-44100-None-None]", "tests/test_core.py::test_pump[jam2-22050-128-False-44100-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-22050-128-False-44100-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[jam2-22050-128-False-44100-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-22050-128-True-None-None-None]", "tests/test_core.py::test_pump[jam2-22050-128-True-None-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-22050-128-True-None-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[jam2-22050-128-True-None-tests/data/test.ogg-tests/data/test.ogg]", 
"tests/test_core.py::test_pump[jam2-22050-128-True-22050-None-None]", "tests/test_core.py::test_pump[jam2-22050-128-True-22050-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-22050-128-True-22050-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[jam2-22050-128-True-22050-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-22050-128-True-44100-None-None]", "tests/test_core.py::test_pump[jam2-22050-128-True-44100-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-22050-128-True-44100-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[jam2-22050-128-True-44100-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-22050-512-False-None-None-None]", "tests/test_core.py::test_pump[jam2-22050-512-False-None-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-22050-512-False-None-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[jam2-22050-512-False-None-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-22050-512-False-22050-None-None]", "tests/test_core.py::test_pump[jam2-22050-512-False-22050-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-22050-512-False-22050-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[jam2-22050-512-False-22050-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-22050-512-False-44100-None-None]", "tests/test_core.py::test_pump[jam2-22050-512-False-44100-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-22050-512-False-44100-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[jam2-22050-512-False-44100-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-22050-512-True-None-None-None]", "tests/test_core.py::test_pump[jam2-22050-512-True-None-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-22050-512-True-None-tests/data/test.ogg-None]", 
"tests/test_core.py::test_pump[jam2-22050-512-True-None-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-22050-512-True-22050-None-None]", "tests/test_core.py::test_pump[jam2-22050-512-True-22050-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-22050-512-True-22050-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[jam2-22050-512-True-22050-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-22050-512-True-44100-None-None]", "tests/test_core.py::test_pump[jam2-22050-512-True-44100-None-tests/data/test.ogg]", "tests/test_core.py::test_pump[jam2-22050-512-True-44100-tests/data/test.ogg-None]", "tests/test_core.py::test_pump[jam2-22050-512-True-44100-tests/data/test.ogg-tests/data/test.ogg]", "tests/test_core.py::test_pump_empty[None-11025-128-tests/data/test.ogg]", "tests/test_core.py::test_pump_empty[None-11025-512-tests/data/test.ogg]", "tests/test_core.py::test_pump_empty[None-22050-128-tests/data/test.ogg]", "tests/test_core.py::test_pump_empty[None-22050-512-tests/data/test.ogg]", "tests/test_core.py::test_pump_empty[tests/data/test.jams-11025-128-tests/data/test.ogg]", "tests/test_core.py::test_pump_empty[tests/data/test.jams-11025-512-tests/data/test.ogg]", "tests/test_core.py::test_pump_empty[tests/data/test.jams-22050-128-tests/data/test.ogg]", "tests/test_core.py::test_pump_empty[tests/data/test.jams-22050-512-tests/data/test.ogg]", "tests/test_core.py::test_pump_empty[jam2-11025-128-tests/data/test.ogg]", "tests/test_core.py::test_pump_empty[jam2-11025-512-tests/data/test.ogg]", "tests/test_core.py::test_pump_empty[jam2-22050-128-tests/data/test.ogg]", "tests/test_core.py::test_pump_empty[jam2-22050-512-tests/data/test.ogg]", "tests/test_core.py::test_pump_add[11025-128]", "tests/test_core.py::test_pump_add[11025-512]", "tests/test_core.py::test_pump_add[22050-128]", "tests/test_core.py::test_pump_add[22050-512]", "tests/test_core.py::test_pump_sampler[11025-128-None-1-None]", 
"tests/test_core.py::test_pump_sampler[11025-128-None-1-10]", "tests/test_core.py::test_pump_sampler[11025-128-None-5-None]", "tests/test_core.py::test_pump_sampler[11025-128-None-5-10]", "tests/test_core.py::test_pump_sampler[11025-128-1-1-None]", "tests/test_core.py::test_pump_sampler[11025-128-1-1-10]", "tests/test_core.py::test_pump_sampler[11025-128-1-5-None]", "tests/test_core.py::test_pump_sampler[11025-128-1-5-10]", "tests/test_core.py::test_pump_sampler[11025-512-None-1-None]", "tests/test_core.py::test_pump_sampler[11025-512-None-1-10]", "tests/test_core.py::test_pump_sampler[11025-512-None-5-None]", "tests/test_core.py::test_pump_sampler[11025-512-None-5-10]", "tests/test_core.py::test_pump_sampler[11025-512-1-1-None]", "tests/test_core.py::test_pump_sampler[11025-512-1-1-10]", "tests/test_core.py::test_pump_sampler[11025-512-1-5-None]", "tests/test_core.py::test_pump_sampler[11025-512-1-5-10]", "tests/test_core.py::test_pump_sampler[22050-128-None-1-None]", "tests/test_core.py::test_pump_sampler[22050-128-None-1-10]", "tests/test_core.py::test_pump_sampler[22050-128-None-5-None]", "tests/test_core.py::test_pump_sampler[22050-128-None-5-10]", "tests/test_core.py::test_pump_sampler[22050-128-1-1-None]", "tests/test_core.py::test_pump_sampler[22050-128-1-1-10]", "tests/test_core.py::test_pump_sampler[22050-128-1-5-None]", "tests/test_core.py::test_pump_sampler[22050-128-1-5-10]", "tests/test_core.py::test_pump_sampler[22050-512-None-1-None]", "tests/test_core.py::test_pump_sampler[22050-512-None-1-10]", "tests/test_core.py::test_pump_sampler[22050-512-None-5-None]", "tests/test_core.py::test_pump_sampler[22050-512-None-5-10]", "tests/test_core.py::test_pump_sampler[22050-512-1-1-None]", "tests/test_core.py::test_pump_sampler[22050-512-1-1-10]", "tests/test_core.py::test_pump_sampler[22050-512-1-5-None]", "tests/test_core.py::test_pump_sampler[22050-512-1-5-10]", "tests/test_core.py::test_pump_str[11025-128]", "tests/test_core.py::test_pump_str[11025-512]", 
"tests/test_core.py::test_pump_str[22050-128]", "tests/test_core.py::test_pump_str[22050-512]" ]
[]
ISC License
1,752
[ "pumpp/core.py" ]
[ "pumpp/core.py" ]
chimpler__pyhocon-133
8609e4f810ca47a3b573d8b79fa39760e97714b5
2017-10-12 18:45:20
4683937b1d195ce2f53ca78987571e41bfe273e7
diff --git a/pyhocon/config_tree.py b/pyhocon/config_tree.py index 075bae1..354bfb6 100644 --- a/pyhocon/config_tree.py +++ b/pyhocon/config_tree.py @@ -93,7 +93,7 @@ class ConfigTree(OrderedDict): self._push_history(key_elt, value) self[key_elt] = value elif isinstance(l, list): - l += value + self[key_elt] = l + value self._push_history(key_elt, l) elif l is None: self._push_history(key_elt, value) @@ -144,6 +144,8 @@ class ConfigTree(OrderedDict): if key_index == len(key_path) - 1: if isinstance(elt, NoneValue): return None + elif isinstance(elt, list): + return [None if isinstance(x, NoneValue) else x for x in elt] else: return elt elif isinstance(elt, ConfigTree):
get_list() returns NoneValue's, not None's Given the following pyhocon file: ``` single_value = null list_value = [null] ``` And the following code: ```python from pyhocon import ConfigFactory config = ConfigFactory.parse_file("test.conf") single_value = config.get("single_value") print single_value, single_value is None list_value = config.get_list("list_value")[0] print list_value, list_value is None print single_value == list_value ``` You get as output: ``` None True <pyhocon.config_tree.NoneValue object at 0xe20ad0> False False ``` I expected both values to be Python's `None`.
chimpler/pyhocon
diff --git a/tests/test_config_parser.py b/tests/test_config_parser.py index ca08db6..8d9dffd 100644 --- a/tests/test_config_parser.py +++ b/tests/test_config_parser.py @@ -212,9 +212,11 @@ class TestConfigParser(object): config = ConfigFactory.parse_string( """ a = null + b = [null] """ ) assert config.get('a') is None + assert config.get('b')[0] is None def test_parse_override(self): config = ConfigFactory.parse_string( diff --git a/tests/test_config_tree.py b/tests/test_config_tree.py index 97a8e17..3d194de 100644 --- a/tests/test_config_tree.py +++ b/tests/test_config_tree.py @@ -9,7 +9,7 @@ except ImportError: # pragma: no cover from ordereddict import OrderedDict -class TestConfigParser(object): +class TestConfigTree(object): def test_config_tree_quoted_string(self): config_tree = ConfigTree()
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 3 }, "num_modified_files": 1 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "mock", "coveralls" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 charset-normalizer==2.0.12 coverage==6.2 coveralls==3.3.1 docopt==0.6.2 idna==3.10 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work mock==5.2.0 more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work -e git+https://github.com/chimpler/pyhocon.git@8609e4f810ca47a3b573d8b79fa39760e97714b5#egg=pyhocon pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 requests==2.27.1 toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work urllib3==1.26.20 zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: pyhocon channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - charset-normalizer==2.0.12 - coverage==6.2 - coveralls==3.3.1 - docopt==0.6.2 - idna==3.10 - mock==5.2.0 - requests==2.27.1 - urllib3==1.26.20 prefix: /opt/conda/envs/pyhocon
[ "tests/test_config_parser.py::TestConfigParser::test_parse_null" ]
[]
[ "tests/test_config_parser.py::TestConfigParser::test_parse_simple_value", "tests/test_config_parser.py::TestConfigParser::test_parse_with_enclosing_brace", "tests/test_config_parser.py::TestConfigParser::test_parse_with_enclosing_square_bracket", "tests/test_config_parser.py::TestConfigParser::test_quoted_key_with_dots", "tests/test_config_parser.py::TestConfigParser::test_dotted_notation_merge", "tests/test_config_parser.py::TestConfigParser::test_comma_to_separate_expr", "tests/test_config_parser.py::TestConfigParser::test_dict_merge", "tests/test_config_parser.py::TestConfigParser::test_parse_with_comments", "tests/test_config_parser.py::TestConfigParser::test_missing_config", "tests/test_config_parser.py::TestConfigParser::test_parse_override", "tests/test_config_parser.py::TestConfigParser::test_concat_dict", "tests/test_config_parser.py::TestConfigParser::test_concat_string", "tests/test_config_parser.py::TestConfigParser::test_concat_list", "tests/test_config_parser.py::TestConfigParser::test_bad_concat", "tests/test_config_parser.py::TestConfigParser::test_string_substitutions", "tests/test_config_parser.py::TestConfigParser::test_string_substitutions_with_no_space", "tests/test_config_parser.py::TestConfigParser::test_int_substitutions", "tests/test_config_parser.py::TestConfigParser::test_cascade_string_substitutions", "tests/test_config_parser.py::TestConfigParser::test_multiple_substitutions", "tests/test_config_parser.py::TestConfigParser::test_dict_substitutions", "tests/test_config_parser.py::TestConfigParser::test_dos_chars_with_unquoted_string_noeol", "tests/test_config_parser.py::TestConfigParser::test_dos_chars_with_quoted_string_noeol", "tests/test_config_parser.py::TestConfigParser::test_dos_chars_with_triple_quoted_string_noeol", "tests/test_config_parser.py::TestConfigParser::test_dos_chars_with_int_noeol", "tests/test_config_parser.py::TestConfigParser::test_dos_chars_with_float_noeol", 
"tests/test_config_parser.py::TestConfigParser::test_list_substitutions", "tests/test_config_parser.py::TestConfigParser::test_list_element_substitution", "tests/test_config_parser.py::TestConfigParser::test_substitution_list_with_append", "tests/test_config_parser.py::TestConfigParser::test_substitution_list_with_append_substitution", "tests/test_config_parser.py::TestConfigParser::test_non_existent_substitution", "tests/test_config_parser.py::TestConfigParser::test_non_compatible_substitution", "tests/test_config_parser.py::TestConfigParser::test_self_ref_substitution_array", "tests/test_config_parser.py::TestConfigParser::test_self_append_array", "tests/test_config_parser.py::TestConfigParser::test_self_append_string", "tests/test_config_parser.py::TestConfigParser::test_self_append_non_existent_string", "tests/test_config_parser.py::TestConfigParser::test_self_append_nonexistent_array", "tests/test_config_parser.py::TestConfigParser::test_self_append_object", "tests/test_config_parser.py::TestConfigParser::test_self_append_nonexistent_object", "tests/test_config_parser.py::TestConfigParser::test_self_ref_substitution_array_to_dict", "tests/test_config_parser.py::TestConfigParser::test_self_ref_substitiotion_dict_in_array", "tests/test_config_parser.py::TestConfigParser::test_self_ref_substitution_dict_path", "tests/test_config_parser.py::TestConfigParser::test_self_ref_substitution_dict_path_hide", "tests/test_config_parser.py::TestConfigParser::test_self_ref_substitution_dict_recurse", "tests/test_config_parser.py::TestConfigParser::test_self_ref_substitution_dict_recurse2", "tests/test_config_parser.py::TestConfigParser::test_self_ref_substitution_dict_merge", "tests/test_config_parser.py::TestConfigParser::test_self_ref_substitution_dict_otherfield", "tests/test_config_parser.py::TestConfigParser::test_self_ref_substitution_dict_otherfield_merged_in", 
"tests/test_config_parser.py::TestConfigParser::test_self_ref_substitution_dict_otherfield_merged_in_mutual", "tests/test_config_parser.py::TestConfigParser::test_self_ref_substitution_string_opt_concat", "tests/test_config_parser.py::TestConfigParser::test_self_ref_substitution_dict_recurse_part", "tests/test_config_parser.py::TestConfigParser::test_self_ref_substitution_object", "tests/test_config_parser.py::TestConfigParser::test_concat_multi_line_string", "tests/test_config_parser.py::TestConfigParser::test_concat_multi_line_list", "tests/test_config_parser.py::TestConfigParser::test_concat_multi_line_dict", "tests/test_config_parser.py::TestConfigParser::test_parse_URL_from_samples", "tests/test_config_parser.py::TestConfigParser::test_parse_URL_from_invalid", "tests/test_config_parser.py::TestConfigParser::test_include_dict_from_samples", "tests/test_config_parser.py::TestConfigParser::test_list_of_dicts", "tests/test_config_parser.py::TestConfigParser::test_list_of_lists", "tests/test_config_parser.py::TestConfigParser::test_list_of_dicts_with_merge", "tests/test_config_parser.py::TestConfigParser::test_list_of_lists_with_merge", "tests/test_config_parser.py::TestConfigParser::test_invalid_assignment", "tests/test_config_parser.py::TestConfigParser::test_invalid_dict", "tests/test_config_parser.py::TestConfigParser::test_include_file", "tests/test_config_parser.py::TestConfigParser::test_include_missing_file", "tests/test_config_parser.py::TestConfigParser::test_include_required_file", "tests/test_config_parser.py::TestConfigParser::test_include_missing_required_file", "tests/test_config_parser.py::TestConfigParser::test_include_dict", "tests/test_config_parser.py::TestConfigParser::test_include_substitution", "tests/test_config_parser.py::TestConfigParser::test_var_with_include_keyword", "tests/test_config_parser.py::TestConfigParser::test_substitution_override", "tests/test_config_parser.py::TestConfigParser::test_substitution_flat_override", 
"tests/test_config_parser.py::TestConfigParser::test_substitution_nested_override", "tests/test_config_parser.py::TestConfigParser::test_optional_substitution", "tests/test_config_parser.py::TestConfigParser::test_cascade_optional_substitution", "tests/test_config_parser.py::TestConfigParser::test_substitution_cycle", "tests/test_config_parser.py::TestConfigParser::test_assign_number_with_eol", "tests/test_config_parser.py::TestConfigParser::test_assign_strings_with_eol", "tests/test_config_parser.py::TestConfigParser::test_assign_list_numbers_with_eol", "tests/test_config_parser.py::TestConfigParser::test_assign_list_strings_with_eol", "tests/test_config_parser.py::TestConfigParser::test_assign_dict_strings_with_equal_sign_with_eol", "tests/test_config_parser.py::TestConfigParser::test_assign_dict_strings_no_equal_sign_with_eol", "tests/test_config_parser.py::TestConfigParser::test_substitutions_overwrite", "tests/test_config_parser.py::TestConfigParser::test_fallback_substitutions_overwrite", "tests/test_config_parser.py::TestConfigParser::test_fallback_substitutions_overwrite_file", "tests/test_config_parser.py::TestConfigParser::test_fallback_self_ref_substitutions_append", "tests/test_config_parser.py::TestConfigParser::test_fallback_self_ref_substitutions_append_plus_equals", "tests/test_config_parser.py::TestConfigParser::test_self_merge_ref_substitutions_object", "tests/test_config_parser.py::TestConfigParser::test_self_merge_ref_substitutions_object2", "tests/test_config_parser.py::TestConfigParser::test_self_merge_ref_substitutions_object3", "tests/test_config_parser.py::TestConfigParser::test_fallback_self_ref_substitutions_merge", "tests/test_config_parser.py::TestConfigParser::test_fallback_self_ref_substitutions_concat_string", "tests/test_config_parser.py::TestConfigParser::test_object_field_substitution", "tests/test_config_parser.py::TestConfigParser::test_one_line_quote_escape", 
"tests/test_config_parser.py::TestConfigParser::test_multi_line_escape", "tests/test_config_parser.py::TestConfigParser::test_multiline_with_backslash", "tests/test_config_parser.py::TestConfigParser::test_from_dict_with_dict", "tests/test_config_parser.py::TestConfigParser::test_from_dict_with_ordered_dict", "tests/test_config_parser.py::TestConfigParser::test_from_dict_with_nested_dict", "tests/test_config_parser.py::TestConfigParser::test_object_concat", "tests/test_config_parser.py::TestConfigParser::test_issue_75", "tests/test_config_parser.py::TestConfigParser::test_plain_ordered_dict", "tests/test_config_parser.py::TestConfigParser::test_quoted_strings_with_ws", "tests/test_config_parser.py::TestConfigParser::test_unquoted_strings_with_ws", "tests/test_config_parser.py::TestConfigParser::test_quoted_unquoted_strings_with_ws", "tests/test_config_parser.py::TestConfigParser::test_quoted_unquoted_strings_with_ws_substitutions", "tests/test_config_parser.py::TestConfigParser::test_assign_next_line", "tests/test_config_parser.py::TestConfigParser::test_string_from_environment", "tests/test_config_parser.py::TestConfigParser::test_bool_from_environment", "tests/test_config_parser.py::TestConfigParser::test_int_from_environment", "tests/test_config_parser.py::TestConfigParser::test_unicode_dict_key", "tests/test_config_parser.py::TestConfigParser::test_with_comment_on_last_line", "tests/test_config_parser.py::TestConfigParser::test_triple_quotes_same_line", "tests/test_config_parser.py::TestConfigParser::test_pop", "tests/test_config_tree.py::TestConfigTree::test_config_tree_quoted_string", "tests/test_config_tree.py::TestConfigTree::test_config_list", "tests/test_config_tree.py::TestConfigTree::test_config_tree_number", "tests/test_config_tree.py::TestConfigTree::test_config_tree_iterator", "tests/test_config_tree.py::TestConfigTree::test_config_logging", "tests/test_config_tree.py::TestConfigTree::test_config_tree_null", 
"tests/test_config_tree.py::TestConfigTree::test_getters", "tests/test_config_tree.py::TestConfigTree::test_getters_with_default", "tests/test_config_tree.py::TestConfigTree::test_getter_type_conversion_string_to_bool", "tests/test_config_tree.py::TestConfigTree::test_getter_type_conversion_bool_to_string", "tests/test_config_tree.py::TestConfigTree::test_getter_type_conversion_number_to_string", "tests/test_config_tree.py::TestConfigTree::test_overrides_int_with_config_no_append", "tests/test_config_tree.py::TestConfigTree::test_overrides_int_with_config_append", "tests/test_config_tree.py::TestConfigTree::test_plain_ordered_dict", "tests/test_config_tree.py::TestConfigTree::test_contains", "tests/test_config_tree.py::TestConfigTree::test_contains_with_quoted_keys", "tests/test_config_tree.py::TestConfigTree::test_configtree_pop", "tests/test_config_tree.py::TestConfigTree::test_keyerror_raised" ]
[]
Apache License 2.0
1,753
[ "pyhocon/config_tree.py" ]
[ "pyhocon/config_tree.py" ]
Azure__msrest-for-python-62
a34b2d5521e9fa12f3b9d40c42ed783c06afedf1
2017-10-12 21:12:34
24deba7a7a9e335314058ec2d0b39a710f61be60
diff --git a/msrest/authentication.py b/msrest/authentication.py index 48c90ae..8d21624 100644 --- a/msrest/authentication.py +++ b/msrest/authentication.py @@ -79,6 +79,14 @@ class BasicTokenAuthentication(Authentication): self.scheme = 'Bearer' self.token = token + def set_token(self): + """Should be used to define the self.token attribute. + + In this implementation, does nothing since the token is statically provided + at creation. + """ + pass + def signed_session(self): """Create requests session with any required auth headers applied. @@ -91,7 +99,7 @@ class BasicTokenAuthentication(Authentication): return session -class OAuthTokenAuthentication(Authentication): +class OAuthTokenAuthentication(BasicTokenAuthentication): """OAuth Token Authentication. Requires that supplied token contains an expires_in field.
Make BasicTokenAuthentication the new base class of OAuthTokenAuthentication To enable some KV scenarios releated to MSI
Azure/msrest-for-python
diff --git a/tests/test_auth.py b/tests/test_auth.py index e573a02..32f67f5 100644 --- a/tests/test_auth.py +++ b/tests/test_auth.py @@ -39,6 +39,7 @@ except ImportError: from msrest.authentication import ( BasicAuthentication, + BasicTokenAuthentication, OAuthTokenAuthentication) from requests import Request @@ -71,6 +72,19 @@ class TestAuthentication(unittest.TestCase): self.assertTrue('Authorization' in req.headers) self.assertTrue(req.headers['Authorization'].startswith('Basic ')) + def test_basic_token_auth(self): + + token = { + 'access_token': '123456789' + } + basic = BasicTokenAuthentication(token) + basic.set_token() # Just check that this does not raise + session = basic.signed_session() + + req = session.prepare_request(self.request) + self.assertTrue('Authorization' in req.headers) + self.assertEquals(req.headers['Authorization'], 'Bearer 123456789') + def test_token_auth(self): token = {"my_token":123}
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 1 }
0.4
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "dev_requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
certifi==2025.1.31 charset-normalizer==3.4.1 coverage==7.8.0 exceptiongroup==1.2.2 httpretty==1.1.4 idna==3.10 iniconfig==2.1.0 isodate==0.7.2 -e git+https://github.com/Azure/msrest-for-python.git@a34b2d5521e9fa12f3b9d40c42ed783c06afedf1#egg=msrest oauthlib==3.2.2 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 pytest-cov==6.0.0 requests==2.32.3 requests-oauthlib==2.0.0 tomli==2.2.1 urllib3==2.3.0
name: msrest-for-python channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - certifi==2025.1.31 - charset-normalizer==3.4.1 - coverage==7.8.0 - exceptiongroup==1.2.2 - httpretty==1.1.4 - idna==3.10 - iniconfig==2.1.0 - isodate==0.7.2 - oauthlib==3.2.2 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - pytest-cov==6.0.0 - requests==2.32.3 - requests-oauthlib==2.0.0 - tomli==2.2.1 - urllib3==2.3.0 prefix: /opt/conda/envs/msrest-for-python
[ "tests/test_auth.py::TestAuthentication::test_basic_token_auth" ]
[]
[ "tests/test_auth.py::TestAuthentication::test_basic_auth", "tests/test_auth.py::TestAuthentication::test_token_auth" ]
[]
MIT License
1,754
[ "msrest/authentication.py" ]
[ "msrest/authentication.py" ]
wright-group__WrightTools-339
219ad0c41c0286461e4085e6f563ca695b37e2bd
2017-10-12 21:59:18
592649ce55c9fa7847325c9e9b15b320a38f1389
diff --git a/WrightTools/data/_join.py b/WrightTools/data/_join.py index 1f40775..56ae809 100644 --- a/WrightTools/data/_join.py +++ b/WrightTools/data/_join.py @@ -11,8 +11,10 @@ from ._data import Channel, Data # --- define -------------------------------------------------------------------------------------- + __all__ = ['join'] + # --- functions ----------------------------------------------------------------------------------- @@ -32,21 +34,11 @@ def join(datas, method='first', verbose=True, **kwargs): verbose : bool (optional) Toggle talkback. Default is True. - Keyword Arguments - ----------------- - axis objects - The axes of the new data object. If not supplied, the points of the - new axis will be guessed from the given datas. - Returns ------- - data - A Data instance. + WrightTools.data.Data + A new Data instance. """ - # TODO: a proper treatment of joining datas that have different dimensions - # with intellegent treatment of their constant dimensions. perhaps changing - # map_axis would be good for this. 
- Blaise 2015.10.31 - # copy datas so original objects are not changed datas = [d.copy() for d in datas] # get scanned dimensions @@ -63,7 +55,6 @@ def join(datas, method='first', verbose=True, **kwargs): axis_names.append(axis.name) axis_units.append(axis.units) axis_objects.append(axis) - # TODO: transpose to same dimension orders # convert into same units for data in datas: for axis_name, axis_unit in zip(axis_names, axis_units): @@ -73,26 +64,11 @@ def join(datas, method='first', verbose=True, **kwargs): # get axis points axis_points = [] # list of 1D arrays for axis_name in axis_names: - if axis_name in kwargs.keys(): - axis_points.append(kwargs[axis_name].points) - continue - all_points = np.array([]) - step_sizes = [] + points = np.full((0), np.nan) for data in datas: - for axis in data.axes: - if axis.name == axis_name: - all_points = np.concatenate([all_points, axis.points]) - this_axis_min = np.nanmin(axis.points) - this_axis_max = np.nanmax(axis.points) - this_axis_number = float(axis.points.size) - 1 - step_size = (this_axis_max - this_axis_min) / this_axis_number - step_sizes.append(step_size) - axis_min = np.nanmin(all_points) - axis_max = np.nanmax(all_points) - axis_step_size = min(step_sizes) - axis_n_points = np.ceil((axis_max - axis_min) / axis_step_size) - points = np.linspace(axis_min, axis_max, axis_n_points + 1) - axis_points.append(points) + index = data.axis_names.index(axis_name) + points = np.hstack((points, data.axes[index].points)) + axis_points.append(np.unique(points)) # map datas to new points for axis_index, axis_name in enumerate(axis_names): for data in datas:
[before h5] join takes set of points along each axis currently join looks at each axis and tries to guess what the best evenly spaced points are moving forward, join will simply take the set of all points in all data objects along each axis interpolation will still be used for points that are not contained in any data set (within the convex hull, of course)
wright-group/WrightTools
diff --git a/tests/data/join.py b/tests/data/join.py index 3d77f1f..72b9bc0 100644 --- a/tests/data/join.py +++ b/tests/data/join.py @@ -1,8 +1,18 @@ +"""Test join.""" + + +# --- import -------------------------------------------------------------------------------------- + + import numpy as np import WrightTools as wt from WrightTools import datasets + +# --- test ---------------------------------------------------------------------------------------- + + def test_wm_w2_w1(): p = datasets.PyCMDS.wm_w2_w1_000 a = wt.data.from_PyCMDS(p)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 0, "test_score": 3 }, "num_modified_files": 1 }
2.13
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
appdirs==1.4.4 attrs==22.2.0 cached-property==1.5.2 certifi==2021.5.30 cycler==0.11.0 h5py==3.1.0 imageio==2.15.0 importlib-metadata==4.8.3 iniconfig==1.1.1 kiwisolver==1.3.1 matplotlib==3.3.4 numpy==1.19.5 packaging==21.3 Pillow==8.4.0 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 python-dateutil==2.9.0.post0 pytz==2025.2 scipy==1.5.4 six==1.17.0 tomli==1.2.3 typing_extensions==4.1.1 -e git+https://github.com/wright-group/WrightTools.git@219ad0c41c0286461e4085e6f563ca695b37e2bd#egg=WrightTools zipp==3.6.0
name: WrightTools channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - appdirs==1.4.4 - attrs==22.2.0 - cached-property==1.5.2 - cycler==0.11.0 - h5py==3.1.0 - imageio==2.15.0 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - kiwisolver==1.3.1 - matplotlib==3.3.4 - numpy==1.19.5 - packaging==21.3 - pillow==8.4.0 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - python-dateutil==2.9.0.post0 - pytz==2025.2 - scipy==1.5.4 - six==1.17.0 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/WrightTools
[ "tests/data/join.py::test_wm_w2_w1" ]
[]
[]
[]
MIT License
1,755
[ "WrightTools/data/_join.py" ]
[ "WrightTools/data/_join.py" ]
asottile__all-repos-22
5c980cde22d85a79eec64f25fb0e31ffa0990c16
2017-10-13 04:31:31
6835d50a1d65e98a44a21386ce6ec37703ce8f93
diff --git a/all_repos/autofix_lib.py b/all_repos/autofix_lib.py index 8407bee..8b2372e 100644 --- a/all_repos/autofix_lib.py +++ b/all_repos/autofix_lib.py @@ -27,7 +27,8 @@ class Commit(collections.namedtuple( class AutofixSettings(collections.namedtuple( - 'AutofixSettings', ('jobs', 'color', 'limit', 'dry_run'), + 'AutofixSettings', + ('jobs', 'color', 'limit', 'dry_run', 'interactive'), )): __slots__ = () @@ -35,7 +36,7 @@ class AutofixSettings(collections.namedtuple( def from_cli(cls, args): return cls( jobs=args.jobs, color=args.color, limit=args.limit, - dry_run=args.dry_run, + dry_run=args.dry_run, interactive=args.interactive, ) @@ -112,6 +113,43 @@ def repo_context(repo, *, use_color): traceback.print_exc() +def _interactive_check(*, use_color): + def _quit(): + print('Goodbye!') + raise SystemExit() + + while True: + try: + s = input(color.fmt( + '***Looks good [y,n,s,q,?]? ', + color.BLUE_B, use_color=use_color, + )) + except (EOFError, KeyboardInterrupt): + _quit() + + s = s.strip().lower() + if s in {'y', 'yes'}: + return True + elif s in {'n', 'no'}: + return False + elif s in {'s', 'shell'}: + print('Opening an interactive shell, type `exit` to continue.') + print('Any modifications will be committed.') + subprocess.call(os.environ.get('SHELL', 'bash')) + elif s in {'q', 'quit'}: + _quit() + else: + if s not in {'?', 'help'}: + print(color.fmt( + f'Unexpected input: {s}', color.RED, use_color=use_color, + )) + print('y (yes): yes it looks good, commit and continue.') + print('n (no): no, do not commit this repository.') + print('s (shell): open an interactive shell in the repo.') + print('q (quit, ^C): early exit from the autofixer.') + print('? 
(help): show this help message.') + + def _fix_inner(repo, apply_fix, check_fix, config, commit, autofix_settings): with repo_context(repo, use_color=autofix_settings.color): branch_name = f'all-repos_autofix_{commit.branch_name}' @@ -125,6 +163,12 @@ def _fix_inner(repo, apply_fix, check_fix, config, commit, autofix_settings): check_fix() + if ( + autofix_settings.interactive and + not _interactive_check(use_color=autofix_settings.color) + ): + return + commit_message = ( f'{commit.msg}\n\n' f'Committed via https://github.com/asottile/all-repos' @@ -153,6 +197,7 @@ def fix( commit: Commit, autofix_settings: AutofixSettings, ): + assert not autofix_settings.interactive or autofix_settings.jobs == 1 repos = tuple(repos)[:autofix_settings.limit] func = functools.partial( _fix_inner, diff --git a/all_repos/cli.py b/all_repos/cli.py index 40f5177..2357878 100644 --- a/all_repos/cli.py +++ b/all_repos/cli.py @@ -40,8 +40,16 @@ def add_color_arg(parser): def add_fixer_args(parser): add_config_arg(parser) - add_jobs_arg(parser, default=1) add_color_arg(parser) + + mutex = parser.add_mutually_exclusive_group() + mutex.add_argument('--dry-run', action='store_true') + mutex.add_argument( + '-i', '--interactive', help='Interactively approve / deny fixes.', + ) + add_jobs_arg(mutex, default=1) + + parser.add_argument('--limit', type=int, default=None) parser.add_argument( '--author', help=( @@ -50,6 +58,4 @@ def add_fixer_args(parser): "An example: `--author='Herp Derp <[email protected]>'`" ), ) - parser.add_argument('--dry-run', action='store_true') - parser.add_argument('--limit', type=int, default=None) parser.add_argument('--repos', nargs='*') diff --git a/all_repos/color.py b/all_repos/color.py index f2def75..0f9a61d 100644 --- a/all_repos/color.py +++ b/all_repos/color.py @@ -1,4 +1,5 @@ BLUE_B = '\033[1;34m' +RED = '\033[31m' RED_H = '\033[41m' TURQUOISE = '\033[36m' TURQUOISE_H = '\033[46;30m'
autofix_lib: --interactive Add an interactive mode for autofixers which utilize `autofix_lib`
asottile/all-repos
diff --git a/tests/autofix_lib_test.py b/tests/autofix_lib_test.py index 9a1a57c..b3ab17d 100644 --- a/tests/autofix_lib_test.py +++ b/tests/autofix_lib_test.py @@ -1,5 +1,7 @@ +import builtins import os import subprocess +from unittest import mock import pytest from pre_commit.constants import VERSION as PRE_COMMIT_VERSION @@ -90,6 +92,120 @@ def test_repo_context_errors(file_config_files, capsys): assert 'assert False' in err +def _get_input_side_effect(*inputs): + it = iter(inputs) + + def side_effect(s): + print(s, end='') + ret = next(it) + if ret in (EOFError, KeyboardInterrupt): + print({EOFError: '^D', KeyboardInterrupt: '^C'}[ret]) + raise ret + else: + print(f'<<{ret}') + return ret + return side_effect + + [email protected]_fixture +def mock_input(): + with mock.patch.object(builtins, 'input') as mck: + yield mck + + +def test_interactive_control_c(mock_input, capfd): + mock_input.side_effect = _get_input_side_effect(KeyboardInterrupt) + with pytest.raises(SystemExit): + autofix_lib._interactive_check(use_color=False) + out, _ = capfd.readouterr() + assert out == ( + '***Looks good [y,n,s,q,?]? ^C\n' + 'Goodbye!\n' + ) + + +def test_interactive_eof(mock_input, capfd): + mock_input.side_effect = _get_input_side_effect(EOFError) + with pytest.raises(SystemExit): + autofix_lib._interactive_check(use_color=False) + out, _ = capfd.readouterr() + assert out == ( + '***Looks good [y,n,s,q,?]? ^D\n' + 'Goodbye!\n' + ) + + +def test_interactive_quit(mock_input, capfd): + mock_input.side_effect = _get_input_side_effect('q') + with pytest.raises(SystemExit): + autofix_lib._interactive_check(use_color=False) + out, _ = capfd.readouterr() + assert out == ( + '***Looks good [y,n,s,q,?]? <<q\n' + 'Goodbye!\n' + ) + + +def test_interactive_yes(mock_input, capfd): + mock_input.side_effect = _get_input_side_effect('y') + assert autofix_lib._interactive_check(use_color=False) is True + out, _ = capfd.readouterr() + assert out == '***Looks good [y,n,s,q,?]? 
<<y\n' + + +def test_interactive_no(mock_input, capfd): + mock_input.side_effect = _get_input_side_effect('n') + assert autofix_lib._interactive_check(use_color=False) is False + out, _ = capfd.readouterr() + assert out == '***Looks good [y,n,s,q,?]? <<n\n' + + +def test_interactive_shell(mock_input, capfd): + mock_input.side_effect = _get_input_side_effect('s', 'n') + with mock.patch.dict(os.environ, {'SHELL': 'echo'}): + assert autofix_lib._interactive_check(use_color=False) is False + out, _ = capfd.readouterr() + assert out == ( + '***Looks good [y,n,s,q,?]? <<s\n' + 'Opening an interactive shell, type `exit` to continue.\n' + 'Any modifications will be committed.\n' + # A newline from echo + '\n' + '***Looks good [y,n,s,q,?]? <<n\n' + ) + + +def test_interactive_help(mock_input, capfd): + mock_input.side_effect = _get_input_side_effect('?', 'n') + assert autofix_lib._interactive_check(use_color=False) is False + out, _ = capfd.readouterr() + assert out == ( + '***Looks good [y,n,s,q,?]? <<?\n' + 'y (yes): yes it looks good, commit and continue.\n' + 'n (no): no, do not commit this repository.\n' + 's (shell): open an interactive shell in the repo.\n' + 'q (quit, ^C): early exit from the autofixer.\n' + '? (help): show this help message.\n' + '***Looks good [y,n,s,q,?]? <<n\n' + ) + + +def test_interactive_garbage(mock_input, capfd): + mock_input.side_effect = _get_input_side_effect('garbage', 'n') + assert autofix_lib._interactive_check(use_color=False) is False + out, _ = capfd.readouterr() + assert out == ( + '***Looks good [y,n,s,q,?]? <<garbage\n' + 'Unexpected input: garbage\n' + 'y (yes): yes it looks good, commit and continue.\n' + 'n (no): no, do not commit this repository.\n' + 's (shell): open an interactive shell in the repo.\n' + 'q (quit, ^C): early exit from the autofixer.\n' + '? (help): show this help message.\n' + '***Looks good [y,n,s,q,?]? 
<<n\n' + ) + + def lower_case_f(): f_contents = open('f').read() with open('f', 'w') as f: @@ -110,7 +226,7 @@ def test_fix_dry_run_no_change(file_config_files, capfd): config=load_config(file_config_files.cfg), commit=autofix_lib.Commit('message!', 'test-branch', None), autofix_settings=autofix_lib.AutofixSettings( - jobs=1, color=False, limit=None, dry_run=True, + jobs=1, color=False, limit=None, dry_run=True, interactive=False, ), ) @@ -136,7 +252,7 @@ def test_fix_with_limit(file_config_files, capfd): config=load_config(file_config_files.cfg), commit=autofix_lib.Commit('message!', 'test-branch', None), autofix_settings=autofix_lib.AutofixSettings( - jobs=1, color=False, limit=1, dry_run=True, + jobs=1, color=False, limit=1, dry_run=True, interactive=False, ), ) @@ -148,6 +264,25 @@ def test_fix_with_limit(file_config_files, capfd): assert '-OHELLO\n+ohello\n' not in out +def test_fix_interactive(file_config_files, capfd, mock_input): + mock_input.side_effect = _get_input_side_effect('y', 'n') + autofix_lib.fix( + ( + str(file_config_files.output_dir.join('repo1')), + str(file_config_files.output_dir.join('repo2')), + ), + apply_fix=lower_case_f, + config=load_config(file_config_files.cfg), + commit=autofix_lib.Commit('message!', 'test-branch', None), + autofix_settings=autofix_lib.AutofixSettings( + jobs=1, color=False, limit=None, dry_run=False, interactive=True, + ), + ) + + assert file_config_files.dir1.join('f').read() == 'ohai\n' + assert file_config_files.dir2.join('f').read() == 'OHELLO\n' + + def test_autofix_makes_commits(file_config_files, capfd): autofix_lib.fix( ( @@ -158,7 +293,7 @@ def test_autofix_makes_commits(file_config_files, capfd): config=load_config(file_config_files.cfg), commit=autofix_lib.Commit('message!', 'test-branch', 'A B <[email protected]>'), autofix_settings=autofix_lib.AutofixSettings( - jobs=1, color=False, limit=None, dry_run=False, + jobs=1, color=False, limit=None, dry_run=False, interactive=False, ), ) @@ -201,7 +336,7 @@ 
def test_fix_failing_check_no_changes(file_config_files, capfd): config=load_config(file_config_files.cfg), commit=autofix_lib.Commit('message!', 'test-branch', None), autofix_settings=autofix_lib.AutofixSettings( - jobs=1, color=False, limit=None, dry_run=False, + jobs=1, color=False, limit=None, dry_run=False, interactive=False, ), ) @@ -226,7 +361,7 @@ def test_noop_does_not_commit(file_config_files, capfd): config=load_config(file_config_files.cfg), commit=autofix_lib.Commit('message!', 'test-branch', None), autofix_settings=autofix_lib.AutofixSettings( - jobs=1, color=False, limit=None, dry_run=False, + jobs=1, color=False, limit=None, dry_run=False, interactive=False, ), ) rev_after1 = testing.git.revparse(file_config_files.dir1)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 3, "test_score": 2 }, "num_modified_files": 3 }
0.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-env" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements-dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
-e git+https://github.com/asottile/all-repos.git@5c980cde22d85a79eec64f25fb0e31ffa0990c16#egg=all_repos certifi==2025.1.31 cfgv==3.4.0 charset-normalizer==3.4.1 coverage==7.8.0 distlib==0.3.9 exceptiongroup==1.2.2 filelock==3.18.0 flake8==7.2.0 identify==2.6.9 idna==3.10 iniconfig==2.1.0 mccabe==0.7.0 nodeenv==1.9.1 packaging==24.2 platformdirs==4.3.7 pluggy==1.5.0 pre_commit==4.2.0 pycodestyle==2.13.0 pyflakes==3.3.2 pytest==8.3.5 pytest-env==1.1.5 PyYAML==6.0.2 requests==2.32.3 tomli==2.2.1 urllib3==2.3.0 virtualenv==20.29.3
name: all-repos channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - certifi==2025.1.31 - cfgv==3.4.0 - charset-normalizer==3.4.1 - coverage==7.8.0 - distlib==0.3.9 - exceptiongroup==1.2.2 - filelock==3.18.0 - flake8==7.2.0 - identify==2.6.9 - idna==3.10 - iniconfig==2.1.0 - mccabe==0.7.0 - nodeenv==1.9.1 - packaging==24.2 - platformdirs==4.3.7 - pluggy==1.5.0 - pre-commit==4.2.0 - pycodestyle==2.13.0 - pyflakes==3.3.2 - pytest==8.3.5 - pytest-env==1.1.5 - pyyaml==6.0.2 - requests==2.32.3 - tomli==2.2.1 - urllib3==2.3.0 - virtualenv==20.29.3 prefix: /opt/conda/envs/all-repos
[ "tests/autofix_lib_test.py::test_interactive_control_c", "tests/autofix_lib_test.py::test_interactive_eof", "tests/autofix_lib_test.py::test_interactive_quit", "tests/autofix_lib_test.py::test_interactive_yes", "tests/autofix_lib_test.py::test_interactive_no", "tests/autofix_lib_test.py::test_interactive_shell", "tests/autofix_lib_test.py::test_interactive_help", "tests/autofix_lib_test.py::test_interactive_garbage", "tests/autofix_lib_test.py::test_fix_dry_run_no_change", "tests/autofix_lib_test.py::test_fix_with_limit", "tests/autofix_lib_test.py::test_fix_interactive", "tests/autofix_lib_test.py::test_autofix_makes_commits", "tests/autofix_lib_test.py::test_fix_failing_check_no_changes", "tests/autofix_lib_test.py::test_noop_does_not_commit" ]
[]
[ "tests/autofix_lib_test.py::test_filter_repos[None-expected0]", "tests/autofix_lib_test.py::test_filter_repos[cli_repos1-expected1]", "tests/autofix_lib_test.py::test_filter_repos[cli_repos2-expected2]", "tests/autofix_lib_test.py::test_assert_importable_is_importable", "tests/autofix_lib_test.py::test_assert_importable_not_importable", "tests/autofix_lib_test.py::test_require_version_new_enough", "tests/autofix_lib_test.py::test_require_version_not_new_enough", "tests/autofix_lib_test.py::test_run", "tests/autofix_lib_test.py::test_cwd", "tests/autofix_lib_test.py::test_repo_context_success", "tests/autofix_lib_test.py::test_repo_context_errors" ]
[]
MIT License
1,756
[ "all_repos/color.py", "all_repos/cli.py", "all_repos/autofix_lib.py" ]
[ "all_repos/color.py", "all_repos/cli.py", "all_repos/autofix_lib.py" ]
Duke-GCB__DukeDSClient-179
72ec820e45dd44ba14ff86476c71f2dc40f2088e
2017-10-13 13:55:59
bffebebd86d09f5924461959401ef3698b4e47d5
diff --git a/ddsc/cmdparser.py b/ddsc/cmdparser.py index dd3c1bf..8cc145e 100644 --- a/ddsc/cmdparser.py +++ b/ddsc/cmdparser.py @@ -6,7 +6,7 @@ import argparse import six from builtins import str - +DESCRIPTION_STR = "DukeDSClient ({}) Manage projects/folders/files in the duke-data-service" INVALID_PATH_CHARS = (':', '/', '\\') @@ -321,8 +321,8 @@ class CommandParser(object): You must register external functions to called for the various commands. Commands must be registered to appear in help. """ - def __init__(self): - self.parser = argparse.ArgumentParser() + def __init__(self, version_str): + self.parser = argparse.ArgumentParser(description=DESCRIPTION_STR.format(version_str)) _skip_config_file_permission_check(self.parser) self.subparsers = self.parser.add_subparsers() self.upload_func = None diff --git a/ddsc/ddsclient.py b/ddsc/ddsclient.py index cc80a70..2015f17 100644 --- a/ddsc/ddsclient.py +++ b/ddsc/ddsclient.py @@ -11,7 +11,7 @@ from ddsc.cmdparser import CommandParser, path_does_not_exist_or_is_empty, repla from ddsc.core.download import ProjectDownload from ddsc.core.util import ProjectDetailsList, verify_terminal_encoding from ddsc.core.pathfilter import PathFilter -from ddsc.versioncheck import check_version, VersionException +from ddsc.versioncheck import check_version, VersionException, get_internal_version_str from ddsc.config import create_config NO_PROJECTS_FOUND_MESSAGE = 'No projects found.' @@ -38,7 +38,7 @@ class DDSClient(object): Create a parser hooking up the command methods below to be run when chosen. :return: CommandParser parser with commands attached. """ - parser = CommandParser() + parser = CommandParser(get_internal_version_str()) parser.register_list_command(self._setup_run_command(ListCommand)) parser.register_upload_command(self._setup_run_command(UploadCommand)) parser.register_add_user_command(self._setup_run_command(AddUserCommand))
Show the ddsclient version in help or with a --version argument When running ddsclient, it will print a message about a newer version, but I can't find any command-line option to print the current version. Obviously I can `pip freeze` but the tool should be able to report its own version
Duke-GCB/DukeDSClient
diff --git a/ddsc/tests/test_cmdparser.py b/ddsc/tests/test_cmdparser.py index 669b140..a3b2147 100644 --- a/ddsc/tests/test_cmdparser.py +++ b/ddsc/tests/test_cmdparser.py @@ -16,7 +16,7 @@ class TestCommandParser(TestCase): self.parsed_args = args def test_register_add_user_command_project_name(self): - command_parser = CommandParser() + command_parser = CommandParser(version_str='1.0') command_parser.register_add_user_command(self.set_parsed_args) self.assertEqual(['add-user'], list(command_parser.subparsers.choices.keys())) command_parser.run_command(['add-user', '-p', 'myproj', '--user', 'joe123']) @@ -24,7 +24,7 @@ class TestCommandParser(TestCase): self.assertEqual(None, self.parsed_args.project_id) def test_register_add_user_command_project_id(self): - command_parser = CommandParser() + command_parser = CommandParser(version_str='1.0') command_parser.register_add_user_command(self.set_parsed_args) self.assertEqual(['add-user'], list(command_parser.subparsers.choices.keys())) command_parser.run_command(['add-user', '-i', '123', '--user', 'joe123']) @@ -32,7 +32,7 @@ class TestCommandParser(TestCase): self.assertEqual('123', self.parsed_args.project_id) def test_register_remove_user_command_project_name(self): - command_parser = CommandParser() + command_parser = CommandParser(version_str='1.0') command_parser.register_remove_user_command(self.set_parsed_args) self.assertEqual(['remove-user'], list(command_parser.subparsers.choices.keys())) command_parser.run_command(['remove-user', '-p', 'myproj', '--user', 'joe123']) @@ -40,7 +40,7 @@ class TestCommandParser(TestCase): self.assertEqual(None, self.parsed_args.project_id) def test_register_remove_user_command_project_id(self): - command_parser = CommandParser() + command_parser = CommandParser(version_str='1.0') command_parser.register_remove_user_command(self.set_parsed_args) self.assertEqual(['remove-user'], list(command_parser.subparsers.choices.keys())) command_parser.run_command(['remove-user', '-i', 
'456', '--user', 'joe123']) @@ -48,7 +48,7 @@ class TestCommandParser(TestCase): self.assertEqual('456', self.parsed_args.project_id) def test_deliver_no_msg(self): - command_parser = CommandParser() + command_parser = CommandParser(version_str='1.0') command_parser.register_deliver_command(self.set_parsed_args) self.assertEqual(['deliver'], list(command_parser.subparsers.choices.keys())) command_parser.run_command(['deliver', '-p', 'someproject', '--user', 'joe123']) @@ -57,7 +57,7 @@ class TestCommandParser(TestCase): self.assertEqual(None, self.parsed_args.project_id) def test_deliver_with_msg(self): - command_parser = CommandParser() + command_parser = CommandParser(version_str='1.0') command_parser.register_deliver_command(self.set_parsed_args) self.assertEqual(['deliver'], list(command_parser.subparsers.choices.keys())) command_parser.run_command(['deliver', '-i', '123', '--user', 'joe123', '--msg-file', 'setup.py']) @@ -66,7 +66,7 @@ class TestCommandParser(TestCase): self.assertEqual('123', self.parsed_args.project_id) def test_share_no_msg(self): - command_parser = CommandParser() + command_parser = CommandParser(version_str='1.0') command_parser.register_share_command(self.set_parsed_args) self.assertEqual(['share'], list(command_parser.subparsers.choices.keys())) command_parser.run_command(['share', '-p', 'someproject2', '--user', 'joe123']) @@ -75,7 +75,7 @@ class TestCommandParser(TestCase): self.assertEqual(None, self.parsed_args.project_id) def test_share_with_msg(self): - command_parser = CommandParser() + command_parser = CommandParser(version_str='1.0') command_parser.register_share_command(self.set_parsed_args) self.assertEqual(['share'], list(command_parser.subparsers.choices.keys())) command_parser.run_command(['share', '-i', '456', '--user', 'joe123', '--msg-file', 'setup.py']) @@ -85,7 +85,7 @@ class TestCommandParser(TestCase): def test_list_command(self): func = Mock() - command_parser = CommandParser() + command_parser = 
CommandParser(version_str='1.0') command_parser.register_list_command(func) self.assertEqual(['list'], list(command_parser.subparsers.choices.keys())) @@ -113,7 +113,7 @@ class TestCommandParser(TestCase): self.assertEqual(args[0].project_name, None) def test_list_command_long(self): - command_parser = CommandParser() + command_parser = CommandParser(version_str='1.0') command_parser.register_list_command(self.set_parsed_args) command_parser.run_command(['list']) self.assertEqual(False, self.parsed_args.long_format) @@ -144,3 +144,8 @@ class TestCommandParser(TestCase): self.assertEqual(True, self.parsed_args.long_format) self.assertEqual('mouse', self.parsed_args.project_name) self.assertEqual(None, self.parsed_args.project_id) + + def test_description(self): + expected_description = 'DukeDSClient (1.0) Manage projects/folders/files in the duke-data-service' + command_parser = CommandParser(version_str='1.0') + self.assertEqual(expected_description, command_parser.parser.description)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 2 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "mock", "flake8", "nose", "pytest" ], "pre_install": null, "python": "3.5", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 -e git+https://github.com/Duke-GCB/DukeDSClient.git@72ec820e45dd44ba14ff86476c71f2dc40f2088e#egg=DukeDSClient flake8==5.0.4 future==0.16.0 importlib-metadata==4.2.0 iniconfig==1.1.1 mccabe==0.7.0 mock==5.2.0 nose==1.3.7 packaging==21.3 pluggy==1.0.0 py==1.11.0 pycodestyle==2.9.1 pyflakes==2.5.0 pyparsing==3.1.4 pytest==7.0.1 pytz==2025.2 PyYAML==3.12 requests==2.13.0 six==1.10.0 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: DukeDSClient channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - flake8==5.0.4 - future==0.16.0 - importlib-metadata==4.2.0 - iniconfig==1.1.1 - mccabe==0.7.0 - mock==5.2.0 - nose==1.3.7 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pycodestyle==2.9.1 - pyflakes==2.5.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytz==2025.2 - pyyaml==3.12 - requests==2.13.0 - six==1.10.0 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/DukeDSClient
[ "ddsc/tests/test_cmdparser.py::TestCommandParser::test_deliver_no_msg", "ddsc/tests/test_cmdparser.py::TestCommandParser::test_deliver_with_msg", "ddsc/tests/test_cmdparser.py::TestCommandParser::test_description", "ddsc/tests/test_cmdparser.py::TestCommandParser::test_list_command", "ddsc/tests/test_cmdparser.py::TestCommandParser::test_list_command_long", "ddsc/tests/test_cmdparser.py::TestCommandParser::test_register_add_user_command_project_id", "ddsc/tests/test_cmdparser.py::TestCommandParser::test_register_add_user_command_project_name", "ddsc/tests/test_cmdparser.py::TestCommandParser::test_register_remove_user_command_project_id", "ddsc/tests/test_cmdparser.py::TestCommandParser::test_register_remove_user_command_project_name", "ddsc/tests/test_cmdparser.py::TestCommandParser::test_share_no_msg", "ddsc/tests/test_cmdparser.py::TestCommandParser::test_share_with_msg" ]
[]
[]
[]
MIT License
1,757
[ "ddsc/cmdparser.py", "ddsc/ddsclient.py" ]
[ "ddsc/cmdparser.py", "ddsc/ddsclient.py" ]
pysmt__pysmt-445
35662f00367f07d398255e3f3d6ff5eb32c3f2a8
2017-10-13 14:54:10
b0324e68bee72c862db9620206a34f2e20c38160
diff --git a/pysmt/fnode.py b/pysmt/fnode.py index 9649768..a56693e 100644 --- a/pysmt/fnode.py +++ b/pysmt/fnode.py @@ -650,27 +650,33 @@ class FNode(object): # Infix Notation @assert_infix_enabled def _apply_infix(self, right, function, bv_function=None): - mgr = _mgr() - # BVs # Default bv_function to function - if bv_function is None: bv_function = function + if bv_function is None: + bv_function = function + right = self._infix_prepare_arg(right, self.get_type()) if self.get_type().is_bv_type(): - if is_python_integer(right): - right = mgr.BV(right, width=self.bv_width()) return bv_function(self, right) - # Boolean, Integer and Arithmetic - if is_python_boolean(right): - right = mgr.Bool(right) - elif is_python_integer(right): - ty = self.get_type() - if ty.is_real_type(): - right = mgr.Real(right) - else: - right = mgr.Int(right) - elif is_python_rational(right): - right = mgr.Real(right) return function(self, right) + @assert_infix_enabled + def _infix_prepare_arg(self, arg, expected_type): + mgr = _mgr() + if isinstance(arg, FNode): + return arg + + # BVs + if expected_type.is_bv_type(): + return mgr.BV(arg, width=expected_type.width) + # Boolean, Integer and Arithmetic + elif expected_type.is_bool_type(): + return mgr.Bool(arg) + elif expected_type.is_int_type(): + return mgr.Int(arg) + elif expected_type.is_real_type(): + return mgr.Real(arg) + else: + raise PysmtValueError("Unsupported value '%s' in infix operator" % str(arg)) + def Implies(self, right): return self._apply_infix(right, _mgr().Implies) @@ -854,6 +860,20 @@ class FNode(object): def __mod__(self, right): return self._apply_infix(right, None, bv_function=_mgr().BVURem) + + @assert_infix_enabled + def __call__(self, *args): + if self.is_symbol() and self.symbol_type().is_function_type(): + types = self.symbol_type().param_types + if (len(types) != len(args)): + raise PysmtValueError("Wrong number of parameters passed in " + "infix 'call' operator") + args = [self._infix_prepare_arg(x, t) for 
x,t in zip(args, types)] + return _mgr().Function(self, args) + else: + raise PysmtValueError("Call operator can be applied to symbol " + "types having function type only") + # EOC FNode
Overload infix notation for EUF The only theory that does not currently support of infix notation is UF. This issue is to discuss possible problems and syntax issues related to extending UF with infix notation. In particular, a possible idea is to use a syntax like ``` f(3) #if f is a function symbol, this should return the application of 3 to f ``` It is unclear how much interest there is in this. Also, a few examples would be useful to better understand the implications of this.
pysmt/pysmt
diff --git a/pysmt/test/test_euf.py b/pysmt/test/test_euf.py index da17970..0ff1ca6 100644 --- a/pysmt/test/test_euf.py +++ b/pysmt/test/test_euf.py @@ -16,10 +16,11 @@ # limitations under the License. # from pysmt.shortcuts import * -from pysmt.typing import INT, REAL, FunctionType +from pysmt.typing import INT, REAL, FunctionType, BV16 from pysmt.logics import UFLRA, UFLIRA from pysmt.test import TestCase, main -from pysmt.test import skipIfSolverNotAvailable, skipIfNoSolverForLogic +from pysmt.test import skipIfNoSolverForLogic +from pysmt.exceptions import PysmtModeError, PysmtValueError class TestEUF(TestCase): @@ -37,6 +38,38 @@ class TestEUF(TestCase): self.assertSat(check, logic=UFLIRA, msg="Formula was expected to be sat") + + def test_infix(self): + ftype1 = FunctionType(REAL, [REAL]) + ftype2 = FunctionType(REAL, [REAL, INT]) + f = Symbol("f", ftype1) + g = Symbol("g", ftype2) + + with self.assertRaises(PysmtModeError): + f(1.0) + + get_env().enable_infix_notation = True + + infix = Equals(f(1.0), g(2.0, 4)) + explicit = Equals(Function(f, [Real(1.0)]), Function(g, [Real(2.0), Int(4)])) + self.assertEqual(infix, explicit) + + ftype1 = FunctionType(REAL, [BV16]) + ftype2 = FunctionType(BV16, [INT, BV16]) + f = Symbol("bvf", ftype1) + g = Symbol("bvg", ftype2) + infix = Equals(f(g(2, 6)), Real(0)) + explicit = Equals(Function(f, [Function(g, [Int(2), BV(6, 16)])]), Real(0)) + self.assertEqual(infix, explicit) + + with self.assertRaises(PysmtValueError): + f(BV(6, 16), BV(8, 16)) + + ftype3 = FunctionType(REAL, []) + h = Symbol("h", ftype3) + with self.assertRaises(PysmtValueError): + h() + @skipIfNoSolverForLogic(UFLRA) def test_quantified_euf(self): ftype1 = FunctionType(REAL, [REAL, REAL])
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 3, "test_score": 0 }, "num_modified_files": 1 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "dev-requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
exceptiongroup==1.2.2 iniconfig==2.1.0 nose==1.3.7 packaging==24.2 pluggy==1.5.0 -e git+https://github.com/pysmt/pysmt.git@35662f00367f07d398255e3f3d6ff5eb32c3f2a8#egg=PySMT pytest==8.3.5 six==1.17.0 tomli==2.2.1
name: pysmt channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - exceptiongroup==1.2.2 - iniconfig==2.1.0 - nose==1.3.7 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - six==1.17.0 - tomli==2.2.1 prefix: /opt/conda/envs/pysmt
[ "pysmt/test/test_euf.py::TestEUF::test_infix" ]
[]
[ "pysmt/test/test_euf.py::TestEUF::test_simplify" ]
[]
Apache License 2.0
1,758
[ "pysmt/fnode.py" ]
[ "pysmt/fnode.py" ]
pynamodb__PynamoDB-374
c4d89e95bc747173651d5a529992a2ca8f03bddb
2017-10-13 20:51:49
1828bda52376a4b0313146b64ffb447e5392f467
diff --git a/pynamodb/models.py b/pynamodb/models.py index 2425106..1e539de 100644 --- a/pynamodb/models.py +++ b/pynamodb/models.py @@ -344,7 +344,6 @@ class Model(AttributeContainer): attribute_cls = None for attr_name, attr_cls in self._get_attributes().items(): if attr_name == attribute: - value = attr_cls.serialize(value) attribute_cls = attr_cls break if not attribute_cls: @@ -360,8 +359,10 @@ class Model(AttributeContainer): ACTION: action.upper() if action else None, } } - if action is not None and action.upper() != DELETE: - kwargs[pythonic(ATTR_UPDATES)][attribute_cls.attr_name][VALUE] = {ATTR_TYPE_MAP[attribute_cls.attr_type]: value} + if value is not None: + kwargs[pythonic(ATTR_UPDATES)][attribute_cls.attr_name][VALUE] = { + ATTR_TYPE_MAP[attribute_cls.attr_type]: attribute_cls.serialize(value) + } kwargs[pythonic(RETURN_VALUES)] = ALL_NEW kwargs.update(conditional_operator=conditional_operator) kwargs.update(condition=condition) @@ -415,7 +416,7 @@ class Model(AttributeContainer): attribute_cls = attrs[attr] action = params['action'] and params['action'].upper() attr_values = {ACTION: action} - if action != DELETE: + if 'value' in params: attr_values[VALUE] = self._serialize_value(attribute_cls, params['value']) kwargs[pythonic(ATTR_UPDATES)][attribute_cls.attr_name] = attr_values
update_item delete item from set fails Here is a simple test case that attempts to remove an value from a UnicodeSet attribute: ``` python from pynamodb.models import Model from pynamodb.attributes import NumberAttribute, UnicodeSetAttribute class UserModel(Model): class Meta: table_name = 'User' region = 'us-west-2' read_capacity_units = 1 write_capacity_units = 1 host = 'http://localhost:8000' id = NumberAttribute(hash_key=True) nicknames = UnicodeSetAttribute(null=True) UserModel.create_table(wait=True) try: original_nicknames = {'name1', 'name2', 'name3'} nicknames_to_remove = {'name2'} expected_result_nicknames = original_nicknames - nicknames_to_remove user = UserModel(id=1, nicknames=original_nicknames) user.save() user = UserModel.get(1) print('original nicknames:', user.nicknames) assert original_nicknames == user.nicknames print('nicknames to remove:', nicknames_to_remove) user.update_item('nicknames', value=nicknames_to_remove, action='delete') user = UserModel.get(1) print('expected result nicknames:', expected_result_nicknames) print(' actual result nicknames:', user.nicknames) print() assert expected_result_nicknames == user.nicknames finally: UserModel.delete_table() ``` When running this test case, the output is: ``` original nicknames: {'name2', 'name3', 'name1'} nicknames to remove: {'name2'} expected result nicknames: {'name3', 'name1'} actual result nicknames: None Traceback (most recent call last): File "mytest.py", line 34, in <module> assert expected_result_nicknames == user.nicknames AssertionError ``` I expected 'name2' to be removed from the `{'name1', 'name2', 'name3'}` set, but instead all the values were removed. This bug exists since 1.4.4 (1.4.3 works fine), and is still present in 1.6.0. 
The associated pull request has been updated to be be based on the post 1.6.0 devel branch, and it includes a unit test that fails with only the 'pynamodb/tests' commits applied but succeeds with the changes to connection/base.py and models.py (for update_item of connection and model, respectively).
pynamodb/PynamoDB
diff --git a/pynamodb/tests/test_model.py b/pynamodb/tests/test_model.py index 571c1e3..2688fb4 100644 --- a/pynamodb/tests/test_model.py +++ b/pynamodb/tests/test_model.py @@ -1080,6 +1080,44 @@ class ModelTestCase(TestCase): assert item.views is None self.assertEquals(set(['bob']), item.custom_aliases) + # Reproduces https://github.com/pynamodb/PynamoDB/issues/132 + with patch(PATCH_METHOD) as req: + req.return_value = { + ATTRIBUTES: { + "aliases": { + "SS": set(["alias1", "alias3"]) + } + } + } + item.update({ + 'custom_aliases': {'value': set(['alias2']), 'action': 'delete'}, + }) + + args = req.call_args[0][1] + params = { + 'TableName': 'SimpleModel', + 'ReturnValues': 'ALL_NEW', + 'Key': { + 'user_name': { + 'S': 'foo' + } + }, + 'UpdateExpression': 'DELETE #0 :0', + 'ExpressionAttributeNames': { + '#0': 'aliases' + }, + 'ExpressionAttributeValues': { + ':0': { + 'SS': set(['alias2']) + } + }, + 'ReturnConsumedCapacity': 'TOTAL' + } + deep_eq(args, params, _assert=True) + + assert item.views is None + self.assertEquals(set(['alias1', 'alias3']), item.custom_aliases) + def test_update_item(self): """ Model.update_item @@ -1577,6 +1615,38 @@ class ModelTestCase(TestCase): } deep_eq(args, params, _assert=True) + # Reproduces https://github.com/pynamodb/PynamoDB/issues/132 + with patch(PATCH_METHOD) as req: + req.return_value = { + ATTRIBUTES: { + "aliases": { + "SS": set(["alias1", "alias3"]) + } + } + } + item.update_item('custom_aliases', set(['alias2']), action='delete') + args = req.call_args[0][1] + params = { + 'TableName': 'SimpleModel', + 'ReturnValues': 'ALL_NEW', + 'Key': { + 'user_name': { + 'S': 'foo' + } + }, + 'UpdateExpression': 'DELETE #0 :0', + 'ExpressionAttributeNames': { + '#0': 'aliases' + }, + 'ExpressionAttributeValues': { + ':0': { + 'SS': set(['alias2']) + } + }, + 'ReturnConsumedCapacity': 'TOTAL' + } + deep_eq(args, params, _assert=True) + self.assertEqual(set(["alias1", "alias3"]), item.custom_aliases) def test_save(self): """
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 1 }
3.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 botocore==1.2.0 certifi==2021.5.30 docutils==0.18.1 importlib-metadata==4.8.3 iniconfig==1.1.1 jmespath==0.7.1 packaging==21.3 pluggy==1.0.0 py==1.11.0 -e git+https://github.com/pynamodb/PynamoDB.git@c4d89e95bc747173651d5a529992a2ca8f03bddb#egg=pynamodb pyparsing==3.1.4 pytest==7.0.1 python-dateutil==2.9.0.post0 six==1.9.0 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: PynamoDB channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - botocore==1.2.0 - docutils==0.18.1 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - jmespath==0.7.1 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - python-dateutil==2.9.0.post0 - six==1.9.0 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/PynamoDB
[ "pynamodb/tests/test_model.py::ModelTestCase::test_update", "pynamodb/tests/test_model.py::ModelTestCase::test_update_item" ]
[]
[ "pynamodb/tests/test_model.py::ModelTestCase::test_batch_get", "pynamodb/tests/test_model.py::ModelTestCase::test_batch_write", "pynamodb/tests/test_model.py::ModelTestCase::test_batch_write_with_unprocessed", "pynamodb/tests/test_model.py::ModelTestCase::test_car_model_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_car_model_with_null_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_complex_key", "pynamodb/tests/test_model.py::ModelTestCase::test_complex_model_is_complex", "pynamodb/tests/test_model.py::ModelTestCase::test_complex_model_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_conditional_operator_map_attribute", "pynamodb/tests/test_model.py::ModelTestCase::test_count", "pynamodb/tests/test_model.py::ModelTestCase::test_count_no_hash_key", "pynamodb/tests/test_model.py::ModelTestCase::test_create_model", "pynamodb/tests/test_model.py::ModelTestCase::test_delete", "pynamodb/tests/test_model.py::ModelTestCase::test_delete_doesnt_do_validation_on_null_attributes", "pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_map_four_layers_deep_works", "pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_new_style_bool_false_works", "pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_new_style_bool_true_works", "pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_old_style_bool_false_works", "pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_old_style_bool_true_works", "pynamodb/tests/test_model.py::ModelTestCase::test_dumps", "pynamodb/tests/test_model.py::ModelTestCase::test_explicit_raw_map_serialize_pass", "pynamodb/tests/test_model.py::ModelTestCase::test_filter_count", "pynamodb/tests/test_model.py::ModelTestCase::test_get", "pynamodb/tests/test_model.py::ModelTestCase::test_global_index", "pynamodb/tests/test_model.py::ModelTestCase::test_index_count", "pynamodb/tests/test_model.py::ModelTestCase::test_index_multipage_count", 
"pynamodb/tests/test_model.py::ModelTestCase::test_index_queries", "pynamodb/tests/test_model.py::ModelTestCase::test_invalid_car_model_with_null_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_invalid_map_model_raises", "pynamodb/tests/test_model.py::ModelTestCase::test_list_of_map_works_like_list_of_map", "pynamodb/tests/test_model.py::ModelTestCase::test_list_works_like_list", "pynamodb/tests/test_model.py::ModelTestCase::test_loads", "pynamodb/tests/test_model.py::ModelTestCase::test_local_index", "pynamodb/tests/test_model.py::ModelTestCase::test_model_attrs", "pynamodb/tests/test_model.py::ModelTestCase::test_model_subclass_attributes_inherited_on_create", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_invalid_data_does_not_validate", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_list", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_list_of_map", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_list_of_map_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_list_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_maps", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_maps_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_maps_with_nulls_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_maps_with_pythonic_attributes", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_nulls_validates", "pynamodb/tests/test_model.py::ModelTestCase::test_model_works_like_model", "pynamodb/tests/test_model.py::ModelTestCase::test_multiple_indices_share_non_key_attribute", "pynamodb/tests/test_model.py::ModelTestCase::test_new_style_boolean_serializes_as_bool", "pynamodb/tests/test_model.py::ModelTestCase::test_old_style_boolean_serializes_as_bool", "pynamodb/tests/test_model.py::ModelTestCase::test_old_style_model_exception", 
"pynamodb/tests/test_model.py::ModelTestCase::test_overidden_defaults", "pynamodb/tests/test_model.py::ModelTestCase::test_overidden_session", "pynamodb/tests/test_model.py::ModelTestCase::test_overridden_attr_name", "pynamodb/tests/test_model.py::ModelTestCase::test_projections", "pynamodb/tests/test_model.py::ModelTestCase::test_query", "pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_greater_than_available_items_and_page_size", "pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_greater_than_available_items_multiple_page", "pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_greater_than_available_items_single_page", "pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_identical_to_available_items_single_page", "pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_less_than_available_and_page_size", "pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_less_than_available_items_multiple_page", "pynamodb/tests/test_model.py::ModelTestCase::test_rate_limited_scan", "pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_as_sub_map", "pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_as_sub_map_deserialize", "pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_as_sub_map_from_raw_data_works", "pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_as_sub_map_serialize_pass", "pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_deserializes", "pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_from_raw_data_works", "pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_serialize_fun_one", "pynamodb/tests/test_model.py::ModelTestCase::test_refresh", "pynamodb/tests/test_model.py::ModelTestCase::test_result_set_init", "pynamodb/tests/test_model.py::ModelTestCase::test_result_set_iter", "pynamodb/tests/test_model.py::ModelTestCase::test_save", "pynamodb/tests/test_model.py::ModelTestCase::test_scan", "pynamodb/tests/test_model.py::ModelTestCase::test_scan_limit", 
"pynamodb/tests/test_model.py::ModelTestCase::test_scan_limit_with_page_size", "pynamodb/tests/test_model.py::ModelTestCase::test_throttle", "pynamodb/tests/test_model.py::ModelInitTestCase::test_raw_map_attribute_with_dict_init", "pynamodb/tests/test_model.py::ModelInitTestCase::test_raw_map_attribute_with_initialized_instance_init", "pynamodb/tests/test_model.py::ModelInitTestCase::test_subclassed_map_attribute_with_dict_init", "pynamodb/tests/test_model.py::ModelInitTestCase::test_subclassed_map_attribute_with_initialized_instance_init", "pynamodb/tests/test_model.py::ModelInitTestCase::test_subclassed_map_attribute_with_map_attribute_member_with_initialized_instance_init", "pynamodb/tests/test_model.py::ModelInitTestCase::test_subclassed_map_attribute_with_map_attributes_member_with_dict_init" ]
[]
MIT License
1,759
[ "pynamodb/models.py" ]
[ "pynamodb/models.py" ]
dask__dask-2781
9e9fa10ef11bfe86a52214d8a2cda19508a4ee37
2017-10-13 22:41:50
c560965c8fc0da7cbc0920d43b7011d2721307d3
diff --git a/dask/array/core.py b/dask/array/core.py index 4a28b01fe..50c066574 100644 --- a/dask/array/core.py +++ b/dask/array/core.py @@ -2497,8 +2497,8 @@ def broadcast_shapes(*shapes): return shapes[0] out = [] for sizes in zip_longest(*map(reversed, shapes), fillvalue=-1): - dim = max(sizes) - if any(i != -1 and i != 1 and i != dim and not np.isnan(i) for i in sizes): + dim = 0 if 0 in sizes else max(sizes) + if any(i not in [-1, 0, 1, dim] and not np.isnan(i) for i in sizes): raise ValueError("operands could not be broadcast together with " "shapes {0}".format(' '.join(map(str, shapes)))) out.append(dim) diff --git a/dask/array/random.py b/dask/array/random.py index ad5ef1954..7ce9d2549 100644 --- a/dask/array/random.py +++ b/dask/array/random.py @@ -1,11 +1,12 @@ from __future__ import absolute_import, division, print_function from itertools import product +from numbers import Integral from operator import getitem import numpy as np -from .core import (normalize_chunks, Array, slices_from_chunks, +from .core import (normalize_chunks, Array, slices_from_chunks, asarray, broadcast_shapes, broadcast_to) from .. 
import sharedict from ..base import tokenize @@ -170,8 +171,62 @@ class RandomState(object): with ignoring(AttributeError): @doc_wraps(np.random.RandomState.choice) def choice(self, a, size=None, replace=True, p=None, chunks=None): - return self._wrap(np.random.RandomState.choice, a, - size=size, replace=True, p=None, chunks=chunks) + dsks = [] + # Normalize and validate `a` + if isinstance(a, Integral): + # On windows the output dtype differs if p is provided or + # absent, see https://github.com/numpy/numpy/issues/9867 + dummy_p = np.array([1]) if p is not None else p + dtype = np.random.choice(1, size=(), p=dummy_p).dtype + len_a = a + if a < 0: + raise ValueError("a must be greater than 0") + else: + a = asarray(a).rechunk(a.shape) + dtype = a.dtype + if a.ndim != 1: + raise ValueError("a must be one dimensional") + len_a = len(a) + dsks.append(a.dask) + a = a._keys()[0] + + # Normalize and validate `p` + if p is not None: + if not isinstance(p, Array): + # If p is not a dask array, first check the sum is close + # to 1 before converting. 
+ p = np.asarray(p) + if not np.isclose(p.sum(), 1, rtol=1e-7, atol=0): + raise ValueError("probabilities do not sum to 1") + p = asarray(p) + else: + p = p.rechunk(p.shape) + + if p.ndim != 1: + raise ValueError("p must be one dimensional") + if len(p) != len_a: + raise ValueError("a and p must have the same size") + + dsks.append(p.dask) + p = p._keys()[0] + + if size is None: + size = () + elif not isinstance(size, (tuple, list)): + size = (size,) + + chunks = normalize_chunks(chunks, size) + sizes = list(product(*chunks)) + state_data = random_state_data(len(sizes), self._numpy_state) + + name = 'da.random.choice-%s' % tokenize(state_data, size, chunks, + a, replace, p) + keys = product([name], *(range(len(bd)) for bd in chunks)) + dsk = {k: (_choice, state, a, size, replace, p) for + k, state, size in zip(keys, state_data, sizes)} + + return Array(sharedict.merge((name, dsk), *dsks), + name, chunks, dtype=dtype) # @doc_wraps(np.random.RandomState.dirichlet) # def dirichlet(self, alpha, size=None, chunks=None): @@ -352,6 +407,11 @@ class RandomState(object): size=size, chunks=chunks) +def _choice(state_data, a, size, replace, p): + state = np.random.RandomState(state_data) + return state.choice(a, size=size, replace=replace, p=p) + + def _apply_random(func, state_data, size, args, kwargs): """Apply RandomState method with seed""" state = np.random.RandomState(state_data) @@ -368,6 +428,8 @@ seed = _state.seed beta = _state.beta binomial = _state.binomial chisquare = _state.chisquare +if hasattr(_state, 'choice'): + choice = _state.choice exponential = _state.exponential f = _state.f gamma = _state.gamma diff --git a/dask/array/routines.py b/dask/array/routines.py index ed1ed2482..edb76bfcb 100644 --- a/dask/array/routines.py +++ b/dask/array/routines.py @@ -14,7 +14,7 @@ from toolz import concat, sliding_window, interleave from .. 
import sharedict from ..core import flatten -from ..base import tokenize, compute_as_if_collection +from ..base import tokenize from . import numpy_compat, chunk from .core import (Array, map_blocks, elemwise, from_array, asarray, @@ -530,11 +530,18 @@ def round(a, decimals=0): @wraps(np.unique) def unique(x): - name = 'unique-' + x.name - dsk = {(name, i): (np.unique, key) for i, key in enumerate(x.__dask_keys__())} - parts = compute_as_if_collection(Array, sharedict.merge((name, dsk), x.dask), - list(dsk.keys())) - return np.unique(np.concatenate(parts)) + x = x.ravel() + + out = atop(np.unique, "i", x, "i", dtype=x.dtype) + out._chunks = tuple((np.nan,) * len(c) for c in out.chunks) + + name = 'unique-aggregate-' + out.name + dsk = {(name, 0): (np.unique, (np.concatenate, out._keys()))} + out = Array( + sharedict.merge((name, dsk), out.dask), name, ((np.nan,),), out.dtype + ) + + return out @wraps(np.roll) diff --git a/dask/dataframe/io/io.py b/dask/dataframe/io/io.py index e2d0a9633..7c5942eba 100644 --- a/dask/dataframe/io/io.py +++ b/dask/dataframe/io/io.py @@ -240,7 +240,7 @@ def from_bcolz(x, chunksize=None, categorize=True, index=None, lock=lock, np.issubdtype(x.dtype[name], np.unicode_) or np.issubdtype(x.dtype[name], np.object_)): a = da.from_array(x[name], chunks=(chunksize * len(x.names),)) - categories[name] = da.unique(a) + categories[name] = da.unique(a).compute() columns = tuple(x.dtype.names) divisions = tuple(range(0, len(x), chunksize)) diff --git a/dask/diagnostics/profile_visualize.py b/dask/diagnostics/profile_visualize.py index dee158e7b..b66691432 100644 --- a/dask/diagnostics/profile_visualize.py +++ b/dask/diagnostics/profile_visualize.py @@ -237,6 +237,7 @@ def plot_tasks(results, dsk, palette='Viridis', label_size=60, **kwargs): defaults = dict(title="Profile Results", tools="hover,save,reset,xwheel_zoom,xpan", + toolbar_location='above', plot_width=800, plot_height=300) defaults.update((k, v) for (k, v) in kwargs.items() if k in 
_get_figure_keywords()) @@ -317,6 +318,7 @@ def plot_resources(results, palette='Viridis', **kwargs): defaults = dict(title="Profile Results", tools="save,reset,xwheel_zoom,xpan", + toolbar_location='above', plot_width=800, plot_height=300) defaults.update((k, v) for (k, v) in kwargs.items() if k in _get_figure_keywords()) @@ -324,15 +326,18 @@ def plot_resources(results, palette='Viridis', **kwargs): t, mem, cpu = zip(*results) left, right = min(t), max(t) t = [i - left for i in t] - p = bp.figure(y_range=(0, max(cpu)), x_range=(0, right - left), **defaults) + p = bp.figure(y_range=fix_bounds(0, max(cpu), 100), + x_range=fix_bounds(0, right - left, 1), + **defaults) else: t = mem = cpu = [] - p = bp.figure(y_range=(0, 100), x_range=(0, 10), **defaults) + p = bp.figure(y_range=(0, 100), x_range=(0, 1), **defaults) colors = palettes.all_palettes[palette][6] p.line(t, cpu, color=colors[0], line_width=4, legend='% CPU') p.yaxis.axis_label = "% CPU" - p.extra_y_ranges = {'memory': Range1d(start=(min(mem) if mem else 0), - end=(max(mem) if mem else 100))} + p.extra_y_ranges = {'memory': Range1d(*fix_bounds(min(mem) if mem else 0, + max(mem) if mem else 100, + 100))} p.line(t, mem, color=colors[2], y_range_name='memory', line_width=4, legend='Memory') p.add_layout(LinearAxis(y_range_name='memory', axis_label='Memory (MB)'), @@ -341,6 +346,11 @@ def plot_resources(results, palette='Viridis', **kwargs): return p +def fix_bounds(start, end, min_span): + """Adjust end point to ensure span of at least `min_span`""" + return start, max(end, start + min_span) + + def plot_cache(results, dsk, start_time, metric_name, palette='Viridis', label_size=60, **kwargs): """Visualize the results of profiling in a bokeh plot. 
@@ -374,6 +384,7 @@ def plot_cache(results, dsk, start_time, metric_name, palette='Viridis', defaults = dict(title="Profile Results", tools="hover,save,reset,wheel_zoom,xpan", + toolbar_location='above', plot_width=800, plot_height=300) defaults.update((k, v) for (k, v) in kwargs.items() if k in _get_figure_keywords()) diff --git a/docs/source/array-api.rst b/docs/source/array-api.rst index d3c760b01..b6e18ff5e 100644 --- a/docs/source/array-api.rst +++ b/docs/source/array-api.rst @@ -228,6 +228,7 @@ Random random.beta random.binomial random.chisquare + random.choice random.exponential random.f random.gamma diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst index 62253d869..f10ec3098 100644 --- a/docs/source/changelog.rst +++ b/docs/source/changelog.rst @@ -11,6 +11,8 @@ Array - Add ``allclose`` (:pr:`2771`) - Remove ``random.different_seeds`` from Dask Array API docs (:pr:`2772`) - Deprecate ``vnorm`` in favor of ``dask.array.linalg.norm`` (:pr:`2773`) +- Reimplement ``unique`` to be lazy (:pr:`2775`) +- Support broadcasting of Dask Arrays with 0-length dimensions (:pr:`2784`) DataFrame +++++++++ @@ -37,6 +39,7 @@ Core Array +++++ +- ``da.random.choice`` now works with array arguments (:pr:`2781`) - Support indexing in arrays with np.int (fixes regression) (:pr:`2719`) - Handle zero dimension with rechunking (:pr:`2747`) - Support -1 as an alias for "size of the dimension" in ``chunks`` (:pr:`2749`)
Wrapper always pass None to random.choice From dask/array/random.py: > with ignoring(AttributeError): > @doc_wraps(np.random.RandomState.choice) > def choice(self, a, size=None, replace=True, p=None, chunks=None): > return self._wrap(np.random.RandomState.choice, a, > size=size, replace=True, p=None, chunks=chunks) Wrapper gets non-None p argument -> Wrapper passes None to p kwarg of np.random.RandomState.choice anyway. Test: > import dask > import dask.array as da > import numpy as np > import dask.array.random as dar > import matplotlib.pyplot as plt > > p = np.arange(12) > p = p*1.0 / np.sum(p) > > p = da.from_array(p, chunks=6) > s = dar.RandomState() > > c = s.choice(len(p), p=p,size=10000,chunks=8) > cc = c.compute() > b = np.bincount(cc) > print(b) > > c = np.random.choice(len(p), p=np.array(p), size=10000) > b = np.bincount(c) > print(b) > Result from numpy and dask are different. > [806 815 833 873 877 809 850 818 837 792 892 798] > [ 0 117 332 467 637 731 901 1079 1251 1316 1483 1686]
dask/dask
diff --git a/dask/array/tests/test_array_core.py b/dask/array/tests/test_array_core.py index 141a65cba..29bbce220 100644 --- a/dask/array/tests/test_array_core.py +++ b/dask/array/tests/test_array_core.py @@ -407,6 +407,7 @@ def test_binops(): def test_broadcast_shapes(): + assert (0, 5) == broadcast_shapes((0, 1), (1, 5)) assert (3, 4, 5) == broadcast_shapes((3, 4, 5), (4, 1), ()) assert (3, 4) == broadcast_shapes((3, 1), (1, 4), (4,)) assert (5, 6, 7, 3, 4) == broadcast_shapes((3, 1), (), (5, 6, 7, 1, 4)) @@ -574,7 +575,7 @@ def test_broadcast_to(): x = np.random.randint(10, size=(5, 1, 6)) a = from_array(x, chunks=(3, 1, 3)) - for shape in [a.shape, (5, 4, 6), (2, 5, 1, 6), (3, 4, 5, 4, 6)]: + for shape in [a.shape, (5, 0, 6), (5, 4, 6), (2, 5, 1, 6), (3, 4, 5, 4, 6)]: xb = chunk.broadcast_to(x, shape) ab = broadcast_to(a, shape) @@ -590,7 +591,7 @@ def test_broadcast_to(): def test_broadcast_to_array(): x = np.random.randint(10, size=(5, 1, 6)) - for shape in [(5, 4, 6), (2, 5, 1, 6), (3, 4, 5, 4, 6)]: + for shape in [(5, 0, 6), (5, 4, 6), (2, 5, 1, 6), (3, 4, 5, 4, 6)]: a = np.broadcast_to(x, shape) d = broadcast_to(x, shape) @@ -600,13 +601,35 @@ def test_broadcast_to_array(): def test_broadcast_to_scalar(): x = 5 - for shape in [tuple(), (2, 3), (5, 4, 6), (2, 5, 1, 6), (3, 4, 5, 4, 6)]: + for shape in [tuple(), (0,), (2, 3), (5, 4, 6), (2, 5, 1, 6), (3, 4, 5, 4, 6)]: a = np.broadcast_to(x, shape) d = broadcast_to(x, shape) assert_eq(a, d) [email protected]('u_shape, v_shape', [ + [tuple(), (2, 3)], + [(1,), (2, 3)], + [(1, 1), (2, 3)], + [(0, 3), (1, 3)], + [(2, 0), (2, 1)], + [(1, 0), (2, 1)], + [(0, 1), (1, 3)], +]) +def test_broadcast_operator(u_shape, v_shape): + u = np.random.random(u_shape) + v = np.random.random(v_shape) + + d_u = from_array(u, chunks=1) + d_v = from_array(v, chunks=1) + + w = u * v + d_w = d_u * d_v + + assert_eq(w, d_w) + + @pytest.mark.parametrize('original_shape,new_shape,chunks', [ ((10,), (10,), (3, 3, 4)), ((10,), (10, 1, 1), 
5), diff --git a/dask/array/tests/test_random.py b/dask/array/tests/test_random.py index 38e0a73c6..fa25de9ef 100644 --- a/dask/array/tests/test_random.py +++ b/dask/array/tests/test_random.py @@ -216,3 +216,52 @@ def test_multinomial(): y = np.random.multinomial(20, [1 / 6.] * 6, size=size) assert x.shape == y.shape == x.compute().shape + + +def test_choice(): + np_dtype = np.random.choice(1, size=()).dtype + size = (10, 3) + chunks = 4 + x = da.random.choice(3, size=size, chunks=chunks) + assert x.dtype == np_dtype + assert x.shape == size + res = x.compute() + assert res.dtype == np_dtype + assert res.shape == size + + np_a = np.array([1, 3, 5, 7, 9], dtype='f8') + da_a = da.from_array(np_a, chunks=2) + + for a in [np_a, da_a]: + x = da.random.choice(a, size=size, chunks=chunks) + res = x.compute() + assert x.dtype == np_a.dtype + assert res.dtype == np_a.dtype + assert set(np.unique(res)).issubset(np_a) + + np_p = np.array([0, 0.2, 0.2, 0.3, 0.3]) + da_p = da.from_array(np_p, chunks=2) + + for a, p in [(da_a, np_p), (np_a, da_p)]: + x = da.random.choice(a, size=size, chunks=chunks, p=p) + res = x.compute() + assert x.dtype == np_a.dtype + assert res.dtype == np_a.dtype + assert set(np.unique(res)).issubset(np_a[1:]) + + np_dtype = np.random.choice(1, size=(), p=np.array([1])).dtype + x = da.random.choice(5, size=size, chunks=chunks, p=np_p) + res = x.compute() + assert x.dtype == np_dtype + assert res.dtype == np_dtype + + errs = [(-1, None), # negative a + (np_a[:, None], None), # a must be 1D + (np_a, np_p[:, None]), # p must be 1D + (np_a, np_p[:-2]), # a and p must match + (3, np_p), # a and p must match + (4, [0.2, 0.2, 0.3])] # p must sum to 1 + + for (a, p) in errs: + with pytest.raises(ValueError): + da.random.choice(a, size=size, chunks=chunks, p=p) diff --git a/dask/array/tests/test_routines.py b/dask/array/tests/test_routines.py index 68ce4c332..f91d1cfe6 100644 --- a/dask/array/tests/test_routines.py +++ b/dask/array/tests/test_routines.py @@ -473,9 
+473,15 @@ def test_round(): def test_unique(): - x = np.array([1, 2, 4, 4, 5, 2]) - d = da.from_array(x, chunks=(3,)) - assert_eq(da.unique(d), np.unique(x)) + a = np.array([1, 2, 4, 4, 5, 2]) + d = da.from_array(a, chunks=(3,)) + + r_a = np.unique(a) + r_d = da.unique(d) + + assert isinstance(r_d, da.Array) + + assert_eq(r_d, r_a) def _maybe_len(l): diff --git a/dask/diagnostics/tests/test_profiler.py b/dask/diagnostics/tests/test_profiler.py index b42a85fdc..5c062640b 100644 --- a/dask/diagnostics/tests/test_profiler.py +++ b/dask/diagnostics/tests/test_profiler.py @@ -245,13 +245,21 @@ def test_resource_profiler_plot(): assert len(p.tools) == 1 assert isinstance(p.tools[0], bokeh.models.HoverTool) assert check_title(p, "Not the default") - # Test empty, checking for errors - rprof.clear() - - with pytest.warns(None) as record: - rprof.visualize(show=False, save=False) - assert len(record) == 0 + # Test with empty and one point, checking for errors + rprof.clear() + for results in [[], [(1.0, 0, 0)]]: + rprof.results = results + with pytest.warns(None) as record: + p = rprof.visualize(show=False, save=False) + assert len(record) == 0 + # Check bounds are valid + assert p.x_range.start == 0 + assert p.x_range.end == 1 + assert p.y_range.start == 0 + assert p.y_range.end == 100 + assert p.extra_y_ranges['memory'].start == 0 + assert p.extra_y_ranges['memory'].end == 100 @pytest.mark.skipif("not bokeh")
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 0, "test_score": 3 }, "num_modified_files": 7 }
0.15
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[complete]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-xdist", "flake8" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 click==8.0.4 cloudpickle==2.2.1 -e git+https://github.com/dask/dask.git@9e9fa10ef11bfe86a52214d8a2cda19508a4ee37#egg=dask distributed==1.19.3 execnet==1.9.0 flake8==5.0.4 HeapDict==1.0.1 importlib-metadata==4.2.0 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work locket==1.0.0 mccabe==0.7.0 more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work msgpack-python==0.5.6 numpy==1.19.5 packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pandas==1.1.5 partd==1.2.0 pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work psutil==7.0.0 py @ file:///opt/conda/conda-bld/py_1644396412707/work pycodestyle==2.9.1 pyflakes==2.5.0 pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 pytest-xdist==3.0.2 python-dateutil==2.9.0.post0 pytz==2025.2 six==1.17.0 sortedcontainers==2.4.0 tblib==1.7.0 toml @ file:///tmp/build/80754af9/toml_1616166611790/work toolz==0.12.0 tornado==6.1 typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zict==2.1.0 zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: dask channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - click==8.0.4 - cloudpickle==2.2.1 - distributed==1.19.3 - execnet==1.9.0 - flake8==5.0.4 - heapdict==1.0.1 - importlib-metadata==4.2.0 - locket==1.0.0 - mccabe==0.7.0 - msgpack-python==0.5.6 - numpy==1.19.5 - pandas==1.1.5 - partd==1.2.0 - psutil==7.0.0 - pycodestyle==2.9.1 - pyflakes==2.5.0 - pytest-xdist==3.0.2 - python-dateutil==2.9.0.post0 - pytz==2025.2 - six==1.17.0 - sortedcontainers==2.4.0 - tblib==1.7.0 - toolz==0.12.0 - tornado==6.1 - zict==2.1.0 prefix: /opt/conda/envs/dask
[ "dask/array/tests/test_array_core.py::test_broadcast_shapes", "dask/array/tests/test_array_core.py::test_broadcast_operator[u_shape3-v_shape3]", "dask/array/tests/test_array_core.py::test_broadcast_operator[u_shape4-v_shape4]", "dask/array/tests/test_array_core.py::test_broadcast_operator[u_shape5-v_shape5]", "dask/array/tests/test_array_core.py::test_broadcast_operator[u_shape6-v_shape6]", "dask/array/tests/test_random.py::test_choice", "dask/array/tests/test_routines.py::test_unique" ]
[ "dask/array/tests/test_array_core.py::test_concatenate_unknown_axes", "dask/array/tests/test_array_core.py::test_field_access", "dask/array/tests/test_array_core.py::test_field_access_with_shape", "dask/array/tests/test_array_core.py::test_matmul", "dask/array/tests/test_array_core.py::test_to_dask_dataframe" ]
[ "dask/array/tests/test_array_core.py::test_getem", "dask/array/tests/test_array_core.py::test_top", "dask/array/tests/test_array_core.py::test_top_supports_broadcasting_rules", "dask/array/tests/test_array_core.py::test_top_literals", "dask/array/tests/test_array_core.py::test_atop_literals", "dask/array/tests/test_array_core.py::test_concatenate3_on_scalars", "dask/array/tests/test_array_core.py::test_chunked_dot_product", "dask/array/tests/test_array_core.py::test_chunked_transpose_plus_one", "dask/array/tests/test_array_core.py::test_broadcast_dimensions_works_with_singleton_dimensions", "dask/array/tests/test_array_core.py::test_broadcast_dimensions", "dask/array/tests/test_array_core.py::test_Array", "dask/array/tests/test_array_core.py::test_uneven_chunks", "dask/array/tests/test_array_core.py::test_numblocks_suppoorts_singleton_block_dims", "dask/array/tests/test_array_core.py::test_keys", "dask/array/tests/test_array_core.py::test_Array_computation", "dask/array/tests/test_array_core.py::test_stack", "dask/array/tests/test_array_core.py::test_short_stack", "dask/array/tests/test_array_core.py::test_stack_scalars", "dask/array/tests/test_array_core.py::test_stack_promote_type", "dask/array/tests/test_array_core.py::test_stack_rechunk", "dask/array/tests/test_array_core.py::test_concatenate", "dask/array/tests/test_array_core.py::test_concatenate_rechunk", "dask/array/tests/test_array_core.py::test_concatenate_fixlen_strings", "dask/array/tests/test_array_core.py::test_binops", "dask/array/tests/test_array_core.py::test_elemwise_on_scalars", "dask/array/tests/test_array_core.py::test_elemwise_with_ndarrays", "dask/array/tests/test_array_core.py::test_elemwise_differently_chunked", "dask/array/tests/test_array_core.py::test_elemwise_dtype", "dask/array/tests/test_array_core.py::test_operators", "dask/array/tests/test_array_core.py::test_operator_dtype_promotion", "dask/array/tests/test_array_core.py::test_T", "dask/array/tests/test_array_core.py::test_norm", 
"dask/array/tests/test_array_core.py::test_broadcast_to", "dask/array/tests/test_array_core.py::test_broadcast_to_array", "dask/array/tests/test_array_core.py::test_broadcast_to_scalar", "dask/array/tests/test_array_core.py::test_broadcast_operator[u_shape0-v_shape0]", "dask/array/tests/test_array_core.py::test_broadcast_operator[u_shape1-v_shape1]", "dask/array/tests/test_array_core.py::test_broadcast_operator[u_shape2-v_shape2]", "dask/array/tests/test_array_core.py::test_reshape[original_shape0-new_shape0-chunks0]", "dask/array/tests/test_array_core.py::test_reshape[original_shape1-new_shape1-5]", "dask/array/tests/test_array_core.py::test_reshape[original_shape2-new_shape2-5]", "dask/array/tests/test_array_core.py::test_reshape[original_shape3-new_shape3-12]", "dask/array/tests/test_array_core.py::test_reshape[original_shape4-new_shape4-12]", "dask/array/tests/test_array_core.py::test_reshape[original_shape5-new_shape5-chunks5]", "dask/array/tests/test_array_core.py::test_reshape[original_shape6-new_shape6-4]", "dask/array/tests/test_array_core.py::test_reshape[original_shape7-new_shape7-4]", "dask/array/tests/test_array_core.py::test_reshape[original_shape8-new_shape8-4]", "dask/array/tests/test_array_core.py::test_reshape[original_shape9-new_shape9-2]", "dask/array/tests/test_array_core.py::test_reshape[original_shape10-new_shape10-2]", "dask/array/tests/test_array_core.py::test_reshape[original_shape11-new_shape11-2]", "dask/array/tests/test_array_core.py::test_reshape[original_shape12-new_shape12-2]", "dask/array/tests/test_array_core.py::test_reshape[original_shape13-new_shape13-2]", "dask/array/tests/test_array_core.py::test_reshape[original_shape14-new_shape14-2]", "dask/array/tests/test_array_core.py::test_reshape[original_shape15-new_shape15-2]", "dask/array/tests/test_array_core.py::test_reshape[original_shape16-new_shape16-chunks16]", "dask/array/tests/test_array_core.py::test_reshape[original_shape17-new_shape17-3]", 
"dask/array/tests/test_array_core.py::test_reshape[original_shape18-new_shape18-4]", "dask/array/tests/test_array_core.py::test_reshape[original_shape19-new_shape19-chunks19]", "dask/array/tests/test_array_core.py::test_reshape[original_shape20-new_shape20-1]", "dask/array/tests/test_array_core.py::test_reshape[original_shape21-new_shape21-1]", "dask/array/tests/test_array_core.py::test_reshape[original_shape22-new_shape22-24]", "dask/array/tests/test_array_core.py::test_reshape[original_shape23-new_shape23-6]", "dask/array/tests/test_array_core.py::test_reshape[original_shape24-new_shape24-6]", "dask/array/tests/test_array_core.py::test_reshape[original_shape25-new_shape25-6]", "dask/array/tests/test_array_core.py::test_reshape[original_shape26-new_shape26-chunks26]", "dask/array/tests/test_array_core.py::test_reshape[original_shape27-new_shape27-chunks27]", "dask/array/tests/test_array_core.py::test_reshape[original_shape28-new_shape28-chunks28]", "dask/array/tests/test_array_core.py::test_reshape[original_shape29-new_shape29-chunks29]", "dask/array/tests/test_array_core.py::test_reshape[original_shape30-new_shape30-chunks30]", "dask/array/tests/test_array_core.py::test_reshape[original_shape31-new_shape31-chunks31]", "dask/array/tests/test_array_core.py::test_reshape[original_shape32-new_shape32-chunks32]", "dask/array/tests/test_array_core.py::test_reshape[original_shape33-new_shape33-chunks33]", "dask/array/tests/test_array_core.py::test_reshape[original_shape34-new_shape34-chunks34]", "dask/array/tests/test_array_core.py::test_reshape_exceptions", "dask/array/tests/test_array_core.py::test_reshape_splat", "dask/array/tests/test_array_core.py::test_reshape_fails_for_dask_only", "dask/array/tests/test_array_core.py::test_reshape_unknown_dimensions", "dask/array/tests/test_array_core.py::test_full", "dask/array/tests/test_array_core.py::test_map_blocks", "dask/array/tests/test_array_core.py::test_map_blocks2", 
"dask/array/tests/test_array_core.py::test_map_blocks_with_constants", "dask/array/tests/test_array_core.py::test_map_blocks_with_kwargs", "dask/array/tests/test_array_core.py::test_map_blocks_with_chunks", "dask/array/tests/test_array_core.py::test_map_blocks_dtype_inference", "dask/array/tests/test_array_core.py::test_from_function_requires_block_args", "dask/array/tests/test_array_core.py::test_repr", "dask/array/tests/test_array_core.py::test_slicing_with_ellipsis", "dask/array/tests/test_array_core.py::test_slicing_with_ndarray", "dask/array/tests/test_array_core.py::test_dtype", "dask/array/tests/test_array_core.py::test_blockdims_from_blockshape", "dask/array/tests/test_array_core.py::test_coerce", "dask/array/tests/test_array_core.py::test_store_delayed_target", "dask/array/tests/test_array_core.py::test_store", "dask/array/tests/test_array_core.py::test_store_regions", "dask/array/tests/test_array_core.py::test_store_compute_false", "dask/array/tests/test_array_core.py::test_store_locks", "dask/array/tests/test_array_core.py::test_np_array_with_zero_dimensions", "dask/array/tests/test_array_core.py::test_dtype_complex", "dask/array/tests/test_array_core.py::test_astype", "dask/array/tests/test_array_core.py::test_arithmetic", "dask/array/tests/test_array_core.py::test_elemwise_consistent_names", "dask/array/tests/test_array_core.py::test_optimize", "dask/array/tests/test_array_core.py::test_slicing_with_non_ndarrays", "dask/array/tests/test_array_core.py::test_getter", "dask/array/tests/test_array_core.py::test_size", "dask/array/tests/test_array_core.py::test_nbytes", "dask/array/tests/test_array_core.py::test_itemsize", "dask/array/tests/test_array_core.py::test_Array_normalizes_dtype", "dask/array/tests/test_array_core.py::test_from_array_with_lock", "dask/array/tests/test_array_core.py::test_from_array_no_asarray", "dask/array/tests/test_array_core.py::test_from_array_getitem", "dask/array/tests/test_array_core.py::test_from_array_minus_one", 
"dask/array/tests/test_array_core.py::test_asarray", "dask/array/tests/test_array_core.py::test_asanyarray", "dask/array/tests/test_array_core.py::test_from_func", "dask/array/tests/test_array_core.py::test_concatenate3_2", "dask/array/tests/test_array_core.py::test_map_blocks3", "dask/array/tests/test_array_core.py::test_from_array_with_missing_chunks", "dask/array/tests/test_array_core.py::test_normalize_chunks", "dask/array/tests/test_array_core.py::test_raise_on_no_chunks", "dask/array/tests/test_array_core.py::test_chunks_is_immutable", "dask/array/tests/test_array_core.py::test_raise_on_bad_kwargs", "dask/array/tests/test_array_core.py::test_long_slice", "dask/array/tests/test_array_core.py::test_ellipsis_slicing", "dask/array/tests/test_array_core.py::test_point_slicing", "dask/array/tests/test_array_core.py::test_point_slicing_with_full_slice", "dask/array/tests/test_array_core.py::test_slice_with_floats", "dask/array/tests/test_array_core.py::test_slice_with_integer_types", "dask/array/tests/test_array_core.py::test_index_with_integer_types", "dask/array/tests/test_array_core.py::test_vindex_basic", "dask/array/tests/test_array_core.py::test_vindex_nd", "dask/array/tests/test_array_core.py::test_vindex_errors", "dask/array/tests/test_array_core.py::test_vindex_merge", "dask/array/tests/test_array_core.py::test_empty_array", "dask/array/tests/test_array_core.py::test_memmap", "dask/array/tests/test_array_core.py::test_to_npy_stack", "dask/array/tests/test_array_core.py::test_view", "dask/array/tests/test_array_core.py::test_view_fortran", "dask/array/tests/test_array_core.py::test_map_blocks_with_changed_dimension", "dask/array/tests/test_array_core.py::test_broadcast_chunks", "dask/array/tests/test_array_core.py::test_chunks_error", "dask/array/tests/test_array_core.py::test_array_compute_forward_kwargs", "dask/array/tests/test_array_core.py::test_dont_fuse_outputs", "dask/array/tests/test_array_core.py::test_dont_dealias_outputs", 
"dask/array/tests/test_array_core.py::test_timedelta_op", "dask/array/tests/test_array_core.py::test_to_delayed", "dask/array/tests/test_array_core.py::test_to_delayed_optimizes", "dask/array/tests/test_array_core.py::test_cumulative", "dask/array/tests/test_array_core.py::test_atop_names", "dask/array/tests/test_array_core.py::test_atop_new_axes", "dask/array/tests/test_array_core.py::test_atop_kwargs", "dask/array/tests/test_array_core.py::test_atop_chunks", "dask/array/tests/test_array_core.py::test_from_delayed", "dask/array/tests/test_array_core.py::test_A_property", "dask/array/tests/test_array_core.py::test_copy_mutate", "dask/array/tests/test_array_core.py::test_npartitions", "dask/array/tests/test_array_core.py::test_astype_gh1151", "dask/array/tests/test_array_core.py::test_elemwise_name", "dask/array/tests/test_array_core.py::test_map_blocks_name", "dask/array/tests/test_array_core.py::test_from_array_names", "dask/array/tests/test_array_core.py::test_array_picklable", "dask/array/tests/test_array_core.py::test_from_array_raises_on_bad_chunks", "dask/array/tests/test_array_core.py::test_concatenate_axes", "dask/array/tests/test_array_core.py::test_atop_concatenate", "dask/array/tests/test_array_core.py::test_common_blockdim", "dask/array/tests/test_array_core.py::test_uneven_chunks_that_fit_neatly", "dask/array/tests/test_array_core.py::test_elemwise_uneven_chunks", "dask/array/tests/test_array_core.py::test_uneven_chunks_atop", "dask/array/tests/test_array_core.py::test_warn_bad_rechunking", "dask/array/tests/test_array_core.py::test_optimize_fuse_keys", "dask/array/tests/test_array_core.py::test_concatenate_stack_dont_warn", "dask/array/tests/test_array_core.py::test_map_blocks_delayed", "dask/array/tests/test_array_core.py::test_no_chunks", "dask/array/tests/test_array_core.py::test_no_chunks_2d", "dask/array/tests/test_array_core.py::test_no_chunks_yes_chunks", "dask/array/tests/test_array_core.py::test_raise_informative_errors_no_chunks", 
"dask/array/tests/test_array_core.py::test_no_chunks_slicing_2d", "dask/array/tests/test_array_core.py::test_index_array_with_array_1d", "dask/array/tests/test_array_core.py::test_index_array_with_array_2d", "dask/array/tests/test_array_core.py::test_setitem_1d", "dask/array/tests/test_array_core.py::test_setitem_2d", "dask/array/tests/test_array_core.py::test_setitem_errs", "dask/array/tests/test_array_core.py::test_zero_slice_dtypes", "dask/array/tests/test_array_core.py::test_zero_sized_array_rechunk", "dask/array/tests/test_array_core.py::test_atop_zero_shape", "dask/array/tests/test_array_core.py::test_atop_zero_shape_new_axes", "dask/array/tests/test_array_core.py::test_broadcast_against_zero_shape", "dask/array/tests/test_array_core.py::test_from_array_name", "dask/array/tests/test_array_core.py::test_concatenate_errs", "dask/array/tests/test_array_core.py::test_stack_errs", "dask/array/tests/test_array_core.py::test_atop_with_numpy_arrays", "dask/array/tests/test_array_core.py::test_elemwise_with_lists[other0-100]", "dask/array/tests/test_array_core.py::test_elemwise_with_lists[other0-6]", "dask/array/tests/test_array_core.py::test_elemwise_with_lists[other1-100]", "dask/array/tests/test_array_core.py::test_elemwise_with_lists[other1-6]", "dask/array/tests/test_array_core.py::test_elemwise_with_lists[other2-100]", "dask/array/tests/test_array_core.py::test_elemwise_with_lists[other2-6]", "dask/array/tests/test_array_core.py::test_constructor_plugin", "dask/array/tests/test_array_core.py::test_no_warnings_on_metadata", "dask/array/tests/test_array_core.py::test_delayed_array_key_hygeine", "dask/array/tests/test_random.py::test_RandomState", "dask/array/tests/test_random.py::test_concurrency", "dask/array/tests/test_random.py::test_doc_randomstate", "dask/array/tests/test_random.py::test_serializability", "dask/array/tests/test_random.py::test_determinisim_through_dask_values", "dask/array/tests/test_random.py::test_randomstate_consistent_names", 
"dask/array/tests/test_random.py::test_random", "dask/array/tests/test_random.py::test_parametrized_random_function", "dask/array/tests/test_random.py::test_kwargs", "dask/array/tests/test_random.py::test_unique_names", "dask/array/tests/test_random.py::test_docs", "dask/array/tests/test_random.py::test_can_make_really_big_random_array", "dask/array/tests/test_random.py::test_random_seed", "dask/array/tests/test_random.py::test_consistent_across_sizes", "dask/array/tests/test_random.py::test_random_all", "dask/array/tests/test_random.py::test_array_broadcasting", "dask/array/tests/test_random.py::test_multinomial", "dask/array/tests/test_routines.py::test_array", "dask/array/tests/test_routines.py::test_atleast_nd_no_args[atleast_1d]", "dask/array/tests/test_routines.py::test_atleast_nd_no_args[atleast_2d]", "dask/array/tests/test_routines.py::test_atleast_nd_no_args[atleast_3d]", "dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape0-chunks0-atleast_1d]", "dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape0-chunks0-atleast_2d]", "dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape0-chunks0-atleast_3d]", "dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape1-chunks1-atleast_1d]", "dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape1-chunks1-atleast_2d]", "dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape1-chunks1-atleast_3d]", "dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape2-chunks2-atleast_1d]", "dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape2-chunks2-atleast_2d]", "dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape2-chunks2-atleast_3d]", "dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape3-chunks3-atleast_1d]", "dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape3-chunks3-atleast_2d]", "dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape3-chunks3-atleast_3d]", 
"dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape4-chunks4-atleast_1d]", "dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape4-chunks4-atleast_2d]", "dask/array/tests/test_routines.py::test_atleast_nd_one_arg[shape4-chunks4-atleast_3d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape10-shape20-atleast_1d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape10-shape20-atleast_2d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape10-shape20-atleast_3d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape11-shape21-atleast_1d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape11-shape21-atleast_2d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape11-shape21-atleast_3d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape12-shape22-atleast_1d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape12-shape22-atleast_2d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape12-shape22-atleast_3d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape13-shape23-atleast_1d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape13-shape23-atleast_2d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape13-shape23-atleast_3d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape14-shape24-atleast_1d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape14-shape24-atleast_2d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape14-shape24-atleast_3d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape15-shape25-atleast_1d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape15-shape25-atleast_2d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape15-shape25-atleast_3d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape16-shape26-atleast_1d]", 
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape16-shape26-atleast_2d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape16-shape26-atleast_3d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape17-shape27-atleast_1d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape17-shape27-atleast_2d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape17-shape27-atleast_3d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape18-shape28-atleast_1d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape18-shape28-atleast_2d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape18-shape28-atleast_3d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape19-shape29-atleast_1d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape19-shape29-atleast_2d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape19-shape29-atleast_3d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape110-shape210-atleast_1d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape110-shape210-atleast_2d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape110-shape210-atleast_3d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape111-shape211-atleast_1d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape111-shape211-atleast_2d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape111-shape211-atleast_3d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape112-shape212-atleast_1d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape112-shape212-atleast_2d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape112-shape212-atleast_3d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape113-shape213-atleast_1d]", 
"dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape113-shape213-atleast_2d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape113-shape213-atleast_3d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape114-shape214-atleast_1d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape114-shape214-atleast_2d]", "dask/array/tests/test_routines.py::test_atleast_nd_two_args[shape114-shape214-atleast_3d]", "dask/array/tests/test_routines.py::test_transpose", "dask/array/tests/test_routines.py::test_transpose_negative_axes", "dask/array/tests/test_routines.py::test_swapaxes", "dask/array/tests/test_routines.py::test_tensordot", "dask/array/tests/test_routines.py::test_tensordot_2[0]", "dask/array/tests/test_routines.py::test_tensordot_2[1]", "dask/array/tests/test_routines.py::test_tensordot_2[axes2]", "dask/array/tests/test_routines.py::test_tensordot_2[axes3]", "dask/array/tests/test_routines.py::test_tensordot_2[axes4]", "dask/array/tests/test_routines.py::test_tensordot_2[axes5]", "dask/array/tests/test_routines.py::test_tensordot_2[axes6]", "dask/array/tests/test_routines.py::test_dot_method", "dask/array/tests/test_routines.py::test_apply_along_axis[shape0-0-ndim-<lambda>]", "dask/array/tests/test_routines.py::test_apply_along_axis[shape0-0-sum-<lambda>]", "dask/array/tests/test_routines.py::test_apply_along_axis[shape0-0-range-<lambda>]", "dask/array/tests/test_routines.py::test_apply_along_axis[shape0-0-range2-<lambda>]", "dask/array/tests/test_routines.py::test_apply_along_axis[shape1-1-ndim-<lambda>]", "dask/array/tests/test_routines.py::test_apply_along_axis[shape1-1-sum-<lambda>]", "dask/array/tests/test_routines.py::test_apply_along_axis[shape1-1-range-<lambda>]", "dask/array/tests/test_routines.py::test_apply_along_axis[shape1-1-range2-<lambda>]", "dask/array/tests/test_routines.py::test_apply_along_axis[shape2-2-ndim-<lambda>]", 
"dask/array/tests/test_routines.py::test_apply_along_axis[shape2-2-sum-<lambda>]", "dask/array/tests/test_routines.py::test_apply_along_axis[shape2-2-range-<lambda>]", "dask/array/tests/test_routines.py::test_apply_along_axis[shape2-2-range2-<lambda>]", "dask/array/tests/test_routines.py::test_apply_along_axis[shape3--1-ndim-<lambda>]", "dask/array/tests/test_routines.py::test_apply_along_axis[shape3--1-sum-<lambda>]", "dask/array/tests/test_routines.py::test_apply_along_axis[shape3--1-range-<lambda>]", "dask/array/tests/test_routines.py::test_apply_along_axis[shape3--1-range2-<lambda>]", "dask/array/tests/test_routines.py::test_apply_over_axes[shape0-axes0-sum0-<lambda>]", "dask/array/tests/test_routines.py::test_apply_over_axes[shape0-axes0-sum1-<lambda>]", "dask/array/tests/test_routines.py::test_apply_over_axes[shape0-axes0-range-<lambda>]", "dask/array/tests/test_routines.py::test_apply_over_axes[shape1-0-sum0-<lambda>]", "dask/array/tests/test_routines.py::test_apply_over_axes[shape1-0-sum1-<lambda>]", "dask/array/tests/test_routines.py::test_apply_over_axes[shape1-0-range-<lambda>]", "dask/array/tests/test_routines.py::test_apply_over_axes[shape2-axes2-sum0-<lambda>]", "dask/array/tests/test_routines.py::test_apply_over_axes[shape2-axes2-sum1-<lambda>]", "dask/array/tests/test_routines.py::test_apply_over_axes[shape2-axes2-range-<lambda>]", "dask/array/tests/test_routines.py::test_apply_over_axes[shape3-axes3-sum0-<lambda>]", "dask/array/tests/test_routines.py::test_apply_over_axes[shape3-axes3-sum1-<lambda>]", "dask/array/tests/test_routines.py::test_apply_over_axes[shape3-axes3-range-<lambda>]", "dask/array/tests/test_routines.py::test_apply_over_axes[shape4-axes4-sum0-<lambda>]", "dask/array/tests/test_routines.py::test_apply_over_axes[shape4-axes4-sum1-<lambda>]", "dask/array/tests/test_routines.py::test_apply_over_axes[shape4-axes4-range-<lambda>]", "dask/array/tests/test_routines.py::test_ptp[shape0-None]", 
"dask/array/tests/test_routines.py::test_ptp[shape1-0]", "dask/array/tests/test_routines.py::test_ptp[shape2-1]", "dask/array/tests/test_routines.py::test_ptp[shape3-2]", "dask/array/tests/test_routines.py::test_ptp[shape4--1]", "dask/array/tests/test_routines.py::test_diff[0-shape0-0]", "dask/array/tests/test_routines.py::test_diff[0-shape1-1]", "dask/array/tests/test_routines.py::test_diff[0-shape2-2]", "dask/array/tests/test_routines.py::test_diff[0-shape3--1]", "dask/array/tests/test_routines.py::test_diff[1-shape0-0]", "dask/array/tests/test_routines.py::test_diff[1-shape1-1]", "dask/array/tests/test_routines.py::test_diff[1-shape2-2]", "dask/array/tests/test_routines.py::test_diff[1-shape3--1]", "dask/array/tests/test_routines.py::test_diff[2-shape0-0]", "dask/array/tests/test_routines.py::test_diff[2-shape1-1]", "dask/array/tests/test_routines.py::test_diff[2-shape2-2]", "dask/array/tests/test_routines.py::test_diff[2-shape3--1]", "dask/array/tests/test_routines.py::test_ediff1d[None-None-shape0]", "dask/array/tests/test_routines.py::test_ediff1d[None-None-shape1]", "dask/array/tests/test_routines.py::test_ediff1d[0-0-shape0]", "dask/array/tests/test_routines.py::test_ediff1d[0-0-shape1]", "dask/array/tests/test_routines.py::test_ediff1d[to_end2-to_begin2-shape0]", "dask/array/tests/test_routines.py::test_ediff1d[to_end2-to_begin2-shape1]", "dask/array/tests/test_routines.py::test_topk", "dask/array/tests/test_routines.py::test_topk_k_bigger_than_chunk", "dask/array/tests/test_routines.py::test_bincount", "dask/array/tests/test_routines.py::test_bincount_with_weights", "dask/array/tests/test_routines.py::test_bincount_raises_informative_error_on_missing_minlength_kwarg", "dask/array/tests/test_routines.py::test_digitize", "dask/array/tests/test_routines.py::test_histogram", "dask/array/tests/test_routines.py::test_histogram_alternative_bins_range", "dask/array/tests/test_routines.py::test_histogram_return_type", 
"dask/array/tests/test_routines.py::test_histogram_extra_args_and_shapes", "dask/array/tests/test_routines.py::test_cov", "dask/array/tests/test_routines.py::test_corrcoef", "dask/array/tests/test_routines.py::test_round", "dask/array/tests/test_routines.py::test_roll[None-3-chunks0]", "dask/array/tests/test_routines.py::test_roll[None-3-chunks1]", "dask/array/tests/test_routines.py::test_roll[None-7-chunks0]", "dask/array/tests/test_routines.py::test_roll[None-7-chunks1]", "dask/array/tests/test_routines.py::test_roll[None-9-chunks0]", "dask/array/tests/test_routines.py::test_roll[None-9-chunks1]", "dask/array/tests/test_routines.py::test_roll[None-shift3-chunks0]", "dask/array/tests/test_routines.py::test_roll[None-shift3-chunks1]", "dask/array/tests/test_routines.py::test_roll[None-shift4-chunks0]", "dask/array/tests/test_routines.py::test_roll[None-shift4-chunks1]", "dask/array/tests/test_routines.py::test_roll[0-3-chunks0]", "dask/array/tests/test_routines.py::test_roll[0-3-chunks1]", "dask/array/tests/test_routines.py::test_roll[0-7-chunks0]", "dask/array/tests/test_routines.py::test_roll[0-7-chunks1]", "dask/array/tests/test_routines.py::test_roll[0-9-chunks0]", "dask/array/tests/test_routines.py::test_roll[0-9-chunks1]", "dask/array/tests/test_routines.py::test_roll[0-shift3-chunks0]", "dask/array/tests/test_routines.py::test_roll[0-shift3-chunks1]", "dask/array/tests/test_routines.py::test_roll[0-shift4-chunks0]", "dask/array/tests/test_routines.py::test_roll[0-shift4-chunks1]", "dask/array/tests/test_routines.py::test_roll[1-3-chunks0]", "dask/array/tests/test_routines.py::test_roll[1-3-chunks1]", "dask/array/tests/test_routines.py::test_roll[1-7-chunks0]", "dask/array/tests/test_routines.py::test_roll[1-7-chunks1]", "dask/array/tests/test_routines.py::test_roll[1-9-chunks0]", "dask/array/tests/test_routines.py::test_roll[1-9-chunks1]", "dask/array/tests/test_routines.py::test_roll[1-shift3-chunks0]", 
"dask/array/tests/test_routines.py::test_roll[1-shift3-chunks1]", "dask/array/tests/test_routines.py::test_roll[1-shift4-chunks0]", "dask/array/tests/test_routines.py::test_roll[1-shift4-chunks1]", "dask/array/tests/test_routines.py::test_roll[-1-3-chunks0]", "dask/array/tests/test_routines.py::test_roll[-1-3-chunks1]", "dask/array/tests/test_routines.py::test_roll[-1-7-chunks0]", "dask/array/tests/test_routines.py::test_roll[-1-7-chunks1]", "dask/array/tests/test_routines.py::test_roll[-1-9-chunks0]", "dask/array/tests/test_routines.py::test_roll[-1-9-chunks1]", "dask/array/tests/test_routines.py::test_roll[-1-shift3-chunks0]", "dask/array/tests/test_routines.py::test_roll[-1-shift3-chunks1]", "dask/array/tests/test_routines.py::test_roll[-1-shift4-chunks0]", "dask/array/tests/test_routines.py::test_roll[-1-shift4-chunks1]", "dask/array/tests/test_routines.py::test_roll[axis4-3-chunks0]", "dask/array/tests/test_routines.py::test_roll[axis4-3-chunks1]", "dask/array/tests/test_routines.py::test_roll[axis4-7-chunks0]", "dask/array/tests/test_routines.py::test_roll[axis4-7-chunks1]", "dask/array/tests/test_routines.py::test_roll[axis4-9-chunks0]", "dask/array/tests/test_routines.py::test_roll[axis4-9-chunks1]", "dask/array/tests/test_routines.py::test_roll[axis4-shift3-chunks0]", "dask/array/tests/test_routines.py::test_roll[axis4-shift3-chunks1]", "dask/array/tests/test_routines.py::test_roll[axis4-shift4-chunks0]", "dask/array/tests/test_routines.py::test_roll[axis4-shift4-chunks1]", "dask/array/tests/test_routines.py::test_roll[axis5-3-chunks0]", "dask/array/tests/test_routines.py::test_roll[axis5-3-chunks1]", "dask/array/tests/test_routines.py::test_roll[axis5-7-chunks0]", "dask/array/tests/test_routines.py::test_roll[axis5-7-chunks1]", "dask/array/tests/test_routines.py::test_roll[axis5-9-chunks0]", "dask/array/tests/test_routines.py::test_roll[axis5-9-chunks1]", "dask/array/tests/test_routines.py::test_roll[axis5-shift3-chunks0]", 
"dask/array/tests/test_routines.py::test_roll[axis5-shift3-chunks1]", "dask/array/tests/test_routines.py::test_roll[axis5-shift4-chunks0]", "dask/array/tests/test_routines.py::test_roll[axis5-shift4-chunks1]", "dask/array/tests/test_routines.py::test_ravel", "dask/array/tests/test_routines.py::test_squeeze", "dask/array/tests/test_routines.py::test_vstack", "dask/array/tests/test_routines.py::test_hstack", "dask/array/tests/test_routines.py::test_dstack", "dask/array/tests/test_routines.py::test_take", "dask/array/tests/test_routines.py::test_take_dask_from_numpy", "dask/array/tests/test_routines.py::test_compress", "dask/array/tests/test_routines.py::test_extract", "dask/array/tests/test_routines.py::test_isnull", "dask/array/tests/test_routines.py::test_isclose", "dask/array/tests/test_routines.py::test_allclose", "dask/array/tests/test_routines.py::test_choose", "dask/array/tests/test_routines.py::test_argwhere", "dask/array/tests/test_routines.py::test_argwhere_obj", "dask/array/tests/test_routines.py::test_argwhere_str", "dask/array/tests/test_routines.py::test_where", "dask/array/tests/test_routines.py::test_where_scalar_dtype", "dask/array/tests/test_routines.py::test_where_bool_optimization", "dask/array/tests/test_routines.py::test_where_nonzero", "dask/array/tests/test_routines.py::test_where_incorrect_args", "dask/array/tests/test_routines.py::test_count_nonzero", "dask/array/tests/test_routines.py::test_count_nonzero_axis[None]", "dask/array/tests/test_routines.py::test_count_nonzero_axis[0]", "dask/array/tests/test_routines.py::test_count_nonzero_axis[axis2]", "dask/array/tests/test_routines.py::test_count_nonzero_axis[axis3]", "dask/array/tests/test_routines.py::test_count_nonzero_obj", "dask/array/tests/test_routines.py::test_count_nonzero_obj_axis[None]", "dask/array/tests/test_routines.py::test_count_nonzero_obj_axis[0]", "dask/array/tests/test_routines.py::test_count_nonzero_obj_axis[axis2]", 
"dask/array/tests/test_routines.py::test_count_nonzero_obj_axis[axis3]", "dask/array/tests/test_routines.py::test_count_nonzero_str", "dask/array/tests/test_routines.py::test_flatnonzero", "dask/array/tests/test_routines.py::test_nonzero", "dask/array/tests/test_routines.py::test_nonzero_method", "dask/array/tests/test_routines.py::test_coarsen", "dask/array/tests/test_routines.py::test_coarsen_with_excess", "dask/array/tests/test_routines.py::test_insert", "dask/array/tests/test_routines.py::test_multi_insert", "dask/array/tests/test_routines.py::test_result_type", "dask/diagnostics/tests/test_profiler.py::test_profiler", "dask/diagnostics/tests/test_profiler.py::test_profiler_works_under_error", "dask/diagnostics/tests/test_profiler.py::test_two_gets", "dask/diagnostics/tests/test_profiler.py::test_resource_profiler", "dask/diagnostics/tests/test_profiler.py::test_resource_profiler_multiple_gets", "dask/diagnostics/tests/test_profiler.py::test_cache_profiler", "dask/diagnostics/tests/test_profiler.py::test_register[Profiler]", "dask/diagnostics/tests/test_profiler.py::test_register[<lambda>]", "dask/diagnostics/tests/test_profiler.py::test_register[CacheProfiler]" ]
[]
BSD 3-Clause "New" or "Revised" License
1,760
[ "docs/source/array-api.rst", "dask/array/random.py", "dask/array/routines.py", "dask/dataframe/io/io.py", "dask/array/core.py", "dask/diagnostics/profile_visualize.py", "docs/source/changelog.rst" ]
[ "docs/source/array-api.rst", "dask/array/random.py", "dask/array/routines.py", "dask/dataframe/io/io.py", "dask/array/core.py", "dask/diagnostics/profile_visualize.py", "docs/source/changelog.rst" ]
knknkn1162__anyloadump-8
9f69fe9b1c27ec66d52e1eaa1028f89733aed842
2017-10-14 07:18:29
9f69fe9b1c27ec66d52e1eaa1028f89733aed842
diff --git a/anyloadump/loadump.py b/anyloadump/loadump.py index 18050d2..a5944bf 100644 --- a/anyloadump/loadump.py +++ b/anyloadump/loadump.py @@ -1,7 +1,7 @@ from enum import Enum import subprocess, os, re, codecs -import logging +import logging, importlib logger = logging.getLogger(__name__) @@ -49,7 +49,8 @@ may raise ExtensionInferenceError def _invoke(dump_mode: DumpMode, file=None, fmt=None): ext = _extract_extension(file) if file else fmt if ext is None: raise ExtensionNotInferredError - target = _extract_extension(file).__import__(ext) + target = importlib.import_module(ext) + logger.debug("module : {}".format(target)) # "[load|dump]s?" method_mappings = dict(zip(list("rawx"), ["load"] + ["dump"] * 3)) return getattr(target, method_mappings[dump_mode.value] + 's' * (not file))
use importlib.import_module instead of __import__ https://docs.python.org/3.6/library/functions.html#__import__ says > This is an advanced function that is not needed in everyday Python programming, unlike importlib.import_module().
knknkn1162/anyloadump
diff --git a/tests/test_loadump.py b/tests/test_loadump.py index d2f37fe..14b5921 100644 --- a/tests/test_loadump.py +++ b/tests/test_loadump.py @@ -1,4 +1,5 @@ from anyloadump import loadump +from anyloadump.loadump import DumpMode import unittest import os import logging @@ -34,3 +35,81 @@ class LoadumpTests(unittest.TestCase): res = loadump._extract_extension(self._get_path("data/sample.pickle")) self.assertEqual(res, "pickle") self.assertEqual(loadump._extract_extension(self._get_path("data/dummy")), "") + + def test_invoke(self): + + import logging + logging.basicConfig(level=logging.ERROR) + logging.getLogger("anyloadump.loadump").setLevel(logging.DEBUG) + import pickle + + lst = [1,2,3] + + # check if func is json.dump function + func = loadump._invoke( + dump_mode=DumpMode.WRITE, + file = self._get_path("data/sample.json") + ) + self.assertTrue(hasattr(func, '__call__')) + self.assertEqual(func.__module__, "json") + self.assertEqual(func.__name__, "dump") + + with open(self._get_path("data/out1.json"), "w") as fo: + res = func(lst, fo) + self.assertIsNone(res) + + # check if func is json.load function + func = loadump._invoke( + dump_mode=DumpMode.READ, + file = self._get_path("data/sample.json") + ) + self.assertTrue(hasattr(func, '__call__')) + self.assertEqual(func.__module__, "json") + self.assertEqual(func.__name__, "load") + + with open(self._get_path("data/out1.json"), "r") as fi: + obj = func(fi) + self.assertEqual(lst, obj) + + # check if func is pickle.load function + pickle_file = "data/sample.pickle" + func = loadump._invoke( + dump_mode=DumpMode.READ, + file = self._get_path(pickle_file) + ) + self.assertTrue(hasattr(func, '__call__')) + self.assertEqual(func.__module__, "_pickle") + self.assertEqual(func.__name__, "load") + + with open(self._get_path(pickle_file), "rb") as fi: + obj = func(fi) + with open(self._get_path(pickle_file), "rb") as fi: + obj_cmp = pickle.load(fi) + self.assertEqual(obj_cmp, obj) + + # check if func is pickle.dumps 
function + func = loadump._invoke( + dump_mode=DumpMode.WRITE, + fmt = "pickle" + ) + self.assertTrue(hasattr(func, '__call__')) + self.assertEqual(func.__module__, "_pickle") + self.assertEqual(func.__name__, "dumps") + + self.assertEqual(func(lst), pickle.dumps(lst)) + + # check if res is pickle.loads function + func = loadump._invoke( + dump_mode=DumpMode.READ, + fmt = "pickle" + ) + self.assertTrue(hasattr(func, '__call__')) + self.assertEqual(func.__module__, "_pickle") + self.assertEqual(func.__name__, "loads") + + + # check whether ExtensionNotInferredError raises if both file and fmt is None. + with self.assertRaises(loadump.ExtensionNotInferredError): + loadump._invoke( + dump_mode=DumpMode.READ, + )
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 3, "test_score": 0 }, "num_modified_files": 1 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": null, "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
-e git+https://github.com/knknkn1162/anyloadump.git@9f69fe9b1c27ec66d52e1eaa1028f89733aed842#egg=anyloadump coverage==7.8.0 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work packaging @ file:///croot/packaging_1734472117206/work pluggy @ file:///croot/pluggy_1733169602837/work pytest @ file:///croot/pytest_1738938843180/work pytest-cov==6.0.0 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
name: anyloadump channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.8.0 - pytest-cov==6.0.0 prefix: /opt/conda/envs/anyloadump
[ "tests/test_loadump.py::LoadumpTests::test_invoke" ]
[ "tests/test_loadump.py::LoadumpTests::test_is_binary" ]
[ "tests/test_loadump.py::LoadumpTests::test_extract_extension" ]
[]
null
1,761
[ "anyloadump/loadump.py" ]
[ "anyloadump/loadump.py" ]
knknkn1162__anyloadump-11
3aedfe9cfcee8481739b0a2effa3a1890c61a08f
2017-10-14 12:19:41
3aedfe9cfcee8481739b0a2effa3a1890c61a08f
diff --git a/anyloadump/__init__.py b/anyloadump/__init__.py index b945f70..67c25d0 100644 --- a/anyloadump/__init__.py +++ b/anyloadump/__init__.py @@ -1,1 +1,2 @@ +__all__ = ["dump", "load"] from . import dump, load diff --git a/anyloadump/dump.py b/anyloadump/dump.py index 3297150..83de06f 100644 --- a/anyloadump/dump.py +++ b/anyloadump/dump.py @@ -1,33 +1,39 @@ from .loadump import loadump, DumpMode -def dump(obj, file, *, encoding=None, errors=None, **kwargs): - return loadump(obj=obj, dump_mode=DumpMode.WRITE, file=file, encoding=encoding, errors=errors, **kwargs) +def dump(obj, file, *, encoding=None, errors=None, buffering=None, **kwargs): + return loadump(obj=obj, dump_mode=DumpMode.WRITE, file=file, + encoding=encoding, errors=errors, buffering=buffering, **kwargs) -def adump(obj, file, *, encoding=None, errors=None, **kwargs): +def adump(obj, file, *, encoding=None, errors=None, buffering=None, **kwargs): return loadump( obj=obj, dump_mode=DumpMode.APPEND, file=file, encoding=encoding, errors=errors, + buffering=buffering, **kwargs ) -def xdump(obj, file, *, encoding=None, errors=None, **kwargs): +def xdump(obj, file, *, encoding=None, errors=None, buffering=None, **kwargs): return loadump( obj=obj, dump_mode=DumpMode.EXCLUSIVE_CREATION, file=file, encoding=encoding, errors=errors, + buffering=buffering, **kwargs ) -def dumps(obj, *, encoding=None, errors=None, **kwargs): - return loadump(obj=obj, dump_mode=DumpMode.WRITE, encoding=encoding, errors=errors, **kwargs) +def dumps(obj, *, encoding=None, errors=None, buffering=None, **kwargs): + return loadump(obj=obj, dump_mode=DumpMode.WRITE, + encoding=encoding, errors=errors, buffering=buffering, **kwargs) -def adumps(obj, *, encoding=None, errors=None, **kwargs): - return loadump(obj=obj, dump_mode=DumpMode.APPEND, encoding=encoding, errors=errors, **kwargs) +def adumps(obj, *, encoding=None, errors=None, buffering=None, **kwargs): + return loadump(obj=obj, dump_mode=DumpMode.APPEND, + 
encoding=encoding, errors=errors, buffering=buffering, **kwargs) -def xdumps(obj, *, encoding=None, errors=None, **kwargs): - return loadump(obj=obj, dump_mode=DumpMode.EXCLUSIVE_CREATION, encoding=encoding, errors=errors, **kwargs) +def xdumps(obj, *, encoding=None, errors=None, buffering=None, **kwargs): + return loadump(obj=obj, dump_mode=DumpMode.EXCLUSIVE_CREATION, + encoding=encoding, errors=errors, buffering=buffering, **kwargs) diff --git a/anyloadump/load.py b/anyloadump/load.py index 6405744..19c5a04 100644 --- a/anyloadump/load.py +++ b/anyloadump/load.py @@ -1,4 +1,7 @@ from .loadump import loadump, DumpMode -def load(file, *, encoding=None, errors=None, **kwargs): - return loadump(DumpMode.READ, file=file, encoding=encoding, errors=errors, **kwargs) +def load(filename, *, encoding=None, errors=None, buffering=None, **kwargs): + return loadump(DumpMode.READ, filename=filename, encoding=encoding, errors=errors, buffering=buffering, **kwargs) + +def loads(s, *, encoding=None, errors=None, buffering=None, **kwargs): + return loadump(DumpMode.READ, s=s, encoding=encoding, errors=errors, buffering=buffering, **kwargs) diff --git a/anyloadump/loadump.py b/anyloadump/loadump.py index a5944bf..813b8de 100644 --- a/anyloadump/loadump.py +++ b/anyloadump/loadump.py @@ -46,33 +46,33 @@ def _extract_extension(file): """ may raise ExtensionInferenceError """ -def _invoke(dump_mode: DumpMode, file=None, fmt=None): - ext = _extract_extension(file) if file else fmt +def _invoke(dump_mode: DumpMode, filename=None, fmt=None): + ext = _extract_extension(filename) if filename else fmt if ext is None: raise ExtensionNotInferredError target = importlib.import_module(ext) - logger.debug("module : {}".format(target)) # "[load|dump]s?" 
method_mappings = dict(zip(list("rawx"), ["load"] + ["dump"] * 3)) - return getattr(target, method_mappings[dump_mode.value] + 's' * (not file)) + method = getattr(target, method_mappings[dump_mode.value] + 's' * (not filename)) + logger.debug("module : {}, method : {}".format(target, method)) + return method """ generalized [load|dump]s? function """ -def loadump(dump_mode: DumpMode, *, obj=None, file=None, fmt = None, encoding=None, errors=None, **kwargs): +def loadump(dump_mode: DumpMode, *, + obj=None, s=None, filename=None, fmt=None, encoding=None, errors=None, buffering=None, **kwargs): + # load method precedes loads + if obj is not None: + logger.warning("`obj` & `s` are both None, so `s` is forced to set None") + s=None - kwargs.update( - dict( - encoding=encoding, - errors=errors, - obj=obj, - ) - ) - kwargs = {k: v for k, v in kwargs if k is not None} - - if file is None: - return _invoke(dump_mode=dump_mode, fmt=fmt)(**kwargs) + if filename is None: + return _invoke(dump_mode=dump_mode, fmt=fmt)(obj or s, **kwargs) else: - if not os.path.exists(file): raise FileNotFoundError - mode = dump_mode.value + ("b" if _is_binary(file) else "") - with codecs.open(file, mode=mode, encoding=encoding, errors=errors) as fp: - return _invoke(dump_mode=dump_mode, file=file, fmt=fmt)(fp=fp, **kwargs) + if not os.path.exists(filename): raise FileNotFoundError + mode = dump_mode.value + "b"*_is_binary(filename) + codecs_kwargs = \ + {k:v for k,v in dict(mode=mode, encoding=encoding, errors=errors, buffering=buffering).items() \ + if v is not None} + with codecs.open(filename=filename, **codecs_kwargs) as fp: + return _invoke(dump_mode=dump_mode, filename=filename, fmt=fmt)(fp, **kwargs)
test loadump.load module test loadump.load module see also #9
knknkn1162/anyloadump
diff --git a/tests/test_load.py b/tests/test_load.py new file mode 100644 index 0000000..1b19de1 --- /dev/null +++ b/tests/test_load.py @@ -0,0 +1,49 @@ +import unittest +from anyloadump import load +import os + +class LoadTests(unittest.TestCase): + def _get_path(self, file): + root_dir = os.path.dirname(os.path.abspath(__file__)) + return os.path.join(root_dir, file) + + def test_load(self): + + import logging + logging.basicConfig(level=logging.ERROR) + logging.getLogger("anyloadump.loadump").setLevel(logging.DEBUG) + import json, pickle + + # test text_file(json) + json_file = self._get_path("data/sample.json") + res = load.load(json_file) + + with open(json_file, "r") as fi: + obj = json.load(fi) + self.assertEqual(res, obj) + + # test binary_file(pickle) + pickle_file = self._get_path("data/sample.pickle") + res = load.load( + filename=pickle_file, + ) + + with open(pickle_file, "rb") as fi: + obj = pickle.load(fi) + + self.assertEqual(res, obj) + + def test_loads(self): + import logging + logging.basicConfig(level=logging.ERROR) + logging.getLogger("anyloadump.loadump").setLevel(logging.DEBUG) + import json, pickle + + sample = [1,2,3] + s = json.dumps(sample) + res = load.loads(s, fmt="json") # test + self.assertEqual(res, sample) + + b = pickle.dumps(sample) + res = load.loads(b, fmt="pickle") + self.assertEqual(res, sample) diff --git a/tests/test_loadump.py b/tests/test_loadump.py index 14b5921..2c9c60c 100644 --- a/tests/test_loadump.py +++ b/tests/test_loadump.py @@ -48,7 +48,7 @@ class LoadumpTests(unittest.TestCase): # check if func is json.dump function func = loadump._invoke( dump_mode=DumpMode.WRITE, - file = self._get_path("data/sample.json") + filename = self._get_path("data/sample.json") ) self.assertTrue(hasattr(func, '__call__')) self.assertEqual(func.__module__, "json") @@ -61,7 +61,7 @@ class LoadumpTests(unittest.TestCase): # check if func is json.load function func = loadump._invoke( dump_mode=DumpMode.READ, - file = 
self._get_path("data/sample.json") + filename = self._get_path("data/sample.json") ) self.assertTrue(hasattr(func, '__call__')) self.assertEqual(func.__module__, "json") @@ -75,7 +75,7 @@ class LoadumpTests(unittest.TestCase): pickle_file = "data/sample.pickle" func = loadump._invoke( dump_mode=DumpMode.READ, - file = self._get_path(pickle_file) + filename = self._get_path(pickle_file) ) self.assertTrue(hasattr(func, '__call__')) self.assertEqual(func.__module__, "_pickle") @@ -113,3 +113,7 @@ class LoadumpTests(unittest.TestCase): loadump._invoke( dump_mode=DumpMode.READ, ) + + def test_loadump(self): + # generalized function, tests are described in dump or load module. + pass
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_issue_reference", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 3, "issue_text_score": 3, "test_score": 3 }, "num_modified_files": 4 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": null, "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
-e git+https://github.com/knknkn1162/anyloadump.git@3aedfe9cfcee8481739b0a2effa3a1890c61a08f#egg=anyloadump coverage==7.8.0 exceptiongroup==1.2.2 iniconfig==2.1.0 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 pytest-cov==6.0.0 tomli==2.2.1
name: anyloadump channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.8.0 - exceptiongroup==1.2.2 - iniconfig==2.1.0 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - pytest-cov==6.0.0 - tomli==2.2.1 prefix: /opt/conda/envs/anyloadump
[ "tests/test_load.py::LoadTests::test_loads", "tests/test_loadump.py::LoadumpTests::test_invoke" ]
[ "tests/test_load.py::LoadTests::test_load", "tests/test_loadump.py::LoadumpTests::test_is_binary" ]
[ "tests/test_loadump.py::LoadumpTests::test_extract_extension", "tests/test_loadump.py::LoadumpTests::test_loadump" ]
[]
null
1,762
[ "anyloadump/__init__.py", "anyloadump/load.py", "anyloadump/loadump.py", "anyloadump/dump.py" ]
[ "anyloadump/__init__.py", "anyloadump/load.py", "anyloadump/loadump.py", "anyloadump/dump.py" ]
knknkn1162__anyloadump-17
30965896a2f42ac3fa92a7013f0936008e02ad5e
2017-10-15 05:59:47
30965896a2f42ac3fa92a7013f0936008e02ad5e
diff --git a/anyloadump/__init__.py b/anyloadump/__init__.py index 67c25d0..8e3dd12 100644 --- a/anyloadump/__init__.py +++ b/anyloadump/__init__.py @@ -1,2 +1,3 @@ __all__ = ["dump", "load"] -from . import dump, load +from .dump import * +from .load import *
call as anyloadump.load(...) or anylodump.dump(...) Modify import in __init__.py.
knknkn1162/anyloadump
diff --git a/tests/test_dump.py b/tests/test_dump.py index 11cd4ee..0596cbc 100644 --- a/tests/test_dump.py +++ b/tests/test_dump.py @@ -1,5 +1,5 @@ import unittest -from anyloadump import dump +import anyloadump as ald import os class DumpTests(unittest.TestCase): @@ -14,7 +14,7 @@ class DumpTests(unittest.TestCase): # test json-format json_file = self._get_path("data/out.json") - dump.dump(lst, self._get_path(json_file)) + ald.dump(lst, self._get_path(json_file)) ## confirm with open(json_file, "r") as fi: obj = json.load(fi) @@ -23,7 +23,7 @@ class DumpTests(unittest.TestCase): # test pickle-format pickle_file = self._get_path("data/out.pickle") - dump.dump(lst, self._get_path(pickle_file)) + ald.dump(lst, self._get_path(pickle_file)) with open(pickle_file, "rb") as fi: obj = pickle.load(fi) self.assertEqual(lst, obj) @@ -35,13 +35,13 @@ class DumpTests(unittest.TestCase): lst = [1,2,3] # test json-format - s = dump.dumps(lst, fmt="json") + s = ald.dumps(lst, fmt="json") ## confirm obj = json.loads(s) self.assertEqual(lst, obj) # test pickle-format - s = dump.dumps(lst, "pickle") + s = ald.dumps(lst, "pickle") ## confirm obj = pickle.loads(s) self.assertEqual(lst, obj) diff --git a/tests/test_load.py b/tests/test_load.py index 1b19de1..d76de0d 100644 --- a/tests/test_load.py +++ b/tests/test_load.py @@ -1,5 +1,5 @@ import unittest -from anyloadump import load +import anyloadump as ald import os class LoadTests(unittest.TestCase): @@ -16,7 +16,7 @@ class LoadTests(unittest.TestCase): # test text_file(json) json_file = self._get_path("data/sample.json") - res = load.load(json_file) + res = ald.load(json_file) with open(json_file, "r") as fi: obj = json.load(fi) @@ -24,7 +24,7 @@ class LoadTests(unittest.TestCase): # test binary_file(pickle) pickle_file = self._get_path("data/sample.pickle") - res = load.load( + res = ald.load( filename=pickle_file, ) @@ -41,9 +41,9 @@ class LoadTests(unittest.TestCase): sample = [1,2,3] s = json.dumps(sample) - res = load.loads(s, 
fmt="json") # test + res = ald.loads(s, fmt="json") # test self.assertEqual(res, sample) b = pickle.dumps(sample) - res = load.loads(b, fmt="pickle") + res = ald.loads(b, fmt="pickle") self.assertEqual(res, sample)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 3, "test_score": 0 }, "num_modified_files": 1 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": null, "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
-e git+https://github.com/knknkn1162/anyloadump.git@30965896a2f42ac3fa92a7013f0936008e02ad5e#egg=anyloadump coverage==7.8.0 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work packaging @ file:///croot/packaging_1734472117206/work pluggy @ file:///croot/pluggy_1733169602837/work pytest @ file:///croot/pytest_1738938843180/work pytest-cov==6.0.0 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
name: anyloadump channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.8.0 - pytest-cov==6.0.0 prefix: /opt/conda/envs/anyloadump
[ "tests/test_dump.py::DumpTests::test_dump", "tests/test_dump.py::DumpTests::test_dumps", "tests/test_load.py::LoadTests::test_load", "tests/test_load.py::LoadTests::test_loads" ]
[]
[ "tests/test_dump.py::DumpTests::test_adump", "tests/test_dump.py::DumpTests::test_adumps", "tests/test_dump.py::DumpTests::test_xdump", "tests/test_dump.py::DumpTests::test_xdumps" ]
[]
null
1,763
[ "anyloadump/__init__.py" ]
[ "anyloadump/__init__.py" ]
python-visualization__branca-29
bd9151e172f04c9347acc23ad7fefa6b7502bd0a
2017-10-15 14:25:13
d54843afb185994c0fd2494c1a97efae35aa2700
nanodan: Okay this is good to go - review and let me know! ocefpaf: @nanodan note that @BibMartin is quite busy with the day job, so if he cannot review this in the next few days please ping me again and I'll merge this. nanodan: @ocefpaf I'll check in on the weekend and ping you if still outstanding. nanodan: @ocefpaf did you want to merge this?
diff --git a/branca/_cnames.json b/branca/_cnames.json new file mode 100644 index 0000000..6baa3da --- /dev/null +++ b/branca/_cnames.json @@ -0,0 +1,1 @@ +{"indigo": "#4B0082", "gold": "#FFD700", "hotpink": "#FF69B4", "firebrick": "#B22222", "indianred": "#CD5C5C", "sage": "#87AE73", "yellow": "#FFFF00", "mistyrose": "#FFE4E1", "darkolivegreen": "#556B2F", "olive": "#808000", "darkseagreen": "#8FBC8F", "pink": "#FFC0CB", "tomato": "#FF6347", "lightcoral": "#F08080", "orangered": "#FF4500", "navajowhite": "#FFDEAD", "lime": "#00FF00", "palegreen": "#98FB98", "greenyellow": "#ADFF2F", "burlywood": "#DEB887", "seashell": "#FFF5EE", "mediumspringgreen": "#00FA9A", "fuchsia": "#FF00FF", "papayawhip": "#FFEFD5", "blanchedalmond": "#FFEBCD", "chartreuse": "#7FFF00", "dimgray": "#696969", "black": "#000000", "peachpuff": "#FFDAB9", "springgreen": "#00FF7F", "aquamarine": "#7FFFD4", "white": "#FFFFFF", "b": "#0000FF", "orange": "#FFA500", "lightsalmon": "#FFA07A", "darkslategray": "#2F4F4F", "brown": "#A52A2A", "ivory": "#FFFFF0", "dodgerblue": "#1E90FF", "peru": "#CD853F", "lawngreen": "#7CFC00", "chocolate": "#D2691E", "crimson": "#DC143C", "forestgreen": "#228B22", "slateblue": "#6A5ACD", "lightseagreen": "#20B2AA", "cyan": "#00FFFF", "mintcream": "#F5FFFA", "silver": "#C0C0C0", "antiquewhite": "#FAEBD7", "mediumorchid": "#BA55D3", "skyblue": "#87CEEB", "gray": "#808080", "darkturquoise": "#00CED1", "goldenrod": "#DAA520", "darkgreen": "#006400", "floralwhite": "#FFFAF0", "darkviolet": "#9400D3", "darkgray": "#A9A9A9", "moccasin": "#FFE4B5", "saddlebrown": "#8B4513", "darkslateblue": "#483D8B", "lightskyblue": "#87CEFA", "lightpink": "#FFB6C1", "mediumvioletred": "#C71585", "r": "#FF0000", "red": "#FF0000", "deeppink": "#FF1493", "limegreen": "#32CD32", "k": "#000000", "darkmagenta": "#8B008B", "palegoldenrod": "#EEE8AA", "plum": "#DDA0DD", "turquoise": "#40E0D0", "m": "#FF00FF", "lightgoldenrodyellow": "#FAFAD2", "darkgoldenrod": "#B8860B", "lavender": "#E6E6FA", 
"maroon": "#800000", "yellowgreen": "#9ACD32", "sandybrown": "#FAA460", "thistle": "#D8BFD8", "violet": "#EE82EE", "navy": "#000080", "magenta": "#FF00FF", "tan": "#D2B48C", "rosybrown": "#BC8F8F", "olivedrab": "#6B8E23", "blue": "#0000FF", "lightblue": "#ADD8E6", "ghostwhite": "#F8F8FF", "honeydew": "#F0FFF0", "cornflowerblue": "#6495ED", "linen": "#FAF0E6", "darkblue": "#00008B", "powderblue": "#B0E0E6", "seagreen": "#2E8B57", "darkkhaki": "#BDB76B", "snow": "#FFFAFA", "sienna": "#A0522D", "mediumblue": "#0000CD", "royalblue": "#4169E1", "lightcyan": "#E0FFFF", "green": "#008000", "mediumpurple": "#9370DB", "midnightblue": "#191970", "cornsilk": "#FFF8DC", "paleturquoise": "#AFEEEE", "bisque": "#FFE4C4", "slategray": "#708090", "darkcyan": "#008B8B", "khaki": "#F0E68C", "wheat": "#F5DEB3", "teal": "#008080", "darkorchid": "#9932CC", "deepskyblue": "#00BFFF", "salmon": "#FA8072", "y": "#FFFF00", "darkred": "#8B0000", "steelblue": "#4682B4", "g": "#008000", "palevioletred": "#DB7093", "lightslategray": "#778899", "aliceblue": "#F0F8FF", "lightgreen": "#90EE90", "orchid": "#DA70D6", "gainsboro": "#DCDCDC", "mediumseagreen": "#3CB371", "lightgray": "#D3D3D3", "c": "#00FFFF", "mediumturquoise": "#48D1CC", "darksage": "#598556", "lemonchiffon": "#FFFACD", "cadetblue": "#5F9EA0", "lightyellow": "#FFFFE0", "lavenderblush": "#FFF0F5", "coral": "#FF7F50", "purple": "#800080", "aqua": "#00FFFF", "lightsage": "#BCECAC", "whitesmoke": "#F5F5F5", "mediumslateblue": "#7B68EE", "darkorange": "#FF8C00", "mediumaquamarine": "#66CDAA", "darksalmon": "#E9967A", "beige": "#F5F5DC", "w": "#FFFFFF", "blueviolet": "#8A2BE2", "azure": "#F0FFFF", "lightsteelblue": "#B0C4DE", "oldlace": "#FDF5E6"} \ No newline at end of file diff --git a/branca/_schemes.json b/branca/_schemes.json new file mode 100644 index 0000000..b8bf5ee --- /dev/null +++ b/branca/_schemes.json @@ -0,0 +1,1 @@ +{"Pastel1_03": ["#fbb4ae", "#b3cde3", "#ccebc5"], "Pastel1_05": ["#fbb4ae", "#b3cde3", "#ccebc5", "#decbe4", 
"#fed9a6"], "Pastel1_04": ["#fbb4ae", "#b3cde3", "#ccebc5", "#decbe4"], "Pastel1_07": ["#fbb4ae", "#b3cde3", "#ccebc5", "#decbe4", "#fed9a6", "#ffffcc", "#e5d8bd"], "YlOrRd_04": ["#ffffb2", "#fecc5c", "#fd8d3c", "#e31a1c"], "Pastel1_09": ["#fbb4ae", "#b3cde3", "#ccebc5", "#decbe4", "#fed9a6", "#ffffcc", "#e5d8bd", "#fddaec", "#f2f2f2"], "Pastel1_08": ["#fbb4ae", "#b3cde3", "#ccebc5", "#decbe4", "#fed9a6", "#ffffcc", "#e5d8bd", "#fddaec"], "Spectral_07": ["#d53e4f", "#fc8d59", "#fee08b", "#ffffbf", "#e6f598", "#99d594", "#3288bd"], "RdYlBu_05": ["#d7191c", "#fdae61", "#ffffbf", "#abd9e9", "#2c7bb6"], "PuBuGn_03": ["#ece2f0", "#a6bddb", "#1c9099"], "Set1_08": ["#e41a1c", "#377eb8", "#4daf4a", "#984ea3", "#ff7f00", "#ffff33", "#a65628", "#f781bf"], "PuBuGn_05": ["#f6eff7", "#bdc9e1", "#67a9cf", "#1c9099", "#016c59"], "PuBuGn_04": ["#f6eff7", "#bdc9e1", "#67a9cf", "#02818a"], "PuBuGn_07": ["#f6eff7", "#d0d1e6", "#a6bddb", "#67a9cf", "#3690c0", "#02818a", "#016450"], "PuBuGn_06": ["#f6eff7", "#d0d1e6", "#a6bddb", "#67a9cf", "#1c9099", "#016c59"], "PuBuGn_09": ["#fff7fb", "#ece2f0", "#d0d1e6", "#a6bddb", "#67a9cf", "#3690c0", "#02818a", "#016c59", "#014636"], "PuBuGn_08": ["#fff7fb", "#ece2f0", "#d0d1e6", "#a6bddb", "#67a9cf", "#3690c0", "#02818a", "#016450"], "YlOrBr_04": ["#ffffd4", "#fed98e", "#fe9929", "#cc4c02"], "YlOrBr_05": ["#ffffd4", "#fed98e", "#fe9929", "#d95f0e", "#993404"], "Set1_07": ["#e41a1c", "#377eb8", "#4daf4a", "#984ea3", "#ff7f00", "#ffff33", "#a65628"], "YlOrBr_03": ["#fff7bc", "#fec44f", "#d95f0e"], "Set1_05": ["#e41a1c", "#377eb8", "#4daf4a", "#984ea3", "#ff7f00"], "YlOrRd_03": ["#ffeda0", "#feb24c", "#f03b20"], "PuOr_06": ["#b35806", "#f1a340", "#fee0b6", "#d8daeb", "#998ec3", "#542788"], "PuOr_07": ["#b35806", "#f1a340", "#fee0b6", "#f7f7f7", "#d8daeb", "#998ec3", "#542788"], "PuOr_04": ["#e66101", "#fdb863", "#b2abd2", "#5e3c99"], "PuOr_05": ["#e66101", "#fdb863", "#f7f7f7", "#b2abd2", "#5e3c99"], "PuOr_03": ["#f1a340", "#f7f7f7", "#998ec3"], 
"Purples_09": ["#fcfbfd", "#efedf5", "#dadaeb", "#bcbddc", "#9e9ac8", "#807dba", "#6a51a3", "#54278f", "#3f007d"], "Set2_06": ["#66c2a5", "#fc8d62", "#8da0cb", "#e78ac3", "#a6d854", "#ffd92f"], "RdYlBu_11": ["#a50026", "#d73027", "#f46d43", "#fdae61", "#fee090", "#ffffbf", "#e0f3f8", "#abd9e9", "#74add1", "#4575b4", "#313695"], "PuOr_08": ["#b35806", "#e08214", "#fdb863", "#fee0b6", "#d8daeb", "#b2abd2", "#8073ac", "#542788"], "PuOr_09": ["#b35806", "#e08214", "#fdb863", "#fee0b6", "#f7f7f7", "#d8daeb", "#b2abd2", "#8073ac", "#542788"], "Paired_03": ["#a6cee3", "#1f78b4", "#b2df8a"], "RdBu_03": ["#ef8a62", "#f7f7f7", "#67a9cf"], "RdYlBu_10": ["#a50026", "#d73027", "#f46d43", "#fdae61", "#fee090", "#e0f3f8", "#abd9e9", "#74add1", "#4575b4", "#313695"], "Paired_07": ["#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", "#e31a1c", "#fdbf6f"], "Paired_06": ["#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", "#e31a1c"], "Paired_05": ["#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99"], "Paired_04": ["#a6cee3", "#1f78b4", "#b2df8a", "#33a02c"], "Paired_09": ["#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", "#e31a1c", "#fdbf6f", "#ff7f00", "#cab2d6"], "Paired_08": ["#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", "#e31a1c", "#fdbf6f", "#ff7f00"], "RdGy_03": ["#ef8a62", "#ffffff", "#999999"], "PiYG_04": ["#d01c8b", "#f1b6da", "#b8e186", "#4dac26"], "Accent_03": ["#7fc97f", "#beaed4", "#fdc086"], "BuGn_08": ["#f7fcfd", "#e5f5f9", "#ccece6", "#99d8c9", "#66c2a4", "#41ae76", "#238b45", "#005824"], "BuGn_09": ["#f7fcfd", "#e5f5f9", "#ccece6", "#99d8c9", "#66c2a4", "#41ae76", "#238b45", "#006d2c", "#00441b"], "BuGn_04": ["#edf8fb", "#b2e2e2", "#66c2a4", "#238b45"], "BuGn_05": ["#edf8fb", "#b2e2e2", "#66c2a4", "#2ca25f", "#006d2c"], "BuGn_06": ["#edf8fb", "#ccece6", "#99d8c9", "#66c2a4", "#2ca25f", "#006d2c"], "BuGn_07": ["#edf8fb", "#ccece6", "#99d8c9", "#66c2a4", "#41ae76", "#238b45", "#005824"], "BuGn_03": ["#e5f5f9", "#99d8c9", "#2ca25f"], "YlGnBu_07": 
["#ffffcc", "#c7e9b4", "#7fcdbb", "#41b6c4", "#1d91c0", "#225ea8", "#0c2c84"], "YlGnBu_06": ["#ffffcc", "#c7e9b4", "#7fcdbb", "#41b6c4", "#2c7fb8", "#253494"], "YlGnBu_05": ["#ffffcc", "#a1dab4", "#41b6c4", "#2c7fb8", "#253494"], "YlGnBu_04": ["#ffffcc", "#a1dab4", "#41b6c4", "#225ea8"], "YlGnBu_03": ["#edf8b1", "#7fcdbb", "#2c7fb8"], "RdBu_06": ["#b2182b", "#ef8a62", "#fddbc7", "#d1e5f0", "#67a9cf", "#2166ac"], "RdBu_05": ["#ca0020", "#f4a582", "#f7f7f7", "#92c5de", "#0571b0"], "RdBu_04": ["#ca0020", "#f4a582", "#92c5de", "#0571b0"], "Accent_08": ["#7fc97f", "#beaed4", "#fdc086", "#ffff99", "#386cb0", "#f0027f", "#bf5b17", "#666666"], "RdBu_09": ["#b2182b", "#d6604d", "#f4a582", "#fddbc7", "#f7f7f7", "#d1e5f0", "#92c5de", "#4393c3", "#2166ac"], "RdBu_08": ["#b2182b", "#d6604d", "#f4a582", "#fddbc7", "#d1e5f0", "#92c5de", "#4393c3", "#2166ac"], "Set2_04": ["#66c2a5", "#fc8d62", "#8da0cb", "#e78ac3"], "YlGnBu_09": ["#ffffd9", "#edf8b1", "#c7e9b4", "#7fcdbb", "#41b6c4", "#1d91c0", "#225ea8", "#253494", "#081d58"], "YlGnBu_08": ["#ffffd9", "#edf8b1", "#c7e9b4", "#7fcdbb", "#41b6c4", "#1d91c0", "#225ea8", "#0c2c84"], "Blues_08": ["#f7fbff", "#deebf7", "#c6dbef", "#9ecae1", "#6baed6", "#4292c6", "#2171b5", "#084594"], "Blues_09": ["#f7fbff", "#deebf7", "#c6dbef", "#9ecae1", "#6baed6", "#4292c6", "#2171b5", "#08519c", "#08306b"], "RdPu_09": ["#fff7f3", "#fde0dd", "#fcc5c0", "#fa9fb5", "#f768a1", "#dd3497", "#ae017e", "#7a0177", "#49006a"], "RdPu_08": ["#fff7f3", "#fde0dd", "#fcc5c0", "#fa9fb5", "#f768a1", "#dd3497", "#ae017e", "#7a0177"], "Set3_07": ["#8dd3c7", "#ffffb3", "#bebada", "#fb8072", "#80b1d3", "#fdb462", "#b3de69"], "Set3_06": ["#8dd3c7", "#ffffb3", "#bebada", "#fb8072", "#80b1d3", "#fdb462"], "RdPu_05": ["#feebe2", "#fbb4b9", "#f768a1", "#c51b8a", "#7a0177"], "RdPu_04": ["#feebe2", "#fbb4b9", "#f768a1", "#ae017e"], "RdPu_07": ["#feebe2", "#fcc5c0", "#fa9fb5", "#f768a1", "#dd3497", "#ae017e", "#7a0177"], "RdPu_06": ["#feebe2", "#fcc5c0", "#fa9fb5", "#f768a1", 
"#c51b8a", "#7a0177"], "Blues_06": ["#eff3ff", "#c6dbef", "#9ecae1", "#6baed6", "#3182bd", "#08519c"], "Blues_07": ["#eff3ff", "#c6dbef", "#9ecae1", "#6baed6", "#4292c6", "#2171b5", "#084594"], "RdPu_03": ["#fde0dd", "#fa9fb5", "#c51b8a"], "Blues_05": ["#eff3ff", "#bdd7e7", "#6baed6", "#3182bd", "#08519c"], "Paired_10": ["#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", "#e31a1c", "#fdbf6f", "#ff7f00", "#cab2d6", "#6a3d9a"], "Paired_11": ["#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", "#e31a1c", "#fdbf6f", "#ff7f00", "#cab2d6", "#6a3d9a", "#ffff99"], "Paired_12": ["#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", "#e31a1c", "#fdbf6f", "#ff7f00", "#cab2d6", "#6a3d9a", "#ffff99", "#b15928"], "PuBu_06": ["#f1eef6", "#d0d1e6", "#a6bddb", "#74a9cf", "#2b8cbe", "#045a8d"], "PuBu_07": ["#f1eef6", "#d0d1e6", "#a6bddb", "#74a9cf", "#3690c0", "#0570b0", "#034e7b"], "PuBu_04": ["#f1eef6", "#bdc9e1", "#74a9cf", "#0570b0"], "PuBu_05": ["#f1eef6", "#bdc9e1", "#74a9cf", "#2b8cbe", "#045a8d"], "PuRd_05": ["#f1eef6", "#d7b5d8", "#df65b0", "#dd1c77", "#980043"], "PuBu_03": ["#ece7f2", "#a6bddb", "#2b8cbe"], "PuRd_07": ["#f1eef6", "#d4b9da", "#c994c7", "#df65b0", "#e7298a", "#ce1256", "#91003f"], "PuRd_06": ["#f1eef6", "#d4b9da", "#c994c7", "#df65b0", "#dd1c77", "#980043"], "PuRd_09": ["#f7f4f9", "#e7e1ef", "#d4b9da", "#c994c7", "#df65b0", "#e7298a", "#ce1256", "#980043", "#67001f"], "PuRd_08": ["#f7f4f9", "#e7e1ef", "#d4b9da", "#c994c7", "#df65b0", "#e7298a", "#ce1256", "#91003f"], "Set2_07": ["#66c2a5", "#fc8d62", "#8da0cb", "#e78ac3", "#a6d854", "#ffd92f", "#e5c494"], "PuBu_08": ["#fff7fb", "#ece7f2", "#d0d1e6", "#a6bddb", "#74a9cf", "#3690c0", "#0570b0", "#034e7b"], "PuBu_09": ["#fff7fb", "#ece7f2", "#d0d1e6", "#a6bddb", "#74a9cf", "#3690c0", "#0570b0", "#045a8d", "#023858"], "RdBu_10": ["#67001f", "#b2182b", "#d6604d", "#f4a582", "#fddbc7", "#d1e5f0", "#92c5de", "#4393c3", "#2166ac", "#053061"], "RdBu_11": ["#67001f", "#b2182b", "#d6604d", "#f4a582", "#fddbc7", 
"#f7f7f7", "#d1e5f0", "#92c5de", "#4393c3", "#2166ac", "#053061"], "Accent_06": ["#7fc97f", "#beaed4", "#fdc086", "#ffff99", "#386cb0", "#f0027f"], "Set3_03": ["#8dd3c7", "#ffffb3", "#bebada"], "Set3_05": ["#8dd3c7", "#ffffb3", "#bebada", "#fb8072", "#80b1d3"], "Set3_12": ["#8dd3c7", "#ffffb3", "#bebada", "#fb8072", "#80b1d3", "#fdb462", "#b3de69", "#fccde5", "#d9d9d9", "#bc80bd", "#ccebc5", "#ffed6f"], "Set3_10": ["#8dd3c7", "#ffffb3", "#bebada", "#fb8072", "#80b1d3", "#fdb462", "#b3de69", "#fccde5", "#d9d9d9", "#bc80bd"], "Set3_04": ["#8dd3c7", "#ffffb3", "#bebada", "#fb8072"], "RdGy_11": ["#67001f", "#b2182b", "#d6604d", "#f4a582", "#fddbc7", "#ffffff", "#e0e0e0", "#bababa", "#878787", "#4d4d4d", "#1a1a1a"], "RdGy_10": ["#67001f", "#b2182b", "#d6604d", "#f4a582", "#fddbc7", "#e0e0e0", "#bababa", "#878787", "#4d4d4d", "#1a1a1a"], "Set1_03": ["#e41a1c", "#377eb8", "#4daf4a"], "Set1_09": ["#e41a1c", "#377eb8", "#4daf4a", "#984ea3", "#ff7f00", "#ffff33", "#a65628", "#f781bf", "#999999"], "Set3_09": ["#8dd3c7", "#ffffb3", "#bebada", "#fb8072", "#80b1d3", "#fdb462", "#b3de69", "#fccde5", "#d9d9d9"], "BuPu_08": ["#f7fcfd", "#e0ecf4", "#bfd3e6", "#9ebcda", "#8c96c6", "#8c6bb1", "#88419d", "#6e016b"], "BuPu_09": ["#f7fcfd", "#e0ecf4", "#bfd3e6", "#9ebcda", "#8c96c6", "#8c6bb1", "#88419d", "#810f7c", "#4d004b"], "RdYlGn_11": ["#a50026", "#d73027", "#f46d43", "#fdae61", "#fee08b", "#ffffbf", "#d9ef8b", "#a6d96a", "#66bd63", "#1a9850", "#006837"], "Blues_03": ["#deebf7", "#9ecae1", "#3182bd"], "Set2_05": ["#66c2a5", "#fc8d62", "#8da0cb", "#e78ac3", "#a6d854"], "BuPu_03": ["#e0ecf4", "#9ebcda", "#8856a7"], "BuPu_06": ["#edf8fb", "#bfd3e6", "#9ebcda", "#8c96c6", "#8856a7", "#810f7c"], "BuPu_07": ["#edf8fb", "#bfd3e6", "#9ebcda", "#8c96c6", "#8c6bb1", "#88419d", "#6e016b"], "BuPu_04": ["#edf8fb", "#b3cde3", "#8c96c6", "#88419d"], "BuPu_05": ["#edf8fb", "#b3cde3", "#8c96c6", "#8856a7", "#810f7c"], "Accent_04": ["#7fc97f", "#beaed4", "#fdc086", "#ffff99"], "YlOrRd_05": 
["#ffffb2", "#fecc5c", "#fd8d3c", "#f03b20", "#bd0026"], "YlOrBr_08": ["#ffffe5", "#fff7bc", "#fee391", "#fec44f", "#fe9929", "#ec7014", "#cc4c02", "#8c2d04"], "Oranges_08": ["#fff5eb", "#fee6ce", "#fdd0a2", "#fdae6b", "#fd8d3c", "#f16913", "#d94801", "#8c2d04"], "Oranges_09": ["#fff5eb", "#fee6ce", "#fdd0a2", "#fdae6b", "#fd8d3c", "#f16913", "#d94801", "#a63603", "#7f2704"], "Oranges_06": ["#feedde", "#fdd0a2", "#fdae6b", "#fd8d3c", "#e6550d", "#a63603"], "Oranges_07": ["#feedde", "#fdd0a2", "#fdae6b", "#fd8d3c", "#f16913", "#d94801", "#8c2d04"], "Oranges_04": ["#feedde", "#fdbe85", "#fd8d3c", "#d94701"], "YlOrBr_09": ["#ffffe5", "#fff7bc", "#fee391", "#fec44f", "#fe9929", "#ec7014", "#cc4c02", "#993404", "#662506"], "Oranges_03": ["#fee6ce", "#fdae6b", "#e6550d"], "YlOrBr_06": ["#ffffd4", "#fee391", "#fec44f", "#fe9929", "#d95f0e", "#993404"], "Dark2_06": ["#1b9e77", "#d95f02", "#7570b3", "#e7298a", "#66a61e", "#e6ab02"], "Blues_04": ["#eff3ff", "#bdd7e7", "#6baed6", "#2171b5"], "YlOrBr_07": ["#ffffd4", "#fee391", "#fec44f", "#fe9929", "#ec7014", "#cc4c02", "#8c2d04"], "RdYlGn_05": ["#d7191c", "#fdae61", "#ffffbf", "#a6d96a", "#1a9641"], "Set3_08": ["#8dd3c7", "#ffffb3", "#bebada", "#fb8072", "#80b1d3", "#fdb462", "#b3de69", "#fccde5"], "YlOrRd_06": ["#ffffb2", "#fed976", "#feb24c", "#fd8d3c", "#f03b20", "#bd0026"], "Dark2_03": ["#1b9e77", "#d95f02", "#7570b3"], "Accent_05": ["#7fc97f", "#beaed4", "#fdc086", "#ffff99", "#386cb0"], "RdYlGn_08": ["#d73027", "#f46d43", "#fdae61", "#fee08b", "#d9ef8b", "#a6d96a", "#66bd63", "#1a9850"], "RdYlGn_09": ["#d73027", "#f46d43", "#fdae61", "#fee08b", "#ffffbf", "#d9ef8b", "#a6d96a", "#66bd63", "#1a9850"], "PuOr_11": ["#7f3b08", "#b35806", "#e08214", "#fdb863", "#fee0b6", "#f7f7f7", "#d8daeb", "#b2abd2", "#8073ac", "#542788", "#2d004b"], "YlOrRd_07": ["#ffffb2", "#fed976", "#feb24c", "#fd8d3c", "#fc4e2a", "#e31a1c", "#b10026"], "Spectral_11": ["#9e0142", "#d53e4f", "#f46d43", "#fdae61", "#fee08b", "#ffffbf", "#e6f598", 
"#abdda4", "#66c2a5", "#3288bd", "#5e4fa2"], "RdGy_08": ["#b2182b", "#d6604d", "#f4a582", "#fddbc7", "#e0e0e0", "#bababa", "#878787", "#4d4d4d"], "RdGy_09": ["#b2182b", "#d6604d", "#f4a582", "#fddbc7", "#ffffff", "#e0e0e0", "#bababa", "#878787", "#4d4d4d"], "RdGy_06": ["#b2182b", "#ef8a62", "#fddbc7", "#e0e0e0", "#999999", "#4d4d4d"], "RdGy_07": ["#b2182b", "#ef8a62", "#fddbc7", "#ffffff", "#e0e0e0", "#999999", "#4d4d4d"], "RdGy_04": ["#ca0020", "#f4a582", "#bababa", "#404040"], "RdGy_05": ["#ca0020", "#f4a582", "#ffffff", "#bababa", "#404040"], "RdYlGn_04": ["#d7191c", "#fdae61", "#a6d96a", "#1a9641"], "PiYG_09": ["#c51b7d", "#de77ae", "#f1b6da", "#fde0ef", "#f7f7f7", "#e6f5d0", "#b8e186", "#7fbc41", "#4d9221"], "RdYlGn_06": ["#d73027", "#fc8d59", "#fee08b", "#d9ef8b", "#91cf60", "#1a9850"], "RdYlGn_07": ["#d73027", "#fc8d59", "#fee08b", "#ffffbf", "#d9ef8b", "#91cf60", "#1a9850"], "Spectral_04": ["#d7191c", "#fdae61", "#abdda4", "#2b83ba"], "Spectral_05": ["#d7191c", "#fdae61", "#ffffbf", "#abdda4", "#2b83ba"], "Spectral_06": ["#d53e4f", "#fc8d59", "#fee08b", "#e6f598", "#99d594", "#3288bd"], "PiYG_08": ["#c51b7d", "#de77ae", "#f1b6da", "#fde0ef", "#e6f5d0", "#b8e186", "#7fbc41", "#4d9221"], "Set2_03": ["#66c2a5", "#fc8d62", "#8da0cb"], "Spectral_03": ["#fc8d59", "#ffffbf", "#99d594"], "Reds_08": ["#fff5f0", "#fee0d2", "#fcbba1", "#fc9272", "#fb6a4a", "#ef3b2c", "#cb181d", "#99000d"], "Set1_04": ["#e41a1c", "#377eb8", "#4daf4a", "#984ea3"], "Spectral_08": ["#d53e4f", "#f46d43", "#fdae61", "#fee08b", "#e6f598", "#abdda4", "#66c2a5", "#3288bd"], "Spectral_09": ["#d53e4f", "#f46d43", "#fdae61", "#fee08b", "#ffffbf", "#e6f598", "#abdda4", "#66c2a5", "#3288bd"], "Set2_08": ["#66c2a5", "#fc8d62", "#8da0cb", "#e78ac3", "#a6d854", "#ffd92f", "#e5c494", "#b3b3b3"], "Reds_09": ["#fff5f0", "#fee0d2", "#fcbba1", "#fc9272", "#fb6a4a", "#ef3b2c", "#cb181d", "#a50f15", "#67000d"], "Greys_07": ["#f7f7f7", "#d9d9d9", "#bdbdbd", "#969696", "#737373", "#525252", "#252525"], 
"Greys_06": ["#f7f7f7", "#d9d9d9", "#bdbdbd", "#969696", "#636363", "#252525"], "Greys_05": ["#f7f7f7", "#cccccc", "#969696", "#636363", "#252525"], "Greys_04": ["#f7f7f7", "#cccccc", "#969696", "#525252"], "Greys_03": ["#f0f0f0", "#bdbdbd", "#636363"], "PuOr_10": ["#7f3b08", "#b35806", "#e08214", "#fdb863", "#fee0b6", "#d8daeb", "#b2abd2", "#8073ac", "#542788", "#2d004b"], "Accent_07": ["#7fc97f", "#beaed4", "#fdc086", "#ffff99", "#386cb0", "#f0027f", "#bf5b17"], "Reds_06": ["#fee5d9", "#fcbba1", "#fc9272", "#fb6a4a", "#de2d26", "#a50f15"], "Greys_09": ["#ffffff", "#f0f0f0", "#d9d9d9", "#bdbdbd", "#969696", "#737373", "#525252", "#252525", "#000000"], "Greys_08": ["#ffffff", "#f0f0f0", "#d9d9d9", "#bdbdbd", "#969696", "#737373", "#525252", "#252525"], "Reds_07": ["#fee5d9", "#fcbba1", "#fc9272", "#fb6a4a", "#ef3b2c", "#cb181d", "#99000d"], "RdYlBu_08": ["#d73027", "#f46d43", "#fdae61", "#fee090", "#e0f3f8", "#abd9e9", "#74add1", "#4575b4"], "RdYlBu_09": ["#d73027", "#f46d43", "#fdae61", "#fee090", "#ffffbf", "#e0f3f8", "#abd9e9", "#74add1", "#4575b4"], "BrBG_09": ["#8c510a", "#bf812d", "#dfc27d", "#f6e8c3", "#f5f5f5", "#c7eae5", "#80cdc1", "#35978f", "#01665e"], "BrBG_08": ["#8c510a", "#bf812d", "#dfc27d", "#f6e8c3", "#c7eae5", "#80cdc1", "#35978f", "#01665e"], "BrBG_07": ["#8c510a", "#d8b365", "#f6e8c3", "#f5f5f5", "#c7eae5", "#5ab4ac", "#01665e"], "BrBG_06": ["#8c510a", "#d8b365", "#f6e8c3", "#c7eae5", "#5ab4ac", "#01665e"], "BrBG_05": ["#a6611a", "#dfc27d", "#f5f5f5", "#80cdc1", "#018571"], "BrBG_04": ["#a6611a", "#dfc27d", "#80cdc1", "#018571"], "BrBG_03": ["#d8b365", "#f5f5f5", "#5ab4ac"], "PiYG_06": ["#c51b7d", "#e9a3c9", "#fde0ef", "#e6f5d0", "#a1d76a", "#4d9221"], "Reds_03": ["#fee0d2", "#fc9272", "#de2d26"], "Set3_11": ["#8dd3c7", "#ffffb3", "#bebada", "#fb8072", "#80b1d3", "#fdb462", "#b3de69", "#fccde5", "#d9d9d9", "#bc80bd", "#ccebc5"], "Set1_06": ["#e41a1c", "#377eb8", "#4daf4a", "#984ea3", "#ff7f00", "#ffff33"], "PuRd_03": ["#e7e1ef", "#c994c7", 
"#dd1c77"], "PiYG_07": ["#c51b7d", "#e9a3c9", "#fde0ef", "#f7f7f7", "#e6f5d0", "#a1d76a", "#4d9221"], "RdBu_07": ["#b2182b", "#ef8a62", "#fddbc7", "#f7f7f7", "#d1e5f0", "#67a9cf", "#2166ac"], "Pastel1_06": ["#fbb4ae", "#b3cde3", "#ccebc5", "#decbe4", "#fed9a6", "#ffffcc"], "Spectral_10": ["#9e0142", "#d53e4f", "#f46d43", "#fdae61", "#fee08b", "#e6f598", "#abdda4", "#66c2a5", "#3288bd", "#5e4fa2"], "PuRd_04": ["#f1eef6", "#d7b5d8", "#df65b0", "#ce1256"], "OrRd_03": ["#fee8c8", "#fdbb84", "#e34a33"], "PiYG_03": ["#e9a3c9", "#f7f7f7", "#a1d76a"], "Oranges_05": ["#feedde", "#fdbe85", "#fd8d3c", "#e6550d", "#a63603"], "OrRd_07": ["#fef0d9", "#fdd49e", "#fdbb84", "#fc8d59", "#ef6548", "#d7301f", "#990000"], "OrRd_06": ["#fef0d9", "#fdd49e", "#fdbb84", "#fc8d59", "#e34a33", "#b30000"], "OrRd_05": ["#fef0d9", "#fdcc8a", "#fc8d59", "#e34a33", "#b30000"], "OrRd_04": ["#fef0d9", "#fdcc8a", "#fc8d59", "#d7301f"], "Reds_04": ["#fee5d9", "#fcae91", "#fb6a4a", "#cb181d"], "Reds_05": ["#fee5d9", "#fcae91", "#fb6a4a", "#de2d26", "#a50f15"], "OrRd_09": ["#fff7ec", "#fee8c8", "#fdd49e", "#fdbb84", "#fc8d59", "#ef6548", "#d7301f", "#b30000", "#7f0000"], "OrRd_08": ["#fff7ec", "#fee8c8", "#fdd49e", "#fdbb84", "#fc8d59", "#ef6548", "#d7301f", "#990000"], "BrBG_10": ["#543005", "#8c510a", "#bf812d", "#dfc27d", "#f6e8c3", "#c7eae5", "#80cdc1", "#35978f", "#01665e", "#003c30"], "BrBG_11": ["#543005", "#8c510a", "#bf812d", "#dfc27d", "#f6e8c3", "#f5f5f5", "#c7eae5", "#80cdc1", "#35978f", "#01665e", "#003c30"], "PiYG_05": ["#d01c8b", "#f1b6da", "#f7f7f7", "#b8e186", "#4dac26"], "YlOrRd_08": ["#ffffcc", "#ffeda0", "#fed976", "#feb24c", "#fd8d3c", "#fc4e2a", "#e31a1c", "#b10026"], "GnBu_04": ["#f0f9e8", "#bae4bc", "#7bccc4", "#2b8cbe"], "GnBu_05": ["#f0f9e8", "#bae4bc", "#7bccc4", "#43a2ca", "#0868ac"], "GnBu_06": ["#f0f9e8", "#ccebc5", "#a8ddb5", "#7bccc4", "#43a2ca", "#0868ac"], "GnBu_07": ["#f0f9e8", "#ccebc5", "#a8ddb5", "#7bccc4", "#4eb3d3", "#2b8cbe", "#08589e"], "Purples_08": 
["#fcfbfd", "#efedf5", "#dadaeb", "#bcbddc", "#9e9ac8", "#807dba", "#6a51a3", "#4a1486"], "GnBu_03": ["#e0f3db", "#a8ddb5", "#43a2ca"], "Purples_06": ["#f2f0f7", "#dadaeb", "#bcbddc", "#9e9ac8", "#756bb1", "#54278f"], "Purples_07": ["#f2f0f7", "#dadaeb", "#bcbddc", "#9e9ac8", "#807dba", "#6a51a3", "#4a1486"], "Purples_04": ["#f2f0f7", "#cbc9e2", "#9e9ac8", "#6a51a3"], "Purples_05": ["#f2f0f7", "#cbc9e2", "#9e9ac8", "#756bb1", "#54278f"], "GnBu_08": ["#f7fcf0", "#e0f3db", "#ccebc5", "#a8ddb5", "#7bccc4", "#4eb3d3", "#2b8cbe", "#08589e"], "GnBu_09": ["#f7fcf0", "#e0f3db", "#ccebc5", "#a8ddb5", "#7bccc4", "#4eb3d3", "#2b8cbe", "#0868ac", "#084081"], "YlOrRd_09": ["#ffffcc", "#ffeda0", "#fed976", "#feb24c", "#fd8d3c", "#fc4e2a", "#e31a1c", "#bd0026", "#800026"], "Purples_03": ["#efedf5", "#bcbddc", "#756bb1"], "RdYlBu_04": ["#d7191c", "#fdae61", "#abd9e9", "#2c7bb6"], "PRGn_09": ["#762a83", "#9970ab", "#c2a5cf", "#e7d4e8", "#f7f7f7", "#d9f0d3", "#a6dba0", "#5aae61", "#1b7837"], "PRGn_08": ["#762a83", "#9970ab", "#c2a5cf", "#e7d4e8", "#d9f0d3", "#a6dba0", "#5aae61", "#1b7837"], "PRGn_07": ["#762a83", "#af8dc3", "#e7d4e8", "#f7f7f7", "#d9f0d3", "#7fbf7b", "#1b7837"], "PRGn_06": ["#762a83", "#af8dc3", "#e7d4e8", "#d9f0d3", "#7fbf7b", "#1b7837"], "PRGn_05": ["#7b3294", "#c2a5cf", "#f7f7f7", "#a6dba0", "#008837"], "PRGn_04": ["#7b3294", "#c2a5cf", "#a6dba0", "#008837"], "PRGn_03": ["#af8dc3", "#f7f7f7", "#7fbf7b"], "RdYlBu_06": ["#d73027", "#fc8d59", "#fee090", "#e0f3f8", "#91bfdb", "#4575b4"], "RdYlGn_10": ["#a50026", "#d73027", "#f46d43", "#fdae61", "#fee08b", "#d9ef8b", "#a6d96a", "#66bd63", "#1a9850", "#006837"], "YlGn_08": ["#ffffe5", "#f7fcb9", "#d9f0a3", "#addd8e", "#78c679", "#41ab5d", "#238443", "#005a32"], "YlGn_09": ["#ffffe5", "#f7fcb9", "#d9f0a3", "#addd8e", "#78c679", "#41ab5d", "#238443", "#006837", "#004529"], "RdYlBu_07": ["#d73027", "#fc8d59", "#fee090", "#ffffbf", "#e0f3f8", "#91bfdb", "#4575b4"], "PiYG_10": ["#8e0152", "#c51b7d", "#de77ae", "#f1b6da", 
"#fde0ef", "#e6f5d0", "#b8e186", "#7fbc41", "#4d9221", "#276419"], "PiYG_11": ["#8e0152", "#c51b7d", "#de77ae", "#f1b6da", "#fde0ef", "#f7f7f7", "#e6f5d0", "#b8e186", "#7fbc41", "#4d9221", "#276419"], "YlGn_03": ["#f7fcb9", "#addd8e", "#31a354"], "YlGn_04": ["#ffffcc", "#c2e699", "#78c679", "#238443"], "YlGn_05": ["#ffffcc", "#c2e699", "#78c679", "#31a354", "#006837"], "YlGn_06": ["#ffffcc", "#d9f0a3", "#addd8e", "#78c679", "#31a354", "#006837"], "YlGn_07": ["#ffffcc", "#d9f0a3", "#addd8e", "#78c679", "#41ab5d", "#238443", "#005a32"], "Dark2_05": ["#1b9e77", "#d95f02", "#7570b3", "#e7298a", "#66a61e"], "Dark2_04": ["#1b9e77", "#d95f02", "#7570b3", "#e7298a"], "Dark2_07": ["#1b9e77", "#d95f02", "#7570b3", "#e7298a", "#66a61e", "#e6ab02", "#a6761d"], "Pastel2_03": ["#b3e2cd", "#fdcdac", "#cbd5e8"], "Pastel2_04": ["#b3e2cd", "#fdcdac", "#cbd5e8", "#f4cae4"], "Pastel2_05": ["#b3e2cd", "#fdcdac", "#cbd5e8", "#f4cae4", "#e6f5c9"], "Pastel2_06": ["#b3e2cd", "#fdcdac", "#cbd5e8", "#f4cae4", "#e6f5c9", "#fff2ae"], "Pastel2_07": ["#b3e2cd", "#fdcdac", "#cbd5e8", "#f4cae4", "#e6f5c9", "#fff2ae", "#f1e2cc"], "Pastel2_08": ["#b3e2cd", "#fdcdac", "#cbd5e8", "#f4cae4", "#e6f5c9", "#fff2ae", "#f1e2cc", "#cccccc"], "RdYlBu_03": ["#fc8d59", "#ffffbf", "#91bfdb"], "Dark2_08": ["#1b9e77", "#d95f02", "#7570b3", "#e7298a", "#66a61e", "#e6ab02", "#a6761d", "#666666"], "RdYlGn_03": ["#fc8d59", "#ffffbf", "#91cf60"], "PRGn_11": ["#40004b", "#762a83", "#9970ab", "#c2a5cf", "#e7d4e8", "#f7f7f7", "#d9f0d3", "#a6dba0", "#5aae61", "#1b7837", "#00441b"], "Greens_08": ["#f7fcf5", "#e5f5e0", "#c7e9c0", "#a1d99b", "#74c476", "#41ab5d", "#238b45", "#005a32"], "Greens_09": ["#f7fcf5", "#e5f5e0", "#c7e9c0", "#a1d99b", "#74c476", "#41ab5d", "#238b45", "#006d2c", "#00441b"], "Greens_06": ["#edf8e9", "#c7e9c0", "#a1d99b", "#74c476", "#31a354", "#006d2c"], "Greens_07": ["#edf8e9", "#c7e9c0", "#a1d99b", "#74c476", "#41ab5d", "#238b45", "#005a32"], "Greens_04": ["#edf8e9", "#bae4b3", "#74c476", "#238b45"], 
"Greens_05": ["#edf8e9", "#bae4b3", "#74c476", "#31a354", "#006d2c"], "PRGn_10": ["#40004b", "#762a83", "#9970ab", "#c2a5cf", "#e7d4e8", "#d9f0d3", "#a6dba0", "#5aae61", "#1b7837", "#00441b"], "Greens_03": ["#e5f5e0", "#a1d99b", "#31a354"]} \ No newline at end of file diff --git a/branca/colormap.py b/branca/colormap.py index bfa60a7..b669596 100644 --- a/branca/colormap.py +++ b/branca/colormap.py @@ -9,223 +9,25 @@ Utility module for dealing with colormaps. from __future__ import absolute_import import math +import json +import pkg_resources from jinja2 import Template from branca.six import text_type, binary_type from branca.element import MacroElement, Figure, JavascriptLink from branca.utilities import legend_scaler -_cnames = { - 'aliceblue': '#F0F8FF', - 'antiquewhite': '#FAEBD7', - 'aqua': '#00FFFF', - 'aquamarine': '#7FFFD4', - 'azure': '#F0FFFF', - 'beige': '#F5F5DC', - 'bisque': '#FFE4C4', - 'black': '#000000', - 'blanchedalmond': '#FFEBCD', - 'blue': '#0000FF', - 'blueviolet': '#8A2BE2', - 'brown': '#A52A2A', - 'burlywood': '#DEB887', - 'cadetblue': '#5F9EA0', - 'chartreuse': '#7FFF00', - 'chocolate': '#D2691E', - 'coral': '#FF7F50', - 'cornflowerblue': '#6495ED', - 'cornsilk': '#FFF8DC', - 'crimson': '#DC143C', - 'cyan': '#00FFFF', - 'darkblue': '#00008B', - 'darkcyan': '#008B8B', - 'darkgoldenrod': '#B8860B', - 'darkgray': '#A9A9A9', - 'darkgreen': '#006400', - 'darkkhaki': '#BDB76B', - 'darkmagenta': '#8B008B', - 'darkolivegreen': '#556B2F', - 'darkorange': '#FF8C00', - 'darkorchid': '#9932CC', - 'darkred': '#8B0000', - 'darksage': '#598556', - 'darksalmon': '#E9967A', - 'darkseagreen': '#8FBC8F', - 'darkslateblue': '#483D8B', - 'darkslategray': '#2F4F4F', - 'darkturquoise': '#00CED1', - 'darkviolet': '#9400D3', - 'deeppink': '#FF1493', - 'deepskyblue': '#00BFFF', - 'dimgray': '#696969', - 'dodgerblue': '#1E90FF', - 'firebrick': '#B22222', - 'floralwhite': '#FFFAF0', - 'forestgreen': '#228B22', - 'fuchsia': '#FF00FF', - 'gainsboro': '#DCDCDC', - 
'ghostwhite': '#F8F8FF', - 'gold': '#FFD700', - 'goldenrod': '#DAA520', - 'gray': '#808080', - 'green': '#008000', - 'greenyellow': '#ADFF2F', - 'honeydew': '#F0FFF0', - 'hotpink': '#FF69B4', - 'indianred': '#CD5C5C', - 'indigo': '#4B0082', - 'ivory': '#FFFFF0', - 'khaki': '#F0E68C', - 'lavender': '#E6E6FA', - 'lavenderblush': '#FFF0F5', - 'lawngreen': '#7CFC00', - 'lemonchiffon': '#FFFACD', - 'lightblue': '#ADD8E6', - 'lightcoral': '#F08080', - 'lightcyan': '#E0FFFF', - 'lightgoldenrodyellow': '#FAFAD2', - 'lightgreen': '#90EE90', - 'lightgray': '#D3D3D3', - 'lightpink': '#FFB6C1', - 'lightsage': '#BCECAC', - 'lightsalmon': '#FFA07A', - 'lightseagreen': '#20B2AA', - 'lightskyblue': '#87CEFA', - 'lightslategray': '#778899', - 'lightsteelblue': '#B0C4DE', - 'lightyellow': '#FFFFE0', - 'lime': '#00FF00', - 'limegreen': '#32CD32', - 'linen': '#FAF0E6', - 'magenta': '#FF00FF', - 'maroon': '#800000', - 'mediumaquamarine': '#66CDAA', - 'mediumblue': '#0000CD', - 'mediumorchid': '#BA55D3', - 'mediumpurple': '#9370DB', - 'mediumseagreen': '#3CB371', - 'mediumslateblue': '#7B68EE', - 'mediumspringgreen': '#00FA9A', - 'mediumturquoise': '#48D1CC', - 'mediumvioletred': '#C71585', - 'midnightblue': '#191970', - 'mintcream': '#F5FFFA', - 'mistyrose': '#FFE4E1', - 'moccasin': '#FFE4B5', - 'navajowhite': '#FFDEAD', - 'navy': '#000080', - 'oldlace': '#FDF5E6', - 'olive': '#808000', - 'olivedrab': '#6B8E23', - 'orange': '#FFA500', - 'orangered': '#FF4500', - 'orchid': '#DA70D6', - 'palegoldenrod': '#EEE8AA', - 'palegreen': '#98FB98', - 'paleturquoise': '#AFEEEE', - 'palevioletred': '#DB7093', - 'papayawhip': '#FFEFD5', - 'peachpuff': '#FFDAB9', - 'peru': '#CD853F', - 'pink': '#FFC0CB', - 'plum': '#DDA0DD', - 'powderblue': '#B0E0E6', - 'purple': '#800080', - 'red': '#FF0000', - 'rosybrown': '#BC8F8F', - 'royalblue': '#4169E1', - 'saddlebrown': '#8B4513', - 'salmon': '#FA8072', - 'sage': '#87AE73', - 'sandybrown': '#FAA460', - 'seagreen': '#2E8B57', - 'seashell': '#FFF5EE', - 
'sienna': '#A0522D', - 'silver': '#C0C0C0', - 'skyblue': '#87CEEB', - 'slateblue': '#6A5ACD', - 'slategray': '#708090', - 'snow': '#FFFAFA', - 'springgreen': '#00FF7F', - 'steelblue': '#4682B4', - 'tan': '#D2B48C', - 'teal': '#008080', - 'thistle': '#D8BFD8', - 'tomato': '#FF6347', - 'turquoise': '#40E0D0', - 'violet': '#EE82EE', - 'wheat': '#F5DEB3', - 'white': '#FFFFFF', - 'whitesmoke': '#F5F5F5', - 'yellow': '#FFFF00', - 'yellowgreen': '#9ACD32', - 'r': '#FF0000', - 'g': '#008000', - 'b': '#0000FF', - 'c': '#00FFFF', - 'm': '#FF00FF', - 'y': '#FFFF00', - 'w': '#FFFFFF', - 'k': '#000000', - } - -_schemes = {'BuGn': ['#EDF8FB', '#CCECE6', '#CCECE6', - '#66C2A4', '#41AE76', '#238B45', '#005824'], - 'BuPu': ['#EDF8FB', '#BFD3E6', '#9EBCDA', - '#8C96C6', '#8C6BB1', '#88419D', '#6E016B'], - 'GnBu': ['#F0F9E8', '#CCEBC5', '#A8DDB5', - '#7BCCC4', '#4EB3D3', '#2B8CBE', '#08589E'], - 'OrRd': ['#FEF0D9', '#FDD49E', '#FDBB84', - '#FC8D59', '#EF6548', '#D7301F', '#990000'], - 'PuBu': ['#F1EEF6', '#D0D1E6', '#A6BDDB', - '#74A9CF', '#3690C0', '#0570B0', '#034E7B'], - 'PuBuGn': ['#F6EFF7', '#D0D1E6', '#A6BDDB', - '#67A9CF', '#3690C0', '#02818A', '#016450'], - 'PuRd': ['#F1EEF6', '#D4B9DA', '#C994C7', - '#DF65B0', '#E7298A', '#CE1256', '#91003F'], - 'RdPu': ['#FEEBE2', '#FCC5C0', '#FA9FB5', - '#F768A1', '#DD3497', '#AE017E', '#7A0177'], - 'YlGn': ['#FFFFCC', '#D9F0A3', '#ADDD8E', - '#78C679', '#41AB5D', '#238443', '#005A32'], - 'YlGnBu': ['#FFFFCC', '#C7E9B4', '#7FCDBB', - '#41B6C4', '#1D91C0', '#225EA8', '#0C2C84'], - 'YlOrBr': ['#FFFFD4', '#FEE391', '#FEC44F', - '#FE9929', '#EC7014', '#CC4C02', '#8C2D04'], - 'YlOrRd': ['#FFFFB2', '#FED976', '#FEB24C', - '#FD8D3C', '#FC4E2A', '#E31A1C', '#B10026'], - 'BrBg': ['#8c510a', '#d8b365', '#f6e8c3', - '#c7eae5', '#5ab4ac', '#01665e'], - 'PiYG': ['#c51b7d', '#e9a3c9', '#fde0ef', - '#e6f5d0', '#a1d76a', '#4d9221'], - 'PRGn': ['#762a83', '#af8dc3', '#e7d4e8', - '#d9f0d3', '#7fbf7b', '#1b7837'], - 'PuOr': ['#b35806', '#f1a340', '#fee0b6', 
- '#d8daeb', '#998ec3', '#542788'], - 'RdBu': ['#b2182b', '#ef8a62', '#fddbc7', - '#d1e5f0', '#67a9cf', '#2166ac'], - 'RdGy': ['#b2182b', '#ef8a62', '#fddbc7', - '#e0e0e0', '#999999', '#4d4d4d'], - 'RdYlBu': ['#d73027', '#fc8d59', '#fee090', - '#e0f3f8', '#91bfdb', '#4575b4'], - 'RdYlGn': ['#d73027', '#fc8d59', '#fee08b', - '#d9ef8b', '#91cf60', '#1a9850'], - 'Spectral': ['#d53e4f', '#fc8d59', '#fee08b', - '#e6f598', '#99d594', '#3288bd'], - 'Accent': ['#7fc97f', '#beaed4', '#fdc086', - '#ffff99', '#386cb0', '#f0027f'], - 'Dark2': ['#1b9e77', '#d95f02', '#7570b3', - '#e7298a', '#66a61e', '#e6ab02'], - 'Paired': ['#a6cee3', '#1f78b4', '#b2df8a', - '#33a02c', '#fb9a99', '#e31a1c'], - 'Pastel1': ['#fbb4ae', '#b3cde3', '#ccebc5', - '#decbe4', '#fed9a6', '#ffffcc'], - 'Pastel2': ['#b3e2cd', '#fdcdac', '#cbd5e8', - '#f4cae4', '#e6f5c9', '#fff2ae'], - 'Set1': ['#e41a1c', '#377eb8', '#4daf4a', - '#984ea3', '#ff7f00', '#ffff33'], - 'Set2': ['#66c2a5', '#fc8d62', '#8da0cb', - '#e78ac3', '#a6d854', '#ffd92f'], - 'Set3': ['#8dd3c7', '#ffffb3', '#bebada', - '#fb8072', '#80b1d3', '#fdb462']} + +resource_package = __name__ +resource_path_schemes = '/_schemes.json' +resource_path_cnames = '/_cnames.json' + +cnames_string = pkg_resources.resource_stream( + resource_package, resource_path_cnames).read().decode() +_cnames = json.loads(cnames_string) + +schemes_string = pkg_resources.resource_stream( + resource_package, resource_path_schemes).read().decode() +_schemes = json.loads(schemes_string) def _parse_color(x): diff --git a/branca/scheme_base_codes.json b/branca/scheme_base_codes.json new file mode 100644 index 0000000..4e02c71 --- /dev/null +++ b/branca/scheme_base_codes.json @@ -0,0 +1,1 @@ +{"codes": ["Spectral", "RdYlGn", "PuBu", "Accent", "OrRd", "Set1", "Set2", "Set3", "BuPu", "Dark2", "RdBu", "Oranges", "BuGn", "PiYG", "YlOrBr", "YlGn", "Pastel2", "RdPu", "Greens", "PRGn", "YlGnBu", "RdYlBu", "Paired", "BrBG", "Purples", "Reds", "Pastel1", "GnBu", "Greys", "RdGy", 
"YlOrRd", "PuOr", "PuRd", "Blues", "PuBuGn"]} \ No newline at end of file diff --git a/branca/scheme_info.json b/branca/scheme_info.json new file mode 100644 index 0000000..4cd15b1 --- /dev/null +++ b/branca/scheme_info.json @@ -0,0 +1,1 @@ +{"Spectral": "Diverging", "RdYlGn": "Diverging", "Set2": "Qualitative", "Accent": "Qualitative", "OrRd": "Sequential", "Set1": "Qualitative", "PuBu": "Sequential", "Set3": "Qualitative", "BuPu": "Sequential", "Dark2": "Qualitative", "RdBu": "Diverging", "BuGn": "Sequential", "PiYG": "Diverging", "YlOrBr": "Sequential", "YlGn": "Sequential", "RdPu": "Sequential", "PRGn": "Diverging", "YlGnBu": "Sequential", "RdYlBu": "Diverging", "Paired": "Qualitative", "Pastel2": "Qualitative", "Pastel1": "Qualitative", "GnBu": "Sequential", "RdGy": "Diverging", "YlOrRd": "Sequential", "PuOr": "Diverging", "PuRd": "Sequential", "BrBg": "Diverging", "PuBuGn": "Sequential"} \ No newline at end of file diff --git a/branca/utilities.py b/branca/utilities.py index 410c219..afb995a 100644 --- a/branca/utilities.py +++ b/branca/utilities.py @@ -15,6 +15,7 @@ import math import zlib import struct import json +import pkg_resources import base64 from jinja2 import Environment, PackageLoader @@ -108,124 +109,71 @@ def color_brewer(color_code, n=6): """ maximum_n = 253 + minimum_n = 3 + + # Raise an error if the n requested is greater than the maximum. 
+ if n > maximum_n: + raise ValueError("The maximum number of colors in a" + " ColorBrewer sequential color series is 253") + if n < minimum_n: + raise ValueError("The minimum number of colors in a" + " ColorBrewer sequential color series is 3") + if color_code[-2:] == '_r': - core_color_code = color_code[:-2] + base_code = color_code[:-2] + core_color_code = base_code + '_' + str(n).zfill(2) color_reverse = True else: - core_color_code = color_code + base_code = color_code + core_color_code = base_code + '_' + str(n).zfill(2) color_reverse = False - scheme_info = {'BuGn': 'Sequential', - 'BuPu': 'Sequential', - 'GnBu': 'Sequential', - 'OrRd': 'Sequential', - 'PuBu': 'Sequential', - 'PuBuGn': 'Sequential', - 'PuRd': 'Sequential', - 'RdPu': 'Sequential', - 'YlGn': 'Sequential', - 'YlGnBu': 'Sequential', - 'YlOrBr': 'Sequential', - 'YlOrRd': 'Sequential', - 'BrBg': 'Diverging', - 'PiYG': 'Diverging', - 'PRGn': 'Diverging', - 'PuOr': 'Diverging', - 'RdBu': 'Diverging', - 'RdGy': 'Diverging', - 'RdYlBu': 'Diverging', - 'RdYlGn': 'Diverging', - 'Spectral': 'Diverging', - 'Accent': 'Qualitative', - 'Dark2': 'Qualitative', - 'Paired': 'Qualitative', - 'Pastel1': 'Qualitative', - 'Pastel2': 'Qualitative', - 'Set1': 'Qualitative', - 'Set2': 'Qualitative', - 'Set3': 'Qualitative', - } - - schemes = {'BuGn': ['#EDF8FB', '#CCECE6', '#CCECE6', - '#66C2A4', '#41AE76', '#238B45', '#005824'], - 'BuPu': ['#EDF8FB', '#BFD3E6', '#9EBCDA', - '#8C96C6', '#8C6BB1', '#88419D', '#6E016B'], - 'GnBu': ['#F0F9E8', '#CCEBC5', '#A8DDB5', - '#7BCCC4', '#4EB3D3', '#2B8CBE', '#08589E'], - 'OrRd': ['#FEF0D9', '#FDD49E', '#FDBB84', - '#FC8D59', '#EF6548', '#D7301F', '#990000'], - 'PuBu': ['#F1EEF6', '#D0D1E6', '#A6BDDB', - '#74A9CF', '#3690C0', '#0570B0', '#034E7B'], - 'PuBuGn': ['#F6EFF7', '#D0D1E6', '#A6BDDB', - '#67A9CF', '#3690C0', '#02818A', '#016450'], - 'PuRd': ['#F1EEF6', '#D4B9DA', '#C994C7', - '#DF65B0', '#E7298A', '#CE1256', '#91003F'], - 'RdPu': ['#FEEBE2', '#FCC5C0', '#FA9FB5', - 
'#F768A1', '#DD3497', '#AE017E', '#7A0177'], - 'YlGn': ['#FFFFCC', '#D9F0A3', '#ADDD8E', - '#78C679', '#41AB5D', '#238443', '#005A32'], - 'YlGnBu': ['#FFFFCC', '#C7E9B4', '#7FCDBB', - '#41B6C4', '#1D91C0', '#225EA8', '#0C2C84'], - 'YlOrBr': ['#FFFFD4', '#FEE391', '#FEC44F', - '#FE9929', '#EC7014', '#CC4C02', '#8C2D04'], - 'YlOrRd': ['#FFFFB2', '#FED976', '#FEB24C', - '#FD8D3C', '#FC4E2A', '#E31A1C', '#B10026'], - 'BrBg': ['#8c510a', '#d8b365', '#f6e8c3', - '#c7eae5', '#5ab4ac', '#01665e'], - 'PiYG': ['#c51b7d', '#e9a3c9', '#fde0ef', - '#e6f5d0', '#a1d76a', '#4d9221'], - 'PRGn': ['#762a83', '#af8dc3', '#e7d4e8', - '#d9f0d3', '#7fbf7b', '#1b7837'], - 'PuOr': ['#b35806', '#f1a340', '#fee0b6', - '#d8daeb', '#998ec3', '#542788'], - 'RdBu': ['#b2182b', '#ef8a62', '#fddbc7', - '#d1e5f0', '#67a9cf', '#2166ac'], - 'RdGy': ['#b2182b', '#ef8a62', '#fddbc7', - '#e0e0e0', '#999999', '#4d4d4d'], - 'RdYlBu': ['#d73027', '#fc8d59', '#fee090', - '#e0f3f8', '#91bfdb', '#4575b4'], - 'RdYlGn': ['#d73027', '#fc8d59', '#fee08b', - '#d9ef8b', '#91cf60', '#1a9850'], - 'Spectral': ['#d53e4f', '#fc8d59', '#fee08b', - '#e6f598', '#99d594', '#3288bd'], - 'Accent': ['#7fc97f', '#beaed4', '#fdc086', - '#ffff99', '#386cb0', '#f0027f'], - 'Dark2': ['#1b9e77', '#d95f02', '#7570b3', - '#e7298a', '#66a61e', '#e6ab02'], - 'Paired': ['#a6cee3', '#1f78b4', '#b2df8a', - '#33a02c', '#fb9a99', '#e31a1c'], - 'Pastel1': ['#fbb4ae', '#b3cde3', '#ccebc5', - '#decbe4', '#fed9a6', '#ffffcc'], - 'Pastel2': ['#b3e2cd', '#fdcdac', '#cbd5e8', - '#f4cae4', '#e6f5c9', '#fff2ae'], - 'Set1': ['#e41a1c', '#377eb8', '#4daf4a', - '#984ea3', '#ff7f00', '#ffff33'], - 'Set2': ['#66c2a5', '#fc8d62', '#8da0cb', - '#e78ac3', '#a6d854', '#ffd92f'], - 'Set3': ['#8dd3c7', '#ffffb3', '#bebada', - '#fb8072', '#80b1d3', '#fdb462'], - } + resource_package = __name__ + resource_path_schemes = '/_schemes.json' + resource_path_scheme_info = '/_cnames.json' + resource_path_scheme_base_codes = '/scheme_base_codes.json' - # Raise an error 
if the n requested is greater than the maximum. - if n > maximum_n: - raise ValueError("The maximum number of colors in a" - " ColorBrewer sequential color series is 253") + schemes_string = pkg_resources.resource_stream( + resource_package, resource_path_schemes).read().decode() + schemes = json.loads(schemes_string) + + scheme_info_string = pkg_resources.resource_stream( + resource_package, resource_path_scheme_info).read().decode() + scheme_info = json.loads(scheme_info_string) - # Only if n is greater than six do we interpolate values. - if n > 6: - if core_color_code not in schemes: - color_scheme = None + core_schemes_string = pkg_resources.resource_stream( + resource_package, resource_path_scheme_base_codes).read().decode() + core_schemes = json.loads(core_schemes_string)['codes'] + + if base_code not in core_schemes: + raise ValueError(base_code + " is not a valid ColorBrewer code") + + try: + schemes[core_color_code] + explicit_scheme = True + except KeyError: + explicit_scheme = False + + # Only if n is greater than the scheme length do we interpolate values. + if not explicit_scheme: + # Check to make sure that it is not a qualitative scheme. + if scheme_info[base_code] == 'Qualitative': + matching_quals = [] + for key in schemes: + if base_code + '_' in key: + matching_quals.append(int(key.split('_')[1])) + + raise ValueError("Expanded color support is not available" + " for Qualitative schemes; restrict the" + " number of colors for the " + base_code + + " code to between " + str(min(matching_quals)) + + " and " + str(max(matching_quals)) + ) else: - # Check to make sure that it is not a qualitative scheme. 
- if scheme_info[core_color_code] == 'Qualitative': - raise ValueError("Expanded color support is not available" - " for Qualitative schemes, restrict" - " number of colors to 6") + if not color_reverse: + color_scheme = linear_gradient(schemes.get(core_color_code), n) else: - if not color_reverse: - color_scheme = linear_gradient(schemes.get(core_color_code), n) - else: - color_scheme = linear_gradient(schemes.get(core_color_code)[::-1], n) + color_scheme = linear_gradient(schemes.get(core_color_code)[::-1], n) else: if not color_reverse: color_scheme = schemes.get(core_color_code, None)
Question about color support. For Qualitative schemes, restrict number of colors to 6? Is the any solution to support more color? as the Paired has 12 kinds of colors.
python-visualization/branca
diff --git a/tests/test_colormap.py b/tests/test_colormap.py index 73f3baa..51cfbb3 100644 --- a/tests/test_colormap.py +++ b/tests/test_colormap.py @@ -23,7 +23,7 @@ def test_simple_linear(): def test_linear_to_step(): some_list = [30.6, 50, 51, 52, 53, 54, 55, 60, 70, 100] - lc = cm.linear.YlOrRd + lc = cm.linear.YlOrRd_06 lc.to_step(n=12) lc.to_step(index=[0, 2, 4, 6, 8, 10]) lc.to_step(data=some_list, n=12) @@ -44,14 +44,14 @@ def test_step_to_linear(): def test_linear_object(): - cm.linear.OrRd._repr_html_() - cm.linear.PuBu.to_step(12) - cm.linear.YlGn.scale(3, 12) + cm.linear.OrRd_06._repr_html_() + cm.linear.PuBu_06.to_step(12) + cm.linear.YlGn_06.scale(3, 12) cm.linear._repr_html_() def test_step_object(): - cm.step.OrRd._repr_html_() - cm.step.PuBu.to_linear() - cm.step.YlGn.scale(3, 12) + cm.step.OrRd_06._repr_html_() + cm.step.PuBu_06.to_linear() + cm.step.YlGn_06.scale(3, 12) cm.step._repr_html_() diff --git a/tests/test_utilities.py b/tests/test_utilities.py index 5d96edb..d19932d 100644 --- a/tests/test_utilities.py +++ b/tests/test_utilities.py @@ -4,9 +4,9 @@ import branca.utilities as ut def test_color_brewer_base(): scheme = ut.color_brewer('YlGnBu', 9) assert scheme == [ - '#ffffcc', '#d5eeba', '#a3dbb7', - '#6fc7bd', '#41b6c4', '#269ac1', - '#1f77b4', '#1c519f', '#0c2c84' + '#ffffd9', '#edf8b1', '#c7e9b4', + '#7fcdbb', '#41b6c4', '#1d91c0', + '#225ea8', '#253494', '#081d58' ]
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_added_files", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 3, "test_score": 0 }, "num_modified_files": 2 }
0.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 -e git+https://github.com/python-visualization/branca.git@bd9151e172f04c9347acc23ad7fefa6b7502bd0a#egg=branca certifi==2021.5.30 importlib-metadata==4.8.3 iniconfig==1.1.1 Jinja2==3.0.3 MarkupSafe==2.0.1 packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: branca channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - jinja2==3.0.3 - markupsafe==2.0.1 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/branca
[ "tests/test_colormap.py::test_linear_to_step", "tests/test_colormap.py::test_linear_object", "tests/test_colormap.py::test_step_object", "tests/test_utilities.py::test_color_brewer_base" ]
[]
[ "tests/test_colormap.py::test_simple_step", "tests/test_colormap.py::test_simple_linear", "tests/test_colormap.py::test_step_to_linear", "tests/test_utilities.py::test_color_brewer_reverse" ]
[]
MIT License
1,765
[ "branca/scheme_base_codes.json", "branca/_schemes.json", "branca/colormap.py", "branca/_cnames.json", "branca/scheme_info.json", "branca/utilities.py" ]
[ "branca/scheme_base_codes.json", "branca/_schemes.json", "branca/colormap.py", "branca/_cnames.json", "branca/scheme_info.json", "branca/utilities.py" ]
networkx__networkx-2713
9f6c9cd6a561d41192bc29f14fd9bc16bcaad919
2017-10-15 17:09:15
93b4b9227aa8a7ac4cbd946cf3dae3b168e17b45
diff --git a/networkx/algorithms/community/quality.py b/networkx/algorithms/community/quality.py index 7de690af7..e04ff260d 100644 --- a/networkx/algorithms/community/quality.py +++ b/networkx/algorithms/community/quality.py @@ -114,7 +114,10 @@ def inter_community_edges(G, partition): # for block in partition)) # return sum(1 for u, v in G.edges() if aff[u] != aff[v]) # - return nx.quotient_graph(G, partition, create_using=nx.MultiGraph()).size() + if G.is_directed(): + return nx.quotient_graph(G, partition, create_using=nx.MultiDiGraph()).size() + else: + return nx.quotient_graph(G, partition, create_using=nx.MultiGraph()).size() def inter_community_non_edges(G, partition): diff --git a/networkx/algorithms/simple_paths.py b/networkx/algorithms/simple_paths.py index 763fa24d7..a2ef79671 100644 --- a/networkx/algorithms/simple_paths.py +++ b/networkx/algorithms/simple_paths.py @@ -333,7 +333,6 @@ def shortest_simple_paths(G, source, target, weight=None): for path in listA: if path[:i] == root: ignore_edges.add((path[i - 1], path[i])) - ignore_nodes.add(root[-1]) try: length, spur = shortest_path_func(G, root[-1], target, ignore_nodes=ignore_nodes, @@ -343,6 +342,7 @@ def shortest_simple_paths(G, source, target, weight=None): listB.push(root_length + length, path) except nx.NetworkXNoPath: pass + ignore_nodes.add(root[-1]) if listB: path = listB.pop() @@ -447,6 +447,8 @@ def _bidirectional_pred_succ(G, source, target, ignore_nodes=None, ignore_edges= succ is a dictionary of successors from w to the target. """ # does BFS from both source and target and meets in the middle + if ignore_nodes and (source in ignore_nodes or target in ignore_nodes): + raise nx.NetworkXNoPath("No path between %s and %s." 
% (source, target)) if target == source: return ({target: None}, {source: None}, source) @@ -605,6 +607,8 @@ def _bidirectional_dijkstra(G, source, target, weight='weight', shortest_path shortest_path_length """ + if ignore_nodes and (source in ignore_nodes or target in ignore_nodes): + raise nx.NetworkXNoPath("No path between %s and %s." % (source, target)) if source == target: return (0, [source])
inter_community_non_edges ignore directionality Hi, I think the function: nx.algorithms.community.quality.inter_community_non_edges() does not work properly for directed graph. It always return the non-edge of a undirected graph, basically halving the number of edges. This mean that the performance function (nx.algorithms.community.performance) will never by higher than 50% for a directed graph. I'm using version '2.0.dev_20170801111157', python 3.5.1 Best, Nicolas
networkx/networkx
diff --git a/networkx/algorithms/community/tests/test_quality.py b/networkx/algorithms/community/tests/test_quality.py index 0c5b94c5a..79ce7e7f6 100644 --- a/networkx/algorithms/community/tests/test_quality.py +++ b/networkx/algorithms/community/tests/test_quality.py @@ -12,6 +12,7 @@ module. """ from __future__ import division +from nose.tools import assert_equal from nose.tools import assert_almost_equal import networkx as nx @@ -19,6 +20,7 @@ from networkx import barbell_graph from networkx.algorithms.community import coverage from networkx.algorithms.community import modularity from networkx.algorithms.community import performance +from networkx.algorithms.community.quality import inter_community_edges class TestPerformance(object): @@ -61,3 +63,17 @@ def test_modularity(): assert_almost_equal(-16 / (14 ** 2), modularity(G, C)) C = [{0, 1, 2}, {3, 4, 5}] assert_almost_equal((35 * 2) / (14 ** 2), modularity(G, C)) + + +def test_inter_community_edges_with_digraphs(): + G = nx.complete_graph(2, create_using = nx.DiGraph()) + partition = [{0}, {1}] + assert_equal(inter_community_edges(G, partition), 2) + + G = nx.complete_graph(10, create_using = nx.DiGraph()) + partition = [{0}, {1, 2}, {3, 4, 5}, {6, 7, 8, 9}] + assert_equal(inter_community_edges(G, partition), 70) + + G = nx.cycle_graph(4, create_using = nx.DiGraph()) + partition = [{0, 1}, {2, 3}] + assert_equal(inter_community_edges(G, partition), 2) diff --git a/networkx/algorithms/tests/test_simple_paths.py b/networkx/algorithms/tests/test_simple_paths.py index e29255c32..4c701e487 100644 --- a/networkx/algorithms/tests/test_simple_paths.py +++ b/networkx/algorithms/tests/test_simple_paths.py @@ -220,6 +220,40 @@ def test_directed_weighted_shortest_simple_path(): cost = this_cost +def test_weighted_shortest_simple_path_issue2427(): + G = nx.Graph() + G.add_edge('IN', 'OUT', weight = 2) + G.add_edge('IN', 'A', weight = 1) + G.add_edge('IN', 'B', weight = 2) + G.add_edge('B', 'OUT', weight = 2) + 
assert_equal(list(nx.shortest_simple_paths(G, 'IN', 'OUT', weight = "weight")), + [['IN', 'OUT'], ['IN', 'B', 'OUT']]) + G = nx.Graph() + G.add_edge('IN', 'OUT', weight = 10) + G.add_edge('IN', 'A', weight = 1) + G.add_edge('IN', 'B', weight = 1) + G.add_edge('B', 'OUT', weight = 1) + assert_equal(list(nx.shortest_simple_paths(G, 'IN', 'OUT', weight = "weight")), + [['IN', 'B', 'OUT'], ['IN', 'OUT']]) + + +def test_directed_weighted_shortest_simple_path_issue2427(): + G = nx.DiGraph() + G.add_edge('IN', 'OUT', weight = 2) + G.add_edge('IN', 'A', weight = 1) + G.add_edge('IN', 'B', weight = 2) + G.add_edge('B', 'OUT', weight = 2) + assert_equal(list(nx.shortest_simple_paths(G, 'IN', 'OUT', weight = "weight")), + [['IN', 'OUT'], ['IN', 'B', 'OUT']]) + G = nx.DiGraph() + G.add_edge('IN', 'OUT', weight = 10) + G.add_edge('IN', 'A', weight = 1) + G.add_edge('IN', 'B', weight = 1) + G.add_edge('B', 'OUT', weight = 1) + assert_equal(list(nx.shortest_simple_paths(G, 'IN', 'OUT', weight = "weight")), + [['IN', 'B', 'OUT'], ['IN', 'OUT']]) + + def test_weight_name(): G = nx.cycle_graph(7) nx.set_edge_attributes(G, 1, 'weight') @@ -303,6 +337,38 @@ def test_bidirectional_shortest_path_restricted_directed_cycle(): ) +def test_bidirectional_shortest_path_ignore(): + G = nx.Graph() + nx.add_path(G, [1, 2]) + nx.add_path(G, [1, 3]) + nx.add_path(G, [1, 4]) + assert_raises( + nx.NetworkXNoPath, + _bidirectional_shortest_path, + G, + 1, 2, + ignore_nodes=[1], + ) + assert_raises( + nx.NetworkXNoPath, + _bidirectional_shortest_path, + G, + 1, 2, + ignore_nodes=[2], + ) + G = nx.Graph() + nx.add_path(G, [1, 3]) + nx.add_path(G, [1, 4]) + nx.add_path(G, [3, 2]) + assert_raises( + nx.NetworkXNoPath, + _bidirectional_shortest_path, + G, + 1, 2, + ignore_nodes=[1, 2], + ) + + def validate_path(G, s, t, soln_len, path): assert_equal(path[0], s) assert_equal(path[-1], t) @@ -362,3 +428,30 @@ def test_bidirectional_dijkstra_no_path(): nx.add_path(G, [1, 2, 3]) nx.add_path(G, [4, 5, 6]) path 
= _bidirectional_dijkstra(G, 1, 6) + + +def test_bidirectional_dijkstra_ignore(): + G = nx.Graph() + nx.add_path(G, [1, 2, 10]) + nx.add_path(G, [1, 3, 10]) + assert_raises( + nx.NetworkXNoPath, + _bidirectional_dijkstra, + G, + 1, 2, + ignore_nodes=[1], + ) + assert_raises( + nx.NetworkXNoPath, + _bidirectional_dijkstra, + G, + 1, 2, + ignore_nodes=[2], + ) + assert_raises( + nx.NetworkXNoPath, + _bidirectional_dijkstra, + G, + 1, 2, + ignore_nodes=[1, 2], + )
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 2 }
2.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "nose", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y libgdal-dev graphviz" ], "python": "3.6", "reqs_path": [ "requirements/default.txt", "requirements/test.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 charset-normalizer==2.0.12 codecov==2.1.13 coverage==6.2 decorator==5.1.1 idna==3.10 importlib-metadata==4.8.3 iniconfig==1.1.1 -e git+https://github.com/networkx/networkx.git@9f6c9cd6a561d41192bc29f14fd9bc16bcaad919#egg=networkx nose==1.3.7 nose-ignore-docstring==0.2 packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 requests==2.27.1 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 zipp==3.6.0
name: networkx channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - charset-normalizer==2.0.12 - codecov==2.1.13 - coverage==6.2 - decorator==5.1.1 - idna==3.10 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - nose==1.3.7 - nose-ignore-docstring==0.2 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - requests==2.27.1 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - zipp==3.6.0 prefix: /opt/conda/envs/networkx
[ "networkx/algorithms/community/tests/test_quality.py::test_inter_community_edges_with_digraphs", "networkx/algorithms/tests/test_simple_paths.py::test_weighted_shortest_simple_path_issue2427", "networkx/algorithms/tests/test_simple_paths.py::test_directed_weighted_shortest_simple_path_issue2427", "networkx/algorithms/tests/test_simple_paths.py::test_bidirectional_shortest_path_ignore", "networkx/algorithms/tests/test_simple_paths.py::test_bidirectional_dijkstra_ignore" ]
[]
[ "networkx/algorithms/community/tests/test_quality.py::TestPerformance::test_bad_partition", "networkx/algorithms/community/tests/test_quality.py::TestPerformance::test_good_partition", "networkx/algorithms/community/tests/test_quality.py::TestCoverage::test_bad_partition", "networkx/algorithms/community/tests/test_quality.py::TestCoverage::test_good_partition", "networkx/algorithms/community/tests/test_quality.py::test_modularity", "networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_empty_list", "networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_trivial_path", "networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_trivial_nonpath", "networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_simple_path", "networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_non_simple_path", "networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_cycle", "networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_missing_node", "networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_directed_path", "networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_directed_non_path", "networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_directed_cycle", "networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_multigraph", "networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_multidigraph", "networkx/algorithms/tests/test_simple_paths.py::test_all_simple_paths", "networkx/algorithms/tests/test_simple_paths.py::test_all_simple_paths_cutoff", "networkx/algorithms/tests/test_simple_paths.py::test_all_simple_paths_multigraph", "networkx/algorithms/tests/test_simple_paths.py::test_all_simple_paths_multigraph_with_cutoff", "networkx/algorithms/tests/test_simple_paths.py::test_all_simple_paths_directed", "networkx/algorithms/tests/test_simple_paths.py::test_all_simple_paths_empty", 
"networkx/algorithms/tests/test_simple_paths.py::test_hamiltonian_path", "networkx/algorithms/tests/test_simple_paths.py::test_cutoff_zero", "networkx/algorithms/tests/test_simple_paths.py::test_source_missing", "networkx/algorithms/tests/test_simple_paths.py::test_target_missing", "networkx/algorithms/tests/test_simple_paths.py::test_shortest_simple_paths", "networkx/algorithms/tests/test_simple_paths.py::test_shortest_simple_paths_directed", "networkx/algorithms/tests/test_simple_paths.py::test_Greg_Bernstein", "networkx/algorithms/tests/test_simple_paths.py::test_weighted_shortest_simple_path", "networkx/algorithms/tests/test_simple_paths.py::test_directed_weighted_shortest_simple_path", "networkx/algorithms/tests/test_simple_paths.py::test_weight_name", "networkx/algorithms/tests/test_simple_paths.py::test_ssp_source_missing", "networkx/algorithms/tests/test_simple_paths.py::test_ssp_target_missing", "networkx/algorithms/tests/test_simple_paths.py::test_ssp_multigraph", "networkx/algorithms/tests/test_simple_paths.py::test_bidirectional_shortest_path_restricted_cycle", "networkx/algorithms/tests/test_simple_paths.py::test_bidirectional_shortest_path_restricted_wheel", "networkx/algorithms/tests/test_simple_paths.py::test_bidirectional_shortest_path_restricted_directed_cycle", "networkx/algorithms/tests/test_simple_paths.py::test_bidirectional_dijksta_restricted", "networkx/algorithms/tests/test_simple_paths.py::test_bidirectional_dijkstra_no_path" ]
[]
BSD 3-Clause
1,766
[ "networkx/algorithms/simple_paths.py", "networkx/algorithms/community/quality.py" ]
[ "networkx/algorithms/simple_paths.py", "networkx/algorithms/community/quality.py" ]
smarkets__marge-bot-59
48d0576a978af8b71f4971926e345d7d1425a8c0
2017-10-15 18:24:53
48d0576a978af8b71f4971926e345d7d1425a8c0
diff --git a/marge/app.py b/marge/app.py index 576ade9..29b524e 100644 --- a/marge/app.py +++ b/marge/app.py @@ -178,7 +178,7 @@ def main(args=sys.argv[1:]): add_reviewers=options.add_reviewers, reapprove=options.impersonate_approvers, embargo=options.embargo, - ci_timeout=timedelta(seconds=options.ci_timeout), + ci_timeout=options.ci_timeout, ) ) diff --git a/marge/job.py b/marge/job.py index b2d69fe..ae2b251 100644 --- a/marge/job.py +++ b/marge/job.py @@ -63,7 +63,7 @@ class MergeJob(object): log.exception('Unexpected Git error') merge_request.comment('Something seems broken on my local git repo; check my logs!') raise - except Exception: + except Exception as _ex: log.exception('Unexpected Exception') merge_request.comment("I'm broken on the inside, please somebody fix me... :cry:") self.unassign_from_mr(merge_request) @@ -119,11 +119,6 @@ class MergeJob(object): log.info('Commit id to merge %r (into: %r)', actual_sha, target_sha) time.sleep(5) - if source_project.only_allow_merge_if_pipeline_succeeds: - self.wait_for_ci_to_pass(source_project.id, actual_sha) - log.info('CI passed!') - time.sleep(2) - sha_now = Commit.last_on_branch(source_project.id, merge_request.source_branch, api).id # Make sure no-one managed to race and push to the branch in the # meantime, because we're about to impersonate the approvers, and @@ -133,13 +128,18 @@ class MergeJob(object): # Re-approve the merge request, in case us pushing it has removed # approvals. Note that there is a bit of a race; effectively # approval can't be withdrawn after we've pushed (resetting - # approvals) and CI runs. 
+ # approvals) if self.opts.reapprove: # approving is not idempotent, so we need to check first that there are no approvals, # otherwise we'll get a failure on trying to re-instate the previous approvals current_approvals = merge_request.fetch_approvals() if not current_approvals.sufficient: approvals.reapprove() + + if source_project.only_allow_merge_if_pipeline_succeeds: + self.wait_for_ci_to_pass(source_project.id, actual_sha) + log.info('CI passed!') + time.sleep(2) try: merge_request.accept(remove_branch=True, sha=actual_sha) except gitlab.NotAcceptable as err:
Re-approvals only applied after successful CI run There is a comment related to this within `marge/job.py`, including for context: ``` # Re-approve the merge request, in case us pushing it has removed # approvals. Note that there is a bit of a race; effectively # approval can't be withdrawn after we've pushed (resetting # approvals) and CI runs. ``` Occasionally CI may fail due to transient network issues that are unrelated to the change made. In this case, Marge will error out and not bother attempting to reapply any approvals. GitLab doesn't remove approvals on CI failure, so it doesn't quite make sense that this happens with Marge. This also applies to any potential exception that might occur between the force push and applying approvals, we need to restart marge and then manually approve again. I'm unaware as to whether there is a historical reason for why approvals are reapplied when they are, but could they no be applied immediately after the rebase?
smarkets/marge-bot
diff --git a/tests/test_app.py b/tests/test_app.py index d8a4705..ed8e64b 100644 --- a/tests/test_app.py +++ b/tests/test_app.py @@ -1,10 +1,141 @@ +import contextlib import datetime +import os +import re +import shlex +import unittest.mock as mock +from functools import wraps -from marge.app import time_interval +import pytest + +import marge.app as app +import marge.bot as bot +import marge.interval as interval +import marge.job as job + +import tests.gitlab_api_mock as gitlab_mock +from tests.test_user import INFO as user_info + + [email protected] +def env(**kwargs): + original = os.environ.copy() + + os.environ.clear() + for k, v in kwargs.items(): + os.environ[k] = v + + yield + + os.environ.clear() + for k, v in original.items(): + os.environ[k] = v + + [email protected] +def main(cmdline=''): + def api_mock(gitlab_url, auth_token): + assert gitlab_url == 'http://foo.com' + assert auth_token in ('NON-ADMIN-TOKEN', 'ADMIN-TOKEN') + api = gitlab_mock.Api(gitlab_url=gitlab_url, auth_token=auth_token, initial_state='initial') + user_info_for_token = dict(user_info, is_admin=auth_token == 'ADMIN-TOKEN') + api.add_user(user_info_for_token, is_current=True) + return api + + class DoNothingBot(bot.Bot): + instance = None + + def start(self): + assert self.__class__.instance is None + self.__class__.instance = self + + @property + def config(self): + return self._config + + with mock.patch('marge.bot.Bot', new=DoNothingBot), mock.patch('marge.gitlab.Api', new=api_mock): + app.main(args=shlex.split(cmdline)) + the_bot = DoNothingBot.instance + assert the_bot is not None + yield the_bot + + +def test_default_values(): + with env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'): + with main() as bot: + assert bot.user.info == user_info + assert bot.config.project_regexp == re.compile('.*') + assert bot.config.git_timeout == datetime.timedelta(seconds=120) + assert bot.config.merge_opts == job.MergeJobOptions.default() + +def 
test_embargo(): + with env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'): + with main('--embargo="Fri 1pm-Mon 7am"') as bot: + assert bot.config.merge_opts == job.MergeJobOptions.default( + embargo=interval.IntervalUnion.from_human('Fri 1pm-Mon 7am'), + ) + +def test_add_tested(): + with env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'): + with main('--add-tested') as bot: + assert bot.config.merge_opts != job.MergeJobOptions.default() + assert bot.config.merge_opts == job.MergeJobOptions.default(add_tested=True) + +def test_add_part_of(): + with env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'): + with main('--add-part-of') as bot: + assert bot.config.merge_opts != job.MergeJobOptions.default() + assert bot.config.merge_opts == job.MergeJobOptions.default(add_part_of=True) + +def test_add_reviewers(): + with env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'): + with pytest.raises(AssertionError): + with main('--add-reviewers') as bot: + pass + + with env(MARGE_AUTH_TOKEN="ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'): + with main('--add-reviewers') as bot: + assert bot.config.merge_opts != job.MergeJobOptions.default() + assert bot.config.merge_opts == job.MergeJobOptions.default(add_reviewers=True) + + +def test_impersonate_approvers(): + with env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'): + with pytest.raises(AssertionError): + with main('--impersonate-approvers') as bot: + pass + + with env(MARGE_AUTH_TOKEN="ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'): + with main('--impersonate-approvers') as bot: + assert bot.config.merge_opts != job.MergeJobOptions.default() + assert bot.config.merge_opts == job.MergeJobOptions.default(reapprove=True) + + +def test_project_regexp(): + with 
env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'): + with main("--project-regexp='foo.*bar'") as bot: + assert bot.config.project_regexp == re.compile('foo.*bar') + +def test_ci_timeout(): + with env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'): + with main("--ci-timeout 5m") as bot: + assert bot.config.merge_opts != job.MergeJobOptions.default() + assert bot.config.merge_opts == job.MergeJobOptions.default(ci_timeout=datetime.timedelta(seconds=5*60)) + +def test_deprecated_max_ci_time_in_minutes(): + with env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'): + with main("--max-ci-time-in-minutes=5") as bot: + assert bot.config.merge_opts != job.MergeJobOptions.default() + assert bot.config.merge_opts == job.MergeJobOptions.default(ci_timeout=datetime.timedelta(seconds=5*60)) + +def test_git_timeout(): + with env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'): + with main("--git-timeout '150 s'") as bot: + assert bot.config.git_timeout == datetime.timedelta(seconds=150) # FIXME: I'd reallly prefer this to be a doctest, but adding --doctest-modules # seems to seriously mess up the test run def test_time_interval(): _900s = datetime.timedelta(0, 900) - assert [time_interval(x) for x in ['15min', '15min', '.25h', '900s']] == [_900s] * 4 + assert [app.time_interval(x) for x in ['15min', '15m', '.25h', '900s']] == [_900s] * 4 diff --git a/tests/test_job.py b/tests/test_job.py index e9b0639..4e9031b 100644 --- a/tests/test_job.py +++ b/tests/test_job.py @@ -88,6 +88,11 @@ class MockLab(object): Ok(_commit(id=rewritten_sha, status='success')), from_state=['passed', 'merged'], ) + api.add_transition( + GET('/projects/1234/repository/branches/useless_new_feature'), + Ok({'commit': _commit(id=rewritten_sha, status='running')}), + from_state='pushed', + ) api.add_transition( 
GET('/projects/1234/repository/branches/useless_new_feature'), Ok({'commit': _commit(id=rewritten_sha, status='success')}), @@ -192,14 +197,14 @@ class TestRebaseAndAccept(object): api.add_transition( GET('/projects/1234/repository/branches/useless_new_feature'), Ok({'commit': _commit(id=new_branch_head_sha, status='success')}), - from_state='passed', to_state='passed_but_head_changed' + from_state='pushed', to_state='pushed_but_head_changed' ) with patch('marge.job.push_rebased_and_rewritten_version', side_effect=mocklab.push_rebased): with mocklab.expected_failure("Someone pushed to branch while we were trying to merge"): job = self.make_job(marge.job.MergeJobOptions.default(add_tested=True, add_reviewers=False)) job.execute() - assert api.state == 'passed_but_head_changed' + assert api.state == 'pushed_but_head_changed' assert api.notes == ["I couldn't merge this branch: Someone pushed to branch while we were trying to merge"] def test_succeeds_second_time_if_master_moved(self, time_sleep):
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 2 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
astroid==2.11.7 attrs==22.2.0 backports.zoneinfo==0.2.1 certifi==2021.5.30 charset-normalizer==2.0.12 coverage==6.2 dateparser==1.1.3 dill==0.3.4 humanize==3.14.0 idna==3.10 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 isort==5.10.1 lazy-object-proxy==1.7.1 -e git+https://github.com/smarkets/marge-bot.git@48d0576a978af8b71f4971926e345d7d1425a8c0#egg=marge maya==0.6.1 mccabe==0.7.0 packaging==21.3 pendulum==2.1.2 platformdirs==2.4.0 pluggy==1.0.0 py==1.11.0 pylint==2.13.9 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 pytz-deprecation-shim==0.1.0.post0 pytzdata==2020.1 regex==2022.3.2 requests==2.27.1 six==1.17.0 snaptime==0.2.4 tomli==1.2.3 typed-ast==1.5.5 typing_extensions==4.1.1 tzdata==2025.2 tzlocal==4.2 urllib3==1.26.20 wrapt==1.16.0 zipp==3.6.0
name: marge-bot channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - astroid==2.11.7 - attrs==22.2.0 - backports-zoneinfo==0.2.1 - charset-normalizer==2.0.12 - coverage==6.2 - dateparser==1.1.3 - dill==0.3.4 - humanize==3.14.0 - idna==3.10 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - isort==5.10.1 - lazy-object-proxy==1.7.1 - maya==0.6.1 - mccabe==0.7.0 - packaging==21.3 - pendulum==2.1.2 - platformdirs==2.4.0 - pluggy==1.0.0 - py==1.11.0 - pylint==2.13.9 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pytz-deprecation-shim==0.1.0.post0 - pytzdata==2020.1 - regex==2022.3.2 - requests==2.27.1 - six==1.17.0 - snaptime==0.2.4 - tomli==1.2.3 - typed-ast==1.5.5 - typing-extensions==4.1.1 - tzdata==2025.2 - tzlocal==4.2 - urllib3==1.26.20 - wrapt==1.16.0 - zipp==3.6.0 prefix: /opt/conda/envs/marge-bot
[ "tests/test_app.py::test_default_values", "tests/test_app.py::test_embargo", "tests/test_app.py::test_add_tested", "tests/test_app.py::test_add_part_of", "tests/test_app.py::test_add_reviewers", "tests/test_app.py::test_impersonate_approvers", "tests/test_app.py::test_project_regexp", "tests/test_app.py::test_ci_timeout", "tests/test_app.py::test_deprecated_max_ci_time_in_minutes", "tests/test_app.py::test_git_timeout", "tests/test_job.py::TestRebaseAndAccept::test_fails_on_not_acceptable_if_master_did_not_move" ]
[]
[ "tests/test_app.py::test_time_interval", "tests/test_job.py::TestRebaseAndAccept::test_succeeds_first_time", "tests/test_job.py::TestRebaseAndAccept::test_succeeds_second_time_if_master_moved", "tests/test_job.py::TestRebaseAndAccept::test_handles_races_for_merging", "tests/test_job.py::TestRebaseAndAccept::test_handles_request_becoming_wip_after_push", "tests/test_job.py::TestRebaseAndAccept::test_guesses_git_hook_error_on_merge_refusal", "tests/test_job.py::TestRebaseAndAccept::test_tells_explicitly_that_gitlab_refused_to_merge", "tests/test_job.py::TestRebaseAndAccept::test_wont_merge_wip_stuff", "tests/test_job.py::TestRebaseAndAccept::test_wont_merge_branches_with_autosquash_if_rewriting", "tests/test_job.py::TestMergeJobOptions::test_default", "tests/test_job.py::TestMergeJobOptions::test_default_ci_time" ]
[]
BSD 3-Clause "New" or "Revised" License
1,767
[ "marge/job.py", "marge/app.py" ]
[ "marge/job.py", "marge/app.py" ]
stitchfix__nodebook-5
08d7b380e4417c0b16d74b3244b109726b0c8359
2017-10-16 01:32:10
08d7b380e4417c0b16d74b3244b109726b0c8359
diff --git a/README.md b/README.md index ce22d57..5b3672b 100644 --- a/README.md +++ b/README.md @@ -30,9 +30,9 @@ For additional example usage, see [nodebook_demo.ipynb](./nodebook_demo.ipynb). ## FAQ -#### Q: Does Nodebook support Python 3? +#### Q: Does Nodebook support Python 2? -Unfortunately, not yet. Please try it in Python 2.7. +Yes, but we recommend using Python 3. #### Q: Why am I seeing "ERROR:root:Cell magic `%%execute_cell` not found."? diff --git a/ipython/extensions/nodebookext.py b/ipython/extensions/nodebookext.py index 30dc7a5..f049f7a 100644 --- a/ipython/extensions/nodebookext.py +++ b/ipython/extensions/nodebookext.py @@ -1,4 +1,5 @@ -import cPickle as pickle +from __future__ import absolute_import +import six.moves.cPickle as pickle import os import sys import errno diff --git a/nodebook/nodebookcore.py b/nodebook/nodebookcore.py index a12eda5..98b5cdc 100644 --- a/nodebook/nodebookcore.py +++ b/nodebook/nodebookcore.py @@ -1,6 +1,9 @@ +from __future__ import absolute_import +from __future__ import print_function from . 
import pickledict import ast -import __builtin__ +import six.moves.builtins +import six INDENT = ' ' # an indent is canonically 4 spaces ;) @@ -116,7 +119,7 @@ class Nodebook(object): res, output_objs, output_hashes = node.run(input_objs, input_hashes) # update node outputs - for var, val in output_objs.iteritems(): + for var, val in six.iteritems(output_objs): self.variables[output_hashes[var]] = val self._update_output_hashes(node, output_hashes) @@ -129,7 +132,7 @@ class Nodebook(object): """ # base case if node is None: - if var in __builtin__.__dict__: + if var in six.moves.builtins.__dict__: return None else: raise KeyError("name '%s' is not defined" % var) @@ -141,7 +144,7 @@ class Nodebook(object): else: # re-run the parent if it wasn't valid # TODO: synchronize output with frontend javascript - print "auto-running invalidated node N_%s (%s)" % (node.get_index() + 1, node.name) + print("auto-running invalidated node N_%s (%s)" % (node.get_index() + 1, node.name)) self.run_node(node.name) return self._find_latest_output(node, var) else: @@ -153,23 +156,23 @@ class Nodebook(object): Update node's output hashes and invalid downstream nodes that depended on their previous values """ # invalidate any any children relying on specific hash-versions of old outputs that aren't in the new outputs - invalidated_outputs = set(node.outputs.iteritems()) - set(outputs.iteritems()) + invalidated_outputs = set(six.iteritems(node.outputs)) - set(six.iteritems(outputs)) invalidated_outputs = {k: v for k, v in invalidated_outputs} # also invalidate any children that rely on any version of a brand-new output, regardless of hash # TODO this is potentially overly restrictive, if, eg, a value is blindly over-written again later # TODO(con't) we should try to account for this to avoid invalidating excessively many cells - new_outputs = set(outputs.iteritems()) - set(node.outputs.iteritems()) + new_outputs = set(six.iteritems(outputs)) - set(six.iteritems(node.outputs)) new_outputs 
= {k: v for k, v in new_outputs} # update reference counts - for val_hash in new_outputs.itervalues(): + for val_hash in six.itervalues(new_outputs): self.add_ref(val_hash) - for val_hash in invalidated_outputs.itervalues(): + for val_hash in six.itervalues(invalidated_outputs): self.remove_ref(val_hash) # invalidate changed outputs - invalidated_outputs.update({k: None for k, _ in new_outputs.iteritems()}) + invalidated_outputs.update({k: None for k, _ in six.iteritems(new_outputs)}) node.outputs = outputs node.invalidate_children(invalidated_outputs) @@ -274,10 +277,10 @@ class Node(object): block = ast.parse(self.code) if len(block.body) > 0 and type(block.body[-1]) is ast.Expr: last = ast.Expression(block.body.pop().value) - exec compile(block, '<string>', mode='exec') in env + exec(compile(block, '<string>', mode='exec'), env) res = eval(compile(last, '<string>', mode='eval'), env) else: - exec compile(block, '<string>', mode='exec') in env + exec(compile(block, '<string>', mode='exec'), env) res = None # find outputs which have changed from input hashes diff --git a/nodebook/pickledict.py b/nodebook/pickledict.py index 6e8c4be..d4fc6e5 100644 --- a/nodebook/pickledict.py +++ b/nodebook/pickledict.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import import io import os from functools import partial @@ -5,6 +6,7 @@ import hashlib import pandas as pd import msgpack import inspect +import six # using dill instead of pickle for more complete serialization import dill @@ -13,9 +15,18 @@ import dill try: from cStringIO import StringIO except ImportError: - from StringIO import StringIO + try: + from StringIO import StringIO + except ImportError: + # Python3. We are using StringIO as a target for pickle, so we + # actually want BytesIO. 
+ from io import BytesIO as StringIO -import UserDict +try: + from UserDict import DictMixin +except ImportError: + # see https://github.com/flask-restful/flask-restful/pull/231/files + from collections import MutableMapping as DictMixin PANDAS_CODE = 1 DILL_CODE = 2 @@ -45,7 +56,7 @@ def msgpack_deserialize(code, data): return msgpack.ExtType(code, data) -class PickleDict(object, UserDict.DictMixin): +class PickleDict(DictMixin): """ Dictionary with immutable elements using pickle(dill), optionally supporting persisting to disk """ @@ -55,13 +66,13 @@ class PickleDict(object, UserDict.DictMixin): persist_path: if provided, perform serialization to/from disk to this path """ self.persist_path = persist_path + self.encodings = {} self.dump = partial(msgpack.dump, default=msgpack_serialize) self.load = partial(msgpack.load, ext_hook=msgpack_deserialize) - self.dict = {} def keys(self): - return self.dict.keys() + return list(self.dict.keys()) def __len__(self): return len(self.dict) @@ -77,25 +88,33 @@ class PickleDict(object, UserDict.DictMixin): return self[key] return default + def __iter__(self): + for key in self.dict: + yield key + def __getitem__(self, key): if self.persist_path is not None: path = self.dict[key] with open(path, 'rb') as f: - value = self.load(f) + value = self.load(f, encoding=self.encodings[key]) else: f = StringIO(self.dict[key]) - value = self.load(f) + value = self.load(f, encoding=self.encodings[key]) return value def __setitem__(self, key, value): + encoding = None + if isinstance(value, six.string_types): + encoding = 'utf-8' + self.encodings[key] = encoding if self.persist_path is not None: path = os.path.join(self.persist_path, '%s.pak' % key) with open(path, 'wb') as f: - self.dump(value, f) + self.dump(value, f, encoding=encoding) self.dict[key] = path else: f = StringIO() - self.dump(value, f) + self.dump(value, f, encoding=encoding) serialized = f.getvalue() self.dict[key] = serialized diff --git a/nodebook/utils.py 
b/nodebook/utils.py index 5bffb80..baab14e 100644 --- a/nodebook/utils.py +++ b/nodebook/utils.py @@ -1,10 +1,12 @@ +from __future__ import absolute_import import json -from nodebookcore import INDENT +from .nodebookcore import INDENT +import six def output_to_function(output_node, main_closing_statement, args): def add_dependencies(node_inputs, dep_set): - dep = {(k, v) for k, v in node_inputs.iteritems() if k not in args and v is not None} + dep = {(k, v) for k, v in six.iteritems(node_inputs) if k not in args and v is not None} return dep_set.union(dep) depends = add_dependencies(output_node.inputs, set()) @@ -14,8 +16,8 @@ def output_to_function(output_node, main_closing_statement, args): n = output_node while not depends.issubset(avail) and n.parent is not None: n = n.parent - if len(depends.intersection(n.outputs.iteritems())) != 0: - avail.update(n.outputs.iteritems()) + if len(depends.intersection(six.iteritems(n.outputs))) != 0: + avail.update(six.iteritems(n.outputs)) depends = add_dependencies(n.inputs, depends) funcs.append(n.extract_function()) @@ -37,16 +39,16 @@ def create_module(node, export_statement, input_dict): """ create a python module to execute a given node """ - body = output_to_function(node, export_statement, input_dict.keys()) + body = output_to_function(node, export_statement, list(input_dict.keys())) imports = '\n'.join([ 'import json', ]) deser = '' - for k, v in input_dict.iteritems(): + for k, v in six.iteritems(input_dict): deser += "\n{} = json.loads('{}')".format(k, json.dumps(v)) - deser += '\nmain({})'.format(','.join(input_dict.iterkeys())) + deser += '\nmain({})'.format(','.join(six.iterkeys(input_dict))) code = '{}\n\n{}\n\n{}\n'.format(imports, body, deser) return code diff --git a/setup.py b/setup.py index b47742a..6a5b93a 100644 --- a/setup.py +++ b/setup.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import from setuptools import setup, find_packages import os import sys @@ -8,7 +9,7 @@ if 'bdist_wheel' in sys.argv: 
setup( name='nodebook', - version='0.1.0', + version='0.2.0-dev', author='Kevin Zielnicki', author_email='[email protected]', license='Stitch Fix 2017', @@ -17,7 +18,7 @@ setup( long_description='Nodebook Jupyter Extension', url='https://github.com/stitchfix/nodebook', install_requires=[ - 'ipython<6', # newer versions of ipython do not support 2.7 + 'ipython', 'jupyter', 'click', 'dill',
Python 3 support Nodebook currently only supports python 2, but python 3 support should be included for the next major release.
stitchfix/nodebook
diff --git a/tests/test_nodebookcore.py b/tests/test_nodebookcore.py index dcb2579..cfd9646 100644 --- a/tests/test_nodebookcore.py +++ b/tests/test_nodebookcore.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import import pandas as pd import pytest from nodebook.nodebookcore import ReferenceFinder, Nodebook, Node diff --git a/tests/test_pickledict.py b/tests/test_pickledict.py index b6d773c..ef35fdd 100644 --- a/tests/test_pickledict.py +++ b/tests/test_pickledict.py @@ -1,3 +1,6 @@ +from __future__ import absolute_import +from __future__ import print_function +from __future__ import unicode_literals import pandas as pd import pytest from nodebook.pickledict import PickleDict @@ -7,7 +10,7 @@ from nodebook.pickledict import PickleDict def mydict(request, tmpdir): if request.param == 'tmpdir': persist_path = tmpdir.strpath - print persist_path + print(persist_path) else: persist_path = None return PickleDict(persist_path=persist_path) @@ -22,6 +25,10 @@ class TestPickleDict(object): mydict['test_string'] = 'foo' assert mydict['test_string'] == 'foo' + def test_bytes(self, mydict): + mydict['test_bytes'] = b'foo' + assert mydict['test_bytes'] == b'foo' + def test_df(self, mydict): df = pd.DataFrame({'a': [0, 1, 2], 'b': ['foo', 'bar', 'baz']}) mydict['test_df'] = df
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 6 }
0.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "2.7", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
anyio==3.6.2 argon2-cffi==21.3.0 argon2-cffi-bindings==21.2.0 async-generator==1.10 attrs==22.2.0 Babel==2.11.0 bleach==4.1.0 certifi==2021.5.30 cffi==1.15.1 charset-normalizer==2.0.12 click==8.0.4 comm==0.1.4 contextvars==2.4 dataclasses==0.8 decorator==5.1.1 defusedxml==0.7.1 dill==0.3.4 entrypoints==0.4 idna==3.10 immutables==0.19 importlib-metadata==4.8.3 iniconfig==1.1.1 ipykernel==5.5.6 ipython==5.10.0 ipython-genutils==0.2.0 ipywidgets==7.8.5 Jinja2==3.0.3 json5==0.9.16 jsonschema==3.2.0 jupyter==1.1.1 jupyter-client==7.1.2 jupyter-console==5.2.0 jupyter-core==4.9.2 jupyter-server==1.13.1 jupyterlab==3.2.9 jupyterlab-pygments==0.1.2 jupyterlab-server==2.10.3 jupyterlab_widgets==1.1.11 MarkupSafe==2.0.1 mistune==0.8.4 msgpack-python==0.5.6 nbclassic==0.3.5 nbclient==0.5.9 nbconvert==6.0.7 nbformat==5.1.3 nest-asyncio==1.6.0 -e git+https://github.com/stitchfix/nodebook.git@08d7b380e4417c0b16d74b3244b109726b0c8359#egg=nodebook notebook==6.4.10 numpy==1.19.5 packaging==21.3 pandas==1.1.5 pandocfilters==1.5.1 pexpect==4.9.0 pickleshare==0.7.5 pluggy==1.0.0 prometheus-client==0.17.1 prompt-toolkit==1.0.18 ptyprocess==0.7.0 py==1.11.0 pycparser==2.21 Pygments==2.5.2 pyparsing==3.1.4 pyrsistent==0.18.0 pytest==7.0.1 pytest-runner==5.3.2 python-dateutil==2.9.0.post0 pytz==2025.2 pyzmq==25.1.2 requests==2.27.1 Send2Trash==1.8.3 simplegeneric==0.8.1 six==1.17.0 sniffio==1.2.0 terminado==0.12.1 testpath==0.6.0 tomli==1.2.3 tornado==6.1 traitlets==4.3.3 typing_extensions==4.1.1 urllib3==1.26.20 wcwidth==0.2.13 webencodings==0.5.1 websocket-client==1.3.1 widgetsnbextension==3.6.10 zipp==3.6.0
name: nodebook channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - anyio==3.6.2 - argon2-cffi==21.3.0 - argon2-cffi-bindings==21.2.0 - async-generator==1.10 - attrs==22.2.0 - babel==2.11.0 - bleach==4.1.0 - cffi==1.15.1 - charset-normalizer==2.0.12 - click==8.0.4 - comm==0.1.4 - contextvars==2.4 - dataclasses==0.8 - decorator==5.1.1 - defusedxml==0.7.1 - dill==0.3.4 - entrypoints==0.4 - idna==3.10 - immutables==0.19 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - ipykernel==5.5.6 - ipython==5.10.0 - ipython-genutils==0.2.0 - ipywidgets==7.8.5 - jinja2==3.0.3 - json5==0.9.16 - jsonschema==3.2.0 - jupyter==1.1.1 - jupyter-client==7.1.2 - jupyter-console==5.2.0 - jupyter-core==4.9.2 - jupyter-server==1.13.1 - jupyterlab==3.2.9 - jupyterlab-pygments==0.1.2 - jupyterlab-server==2.10.3 - jupyterlab-widgets==1.1.11 - markupsafe==2.0.1 - mistune==0.8.4 - msgpack-python==0.5.6 - nbclassic==0.3.5 - nbclient==0.5.9 - nbconvert==6.0.7 - nbformat==5.1.3 - nest-asyncio==1.6.0 - notebook==6.4.10 - numpy==1.19.5 - packaging==21.3 - pandas==1.1.5 - pandocfilters==1.5.1 - pexpect==4.9.0 - pickleshare==0.7.5 - pluggy==1.0.0 - prometheus-client==0.17.1 - prompt-toolkit==1.0.18 - ptyprocess==0.7.0 - py==1.11.0 - pycparser==2.21 - pygments==2.5.2 - pyparsing==3.1.4 - pyrsistent==0.18.0 - pytest==7.0.1 - pytest-runner==5.3.2 - 
python-dateutil==2.9.0.post0 - pytz==2025.2 - pyzmq==25.1.2 - requests==2.27.1 - send2trash==1.8.3 - simplegeneric==0.8.1 - six==1.17.0 - sniffio==1.2.0 - terminado==0.12.1 - testpath==0.6.0 - tomli==1.2.3 - tornado==6.1 - traitlets==4.3.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - wcwidth==0.2.13 - webencodings==0.5.1 - websocket-client==1.3.1 - widgetsnbextension==3.6.10 - zipp==3.6.0 prefix: /opt/conda/envs/nodebook
[ "tests/test_nodebookcore.py::TestReferenceFinder::test_assign", "tests/test_nodebookcore.py::TestReferenceFinder::test_augassign", "tests/test_nodebookcore.py::TestReferenceFinder::test_import", "tests/test_nodebookcore.py::TestReferenceFinder::test_multiline", "tests/test_nodebookcore.py::TestNodebook::test_single_node", "tests/test_nodebookcore.py::TestNodebook::test_node_chain", "tests/test_pickledict.py::TestPickleDict::test_int[mode_memory]", "tests/test_pickledict.py::TestPickleDict::test_int[mode_disk]", "tests/test_pickledict.py::TestPickleDict::test_string[mode_memory]", "tests/test_pickledict.py::TestPickleDict::test_string[mode_disk]", "tests/test_pickledict.py::TestPickleDict::test_bytes[mode_memory]", "tests/test_pickledict.py::TestPickleDict::test_bytes[mode_disk]", "tests/test_pickledict.py::TestPickleDict::test_df[mode_memory]", "tests/test_pickledict.py::TestPickleDict::test_df[mode_disk]", "tests/test_pickledict.py::TestPickleDict::test_immutability[mode_memory]", "tests/test_pickledict.py::TestPickleDict::test_immutability[mode_disk]" ]
[]
[]
[]
Apache License 2.0
1,768
[ "ipython/extensions/nodebookext.py", "nodebook/utils.py", "setup.py", "nodebook/nodebookcore.py", "README.md", "nodebook/pickledict.py" ]
[ "ipython/extensions/nodebookext.py", "nodebook/utils.py", "setup.py", "nodebook/nodebookcore.py", "README.md", "nodebook/pickledict.py" ]
elastic__elasticsearch-dsl-py-752
269fef7fa12333f7622c3694df75a1b296d87ae2
2017-10-16 02:08:29
e8906dcd17eb2021bd191325817ff7541d838ea1
diff --git a/elasticsearch_dsl/analysis.py b/elasticsearch_dsl/analysis.py index 8424283..c2abd94 100644 --- a/elasticsearch_dsl/analysis.py +++ b/elasticsearch_dsl/analysis.py @@ -19,9 +19,9 @@ class AnalysisBase(object): class CustomAnalysis(object): name = 'custom' - def __init__(self, name, builtin_type='custom', **kwargs): + def __init__(self, filter_name, builtin_type='custom', **kwargs): self._builtin_type = builtin_type - self._name = name + self._name = filter_name super(CustomAnalysis, self).__init__(**kwargs) def to_dict(self): diff --git a/elasticsearch_dsl/faceted_search.py b/elasticsearch_dsl/faceted_search.py index 795132f..129b4fc 100644 --- a/elasticsearch_dsl/faceted_search.py +++ b/elasticsearch_dsl/faceted_search.py @@ -145,7 +145,8 @@ class DateHistogramFacet(Facet): # so we need to set key to 0 to avoid TypeError exception if bucket['key'] is None: bucket['key'] = 0 - return datetime.utcfromtimestamp(int(bucket['key']) / 1000) + # Preserve milliseconds in the datetime + return datetime.utcfromtimestamp(int(bucket['key']) / 1000.0) else: return bucket['key'] diff --git a/elasticsearch_dsl/field.py b/elasticsearch_dsl/field.py index 129b53c..d895e7e 100644 --- a/elasticsearch_dsl/field.py +++ b/elasticsearch_dsl/field.py @@ -218,7 +218,8 @@ class Date(Field): if isinstance(data, date): return data if isinstance(data, int): - return datetime.utcfromtimestamp(data / 1000) + # Divide by a float to preserve milliseconds on the datetime. + return datetime.utcfromtimestamp(data / 1000.0) try: # TODO: add format awareness
Datetimes dropping miliseconds The problems causes dates like '1970-01-01T00:17:47.045Z' in ElasticSearch to end up as a datetime like datetime.datetime(1970, 1, 1, 0, 17, 47) My expectation is that this value should produce a datetime like datetime.datetime(1970, 1, 1, 0, 17, 47, 45000) https://github.com/elastic/elasticsearch-dsl-py/blob/269fef7fa12333f7622c3694df75a1b296d87ae2/elasticsearch_dsl/field.py#L221 This is caused by integer division on the line referenced above. Changing this line to divide by 1000.0 fixes the issue.
elastic/elasticsearch-dsl-py
diff --git a/test_elasticsearch_dsl/test_analysis.py b/test_elasticsearch_dsl/test_analysis.py index 014c43d..6dc3c09 100644 --- a/test_elasticsearch_dsl/test_analysis.py +++ b/test_elasticsearch_dsl/test_analysis.py @@ -79,3 +79,11 @@ def test_custom_analyzer_can_collect_custom_items(): } } == a.get_analysis_definition() +def test_stemmer_analyzer_can_pass_name(): + t = analysis.token_filter('my_english_filter', name="minimal_english", type="stemmer") + assert t.to_dict() == 'my_english_filter' + assert { + "type" : "stemmer", + "name" : "minimal_english" + } == t.get_definition() +
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 3 }, "num_modified_files": 3 }
5.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[develop]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-cov", "pytz" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work Babel==2.11.0 certifi==2021.5.30 charset-normalizer==2.0.12 coverage==6.2 docutils==0.18.1 elasticsearch==5.5.3 -e git+https://github.com/elastic/elasticsearch-dsl-py.git@269fef7fa12333f7622c3694df75a1b296d87ae2#egg=elasticsearch_dsl idna==3.10 imagesize==1.4.1 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work Jinja2==3.0.3 MarkupSafe==2.0.1 mock==5.2.0 more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work Pygments==2.14.0 pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 pytest-cov==4.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 requests==2.27.1 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinx-rtd-theme==2.0.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 toml @ file:///tmp/build/80754af9/toml_1616166611790/work tomli==1.2.3 typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work urllib3==1.26.20 zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: elasticsearch-dsl-py channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - babel==2.11.0 - charset-normalizer==2.0.12 - coverage==6.2 - docutils==0.18.1 - elasticsearch==5.5.3 - idna==3.10 - imagesize==1.4.1 - jinja2==3.0.3 - markupsafe==2.0.1 - mock==5.2.0 - pygments==2.14.0 - pytest-cov==4.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - requests==2.27.1 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinx-rtd-theme==2.0.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jquery==4.1 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - tomli==1.2.3 - urllib3==1.26.20 prefix: /opt/conda/envs/elasticsearch-dsl-py
[ "test_elasticsearch_dsl/test_analysis.py::test_stemmer_analyzer_can_pass_name" ]
[]
[ "test_elasticsearch_dsl/test_analysis.py::test_analyzer_serializes_as_name", "test_elasticsearch_dsl/test_analysis.py::test_analyzer_has_definition", "test_elasticsearch_dsl/test_analysis.py::test_normalizer_serializes_as_name", "test_elasticsearch_dsl/test_analysis.py::test_normalizer_has_definition", "test_elasticsearch_dsl/test_analysis.py::test_tokenizer", "test_elasticsearch_dsl/test_analysis.py::test_custom_analyzer_can_collect_custom_items" ]
[]
Apache License 2.0
1,769
[ "elasticsearch_dsl/field.py", "elasticsearch_dsl/faceted_search.py", "elasticsearch_dsl/analysis.py" ]
[ "elasticsearch_dsl/field.py", "elasticsearch_dsl/faceted_search.py", "elasticsearch_dsl/analysis.py" ]
cloudant__python-cloudant-332
e1b5a3291a0759be6d2350e9626a0f6e6e3c657b
2017-10-16 14:59:58
eda73e429f404db7c22c1ccd4c265b5c70063dae
diff --git a/CHANGES.rst b/CHANGES.rst index eab0557..c683e44 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,6 +1,7 @@ Unreleased ========== - [NEW] Added ``Result.all()`` convenience method. +- [NEW] Allow ``service_name`` to be specified when instantiating from a Bluemix VCAP_SERVICES environment variable. - [IMPROVED] Updated ``posixpath.join`` references to use ``'/'.join`` when concatenating URL parts. - [IMPROVED] Updated documentation by replacing deprecated Cloudant links with the latest Bluemix links. diff --git a/src/cloudant/__init__.py b/src/cloudant/__init__.py index 04131db..7b1ba55 100644 --- a/src/cloudant/__init__.py +++ b/src/cloudant/__init__.py @@ -92,7 +92,7 @@ def cloudant_iam(account_name, api_key, **kwargs): cloudant_session.disconnect() @contextlib.contextmanager -def cloudant_bluemix(vcap_services, instance_name=None, **kwargs): +def cloudant_bluemix(vcap_services, instance_name=None, service_name=None, **kwargs): """ Provides a context manager to create a Cloudant session and provide access to databases, docs etc. @@ -101,6 +101,7 @@ def cloudant_bluemix(vcap_services, instance_name=None, **kwargs): :type vcap_services: dict or str :param str instance_name: Optional Bluemix instance name. Only required if multiple Cloudant instances are available. + :param str service_name: Optional Bluemix service name. :param str encoder: Optional json Encoder object used to encode documents for storage. Defaults to json.JSONEncoder. @@ -149,11 +150,10 @@ def cloudant_bluemix(vcap_services, instance_name=None, **kwargs): print client.all_dbs() # ... 
""" - service = CloudFoundryService(vcap_services, instance_name) - cloudant_session = Cloudant( - service.username, - service.password, - url=service.url, + cloudant_session = Cloudant.bluemix( + vcap_services, + instance_name=instance_name, + service_name=service_name, **kwargs ) cloudant_session.connect() diff --git a/src/cloudant/_common_util.py b/src/cloudant/_common_util.py index fe2e068..05e3bd3 100644 --- a/src/cloudant/_common_util.py +++ b/src/cloudant/_common_util.py @@ -498,18 +498,18 @@ class IAMSession(ClientSession): class CloudFoundryService(object): """ Manages Cloud Foundry service configuration. """ - def __init__(self, vcap_services, name=None): + def __init__(self, vcap_services, instance_name=None, service_name=None): try: services = vcap_services if not isinstance(vcap_services, dict): services = json.loads(vcap_services) - cloudant_services = services.get('cloudantNoSQLDB', []) + cloudant_services = services.get(service_name, []) # use first service if no name given and only one service present - use_first = name is None and len(cloudant_services) == 1 + use_first = instance_name is None and len(cloudant_services) == 1 for service in cloudant_services: - if use_first or service.get('name') == name: + if use_first or service.get('name') == instance_name: credentials = service['credentials'] self._host = credentials['host'] self._name = service.get('name') diff --git a/src/cloudant/client.py b/src/cloudant/client.py index 3a1360c..ce7d493 100755 --- a/src/cloudant/client.py +++ b/src/cloudant/client.py @@ -754,7 +754,7 @@ class Cloudant(CouchDB): return resp.json() @classmethod - def bluemix(cls, vcap_services, instance_name=None, **kwargs): + def bluemix(cls, vcap_services, instance_name=None, service_name=None, **kwargs): """ Create a Cloudant session using a VCAP_SERVICES environment variable. @@ -762,6 +762,7 @@ class Cloudant(CouchDB): :type vcap_services: dict or str :param str instance_name: Optional Bluemix instance name. 
Only required if multiple Cloudant instances are available. + :param str service_name: Optional Bluemix service name. Example usage: @@ -775,7 +776,10 @@ class Cloudant(CouchDB): print client.all_dbs() """ - service = CloudFoundryService(vcap_services, instance_name) + service_name = service_name or 'cloudantNoSQLDB' # default service + service = CloudFoundryService(vcap_services, + instance_name=instance_name, + service_name=service_name) return Cloudant(service.username, service.password, url=service.url,
Cloudant.bluemix does not work on IBM Bluemix Dedicated service Please include the following information in your ticket. - Cloudant (python-cloudant) version(s) that are affected by this issue. *2.6.0* - Python version *3.6.2* - A small code sample that demonstrates the issue. *See below* IBM Bluemix Dedicated service uses "cloudantNoSQLDB Dedicated" as the name of the service, rather than "cloudantNoSQLDB" used by Public Bluemix. Unfortunately, the CloudFoundryService class hardcodes the value: `cloudant_services = services.get('cloudantNoSQLDB', [])` Any chance we could make that value a parameter?
cloudant/python-cloudant
diff --git a/tests/unit/client_tests.py b/tests/unit/client_tests.py index 796e5fc..db78861 100644 --- a/tests/unit/client_tests.py +++ b/tests/unit/client_tests.py @@ -552,6 +552,34 @@ class CloudantClientTests(UnitTestDbBase): except Exception as err: self.fail('Exception {0} was raised.'.format(str(err))) + def test_cloudant_bluemix_dedicated_context_helper(self): + """ + Test that the cloudant_bluemix context helper works as expected when + specifying a service name. + """ + instance_name = 'Cloudant NoSQL DB-wq' + service_name = 'cloudantNoSQLDB Dedicated' + vcap_services = {service_name: [{ + 'credentials': { + 'username': self.user, + 'password': self.pwd, + 'host': '{0}.cloudant.com'.format(self.account), + 'port': 443, + 'url': self.url + }, + 'name': instance_name, + }]} + + try: + with cloudant_bluemix(vcap_services, + instance_name=instance_name, + service_name=service_name) as c: + self.assertIsInstance(c, Cloudant) + self.assertIsInstance(c.r_session, requests.Session) + self.assertEquals(c.session()['userCtx']['name'], self.user) + except Exception as err: + self.fail('Exception {0} was raised.'.format(str(err))) + def test_constructor_with_account(self): """ Test instantiating a client object using an account name diff --git a/tests/unit/cloud_foundry_tests.py b/tests/unit/cloud_foundry_tests.py index 043949f..43249b7 100644 --- a/tests/unit/cloud_foundry_tests.py +++ b/tests/unit/cloud_foundry_tests.py @@ -91,68 +91,104 @@ class CloudFoundryServiceTests(unittest.TestCase): ] } ]}) + self._test_vcap_services_dedicated = json.dumps({ + 'cloudantNoSQLDB Dedicated': [ # dedicated service name + { + 'name': 'Cloudant NoSQL DB 1', # valid service + 'credentials': { + 'host': 'example.cloudant.com', + 'password': 'pa$$w0rd01', + 'port': 1234, + 'username': 'example' + } + } + ] + }) def test_get_vcap_service_default_success(self): - service = CloudFoundryService(self._test_vcap_services_single) + service = CloudFoundryService( + 
self._test_vcap_services_single, + service_name='cloudantNoSQLDB' + ) self.assertEqual('Cloudant NoSQL DB 1', service.name) def test_get_vcap_service_default_success_as_dict(self): service = CloudFoundryService( - json.loads(self._test_vcap_services_single) + json.loads(self._test_vcap_services_single), + service_name='cloudantNoSQLDB' ) self.assertEqual('Cloudant NoSQL DB 1', service.name) def test_get_vcap_service_default_failure_multiple_services(self): with self.assertRaises(CloudantException) as cm: - CloudFoundryService(self._test_vcap_services_multiple) + CloudFoundryService( + self._test_vcap_services_multiple, + service_name='cloudantNoSQLDB' + ) self.assertEqual('Missing service in VCAP_SERVICES', str(cm.exception)) def test_get_vcap_service_instance_host(self): service = CloudFoundryService( - self._test_vcap_services_multiple, 'Cloudant NoSQL DB 1' + self._test_vcap_services_multiple, + instance_name='Cloudant NoSQL DB 1', + service_name='cloudantNoSQLDB' ) self.assertEqual('example.cloudant.com', service.host) def test_get_vcap_service_instance_password(self): service = CloudFoundryService( - self._test_vcap_services_multiple, 'Cloudant NoSQL DB 1' + self._test_vcap_services_multiple, + instance_name='Cloudant NoSQL DB 1', + service_name='cloudantNoSQLDB' ) self.assertEqual('pa$$w0rd01', service.password) def test_get_vcap_service_instance_port(self): service = CloudFoundryService( - self._test_vcap_services_multiple, 'Cloudant NoSQL DB 1' + self._test_vcap_services_multiple, + instance_name='Cloudant NoSQL DB 1', + service_name='cloudantNoSQLDB' ) self.assertEqual('1234', service.port) def test_get_vcap_service_instance_port_default(self): service = CloudFoundryService( - self._test_vcap_services_multiple, 'Cloudant NoSQL DB 2' + self._test_vcap_services_multiple, + instance_name='Cloudant NoSQL DB 2', + service_name='cloudantNoSQLDB' ) self.assertEqual('443', service.port) def test_get_vcap_service_instance_url(self): service = CloudFoundryService( - 
self._test_vcap_services_multiple, 'Cloudant NoSQL DB 1' + self._test_vcap_services_multiple, + instance_name='Cloudant NoSQL DB 1', + service_name='cloudantNoSQLDB' ) self.assertEqual('https://example.cloudant.com:1234', service.url) def test_get_vcap_service_instance_username(self): service = CloudFoundryService( - self._test_vcap_services_multiple, 'Cloudant NoSQL DB 1' + self._test_vcap_services_multiple, + instance_name='Cloudant NoSQL DB 1', + service_name='cloudantNoSQLDB' ) self.assertEqual('example', service.username) def test_raise_error_for_missing_host(self): with self.assertRaises(CloudantException): CloudFoundryService( - self._test_vcap_services_multiple, 'Cloudant NoSQL DB 3' + self._test_vcap_services_multiple, + instance_name='Cloudant NoSQL DB 3', + service_name='cloudantNoSQLDB' ) def test_raise_error_for_missing_password(self): with self.assertRaises(CloudantException) as cm: CloudFoundryService( - self._test_vcap_services_multiple, 'Cloudant NoSQL DB 4' + self._test_vcap_services_multiple, + instance_name='Cloudant NoSQL DB 4', + service_name='cloudantNoSQLDB' ) self.assertEqual( "Invalid service: 'password' missing", @@ -162,7 +198,9 @@ class CloudFoundryServiceTests(unittest.TestCase): def test_raise_error_for_missing_username(self): with self.assertRaises(CloudantException) as cm: CloudFoundryService( - self._test_vcap_services_multiple, 'Cloudant NoSQL DB 5' + self._test_vcap_services_multiple, + instance_name='Cloudant NoSQL DB 5', + service_name='cloudantNoSQLDB' ) self.assertEqual( "Invalid service: 'username' missing", @@ -172,7 +210,9 @@ class CloudFoundryServiceTests(unittest.TestCase): def test_raise_error_for_invalid_credentials_type(self): with self.assertRaises(CloudantException) as cm: CloudFoundryService( - self._test_vcap_services_multiple, 'Cloudant NoSQL DB 6' + self._test_vcap_services_multiple, + instance_name='Cloudant NoSQL DB 6', + service_name='cloudantNoSQLDB' ) self.assertEqual( 'Failed to decode VCAP_SERVICES 
service credentials', @@ -182,7 +222,9 @@ class CloudFoundryServiceTests(unittest.TestCase): def test_raise_error_for_missing_service(self): with self.assertRaises(CloudantException) as cm: CloudFoundryService( - self._test_vcap_services_multiple, 'Cloudant NoSQL DB 7' + self._test_vcap_services_multiple, + instance_name='Cloudant NoSQL DB 7', + service_name='cloudantNoSQLDB' ) self.assertEqual('Missing service in VCAP_SERVICES', str(cm.exception)) @@ -190,3 +232,10 @@ class CloudFoundryServiceTests(unittest.TestCase): with self.assertRaises(CloudantException) as cm: CloudFoundryService('{', 'Cloudant NoSQL DB 1') # invalid JSON self.assertEqual('Failed to decode VCAP_SERVICES JSON', str(cm.exception)) + + def test_get_vcap_service_with_dedicated_service_name_success(self): + service = CloudFoundryService( + self._test_vcap_services_dedicated, + service_name='cloudantNoSQLDB Dedicated' + ) + self.assertEqual('Cloudant NoSQL DB 1', service.name)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 4 }
2.6
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "mock==1.3.0", "nose", "sphinx", "pylint", "flaky", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.5", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 astroid==2.11.7 attrs==22.2.0 Babel==2.11.0 certifi==2021.5.30 charset-normalizer==2.0.12 -e git+https://github.com/cloudant/python-cloudant.git@e1b5a3291a0759be6d2350e9626a0f6e6e3c657b#egg=cloudant dill==0.3.4 docutils==0.18.1 flaky==3.8.1 idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 iniconfig==1.1.1 isort==5.10.1 Jinja2==3.0.3 lazy-object-proxy==1.7.1 MarkupSafe==2.0.1 mccabe==0.7.0 mock==1.3.0 nose==1.3.7 packaging==21.3 pbr==6.1.1 platformdirs==2.4.0 pluggy==1.0.0 py==1.11.0 Pygments==2.14.0 pylint==2.13.9 pyparsing==3.1.4 pytest==7.0.1 pytz==2025.2 requests==2.27.1 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 tomli==1.2.3 typed-ast==1.5.5 typing_extensions==4.1.1 urllib3==1.26.20 wrapt==1.16.0 zipp==3.6.0
name: python-cloudant channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - astroid==2.11.7 - attrs==22.2.0 - babel==2.11.0 - charset-normalizer==2.0.12 - dill==0.3.4 - docutils==0.18.1 - flaky==3.8.1 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - isort==5.10.1 - jinja2==3.0.3 - lazy-object-proxy==1.7.1 - markupsafe==2.0.1 - mccabe==0.7.0 - mock==1.3.0 - nose==1.3.7 - packaging==21.3 - pbr==6.1.1 - platformdirs==2.4.0 - pluggy==1.0.0 - py==1.11.0 - pygments==2.14.0 - pylint==2.13.9 - pyparsing==3.1.4 - pytest==7.0.1 - pytz==2025.2 - requests==2.27.1 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - tomli==1.2.3 - typed-ast==1.5.5 - typing-extensions==4.1.1 - urllib3==1.26.20 - wrapt==1.16.0 - zipp==3.6.0 prefix: /opt/conda/envs/python-cloudant
[ "tests/unit/cloud_foundry_tests.py::CloudFoundryServiceTests::test_get_vcap_service_default_failure_multiple_services", "tests/unit/cloud_foundry_tests.py::CloudFoundryServiceTests::test_get_vcap_service_default_success", "tests/unit/cloud_foundry_tests.py::CloudFoundryServiceTests::test_get_vcap_service_default_success_as_dict", "tests/unit/cloud_foundry_tests.py::CloudFoundryServiceTests::test_get_vcap_service_instance_host", "tests/unit/cloud_foundry_tests.py::CloudFoundryServiceTests::test_get_vcap_service_instance_password", "tests/unit/cloud_foundry_tests.py::CloudFoundryServiceTests::test_get_vcap_service_instance_port", "tests/unit/cloud_foundry_tests.py::CloudFoundryServiceTests::test_get_vcap_service_instance_port_default", "tests/unit/cloud_foundry_tests.py::CloudFoundryServiceTests::test_get_vcap_service_instance_url", "tests/unit/cloud_foundry_tests.py::CloudFoundryServiceTests::test_get_vcap_service_instance_username", "tests/unit/cloud_foundry_tests.py::CloudFoundryServiceTests::test_get_vcap_service_with_dedicated_service_name_success", "tests/unit/cloud_foundry_tests.py::CloudFoundryServiceTests::test_raise_error_for_invalid_credentials_type", "tests/unit/cloud_foundry_tests.py::CloudFoundryServiceTests::test_raise_error_for_missing_host", "tests/unit/cloud_foundry_tests.py::CloudFoundryServiceTests::test_raise_error_for_missing_password", "tests/unit/cloud_foundry_tests.py::CloudFoundryServiceTests::test_raise_error_for_missing_service", "tests/unit/cloud_foundry_tests.py::CloudFoundryServiceTests::test_raise_error_for_missing_username" ]
[]
[ "tests/unit/client_tests.py::CloudantClientExceptionTests::test_raise_using_invalid_code", "tests/unit/client_tests.py::CloudantClientExceptionTests::test_raise_with_proper_code_and_args", "tests/unit/client_tests.py::CloudantClientExceptionTests::test_raise_without_args", "tests/unit/client_tests.py::CloudantClientExceptionTests::test_raise_without_code", "tests/unit/cloud_foundry_tests.py::CloudFoundryServiceTests::test_raise_error_for_invalid_vcap" ]
[]
Apache License 2.0
1,772
[ "src/cloudant/client.py", "src/cloudant/_common_util.py", "CHANGES.rst", "src/cloudant/__init__.py" ]
[ "src/cloudant/client.py", "src/cloudant/_common_util.py", "CHANGES.rst", "src/cloudant/__init__.py" ]
pynamodb__PynamoDB-376
3adf6063bf7d627638e52733d23be7a8867acc6b
2017-10-16 21:25:32
1828bda52376a4b0313146b64ffb447e5392f467
diff --git a/docs/conditional.rst b/docs/conditional.rst index 12521a7..bcac16c 100644 --- a/docs/conditional.rst +++ b/docs/conditional.rst @@ -1,3 +1,5 @@ +.. _conditional_operations: + Conditional Operations ====================== diff --git a/docs/local.rst b/docs/local.rst index 2349727..982d09e 100644 --- a/docs/local.rst +++ b/docs/local.rst @@ -15,9 +15,9 @@ that your server is listening on. .. note:: If you are using DynamoDB Local and also use ``rate_limited_scan`` on your models, you must also - set ``allow_rate_limited_scan_without_consumed_capacity`` to ``True`` in the - `settings file <settings.rst#allow_rate_limited_scan_without_consumed_capacity>`_ (dynalite does not require - this step because it implements returning of consumed capacity in responses, which is used by ``rate_limited_scan``). + set ``allow_rate_limited_scan_without_consumed_capacity`` to ``True`` in :ref:`settings` + (dynalite does not require this step because it implements returning of consumed capacity in + responses, which is used by ``rate_limited_scan``). .. note:: diff --git a/docs/quickstart.rst b/docs/quickstart.rst index 3d46d71..1405c46 100644 --- a/docs/quickstart.rst +++ b/docs/quickstart.rst @@ -32,7 +32,7 @@ PynamoDB allows you to create the table: >>> UserModel.create_table(read_capacity_units=1, write_capacity_units=1) -Now you can create a user: +Now you can create a user in local memory: >>> user = UserModel('[email protected]', first_name='Samuel', last_name='Adams') dynamodb-user<[email protected]> @@ -62,6 +62,49 @@ Ready to delete the user? >>> user.delete() +Changing items +^^^^^^^^^^^^^^ + +Changing existing items in the database can be done using either +`update()` or `save()`. There are important differences between the +two. 
+ +Use of `save()` looks like this:: + + user = UserModel.get('[email protected]') + user.first_name = 'Robert' + user.save() + +Use of `update()` (in its simplest form) looks like this:: + + user = UserModel.get('[email protected]') + user.update( + actions=[ + UserModel.first_name.set('Robert') + ] + ) + +`save()` will entirely replace an object (it internally uses `PutItem +<http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_PutItem.html>`_). As +a consequence, even if you modify only one attribute prior to calling +`save()`, the entire object is re-written. Any modifications done to +the same user by other processes will be lost, even if made to other +attributues that you did not change. To avoid this, use `update()` to +perform more fine grained updates or see the +:ref:`conditional_operations` for how to avoid race conditions +entirely. + +Additionally, PynamoDB ignores attributes it does not know about when +reading an object from the database. As a result, if the item in +DynamoDB contains attributes not declared in your model, `save()` will +cause those attributes to be deleted. + +In particular, performing a rolling upgrade of your application after +having added an attribute is an example of such a situation. To avoid +data loss, either avoid using `save()` or perform a multi-step update +with the first step is to upgrade to a version that merely declares +the attribute on the model without ever setting it to any value. + Querying ^^^^^^^^ diff --git a/docs/settings.rst b/docs/settings.rst index f61e00a..3b9288a 100644 --- a/docs/settings.rst +++ b/docs/settings.rst @@ -1,3 +1,5 @@ +.. _settings: + Settings ======== @@ -59,7 +61,9 @@ Default: ``False`` If ``True``, ``rate_limited_scan()`` will proceed silently (without rate limiting) if the DynamoDB server does not return consumed -capacity information in responses. +capacity information in responses. 
If ``False``, scans will fail +should the server not return consumed capacity information in an +effort to prevent unintentional capacity usage.. Overriding settings ~~~~~~~~~~~~~~~~~~~ diff --git a/docs/tutorial.rst b/docs/tutorial.rst index dd03776..6b2db70 100644 --- a/docs/tutorial.rst +++ b/docs/tutorial.rst @@ -102,7 +102,7 @@ Here is an example that specifies both the ``host`` and the ``region`` to use: table_name = 'Thread' # Specifies the region region = 'us-west-1' - # Specifies the hostname + # Optional: Specify the hostname only if it needs to be changed from the default AWS setting host = 'http://localhost' # Specifies the write capacity write_capacity_units = 10 diff --git a/pynamodb/connection/base.py b/pynamodb/connection/base.py index ae163f5..ac92ae0 100644 --- a/pynamodb/connection/base.py +++ b/pynamodb/connection/base.py @@ -420,8 +420,8 @@ class Connection(object): for attr in six.itervalues(data[LAST_EVALUATED_KEY]): _convert_binary(attr) if UNPROCESSED_KEYS in data: - for item_list in six.itervalues(data[UNPROCESSED_KEYS]): - for item in item_list: + for table_data in six.itervalues(data[UNPROCESSED_KEYS]): + for item in table_data[KEYS]: for attr in six.itervalues(item): _convert_binary(attr) if UNPROCESSED_ITEMS in data: diff --git a/pynamodb/models.py b/pynamodb/models.py index 92dda13..af4b42b 100644 --- a/pynamodb/models.py +++ b/pynamodb/models.py @@ -61,9 +61,14 @@ class BatchWrite(ModelContextManager): """ def save(self, put_item): """ - This adds `put_item` to the list of pending writes to be performed. - Additionally, the a BatchWriteItem will be performed if the length of items - reaches 25. + This adds `put_item` to the list of pending operations to be performed. + + If the list currently contains 25 items, which is the DynamoDB imposed + limit on a BatchWriteItem call, one of two things will happen. 
If auto_commit + is True, a BatchWriteItem operation will be sent with the already pending + writes after which put_item is appended to the (now empty) list. If auto_commit + is False, ValueError is raised to indicate additional items cannot be accepted + due to the DynamoDB imposed limit. :param put_item: Should be an instance of a `Model` to be written """ @@ -76,8 +81,14 @@ class BatchWrite(ModelContextManager): def delete(self, del_item): """ - This adds `del_item` to the list of pending deletes to be performed. - If the list of items reaches 25, a BatchWriteItem will be called. + This adds `del_item` to the list of pending operations to be performed. + + If the list currently contains 25 items, which is the DynamoDB imposed + limit on a BatchWriteItem call, one of two things will happen. If auto_commit + is True, a BatchWriteItem operation will be sent with the already pending + operations after which put_item is appended to the (now empty) list. If auto_commit + is False, ValueError is raised to indicate additional items cannot be accepted + due to the DynamoDB imposed limit. :param del_item: Should be an instance of a `Model` to be deleted """ @@ -294,9 +305,13 @@ class Model(AttributeContainer): @classmethod def batch_write(cls, auto_commit=True): """ - Returns a context manager for a batch operation' + Returns a BatchWrite context manager for a batch operation. - :param auto_commit: Commits writes automatically if `True` + :param auto_commit: If true, the context manager will commit writes incrementally + as items are written to as necessary to honor item count limits + in the DynamoDB API (see BatchWrite). Regardless of the value + passed here, changes automatically commit on context exit + (whether successful or not). 
""" return BatchWrite(cls, auto_commit=auto_commit) @@ -455,7 +470,8 @@ class Model(AttributeContainer): def get(cls, hash_key, range_key=None, - consistent_read=False): + consistent_read=False, + attributes_to_get=None): """ Returns a single object using the provided keys @@ -466,7 +482,8 @@ class Model(AttributeContainer): data = cls._get_connection().get_item( hash_key, range_key=range_key, - consistent_read=consistent_read + consistent_read=consistent_read, + attributes_to_get=attributes_to_get ) if data: item_data = data.get(ITEM)
Error while _handle_binary_attributes when UNPROCESSED_KEYS in batch_get request Hi, I encounter some problems whenever I exceed the provisionned throughput (if it does not exceed everything works well). Instead of retrying to request unprocessed items, it raises an error while trying to handle binary attributes. This occurs when trying to process batch_get requests. ``` /Users/leonardbinet/anaconda/envs/api_transilien/lib/python3.6/site-packages/pynamodb/connection/base.py in dispatch(self, operation_name, operation_kwargs) 262 self._log_debug(operation_name, operation_kwargs) 263 --> 264 data = self._make_api_call(operation_name, operation_kwargs) 265 266 if data and CONSUMED_CAPACITY in data: /Users/leonardbinet/anaconda/envs/api_transilien/lib/python3.6/site-packages/pynamod/connection/base.py in _make_api_call(self, operation_name, operation_kwargs) 355 continue 356 --> 357 return self._handle_binary_attributes(data) 358 359 @staticmethod /Users/leonardbinet/anaconda/envs/api_transilien/lib/python3.6/site-packages/pynamodb/connection/base.py in _handle_binary_attributes(data) 378 for item_list in six.itervalues(data[UNPROCESSED_KEYS]): 379 for item in item_list: --> 380 for attr in six.itervalues(item): 381 _convert_binary(attr) 382 if UNPROCESSED_ITEMS in data: /Users/leonardbinet/anaconda/envs/api_transilien/lib/python3.6/site-packages/six.py in itervalues(d, **kw) 576 577 def itervalues(d, **kw): --> 578 return iter(d.values(**kw)) 579 580 def iteritems(d, **kw): AttributeError: 'str' object has no attribute 'values' ``` Here is more detail about objects when investigating with ipython: ``` ipdb> d 'Keys' ipdb> item 'Keys' ipdb> item_list.keys() dict_keys(['Keys']) ipdb> item_list {'Keys': [{'station_id': {'S': '8739322'}, 'day_train_num': {'S': '20170401_496702'}}, {'station_id': {'S':'8738149'}, 'day_train_num': {'S': '20170401_497601'}}, ... ... 
{'station_id': {'S': '8727608'}, 'day_train_num': {'S': '20170401_496538'}}, {'station_id': {'S': '8738237'}, 'day_train_num': {'S': '20170401_496704'}} ]} ipdb> data.keys() dict_keys(['ConsumedCapacity', 'Responses', 'UnprocessedKeys']) ipdb> data["UnprocessedKeys"] {'real_departures_2': {'Keys': [{'station_id': {'S': '8739322'}, 'day_train_num': {'S': '20170401_496702'}}, {'station_id': {'S': '8738149'}, 'day_train_num': {'S': '20170401_497601'}}, {'station_id': {'S': '8711386'}, 'day_train_num': {'S': '20170401_499001'}}, {'station_id': {'S': '8738237'}, 'day_train_num': {'S': '20170401_496703'}}, .... ``` Obviously the problem is that item_list variable is not a list, but a dictionary containing the item_list as value under key "Keys": `{"Keys": item_list}` And that initially: `data["UnprocessedKeys"]`is structured as follow: `{"table_name":{"Keys": item_list}}` instead of `{"Keys": item_list}` I think that we only need to add a line in code (line 377 in pynamodb/connection/base.py): Instead of: ``` if UNPROCESSED_KEYS in data: for item_list in six.itervalues(data[UNPROCESSED_KEYS]): for item in item_list: for attr in six.itervalues(item): _convert_binary(attr) ``` insert another loop: ``` if UNPROCESSED_KEYS in data: for table_unprocessed_keys in six.itervalues(data[UNPROCESSED_KEYS]): for item_list in six.itervalues(table_unprocessed_keys): for item in item_list: for attr in six.itervalues(item): _convert_binary(attr) ```
pynamodb/PynamoDB
diff --git a/pynamodb/tests/test_base_connection.py b/pynamodb/tests/test_base_connection.py index 5d36fad..4095d6c 100644 --- a/pynamodb/tests/test_base_connection.py +++ b/pynamodb/tests/test_base_connection.py @@ -2586,6 +2586,44 @@ class ConnectionTestCase(TestCase): _assert=True ) + def test_handle_binary_attributes_for_unprocessed_keys(self): + binary_blob = six.b('\x00\xFF\x00\xFF') + unprocessed_keys = { + 'UnprocessedKeys': { + 'MyTable': { + 'AttributesToGet': ['ForumName'], + 'Keys': [ + { + 'ForumName': {'S': 'FooForum'}, + 'Subject': {'B': base64.b64encode(binary_blob).decode(DEFAULT_ENCODING)} + }, + { + 'ForumName': {'S': 'FooForum'}, + 'Subject': {'S': 'thread-1'} + } + ], + 'ConsistentRead': False + }, + 'MyOtherTable': { + 'AttributesToGet': ['ForumName'], + 'Keys': [ + { + 'ForumName': {'S': 'FooForum'}, + 'Subject': {'B': base64.b64encode(binary_blob).decode(DEFAULT_ENCODING)} + }, + { + 'ForumName': {'S': 'FooForum'}, + 'Subject': {'S': 'thread-1'} + } + ], + 'ConsistentRead': False + } + } + } + data = Connection._handle_binary_attributes(unprocessed_keys) + self.assertEqual(data['UnprocessedKeys']['MyTable']['Keys'][0]['Subject']['B'], binary_blob) + self.assertEqual(data['UnprocessedKeys']['MyOtherTable']['Keys'][0]['Subject']['B'], binary_blob) + def test_get_expected_map(self): conn = Connection(self.region) with patch(PATCH_METHOD) as req:
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 7 }
3.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-mock" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 botocore==1.2.0 certifi==2021.5.30 coverage==6.2 docutils==0.18.1 importlib-metadata==4.8.3 iniconfig==1.1.1 jmespath==0.7.1 packaging==21.3 pluggy==1.0.0 py==1.11.0 -e git+https://github.com/pynamodb/PynamoDB.git@3adf6063bf7d627638e52733d23be7a8867acc6b#egg=pynamodb pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 pytest-mock==3.6.1 python-dateutil==2.9.0.post0 six==1.9.0 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: PynamoDB channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - botocore==1.2.0 - coverage==6.2 - docutils==0.18.1 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - jmespath==0.7.1 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - pytest-mock==3.6.1 - python-dateutil==2.9.0.post0 - six==1.9.0 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/PynamoDB
[ "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_handle_binary_attributes_for_unprocessed_keys" ]
[ "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_create_connection" ]
[ "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_batch_get_item", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_batch_write_item", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_create_prepared_request", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_create_table", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_delete_item", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_delete_table", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_describe_table", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_get_expected_map", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_get_item", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_get_query_filter_map", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_handle_binary_attributes_for_unprocessed_items", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_list_tables", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_make_api_call_retries_properly", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_make_api_call_throws_retry_disabled", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_make_api_call_throws_verbose_error_after_backoff", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_make_api_call_throws_verbose_error_after_backoff_later_succeeds", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_make_api_call_throws_when_retries_exhausted", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_put_item", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_query", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_rate_limited_scan", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_rate_limited_scan_retries_max_sleep", 
"pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_rate_limited_scan_retries_min_sleep", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_rate_limited_scan_retries_on_rate_unavailable", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_rate_limited_scan_retries_on_rate_unavailable_within_s", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_rate_limited_scan_retries_timeout", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_ratelimited_scan_exception_on_max_threshold", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_ratelimited_scan_raises_non_client_error", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_ratelimited_scan_raises_other_client_errors", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_ratelimited_scan_retries_on_throttling", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_ratelimited_scan_with_pagination_ends", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_scan", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_subsequent_client_is_cached_when_credentials_truthy", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_subsequent_client_is_not_cached_when_credentials_none", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_update_item", "pynamodb/tests/test_base_connection.py::ConnectionTestCase::test_update_table" ]
[]
MIT License
1,773
[ "docs/quickstart.rst", "docs/settings.rst", "pynamodb/models.py", "docs/local.rst", "docs/tutorial.rst", "docs/conditional.rst", "pynamodb/connection/base.py" ]
[ "docs/quickstart.rst", "docs/settings.rst", "pynamodb/models.py", "docs/local.rst", "docs/tutorial.rst", "docs/conditional.rst", "pynamodb/connection/base.py" ]
adamchainz__ec2-metadata-32
d712c51c7474f392ddac09750d85c62987e626c2
2017-10-17 07:20:43
81fb01c262381462dc1929311ff632b184879d27
diff --git a/HISTORY.rst b/HISTORY.rst index 2f30547..1d3dfdf 100644 --- a/HISTORY.rst +++ b/HISTORY.rst @@ -8,6 +8,10 @@ Pending Release .. Insert new release notes below this line +* All methods can now raise ``requests.exceptions.HTTPError`` if the metadata + API returns a bad response, rather than failing during parsing or silently + returning data from non-200 responses. + 1.2.1 (2017-08-31) ------------------ diff --git a/ec2_metadata.py b/ec2_metadata.py index e087b14..ce62284 100644 --- a/ec2_metadata.py +++ b/ec2_metadata.py @@ -29,59 +29,65 @@ class BaseLazyObject(object): class EC2Metadata(BaseLazyObject): + def _get_url(self, url, raise_for_status=True): + resp = requests.get(url) + if raise_for_status: + resp.raise_for_status() + return resp + @property def account_id(self): return self.instance_identity_document['accountId'] @cached_property def ami_id(self): - return requests.get(METADATA_URL + 'ami-id').text + return self._get_url(METADATA_URL + 'ami-id').text @cached_property def availability_zone(self): - return requests.get(METADATA_URL + 'placement/availability-zone').text + return self._get_url(METADATA_URL + 'placement/availability-zone').text @cached_property def ami_launch_index(self): - return int(requests.get(METADATA_URL + 'ami-launch-index').text) + return int(self._get_url(METADATA_URL + 'ami-launch-index').text) @cached_property def ami_manifest_path(self): - return requests.get(METADATA_URL + 'ami-manifest-path').text + return self._get_url(METADATA_URL + 'ami-manifest-path').text @cached_property def instance_id(self): - return requests.get(METADATA_URL + 'instance-id').text + return self._get_url(METADATA_URL + 'instance-id').text @cached_property def instance_identity_document(self): - return requests.get(DYNAMIC_URL + 'instance-identity/document').json() + return self._get_url(DYNAMIC_URL + 'instance-identity/document').json() @cached_property def instance_type(self): - return requests.get(METADATA_URL + 'instance-type').text + 
return self._get_url(METADATA_URL + 'instance-type').text @cached_property def mac(self): - return requests.get(METADATA_URL + 'mac').text + return self._get_url(METADATA_URL + 'mac').text @cached_property def network_interfaces(self): - macs_text = requests.get(METADATA_URL + 'network/interfaces/macs/').text + macs_text = self._get_url(METADATA_URL + 'network/interfaces/macs/').text macs = [line.rstrip('/') for line in macs_text.splitlines()] - return {mac: NetworkInterface(mac) for mac in macs} + return {mac: NetworkInterface(mac, self) for mac in macs} @cached_property def private_hostname(self): - return requests.get(METADATA_URL + 'local-hostname').text + return self._get_url(METADATA_URL + 'local-hostname').text @cached_property def private_ipv4(self): - return requests.get(METADATA_URL + 'local-ipv4').text + return self._get_url(METADATA_URL + 'local-ipv4').text @cached_property def public_hostname(self): - resp = requests.get(METADATA_URL + 'public-hostname') + resp = self._get_url(METADATA_URL + 'public-hostname', raise_for_status=False) if resp.status_code == 404: return None else: @@ -89,7 +95,7 @@ class EC2Metadata(BaseLazyObject): @cached_property def public_ipv4(self): - resp = requests.get(METADATA_URL + 'public-ipv4') + resp = self._get_url(METADATA_URL + 'public-ipv4', raise_for_status=False) if resp.status_code == 404: return None else: @@ -101,15 +107,15 @@ class EC2Metadata(BaseLazyObject): @cached_property def reservation_id(self): - return requests.get(METADATA_URL + 'reservation-id').text + return self._get_url(METADATA_URL + 'reservation-id').text @cached_property def security_groups(self): - return requests.get(METADATA_URL + 'security-groups').text.splitlines() + return self._get_url(METADATA_URL + 'security-groups').text.splitlines() @cached_property def user_data(self): - resp = requests.get(USERDATA_URL) + resp = self._get_url(USERDATA_URL, raise_for_status=False) if resp.status_code == 404: return None else: @@ -118,14 +124,22 @@ class 
EC2Metadata(BaseLazyObject): class NetworkInterface(BaseLazyObject): - def __init__(self, mac): + def __init__(self, mac, parent=None): self.mac = mac + if parent is None: + self.parent = ec2_metadata + else: + self.parent = parent def __repr__(self): return 'NetworkInterface({mac})'.format(mac=repr(self.mac)) def __eq__(self, other): - return isinstance(other, NetworkInterface) and self.mac == other.mac + return ( + isinstance(other, NetworkInterface) and + self.mac == other.mac and + self.parent == other.parent + ) def _url(self, item): return '{base}network/interfaces/macs/{mac}/{item}'.format( @@ -136,14 +150,14 @@ class NetworkInterface(BaseLazyObject): @cached_property def device_number(self): - return int(requests.get(self._url('device-number')).text) + return int(self.parent._get_url(self._url('device-number')).text) @cached_property def ipv4_associations(self): associations = {} for public_ip in self.public_ipv4s: - resp = requests.get(self._url('ipv4-associations/{}'.format(public_ip))) - resp.raise_for_status() + url = self._url('ipv4-associations/{}'.format(public_ip)) + resp = self.parent._get_url(url) private_ips = resp.text.splitlines() associations[public_ip] = private_ips return associations @@ -156,19 +170,19 @@ class NetworkInterface(BaseLazyObject): @cached_property def owner_id(self): - return requests.get(self._url('owner-id')).text + return self.parent._get_url(self._url('owner-id')).text @cached_property def private_hostname(self): - return requests.get(self._url('local-hostname')).text + return self.parent._get_url(self._url('local-hostname')).text @cached_property def private_ipv4s(self): - return requests.get(self._url('local-ipv4s')).text.splitlines() + return self.parent._get_url(self._url('local-ipv4s')).text.splitlines() @cached_property def public_hostname(self): - resp = requests.get(self._url('public-hostname')) + resp = self.parent._get_url(self._url('public-hostname'), raise_for_status=False) if resp.status_code == 404: return 
None else: @@ -176,7 +190,7 @@ class NetworkInterface(BaseLazyObject): @cached_property def public_ipv4s(self): - resp = requests.get(self._url('public-ipv4s')) + resp = self.parent._get_url(self._url('public-ipv4s'), raise_for_status=False) if resp.status_code == 404: return [] else: @@ -184,19 +198,19 @@ class NetworkInterface(BaseLazyObject): @cached_property def security_groups(self): - return requests.get(self._url('security-groups')).text.splitlines() + return self.parent._get_url(self._url('security-groups')).text.splitlines() @cached_property def security_group_ids(self): - return requests.get(self._url('security-group-ids')).text.splitlines() + return self.parent._get_url(self._url('security-group-ids')).text.splitlines() @cached_property def subnet_id(self): - return requests.get(self._url('subnet-id')).text + return self.parent._get_url(self._url('subnet-id')).text @cached_property def subnet_ipv4_cidr_block(self): - resp = requests.get(self._url('subnet-ipv4-cidr-block')) + resp = self.parent._get_url(self._url('subnet-ipv4-cidr-block'), raise_for_status=False) if resp.status_code == 404: return None else: @@ -210,11 +224,11 @@ class NetworkInterface(BaseLazyObject): @cached_property def vpc_id(self): - return requests.get(self._url('vpc-id')).text + return self.parent._get_url(self._url('vpc-id')).text @cached_property def vpc_ipv4_cidr_block(self): - resp = requests.get(self._url('vpc-ipv4-cidr-block')) + resp = self.parent._get_url(self._url('vpc-ipv4-cidr-block'), raise_for_status=False) if resp.status_code == 404: return None else: @@ -222,7 +236,7 @@ class NetworkInterface(BaseLazyObject): @cached_property def vpc_ipv4_cidr_blocks(self): - resp = requests.get(self._url('vpc-ipv4-cidr-blocks')) + resp = self.parent._get_url(self._url('vpc-ipv4-cidr-blocks'), raise_for_status=False) if resp.status_code == 404: return [] else:
Filter out non-200 responses Currently we don't filter out error responses from the metadata API, which means it can return data from error responses. There should be a `resp.raise_for_status()` in there, or even filter for exactly `resp.status_code == 200`
adamchainz/ec2-metadata
diff --git a/test_ec2_metadata.py b/test_ec2_metadata.py index 5bd65a5..73707ee 100644 --- a/test_ec2_metadata.py +++ b/test_ec2_metadata.py @@ -4,6 +4,7 @@ from __future__ import absolute_import, division, print_function, unicode_litera import json import pytest +import requests import responses from ec2_metadata import DYNAMIC_URL, METADATA_URL, USERDATA_URL, NetworkInterface, ec2_metadata @@ -61,6 +62,12 @@ def test_account_id(resps): assert ec2_metadata.account_id == '1234' +def test_account_id_error(resps): + add_response(resps, DYNAMIC_URL + 'instance-identity/document', status=500) + with pytest.raises(requests.exceptions.HTTPError): + ec2_metadata.account_id + + def test_ami_id(resps): add_response(resps, 'ami-id', 'ami-12345678') assert ec2_metadata.ami_id == 'ami-12345678' @@ -121,7 +128,7 @@ def test_mac(resps): def test_network_interfaces(resps): add_response(resps, 'network/interfaces/macs/', example_mac + '/') - assert ec2_metadata.network_interfaces == {example_mac: NetworkInterface(example_mac)} + assert ec2_metadata.network_interfaces == {example_mac: NetworkInterface(example_mac, ec2_metadata)} def test_private_hostname(resps):
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 1 }, "num_modified_files": 2 }
1.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio" ], "pre_install": [ "pip install tox" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 cached-property==1.3.1 certifi==2017.7.27.1 chardet==3.0.4 configparser==3.5.0 cookies==2.2.1 coverage==4.4.1 distlib==0.3.9 -e git+https://github.com/adamchainz/ec2-metadata.git@d712c51c7474f392ddac09750d85c62987e626c2#egg=ec2_metadata enum34==1.1.6 execnet==1.9.0 filelock==3.4.1 flake8==3.4.1 funcsigs==1.0.2 idna==2.6 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 mccabe==0.6.1 mock==2.0.0 modernize==0.5 multilint==2.2.0 packaging==21.3 pbr==3.1.1 platformdirs==2.4.0 pluggy==1.0.0 py==1.11.0 pycodestyle==2.3.1 pyflakes==1.5.0 pyparsing==3.1.4 pytest==7.0.1 pytest-asyncio==0.16.0 pytest-cov==2.5.1 pytest-mock==3.6.1 pytest-xdist==3.0.2 requests==2.18.4 responses==0.8.1 six==1.17.0 toml==0.10.2 tomli==1.2.3 tox==3.28.0 typing_extensions==4.1.1 urllib3==1.22 virtualenv==20.17.1 zipp==3.6.0
name: ec2-metadata channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - cached-property==1.3.1 - certifi==2017.7.27.1 - chardet==3.0.4 - configparser==3.5.0 - cookies==2.2.1 - coverage==4.4.1 - distlib==0.3.9 - enum34==1.1.6 - execnet==1.9.0 - filelock==3.4.1 - flake8==3.4.1 - funcsigs==1.0.2 - idna==2.6 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - mccabe==0.6.1 - mock==2.0.0 - modernize==0.5 - multilint==2.2.0 - packaging==21.3 - pbr==3.1.1 - platformdirs==2.4.0 - pluggy==1.0.0 - py==1.11.0 - pycodestyle==2.3.1 - pyflakes==1.5.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-asyncio==0.16.0 - pytest-cov==2.5.1 - pytest-mock==3.6.1 - pytest-xdist==3.0.2 - requests==2.18.4 - responses==0.8.1 - six==1.17.0 - toml==0.10.2 - tomli==1.2.3 - tox==3.28.0 - typing-extensions==4.1.1 - urllib3==1.22 - virtualenv==20.17.1 - zipp==3.6.0 prefix: /opt/conda/envs/ec2-metadata
[ "test_ec2_metadata.py::test_account_id_error", "test_ec2_metadata.py::test_network_interfaces" ]
[]
[ "test_ec2_metadata.py::test_account_id", "test_ec2_metadata.py::test_ami_id", "test_ec2_metadata.py::test_ami_id_cached", "test_ec2_metadata.py::test_ami_id_cached_cleared", "test_ec2_metadata.py::test_ami_launch_index", "test_ec2_metadata.py::test_ami_manifest_path", "test_ec2_metadata.py::test_availability_zone", "test_ec2_metadata.py::test_instance_id", "test_ec2_metadata.py::test_instance_identity", "test_ec2_metadata.py::test_instance_type", "test_ec2_metadata.py::test_mac", "test_ec2_metadata.py::test_private_hostname", "test_ec2_metadata.py::test_private_ipv4", "test_ec2_metadata.py::test_public_hostname", "test_ec2_metadata.py::test_public_hostname_none", "test_ec2_metadata.py::test_public_ipv4", "test_ec2_metadata.py::test_public_ipv4_none", "test_ec2_metadata.py::test_region", "test_ec2_metadata.py::test_reservation_id", "test_ec2_metadata.py::test_security_groups_single", "test_ec2_metadata.py::test_security_groups_two", "test_ec2_metadata.py::test_security_groups_emptystring", "test_ec2_metadata.py::test_user_data_none", "test_ec2_metadata.py::test_user_data_something", "test_ec2_metadata.py::test_network_interface_equal", "test_ec2_metadata.py::test_network_interface_not_equal", "test_ec2_metadata.py::test_network_interface_not_equal_class", "test_ec2_metadata.py::test_network_interface_repr", "test_ec2_metadata.py::test_network_interface_device_number", "test_ec2_metadata.py::test_network_interface_ipv4_associations", "test_ec2_metadata.py::test_network_interface_owner_id", "test_ec2_metadata.py::test_network_interface_private_hostname", "test_ec2_metadata.py::test_network_interface_private_ipv4s", "test_ec2_metadata.py::test_network_interface_public_hostname", "test_ec2_metadata.py::test_network_interface_public_hostname_none", "test_ec2_metadata.py::test_network_interface_public_ipv4s", "test_ec2_metadata.py::test_network_interface_public_ipv4s_empty", "test_ec2_metadata.py::test_network_interface_security_groups", 
"test_ec2_metadata.py::test_network_interface_security_group_ids", "test_ec2_metadata.py::test_network_interface_subnet_id", "test_ec2_metadata.py::test_network_interface_subnet_ipv4_cidr_block", "test_ec2_metadata.py::test_network_interface_subnet_ipv4_cidr_block_none", "test_ec2_metadata.py::test_network_interface_vpc_id", "test_ec2_metadata.py::test_network_interface_vpc_ipv4_cidr_block", "test_ec2_metadata.py::test_network_interface_vpc_ipv4_cidr_block_none", "test_ec2_metadata.py::test_network_interface_vpc_ipv4_cidr_blocks", "test_ec2_metadata.py::test_network_interface_vpc_ipv4_cidr_blocks_none" ]
[]
MIT License
1,774
[ "HISTORY.rst", "ec2_metadata.py" ]
[ "HISTORY.rst", "ec2_metadata.py" ]
kopf__httsleep-12
969f30c9a77519e05434e1c46dd486c066c1002a
2017-10-17 14:08:35
1a5e76d39488876abe06c7f359bdf84e1e5433e5
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index f6a9e74..a162d14 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,6 +1,11 @@ httsleep Changelog ================== +Version NEXT +------------- +* The kwarg ``verify`` is now supported, allowing users of httsleep to specify + ``verify=False`` in the same way as when directly using the ``requests`` library. + Version 0.2.0 ------------- * The shorthand kwargs (``status_code``, ``json``, ``jsonpath``, ``text``, ``callback``) diff --git a/CONTRIBUTORS b/CONTRIBUTORS index d7dbbc4..cbc5a7d 100644 --- a/CONTRIBUTORS +++ b/CONTRIBUTORS @@ -1,2 +1,3 @@ Aengus Walton <[email protected]> Patrick Mühlbauer <[email protected]> +Oliver Lockwood <[email protected]> diff --git a/httsleep/main.py b/httsleep/main.py index fd607f1..649d5ed 100644 --- a/httsleep/main.py +++ b/httsleep/main.py @@ -34,6 +34,9 @@ class HttSleeper(object): function that takes the response as an argument returning True. :param auth: a (username, password) tuple for HTTP authentication. :param headers: a dict of HTTP headers. + :param verify: Either a boolean, in which case it controls whether we verify the server's + TLS certificate, or a string, in which case it must be a path to a CA + bundle to use. Defaults to ``True``. :param polling_interval: how many seconds to sleep between requests. :param max_retries: the maximum number of retries to make, after which a StopIteration exception is raised. 
@@ -46,7 +49,7 @@ class HttSleeper(object): """ def __init__(self, url_or_request, until=None, alarms=None, status_code=None, json=None, jsonpath=None, text=None, callback=None, - auth=None, headers=None, + auth=None, headers=None, verify=True, polling_interval=DEFAULT_POLLING_INTERVAL, max_retries=DEFAULT_MAX_RETRIES, ignore_exceptions=None, @@ -84,6 +87,7 @@ class HttSleeper(object): 'jsonpath': jsonpath, 'text': text, 'callback': callback} until.append({k: v for k, v in condition.items() if v}) + self.verify = verify self.until = until self.alarms = alarms self.polling_interval = int(polling_interval) @@ -145,7 +149,7 @@ class HttSleeper(object): """ while True: try: - response = self.session.send(self.request.prepare()) + response = self.session.send(self.request.prepare(), verify=self.verify) for condition in self.alarms: if self.meets_condition(response, condition): raise Alarm(response, condition) @@ -192,9 +196,9 @@ class HttSleeper(object): return True -def httsleep(url_or_request, until=None, alarms=None, - status_code=None, json=None, jsonpath=None, text=None, callback=None, - auth=None, headers=None, +def httsleep(url_or_request, until=None, alarms=None, status_code=None, + json=None, jsonpath=None, text=None, callback=None, + auth=None, headers=None, verify=True, polling_interval=DEFAULT_POLLING_INTERVAL, max_retries=DEFAULT_MAX_RETRIES, ignore_exceptions=None, @@ -207,7 +211,9 @@ def httsleep(url_or_request, until=None, alarms=None, return HttSleeper( url_or_request, until=until, alarms=alarms, status_code=status_code, json=json, jsonpath=jsonpath, text=text, callback=callback, - auth=auth, headers=headers, polling_interval=polling_interval, - max_retries=max_retries, ignore_exceptions=ignore_exceptions, + auth=auth, headers=headers, verify=verify, + polling_interval=polling_interval, + max_retries=max_retries, + ignore_exceptions=ignore_exceptions, loglevel=loglevel ).run()
Support `verify=False` usage as per requests library It's not currently possible to use `httsleep` and ignore certificate errors, as you would with `requests.get(..., verify=False)`. This looks fairly simple to add so I'll try to follow this with a PR and hopefully we can improve things.
kopf/httsleep
diff --git a/tests/test_init.py b/tests/test_init.py index 513fe3f..d367d77 100644 --- a/tests/test_init.py +++ b/tests/test_init.py @@ -35,6 +35,16 @@ def test_headers(): assert obj.request.headers == headers +def test_default_verify(): + obj = HttSleeper(URL, CONDITION) + assert obj.verify == True + + +def test_verify(): + obj = HttSleeper(URL, CONDITION, verify=False) + assert obj.verify == False + + def test_ignore_exceptions_default_value(): obj = HttSleeper(URL, CONDITION) assert obj.ignore_exceptions == tuple() diff --git a/tests/test_run.py b/tests/test_run.py index 7a708d1..24f59ff 100644 --- a/tests/test_run.py +++ b/tests/test_run.py @@ -5,6 +5,7 @@ from jsonpath_rw.jsonpath import Fields import mock import pytest from requests.exceptions import ConnectionError +from requests import Response from httsleep.main import HttSleeper, Alarm, DEFAULT_POLLING_INTERVAL @@ -22,6 +23,36 @@ def test_run_success(): assert not mock_sleep.called [email protected] +def test_propagate_verify(): + """Should tell requests to skip SSL verification if verify==False""" + resp = Response() + resp.status_code = 200 + httsleep = HttSleeper(URL, {'status_code': 200}, verify=False) + with mock.patch('requests.sessions.Session.send') as mock_session_send: + mock_session_send.return_value = resp + httsleep.run() + assert mock_session_send.called + args, kwargs = mock_session_send.call_args + assert 'verify' in kwargs + assert kwargs['verify'] == False + + [email protected] +def test_default_sends_verify_true(): + """Should not send a value for 'verify' to requests by default""" + resp = Response() + resp.status_code = 200 + httsleep = HttSleeper(URL, {'status_code': 200}) + with mock.patch('requests.sessions.Session.send') as mock_session_send: + mock_session_send.return_value = resp + httsleep.run() + assert mock_session_send.called + args, kwargs = mock_session_send.call_args + assert 'verify' in kwargs + assert kwargs['verify'] == True + + @httpretty.activate def 
test_run_alarm(): """Should raise an Alarm when a failure criteria has been reached"""
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 1 }, "num_modified_files": 3 }
0.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "httpretty", "mock" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
certifi==2025.1.31 charset-normalizer==3.4.1 decorator==5.2.1 exceptiongroup==1.2.2 httpretty==1.1.4 -e git+https://github.com/kopf/httsleep.git@969f30c9a77519e05434e1c46dd486c066c1002a#egg=httsleep idna==3.10 iniconfig==2.1.0 jsonpath-rw==1.4.0 mock==5.2.0 packaging==24.2 pluggy==1.5.0 ply==3.11 pytest==8.3.5 requests==2.32.3 six==1.17.0 tomli==2.2.1 urllib3==2.3.0
name: httsleep channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - certifi==2025.1.31 - charset-normalizer==3.4.1 - decorator==5.2.1 - exceptiongroup==1.2.2 - httpretty==1.1.4 - idna==3.10 - iniconfig==2.1.0 - jsonpath-rw==1.4.0 - mock==5.2.0 - packaging==24.2 - pluggy==1.5.0 - ply==3.11 - pytest==8.3.5 - requests==2.32.3 - six==1.17.0 - tomli==2.2.1 - urllib3==2.3.0 prefix: /opt/conda/envs/httsleep
[ "tests/test_init.py::test_default_verify", "tests/test_init.py::test_verify", "tests/test_run.py::test_propagate_verify", "tests/test_run.py::test_default_sends_verify_true" ]
[ "tests/test_run.py::test_run_sleep_default_interval", "tests/test_run.py::test_run_sleep_custom_interval" ]
[ "tests/test_init.py::test_request_built_from_url", "tests/test_init.py::test_url_or_request", "tests/test_init.py::test_auth", "tests/test_init.py::test_headers", "tests/test_init.py::test_ignore_exceptions_default_value", "tests/test_init.py::test_max_retries", "tests/test_init.py::test_until", "tests/test_init.py::test_empty_until", "tests/test_init.py::test_invalid_until", "tests/test_init.py::test_status_code_cast_as_int", "tests/test_init.py::test_alarms", "tests/test_init.py::test_invalid_alarms", "tests/test_init.py::test_status_code_cast_as_int_in_alarm", "tests/test_init.py::test_kwarg_condition", "tests/test_run.py::test_run_success", "tests/test_run.py::test_run_alarm", "tests/test_run.py::test_run_success_alarm", "tests/test_run.py::test_run_retries", "tests/test_run.py::test_run_max_retries", "tests/test_run.py::test_ignore_exceptions", "tests/test_run.py::test_json_condition", "tests/test_run.py::test_text_condition", "tests/test_run.py::test_jsonpath_condition", "tests/test_run.py::test_precompiled_jsonpath_expression", "tests/test_run.py::test_jsonpath_condition_multiple_values", "tests/test_run.py::test_multiple_jsonpath_conditions", "tests/test_run.py::test_callback_condition", "tests/test_run.py::test_multiple_success_conditions", "tests/test_run.py::test_multiple_alarms" ]
[]
Apache License 2.0
1,775
[ "httsleep/main.py", "CHANGELOG.rst", "CONTRIBUTORS" ]
[ "httsleep/main.py", "CHANGELOG.rst", "CONTRIBUTORS" ]
johnthagen__cppcheck-junit-6
62cf03e4eee1ce80cccc1a9557417fb4b7f436f2
2017-10-17 21:53:41
62cf03e4eee1ce80cccc1a9557417fb4b7f436f2
codecov-io: # [Codecov](https://codecov.io/gh/johnthagen/cppcheck-junit/pull/6?src=pr&el=h1) Report > Merging [#6](https://codecov.io/gh/johnthagen/cppcheck-junit/pull/6?src=pr&el=desc) into [master](https://codecov.io/gh/johnthagen/cppcheck-junit/commit/62cf03e4eee1ce80cccc1a9557417fb4b7f436f2?src=pr&el=desc) will **not change** coverage. > The diff coverage is `100%`. [![Impacted file tree graph](https://codecov.io/gh/johnthagen/cppcheck-junit/pull/6/graphs/tree.svg?height=150&token=IW4AuhadSQ&width=650&src=pr)](https://codecov.io/gh/johnthagen/cppcheck-junit/pull/6?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #6 +/- ## ===================================== Coverage 100% 100% ===================================== Files 2 2 Lines 195 197 +2 Branches 13 13 ===================================== + Hits 195 197 +2 ``` | [Impacted Files](https://codecov.io/gh/johnthagen/cppcheck-junit/pull/6?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [cppcheck\_junit.py](https://codecov.io/gh/johnthagen/cppcheck-junit/pull/6?src=pr&el=tree#diff-Y3BwY2hlY2tfanVuaXQucHk=) | `100% <ø> (ø)` | :arrow_up: | | [test.py](https://codecov.io/gh/johnthagen/cppcheck-junit/pull/6?src=pr&el=tree#diff-dGVzdC5weQ==) | `100% <100%> (ø)` | :arrow_up: | ------ [Continue to review full report at Codecov](https://codecov.io/gh/johnthagen/cppcheck-junit/pull/6?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/johnthagen/cppcheck-junit/pull/6?src=pr&el=footer). Last update [62cf03e...057f760](https://codecov.io/gh/johnthagen/cppcheck-junit/pull/6?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments). bjornpiltz: This fixes #5 for me.
diff --git a/cppcheck_junit.py b/cppcheck_junit.py index 87be9a5..9adfd7e 100755 --- a/cppcheck_junit.py +++ b/cppcheck_junit.py @@ -123,8 +123,9 @@ def generate_test_suite(errors): for file_name, errors in errors.items(): test_case = ElementTree.SubElement(test_suite, 'testcase', - name=os.path.relpath(file_name) if file_name else '', - classname='', + name=os.path.relpath( + file_name) if file_name else 'Cppcheck error', + classname='Cppcheck error', time=str(1)) for error in errors: ElementTree.SubElement(test_case, @@ -153,7 +154,7 @@ def generate_single_success_test_suite(): ElementTree.SubElement(test_suite, 'testcase', name='Cppcheck success', - classname='', + classname='Cppcheck success', time=str(1)) return ElementTree.ElementTree(test_suite)
Failing tests not showing up on bamboo. I have added the following steps to my bamboo plan - `cppcheck ... 2> cppcheck-result.xml` - `cppcheck_junit cppcheck-result.xml cppcheck-junit.xml` - JUnit Parser I can see a passing test on the dashboard if cppcheck reports no errrors. If there are errors, however, I can only see that JUnit parser failed. No further information is available. Here are my logs: ```bash build 16-Oct-2017 10:21:36 Running cppcheck: cppcheck --xml --xml-version=2 --enable=all -- project=compile_commands.json 2> cppcheck-result.xml ``` ### cppcheck-result.xml: ```xml <?xml version="1.0" encoding="UTF-8"?> <results version="2"> <cppcheck version="1.80"/> <errors> <error id="missingInclude" severity="information" msg="Cppcheck cannot find all the include files (use --check-config for details)" verbose="Cppcheck cannot find all the include files. Cppcheck can check the code without the include files found. But the results will probably be more accurate if all the include files are found. Please check your project&apos;s include directories and add all of them as include directories for Cppcheck. 
To see what files Cppcheck cannot find use --check-config."/> </errors> </results> ``` ``` build 16-Oct-2017 10:21:36 Running cppcheck_junit: cppcheck_junit cppcheck-result.xml cppcheck-junit.xml ``` ### cppcheck-junit.xml: ```xml <?xml version='1.0' encoding='utf-8'?> <testsuite errors="1" failures="0" hostname="XXX" name="Cppcheck errors" tests="1" time="1" timestamp="2017-10-16T10:21:36.514058"> <testcase classname="" name="" time="1"> <error file="" line="0" message="0: (information) Cppcheck cannot find all the include files (use --check-config for details)" type="" /> </testcase> </testsuite> ``` ``` simple 16-Oct-2017 10:21:36 Finished task 'cppcheck' with result: Success simple 16-Oct-2017 10:21:36 Starting task 'JUnit Parser' of type 'com.atlassian.bamboo.plugins.testresultparser:task.testresultparser.junit' simple 16-Oct-2017 10:21:36 Parsing test results under <path> simple 16-Oct-2017 10:21:36 Failing task since 1 failing test cases were found. simple 16-Oct-2017 10:21:36 Finished task 'JUnit Parser' with result: Failed ```
johnthagen/cppcheck-junit
diff --git a/test.py b/test.py index e873733..18c3907 100644 --- a/test.py +++ b/test.py @@ -168,7 +168,8 @@ class GenerateTestSuiteTestCase(unittest.TestCase): self.assertTrue(required_attribute in testsuite_element.attrib.keys()) testcase_element = testsuite_element.find('testcase') - self.assertEqual(testcase_element.get('name'), '') + self.assertEqual(testcase_element.get('name'), 'Cppcheck error') + self.assertEqual(testcase_element.get('classname'), 'Cppcheck error') # Check that test_case is compliant with the spec for required_attribute in self.junit_testcase_attributes: self.assertTrue(required_attribute in testcase_element.attrib.keys()) @@ -207,6 +208,7 @@ class GenerateSingleSuccessTestSuite(unittest.TestCase): testcase_element = testsuite_element.find('testcase') self.assertEqual(testcase_element.get('name'), 'Cppcheck success') + self.assertEqual(testcase_element.get('classname'), 'Cppcheck success') # Check that test_case is compliant with the spec for required_attribute in self.junit_testcase_attributes: self.assertTrue(required_attribute in testcase_element.attrib.keys())
{ "commit_name": "merge_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 1 }
1.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 coverage==6.2 -e git+https://github.com/johnthagen/cppcheck-junit.git@62cf03e4eee1ce80cccc1a9557417fb4b7f436f2#egg=cppcheck_junit exitstatus==2.1.0 importlib-metadata==4.8.3 iniconfig==1.1.1 packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 tomli==1.2.3 typing==3.7.4.3 typing_extensions==4.1.1 zipp==3.6.0
name: cppcheck-junit channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - coverage==6.2 - exitstatus==2.1.0 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - tomli==1.2.3 - typing==3.7.4.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/cppcheck-junit
[ "test.py::GenerateTestSuiteTestCase::test_missing_file", "test.py::GenerateSingleSuccessTestSuite::test" ]
[]
[ "test.py::ParseCppcheckTestCase::test_all", "test.py::ParseCppcheckTestCase::test_bad", "test.py::ParseCppcheckTestCase::test_bad_large", "test.py::ParseCppcheckTestCase::test_file_not_found", "test.py::ParseCppcheckTestCase::test_good", "test.py::ParseCppcheckTestCase::test_malformed", "test.py::ParseCppcheckTestCase::test_missing_include_no_location_element", "test.py::ParseCppcheckTestCase::test_no_location_element", "test.py::ParseCppcheckTestCase::test_xml_version_1", "test.py::GenerateTestSuiteTestCase::test_single", "test.py::ParseArgumentsTestCase::test_no_arguments" ]
[]
MIT License
1,776
[ "cppcheck_junit.py" ]
[ "cppcheck_junit.py" ]
google__mobly-359
b4bd28313361c9ce0de33c591625b675eac507e7
2017-10-18 02:46:22
7e5e62af4ab4537bf619f0ee403c05f004c5baf0
xpconanfan: Review status: 0 of 5 files reviewed at latest revision, 11 unresolved discussions. --- *[mobly/base_instrumentation_test.py, line 25 at r1](https://reviewable.io:443/reviews/google/mobly/359#-KwheO6oVK8o23Q3RxyM:-KwheO6oVK8o23Q3RxyN:b-uxhw2t) ([raw file](https://github.com/google/mobly/blob/4d8d062614c1553a6bf2be8b94fb458cfc3a0689/mobly/base_instrumentation_test.py#L25)):* > ```Python > > > class _InstrumentationStructurePrefixes(object): > ``` pls add docstrings for all classes. For this kind of things, you can provide context on what they map to in Android. It'll be helpful if there's a link to the actual Android doc where these are referred to. --- *[mobly/base_instrumentation_test.py, line 35 at r1](https://reviewable.io:443/reviews/google/mobly/359#-KwhebfkVMPbEfTMw2cT:-KwhebfkVMPbEfTMw2cU:b-3vsjan) ([raw file](https://github.com/google/mobly/blob/4d8d062614c1553a6bf2be8b94fb458cfc3a0689/mobly/base_instrumentation_test.py#L35)):* > ```Python > class _InstrumentationKnownStatusKeys(object): > CLASS = "class" > ERROR = "Error" > ``` why is "Error" in different case from the rest? --- *[mobly/base_instrumentation_test.py, line 41 at r1](https://reviewable.io:443/reviews/google/mobly/359#-KwhegkTcVBPhP8ZorWc:-KwhegkTcVBPhP8ZorWd:bq4fwyn) ([raw file](https://github.com/google/mobly/blob/4d8d062614c1553a6bf2be8b94fb458cfc3a0689/mobly/base_instrumentation_test.py#L41)):* > ```Python > > > class _InstrumentationStatusCodes(object): > ``` Would using `enum` module be helpful for any of these enum classes? --- *[mobly/base_instrumentation_test.py, line 86 at r1](https://reviewable.io:443/reviews/google/mobly/359#-KwhetR3v-bREHQ6nurU:-KwhetR3v-bREHQ6nurV:b-b6qkc7) ([raw file](https://github.com/google/mobly/blob/4d8d062614c1553a6bf2be8b94fb458cfc3a0689/mobly/base_instrumentation_test.py#L86)):* > ```Python > > > class _InstrumentationBlock(object): > ``` need proper docstring for classes. E.g. 
you may want to mention what this maps to in the instrumentation output. --- *[mobly/base_instrumentation_test.py, line 112 at r1](https://reviewable.io:443/reviews/google/mobly/359#-Kwhf6ZXAbAw6b7fN8Bm:-Kwhf6ZXAbAw6b7fN8Bn:bp8ohav) ([raw file](https://github.com/google/mobly/blob/4d8d062614c1553a6bf2be8b94fb458cfc3a0689/mobly/base_instrumentation_test.py#L112)):* > ```Python > > def is_empty(self): > return self.empty > ``` if this is meant to be accessed via `is_empty` only, the attribute name should be `_empty`? Also should `is_empty` be a `@property`? --- *[mobly/base_instrumentation_test.py, line 114 at r1](https://reviewable.io:443/reviews/google/mobly/359#-KwhfJNZn6oripWinVCV:-KwhfJNZn6oripWinVCW:bfjz8ao) ([raw file](https://github.com/google/mobly/blob/4d8d062614c1553a6bf2be8b94fb458cfc3a0689/mobly/base_instrumentation_test.py#L114)):* > ```Python > return self.empty > > def set_error_message(self, error_message): > ``` need docstring for all methods --- *[mobly/base_instrumentation_test.py, line 300 at r1](https://reviewable.io:443/reviews/google/mobly/359#-Kwhe3t-_JsjXBpoSHL-:-Kwhe3t-_JsjXBpoSHL0:b-rg0non) ([raw file](https://github.com/google/mobly/blob/4d8d062614c1553a6bf2be8b94fb458cfc3a0689/mobly/base_instrumentation_test.py#L300)):* > ```Python > formatters = [] > # If starting a new block and yet the previous block never completed, error the last block. > if instrumentation_block.previous_instrumentation_block: > ``` use local vars to avoid repeating such long names over and over? These nested `if` statements are difficult to read and maintain. 
Consider replacing them with funcs like `all(cond1, cond2, cond3)` --- *[mobly/controllers/android_device.py, line 411 at r1](https://reviewable.io:443/reviews/google/mobly/359#-KwhddgAHjpryixc2pK2:-KwhddgAHjpryixc2pK3:b-hdyhn4) ([raw file](https://github.com/google/mobly/blob/4d8d062614c1553a6bf2be8b94fb458cfc3a0689/mobly/controllers/android_device.py#L411)):* > ```Python > """ > > DEFAULT_INSTRUMENTATION_RUNNER = 'com.android.common.support.test.runner.AndroidJUnitRunner' > ``` not used? --- *[mobly/controllers/android_device.py, line 539 at r1](https://reviewable.io:443/reviews/google/mobly/359#-KwhdF0dgBwKAud_5uyX:-KwhdF0efx6o4HxoBuLz:b-jeaphc) ([raw file](https://github.com/google/mobly/blob/4d8d062614c1553a6bf2be8b94fb458cfc3a0689/mobly/controllers/android_device.py#L539)):* > ```Python > * Any other type of USB disconnection, as long as snippet session can be > kept alive while USB disconnected (reboot caused USB disconnection > is > ``` fix line breaks. a line should be as long as possible within 80 char before breaking to the next line. --- *[mobly/controllers/android_device.py, line 1041 at r1](https://reviewable.io:443/reviews/google/mobly/359#-KwhdkhXXT2dhMBs2qoF:-KwhdkhXXT2dhMBs2qoG:b-36hodg) ([raw file](https://github.com/google/mobly/blob/4d8d062614c1553a6bf2be8b94fb458cfc3a0689/mobly/controllers/android_device.py#L1041)):* > ```Python > self.adb.reboot() > > def instrument(self, package, options=None, runner=None): > ``` should this be in `AdbProxy` class instead? --- *[mobly/controllers/android_device.py, line 1044 at r1](https://reviewable.io:443/reviews/google/mobly/359#-KwhdPutoMA3kMl50aPx:-KwhdPutoMA3kMl50aPy:b-66crbv) ([raw file](https://github.com/google/mobly/blob/4d8d062614c1553a6bf2be8b94fb458cfc3a0689/mobly/controllers/android_device.py#L1044)):* > ```Python > """Runs an instrumentation command on the device. > > This is a convenience wrapper to avoid parameter formatting. 
> ``` seems like there is an extra indentation for all the lines in the body of this doc string. --- *Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/359)* <!-- Sent from Reviewable.io --> winterfroststrom: Review status: 0 of 7 files reviewed at latest revision, 11 unresolved discussions. --- *[mobly/base_instrumentation_test.py, line 35 at r1](https://reviewable.io:443/reviews/google/mobly/359#-KwhebfkVMPbEfTMw2cT:-KwiC0Q12UB-Ppl5tpfC:bvwjhn0) ([raw file](https://github.com/google/mobly/blob/4d8d062614c1553a6bf2be8b94fb458cfc3a0689/mobly/base_instrumentation_test.py#L35)):* <details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote> why is "Error" in different case from the rest? </blockquote></details> I see reference to this in other test cases, so presumably there's some version of Adb/JUnit that outputs this in some scenario. --- *[mobly/base_instrumentation_test.py, line 41 at r1](https://reviewable.io:443/reviews/google/mobly/359#-KwhegkTcVBPhP8ZorWc:-KwiBoG8riW83mTU2WP4:brsmk2i) ([raw file](https://github.com/google/mobly/blob/4d8d062614c1553a6bf2be8b94fb458cfc3a0689/mobly/base_instrumentation_test.py#L41)):* <details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote> Would using `enum` module be helpful for any of these enum classes? </blockquote></details> I considered that, but these strings actually correspond to raw output, so it would actually make the code clunkier to A.A.value instead of just A.A --- *[mobly/controllers/android_device.py, line 411 at r1](https://reviewable.io:443/reviews/google/mobly/359#-KwhddgAHjpryixc2pK2:-KwiBgVUB4NXGWhlWt9k:b-n5akmg) ([raw file](https://github.com/google/mobly/blob/4d8d062614c1553a6bf2be8b94fb458cfc3a0689/mobly/controllers/android_device.py#L411)):* <details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote> not used? </blockquote></details> This is actually used when no runner is given? 
--- *[mobly/controllers/android_device.py, line 539 at r1](https://reviewable.io:443/reviews/google/mobly/359#-KwhdF0dgBwKAud_5uyX:-KwiBLEFjlUzNu-WfFjM:b-896fix) ([raw file](https://github.com/google/mobly/blob/4d8d062614c1553a6bf2be8b94fb458cfc3a0689/mobly/controllers/android_device.py#L539)):* <details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote> fix line breaks. a line should be as long as possible within 80 char before breaking to the next line. </blockquote></details> Done. --- *[mobly/controllers/android_device.py, line 1041 at r1](https://reviewable.io:443/reviews/google/mobly/359#-KwhdkhXXT2dhMBs2qoF:-KwiBe725wqssaVdtSJD:b-896fix) ([raw file](https://github.com/google/mobly/blob/4d8d062614c1553a6bf2be8b94fb458cfc3a0689/mobly/controllers/android_device.py#L1041)):* <details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote> should this be in `AdbProxy` class instead? </blockquote></details> Done. --- *[mobly/controllers/android_device.py, line 1044 at r1](https://reviewable.io:443/reviews/google/mobly/359#-KwhdPutoMA3kMl50aPx:-KwiBMgPrVB_n1l0yIB9:b-896fix) ([raw file](https://github.com/google/mobly/blob/4d8d062614c1553a6bf2be8b94fb458cfc3a0689/mobly/controllers/android_device.py#L1044)):* <details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote> seems like there is an extra indentation for all the lines in the body of this doc string. </blockquote></details> Done. --- *Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/359)* <!-- Sent from Reviewable.io --> xpconanfan: Pls rebase now that #357 is merged --- Review status: 0 of 7 files reviewed at latest revision, 10 unresolved discussions, some commit checks failed. 
--- *[mobly/controllers/android_device_lib/adb.py, line 100 at r2](https://reviewable.io:443/reviews/google/mobly/359#-KwnCmhITDIty3S4sa3h:-KwnCmhITDIty3S4sa3i:b-ml5w5q) ([raw file](https://github.com/google/mobly/blob/cef533fe9e9eef3a1358ea3b8da0236f76e15424/mobly/controllers/android_device_lib/adb.py#L100)):* > ```Python > """ > > DEFAULT_INSTRUMENTATION_RUNNER = 'com.android.common.support.test.runner.AndroidJUnitRunner' > ``` Should be a module-level constant? --- *Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/359#-:-KwnCtPI1i08_kpFBZS1:bdb65o5)* <!-- Sent from Reviewable.io --> winterfroststrom: I somewhat messed up rebasing, but it's not too terrible. --- Review status: 0 of 6 files reviewed at latest revision, 11 unresolved discussions. --- *[mobly/base_instrumentation_test.py, line 25 at r1](https://reviewable.io:443/reviews/google/mobly/359#-KwheO6oVK8o23Q3RxyM:-KwnqaHohvKTPcBn3Ep9:b-k92d97) ([raw file](https://github.com/google/mobly/blob/4d8d062614c1553a6bf2be8b94fb458cfc3a0689/mobly/base_instrumentation_test.py#L25)):* <details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote> pls add docstrings for all classes. For this kind of things, you can provide context on what they map to in Android. It'll be helpful if there's a link to the actual Android doc where these are referred to. </blockquote></details> I have not been able to find actual documentation for this stuff. I do know where some of this is generated from the source code level though. Also, technically, if a user is adventurous enough, they could technically just override large parts of the instrumentation runner output, which I have intention of handling. 
--- *[mobly/base_instrumentation_test.py, line 86 at r1](https://reviewable.io:443/reviews/google/mobly/359#-KwhetR3v-bREHQ6nurU:-Kwnsk1I_T9G9E-bNqCF:b-896fix) ([raw file](https://github.com/google/mobly/blob/4d8d062614c1553a6bf2be8b94fb458cfc3a0689/mobly/base_instrumentation_test.py#L86)):* <details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote> need proper docstring for classes. E.g. you may want to mention what this maps to in the instrumentation output. </blockquote></details> Done. --- *[mobly/base_instrumentation_test.py, line 112 at r1](https://reviewable.io:443/reviews/google/mobly/359#-Kwhf6ZXAbAw6b7fN8Bm:-Kwnr37hJoBdRh67xQRD:b-896fix) ([raw file](https://github.com/google/mobly/blob/4d8d062614c1553a6bf2be8b94fb458cfc3a0689/mobly/base_instrumentation_test.py#L112)):* <details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote> if this is meant to be accessed via `is_empty` only, the attribute name should be `_empty`? Also should `is_empty` be a `@property`? </blockquote></details> Done. --- *[mobly/base_instrumentation_test.py, line 114 at r1](https://reviewable.io:443/reviews/google/mobly/359#-KwhfJNZn6oripWinVCV:-KwnsmsezeHIa8d2_T-k:b-896fix) ([raw file](https://github.com/google/mobly/blob/4d8d062614c1553a6bf2be8b94fb458cfc3a0689/mobly/base_instrumentation_test.py#L114)):* <details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote> need docstring for all methods </blockquote></details> Done. --- *[mobly/base_instrumentation_test.py, line 300 at r1](https://reviewable.io:443/reviews/google/mobly/359#-Kwhe3t-_JsjXBpoSHL-:-Kwnr7CjJ-V_TEPURxG4:b-896fix) ([raw file](https://github.com/google/mobly/blob/4d8d062614c1553a6bf2be8b94fb458cfc3a0689/mobly/base_instrumentation_test.py#L300)):* <details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote> use local vars to avoid repeating such long names over and over? 
These nested `if` statements are difficult to read and maintain. Consider replacing them with funcs like `all(cond1, cond2, cond3)` </blockquote></details> Done. --- *[mobly/controllers/android_device_lib/adb.py, line 100 at r2](https://reviewable.io:443/reviews/google/mobly/359#-KwnCmhITDIty3S4sa3h:-Kwnr9A5mnVqc9cqPnM4:b34sxvf) ([raw file](https://github.com/google/mobly/blob/cef533fe9e9eef3a1358ea3b8da0236f76e15424/mobly/controllers/android_device_lib/adb.py#L100)):* <details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote> Should be a module-level constant? </blockquote></details> I kinda want to leave it here because a user could technically specify a default per device runner this way. But I'm thinking of this as a kind of hacky semi-unsupported feature, so I think I could go either way on this. --- *Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/359#-:-KwnsoUuJ1mVxSXA3Q_t:bscr1ph)* <!-- Sent from Reviewable.io --> xpconanfan: Review status: 0 of 6 files reviewed at latest revision, 10 unresolved discussions. --- *[mobly/base_instrumentation_test.py, line 25 at r1](https://reviewable.io:443/reviews/google/mobly/359#-KwheO6oVK8o23Q3RxyM:-KwtVqR6MwNhovPWUrJo:bck3w2a) ([raw file](https://github.com/google/mobly/blob/4d8d062614c1553a6bf2be8b94fb458cfc3a0689/mobly/base_instrumentation_test.py#L25)):* <details><summary><i>Previously, winterfroststrom wrote…</i></summary><blockquote> I have not been able to find actual documentation for this stuff. I do know where some of this is generated from the source code level though. Also, technically, if a user is adventurous enough, they could technically just override large parts of the instrumentation runner output, which I have intention of handling. </blockquote></details> Sad... can we contact the team that owns this for help? maybe they know where it is. As a last resort we can always directly point to Android source code. This has to be coded somewhere... 
--- *[mobly/base_instrumentation_test.py, line 228 at r3](https://reviewable.io:443/reviews/google/mobly/359#-KwtWU8f27R0-YjqdrIG:-KwtWU8f27R0-YjqdrIH:buavbmd) ([raw file](https://github.com/google/mobly/blob/8e81d458fe8269e236e04af79294e0ac8e44f1a4/mobly/base_instrumentation_test.py#L228)):* > ```Python > This class is also used for storing result blocks although very little > needs to be done for those. > """ > ``` class docstrings should document public attributes in an "Attributes" section. --- *[mobly/base_instrumentation_test.py, line 273 at r3](https://reviewable.io:443/reviews/google/mobly/359#-KwtWd5A7s2MTv1xMXua:-KwtWd5A7s2MTv1xMXub:b-wawasb) ([raw file](https://github.com/google/mobly/blob/8e81d458fe8269e236e04af79294e0ac8e44f1a4/mobly/base_instrumentation_test.py#L273)):* > ```Python > > Args: > error_meessage: A string to add to the TestResultRecord to explain > ``` Misspelled "message". For args, let's use the convention: ``` name: type, description. ``` So this would be: ``` error_message: string, the message to add to the TestResultRecord... ``` Change all places. --- *[mobly/base_instrumentation_test.py, line 397 at r3](https://reviewable.io:443/reviews/google/mobly/359#-KwtX4-ID-BaFwE181-1:-KwtX4-ID-BaFwE181-2:b-9vivo4) ([raw file](https://github.com/google/mobly/blob/8e81d458fe8269e236e04af79294e0ac8e44f1a4/mobly/base_instrumentation_test.py#L397)):* > ```Python > instrumentation_block.unknown_keys[key]) > > def _add_part(self, parts, part): > ``` does this really need to be a function? 
seems like we can simply append all and not join the None ones --- *[mobly/controllers/android_device_lib/adb.py, line 100 at r2](https://reviewable.io:443/reviews/google/mobly/359#-KwnCmhITDIty3S4sa3h:-KwtXafdp9UbP0fiTE9D:b-3r2b6g) ([raw file](https://github.com/google/mobly/blob/cef533fe9e9eef3a1358ea3b8da0236f76e15424/mobly/controllers/android_device_lib/adb.py#L100)):* <details><summary><i>Previously, winterfroststrom wrote…</i></summary><blockquote> I kinda want to leave it here because a user could technically specify a default per device runner this way. But I'm thinking of this as a kind of hacky semi-unsupported feature, so I think I could go either way on this. </blockquote></details> I think module level is better. One less level of reference when using it. --- *Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/359)* <!-- Sent from Reviewable.io --> winterfroststrom: Review status: 0 of 6 files reviewed at latest revision, 8 unresolved discussions. --- *[mobly/base_instrumentation_test.py, line 25 at r1](https://reviewable.io:443/reviews/google/mobly/359#-KwheO6oVK8o23Q3RxyM:-Kwv7JxE7KaDmLQmxoYj:bge6sqp) ([raw file](https://github.com/google/mobly/blob/4d8d062614c1553a6bf2be8b94fb458cfc3a0689/mobly/base_instrumentation_test.py#L25)):* <details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote> Sad... can we contact the team that owns this for help? maybe they know where it is. As a last resort we can always directly point to Android source code. This has to be coded somewhere... </blockquote></details> The prefixes are mostly in Android source code such as: https://android.googlesource.com/platform/frameworks/base/+/742a67127366c376fdf188ff99ba30b27d3bf90c/cmds/am/src/com/android/commands/am/Am.java The key-value stuff should be in the JUnit runner source code. So, the team that owns this would be JUnit/Android? 
--- *[mobly/base_instrumentation_test.py, line 397 at r3](https://reviewable.io:443/reviews/google/mobly/359#-KwtX4-ID-BaFwE181-1:-Kwv5lmByZkcM_CtuqbV:bn6uvk7) ([raw file](https://github.com/google/mobly/blob/8e81d458fe8269e236e04af79294e0ac8e44f1a4/mobly/base_instrumentation_test.py#L397)):* <details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote> does this really need to be a function? seems like we can simply append all and not join the None ones </blockquote></details> So, it's a difference between this? a = [] a.append(x) a.append(y) a.append(z) '.'.join(filter(lambda x: x, a)) / self._filtered_join('.', a) And this? a = [] self._add_part(a, x) self._add_part(a, y) self._add_part(a, z) '.'.join(a) Is there much a difference? --- *[mobly/controllers/android_device_lib/adb.py, line 100 at r2](https://reviewable.io:443/reviews/google/mobly/359#-KwnCmhITDIty3S4sa3h:-Kwv74dy2uC7g3ceXR1-:b-896fix) ([raw file](https://github.com/google/mobly/blob/cef533fe9e9eef3a1358ea3b8da0236f76e15424/mobly/controllers/android_device_lib/adb.py#L100)):* <details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote> I think module level is better. One less level of reference when using it. </blockquote></details> Done. --- *Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/359)* <!-- Sent from Reviewable.io --> winterfroststrom: Review status: 0 of 6 files reviewed at latest revision, 8 unresolved discussions. --- *[mobly/base_instrumentation_test.py, line 228 at r3](https://reviewable.io:443/reviews/google/mobly/359#-KwtWU8f27R0-YjqdrIG:-KwvSlkw7hoLoUTTZQH0:b-896fix) ([raw file](https://github.com/google/mobly/blob/8e81d458fe8269e236e04af79294e0ac8e44f1a4/mobly/base_instrumentation_test.py#L228)):* <details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote> class docstrings should document public attributes in an "Attributes" section. </blockquote></details> Done. 
--- *[mobly/base_instrumentation_test.py, line 273 at r3](https://reviewable.io:443/reviews/google/mobly/359#-KwtWd5A7s2MTv1xMXua:-KwvSnpKGmiDs_8cMgSP:b-896fix) ([raw file](https://github.com/google/mobly/blob/8e81d458fe8269e236e04af79294e0ac8e44f1a4/mobly/base_instrumentation_test.py#L273)):* <details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote> Misspelled "message". For args, let's use the convention: ``` name: type, description. ``` So this would be: ``` error_message: string, the message to add to the TestResultRecord... ``` Change all places. </blockquote></details> Done. --- *Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/359)* <!-- Sent from Reviewable.io -->
diff --git a/README.md b/README.md index e914afe..7d22e5f 100644 --- a/README.md +++ b/README.md @@ -58,6 +58,9 @@ restrictions. ## Tutorials To get started with some simple tests, see the [Mobly tutorial](docs/tutorial.md). +To get started running single-device Android instrumentation tests with Mobly, +see the [instrumentation runner tutorial](docs/instrumentation_tutorial.md). + ## Mobly Snippet The Mobly Snippet projects let users better control Android devices. diff --git a/docs/conf.py b/docs/conf.py index 2e12371..044c857 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -79,7 +79,11 @@ language = 'en' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'tutorial.md'] +exclude_patterns = ['_build', + 'Thumbs.db', + '.DS_Store', + 'tutorial.md', + 'instrumentation_tutorial.md'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' diff --git a/docs/instrumentation_tutorial.md b/docs/instrumentation_tutorial.md new file mode 100644 index 0000000..ff3d258 --- /dev/null +++ b/docs/instrumentation_tutorial.md @@ -0,0 +1,190 @@ +# Running Android instrumentation tests with Mobly + +This tutorial shows how to write and execute Mobly tests for running Android +instrumentation tests. For more details about instrumentation tests, please refer to +https://developer.android.com/studio/test/index.html. + +## Setup Requirements + +* A computer with at least 1 USB ports. +* Mobly package and its system dependencies installed on the computer. +* One Android device that is compatible with your instrumentatation and + application apks. +* Your instrumentation and applications apks for installing. +* A working adb setup. To check, connect one Android device to the computer + and make sure it has "USB debugging" enabled. 
Make sure the device shows up + in the list printed by `adb devices`. + +## Example Name Substitutions + +Here are the names that we use in this tutorial, substitute these names with +your actual apk package and file names when using your real files: + +* The application apk : `application.apk` +* The instrumentation apk : `instrumentation_test.apk` +* The instrumentation test package : `com.example.package.test` + +## Example 1: Running Instrumentation Tests + +Assuming your apks are already installed on devices. You can just subclass the +instrumentation test class and run against your package. + +You will need a configuration file for Mobly to find your devices. + +***sample_config.yml*** + +```yaml +TestBeds: + - Name: BasicTestBed + Controllers: + AndroidDevice: '*' +``` + +***instrumentation_test.py*** + +```python +from mobly import base_instrumentation_test +from mobly import test_runner +from mobly.controllers import android_device + +class InstrumentationTest(base_instrumentation_test.BaseInstrumentationTestClass): + def setup_class(self): + self.dut = self.register_controller(android_device)[0] + + def test_instrumentation(self): + self.run_instrumentation_test(self.dut, 'com.example.package.test') + + +if __name__ == '__main__': + test_runner.main() +``` + +*To execute:* + +``` +$ python instrumentation_test.py -c sample_config.yml +``` + +*Expect*: + +The output from normally running your instrumentation tests along with a summary +of the test results. + +## Example 2: Specifying Instrumentation Options + +If your instrumentation tests use instrumentation options for controlling +behaviour, then you can put these options into your configuration file and then +fetch them when you run your instrumentatation tests. 
+ +***sample_config.yml*** + +```yaml +TestBeds: + - Name: BasicTestBed + Controllers: + AndroidDevice: '*' + TestParams: + instrumentation_option_annotation: android.support.test.filters.LargeTest + instrumentation_option_nonAnnotation: android.support.test.filters.SmallTest +``` + +***instrumentation_test.py*** + +```python +from mobly import base_instrumentation_test +from mobly import test_runner +from mobly.controllers import android_device + +class InstrumentationTest(base_instrumentation_test.BaseInstrumentationTestClass): + def setup_class(self): + self.dut = self.register_controller(android_device)[0] + self.options = self.parse_instrumentation_options(self.user_params) + + def test_instrumentation(self): + self.run_instrumentation_test(self.dut, 'com.example.package.test', + options=self.options) + + +if __name__ == '__main__': + test_runner.main() +``` + +*To execute:* + +``` +$ python instrumentation_test.py -c sample_config.yml +``` + +*Expect*: + +The output of your *LargeTest* instrumentation tests with no *SmallTest* +instrumentation test being run. + +## Example 3 Using a Custom Runner + +If you have a custom runner that you use for instrumentation tests, then you can +specify it in the *run_instrumentation_test* method call. Replace +`com.example.package.test.CustomRunner` with the fully quailied package name of +your real instrumentation runner. + +```python +def test_instrumentation(self): + self.run_instrumentation_test(self.dut, 'com.example.package.test', + runner='com.example.package.test.CustomRunner') +``` + +## Example 4: Multiple Instrumentation Runs + +If you have multiple devices that you want to run instrumentation tests +against, then you can simply call the *run_instrumentation_test* method +multiple times. If you need to distinguish between runs, then you can specify +a prefix. 
+ +***sample_config.yml*** + +```yaml +TestBeds: + - Name: TwoDeviceTestBed + Controllers: + AndroidDevice: + - serial: xyz + label: dut + - serial: abc + label: dut +``` + +***instrumentation_test.py*** + +```python +from mobly import base_instrumentation_test +from mobly import test_runner +from mobly.controllers import android_device + +class InstrumentationTest(base_instrumentation_test.BaseInstrumentationTestClass): + def setup_class(self): + self.ads = self.register_controller(android_device) + # Get all of the dut devices to run instrumentation tests against. + self.duts = android_device.get_devices(self.ads, label='dut') + + def test_instrumentation(self): + # Iterate over the dut devices with a corresponding index. + for index, dut in zip(range(len(self.duts)), self.duts): + # Specify a prefix to help disambiguate the runs. + self.run_instrumentation_test(dut, 'com.example.package.tests', + prefix='test_run_%s' % index) + + +if __name__ == '__main__': + test_runner.main() +``` + +*To execute:* + +``` +$ python instrumentation_test.py -c sample_config.yml +``` + +*Expect*: + +The output from both instrumentation runs along with an aggregated summary of +the results from both runs. diff --git a/mobly/asserts.py b/mobly/asserts.py index 9d1a6f2..ae119e5 100644 --- a/mobly/asserts.py +++ b/mobly/asserts.py @@ -31,13 +31,14 @@ _pyunit_proxy = _ProxyTest() def assert_equal(first, second, msg=None, extras=None): - """Assert an expression evaluates to true, otherwise fail the test. + """Assert the equality of objects, otherwise fail the test. Error message is "first != second" by default. Additional explanation can be supplied in the message. Args: - expr: The expression that is evaluated. + first: The first object to compare. + second: The second object to compare. msg: A string that adds additional info about the failure. extras: An optional field for extra information to be included in test result. 
@@ -98,9 +99,8 @@ def assert_raises_regex(expected_exception, extras: An optional field for extra information to be included in test result. """ - context = _AssertRaisesContext(expected_exception, - expected_regex, - extras=extras) + context = _AssertRaisesContext( + expected_exception, expected_regex, extras=extras) return context @@ -242,9 +242,12 @@ def fail(msg, extras=None): def explicit_pass(msg, extras=None): """Explicitly pass a test. - A test with not uncaught exception will pass implicitly so the usage of - this is optional. It is intended for reporting extra information when a - test passes. + This will pass the test explicitly regardless of any other error happened + in the test body. E.g. even if errors have been recorded with `expects`, + the test will still be marked pass if this is called. + + A test without uncaught exception will pass implicitly so this should be + used scarcely. Args: msg: A string explaining the details of the passed test. @@ -275,8 +278,8 @@ class _AssertRaisesContext(object): exc_name = self.expected.__name__ except AttributeError: exc_name = str(self.expected) - raise signals.TestFailure('%s not raised' % exc_name, - extras=self.extras) + raise signals.TestFailure( + '%s not raised' % exc_name, extras=self.extras) if not issubclass(exc_type, self.expected): # let unexpected exceptions pass through return False @@ -289,7 +292,7 @@ class _AssertRaisesContext(object): expected_regexp = re.compile(expected_regexp) if not expected_regexp.search(str(exc_value)): raise signals.TestFailure( - '"%s" does not match "%s"' % - (expected_regexp.pattern, str(exc_value)), + '"%s" does not match "%s"' % (expected_regexp.pattern, + str(exc_value)), extras=self.extras) return True diff --git a/mobly/controllers/android_device.py b/mobly/controllers/android_device.py index 3a76426..5a4066f 100644 --- a/mobly/controllers/android_device.py +++ b/mobly/controllers/android_device.py @@ -366,7 +366,7 @@ def get_device(ads, **kwargs): raise 
Error('More than one device matched: %s' % serials) -def take_bug_reports(ads, test_name, begin_time): +def take_bug_reports(ads, test_name, begin_time, destination=None): """Takes bug reports on a list of android devices. If you want to take a bug report, call this function with a list of @@ -379,13 +379,15 @@ def take_bug_reports(ads, test_name, begin_time): test_name: Name of the test method that triggered this bug report. begin_time: timestamp taken when the test started, can be either string or int. + destination: string, path to the directory where the bugreport + should be saved. """ begin_time = mobly_logger.normalize_log_line_timestamp(str(begin_time)) - def take_br(test_name, begin_time, ad): - ad.take_bug_report(test_name, begin_time) + def take_br(test_name, begin_time, ad, destination): + ad.take_bug_report(test_name, begin_time, destination=destination) - args = [(test_name, begin_time, ad) for ad in ads] + args = [(test_name, begin_time, ad, destination) for ad in ads] utils.concurrent_exec(take_br, args) @@ -417,9 +419,8 @@ class AndroidDevice(object): log_path_base = getattr(logging, 'log_path', '/tmp/logs') self.log_path = os.path.join(log_path_base, 'AndroidDevice%s' % serial) self._debug_tag = self.serial - self.log = AndroidDeviceLoggerAdapter(logging.getLogger(), { - 'tag': self.debug_tag - }) + self.log = AndroidDeviceLoggerAdapter(logging.getLogger(), + {'tag': self.debug_tag}) self.sl4a = None self.ed = None self._adb_logcat_process = None @@ -884,7 +885,11 @@ class AndroidDevice(object): utils.stop_standing_subprocess(self._adb_logcat_process) self._adb_logcat_process = None - def take_bug_report(self, test_name, begin_time, timetout=300): + def take_bug_report(self, + test_name, + begin_time, + timeout=300, + destination=None): """Takes a bug report on the device and stores it in a file. Args: @@ -892,6 +897,8 @@ class AndroidDevice(object): begin_time: Timestamp of when the test started. 
timeout: float, the number of seconds to wait for bugreport to complete, default is 5min. + destination: string, path to the directory where the bugreport + should be saved. """ new_br = True try: @@ -902,7 +909,10 @@ class AndroidDevice(object): new_br = False except adb.AdbError: new_br = False - br_path = os.path.join(self.log_path, 'BugReports') + if destination: + br_path = utils.abs_path(destination) + else: + br_path = os.path.join(self.log_path, 'BugReports') utils.create_dir(br_path) base_name = ',%s,%s.txt' % (begin_time, self.serial) if new_br: @@ -914,8 +924,7 @@ class AndroidDevice(object): self.wait_for_boot_completion() self.log.info('Taking bugreport for %s.', test_name) if new_br: - out = self.adb.shell( - 'bugreportz', timeout=timetout).decode('utf-8') + out = self.adb.shell('bugreportz', timeout=timeout).decode('utf-8') if not out.startswith('OK'): raise DeviceError(self, 'Failed to take bugreport: %s' % out) br_out_path = out.split(':')[1].strip() @@ -924,7 +933,7 @@ class AndroidDevice(object): # shell=True as this command redirects the stdout to a local file # using shell redirection. self.adb.bugreport( - ' > %s' % full_out_path, shell=True, timeout=timetout) + ' > %s' % full_out_path, shell=True, timeout=timeout) self.log.info('Bugreport for %s taken at %s.', test_name, full_out_path) diff --git a/mobly/controllers/android_device_lib/adb.py b/mobly/controllers/android_device_lib/adb.py index 045f43e..4162caa 100644 --- a/mobly/controllers/android_device_lib/adb.py +++ b/mobly/controllers/android_device_lib/adb.py @@ -27,6 +27,9 @@ ADB = 'adb' # do with port forwarding must happen under this lock. ADB_PORT_LOCK = threading.Lock() +# Qualified class name of the default instrumentation test runner. 
+DEFAULT_INSTRUMENTATION_RUNNER = 'com.android.common.support.test.runner.AndroidJUnitRunner' + class AdbError(Exception): """Raised when there is an error in adb operations.""" @@ -129,7 +132,7 @@ class AdbProxy(object): raise AdbTimeoutError('Timed out Adb cmd "%s". timeout: %s' % (args, timeout)) elif timeout and timeout < 0: - raise AdbTimeoutError("Timeout is a negative value: %s" % timeout) + raise AdbTimeoutError('Timeout is a negative value: %s' % timeout) (out, err) = proc.communicate() ret = proc.returncode @@ -178,6 +181,48 @@ class AdbProxy(object): with ADB_PORT_LOCK: return self._exec_adb_cmd('forward', args, shell, timeout=None) + def instrument(self, package, options=None, runner=None): + """Runs an instrumentation command on the device. + + This is a convenience wrapper to avoid parameter formatting. + + Example: + device.instrument( + 'com.my.package.test', + options = { + 'class': 'com.my.package.test.TestSuite', + }, + ) + + Args: + package: string, the package of the instrumentation tests. + options: dict, the instrumentation options including the test + class. + runner: string, the test runner name, which defaults to + DEFAULT_INSTRUMENTATION_RUNNER. + + Returns: + The output of instrumentation command. + """ + if runner is None: + runner = DEFAULT_INSTRUMENTATION_RUNNER + if options is None: + options = {} + + options_list = [] + for option_key, option_value in options.items(): + options_list.append('-e %s %s' % (option_key, option_value)) + options_string = ' '.join(options_list) + + instrumentation_command = 'am instrument -r -w %s %s/%s' % ( + options_string, + package, + runner, + ) + logging.info('AndroidDevice|%s: Executing adb shell %s', self.serial, + instrumentation_command) + return self.shell(instrumentation_command) + def __getattr__(self, name): def adb_call(args=None, shell=False, timeout=None): """Wrapper for an ADB command. 
diff --git a/mobly/expects.py b/mobly/expects.py new file mode 100644 index 0000000..67f173b --- /dev/null +++ b/mobly/expects.py @@ -0,0 +1,158 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections +import contextlib +import logging +import time + +from mobly import asserts +from mobly import records +from mobly import signals + + +class _ExpectErrorRecorder(object): + """Singleton used to store errors caught via `expect_*` functions in test. + + This class is only instantiated once as a singleton. It holds a reference + to the record object for the test currently executing. + """ + + def __init__(self): + self.reset_internal_states() + + def reset_internal_states(self, record=None): + """Resets the internal state of the recorder. + + Args: + record: records.TestResultRecord, the test record for a test. + """ + self._record = None + self._count = 0 + self._record = record + + @property + def has_error(self): + """If any error has been recorded since the last reset.""" + return self._count > 0 + + @property + def error_count(self): + """The number of errors that have been recorded since last reset.""" + return self._count + + def add_error(self, error): + """Record an error from expect APIs. + + This method generates a position stamp for the expect. The stamp is + composed of a timestamp and the number of errors recorded so far. + + Args: + error: Exception or signals.ExceptionRecord, the error to add. 
+ """ + self._count += 1 + self._record.add_error('expect@%s+%s' % (time.time(), self._count), + error) + + +def expect_true(condition, msg, extras=None): + """Expects an expression evaluates to True. + + If the expectation is not met, the test is marked as fail after its + execution finishes. + + Args: + expr: The expression that is evaluated. + msg: A string explaining the details in case of failure. + extras: An optional field for extra information to be included in test + result. + """ + try: + asserts.assert_true(condition, msg, extras) + except signals.TestSignal as e: + logging.exception('Expected a `True` value, got `False`.') + recorder.add_error(e) + + +def expect_false(condition, msg, extras=None): + """Expects an expression evaluates to False. + + If the expectation is not met, the test is marked as fail after its + execution finishes. + + Args: + expr: The expression that is evaluated. + msg: A string explaining the details in case of failure. + extras: An optional field for extra information to be included in test + result. + """ + try: + asserts.assert_false(condition, msg, extras) + except signals.TestSignal as e: + logging.exception('Expected a `False` value, got `True`.') + recorder.add_error(e) + + +def expect_equal(first, second, msg=None, extras=None): + """Expects the equality of objects, otherwise fail the test. + + If the expectation is not met, the test is marked as fail after its + execution finishes. + + Error message is "first != second" by default. Additional explanation can + be supplied in the message. + + Args: + first: The first object to compare. + second: The second object to compare. + msg: A string that adds additional info about the failure. + extras: An optional field for extra information to be included in test + result. 
+ """ + try: + asserts.assert_equal(first, second, msg, extras) + except signals.TestSignal as e: + logging.exception('Expected %s equals to %s, but they are not.', first, + second) + recorder.add_error(e) + + [email protected] +def expect_no_raises(message=None, extras=None): + """Expects no exception is raised in a context. + + If the expectation is not met, the test is marked as fail after its + execution finishes. + + A default message is added to the exception `details`. + + Args: + message: string, custom message to add to exception's `details`. + extras: An optional field for extra information to be included in test + result. + """ + try: + yield + except Exception as e: + e_record = records.ExceptionRecord(e) + if extras: + e_record.extras = extras + msg = message or 'Got an unexpected exception' + details = '%s: %s' % (msg, e_record.details) + logging.exception(details) + e_record.details = details + recorder.add_error(e_record) + + +recorder = _ExpectErrorRecorder() diff --git a/mobly/records.py b/mobly/records.py index e856c33..c708675 100644 --- a/mobly/records.py +++ b/mobly/records.py @@ -347,14 +347,17 @@ class TestResultRecord(object): Args: position: string, where this error occurred, e.g. 'teardown_test'. - e: An exception object. + e: An exception or a `signals.ExceptionRecord` object. """ if self.result != TestResultEnums.TEST_RESULT_FAIL: self.result = TestResultEnums.TEST_RESULT_ERROR if position in self.extra_errors: raise Error('An exception is already recorded with position "%s",' ' cannot reuse.' % position) - self.extra_errors[position] = ExceptionRecord(e, position=position) + if isinstance(e, ExceptionRecord): + self.extra_errors[position] = e + else: + self.extra_errors[position] = ExceptionRecord(e, position=position) def __str__(self): d = self.to_dict()
Create utils for executing Android Instrumentation tests There have been multiple requests to run regular AndroidInstrumentation tests with mobly. A sample use cases would be running tests on multiple devices and aggregate results across them. Let's create utils to make such tasks easy.
google/mobly
diff --git a/mobly/base_instrumentation_test.py b/mobly/base_instrumentation_test.py new file mode 100644 index 0000000..902fa68 --- /dev/null +++ b/mobly/base_instrumentation_test.py @@ -0,0 +1,936 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from collections import defaultdict +from enum import Enum +from mobly import base_test +from mobly import records +from mobly import signals + + +class _InstrumentationStructurePrefixes(object): + """Class containing prefixes that structure insturmentation output. + + Android instrumentation generally follows the following format: + + .. code-block:: none + + INSTRUMENTATION_STATUS: ... + ... + INSTRUMENTATION_STATUS: ... + INSTRUMENTATION_STATUS_CODE: ... + INSTRUMENTATION_STATUS: ... + ... + INSTRUMENTATION_STATUS: ... + INSTRUMENTATION_STATUS_CODE: ... + ... + INSTRUMENTATION_RESULT: ... + ... + INSTRUMENTATION_RESULT: ... + ... + INSTRUMENTATION_CODE: ... + + This means that these prefixes can be used to guide parsing + the output of the instrumentation command into the different + instrumetnation test methods. + + Refer to the following Android Framework package for more details: + + .. 
code-block:: none + + com.android.commands.am.AM + + """ + + STATUS = 'INSTRUMENTATION_STATUS:' + STATUS_CODE = 'INSTRUMENTATION_STATUS_CODE:' + RESULT = 'INSTRUMENTATION_RESULT:' + CODE = 'INSTRUMENTATION_CODE:' + FAILED = 'INSTRUMENTATION_FAILED:' + + +class _InstrumentationKnownStatusKeys(object): + """Commonly used keys used in instrumentation output for listing + instrumentation test method result properties. + + An instrumenation status line usually contains a key-value pair such as + the following: + + .. code-block:: none + + INSTRUMENTATION_STATUS: <key>=<value> + + Some of these key-value pairs are very common and represent test case + properties. This mapping is used to handle each of the corresponding + key-value pairs different than less important key-value pairs. + + Refer to the following Android Framework packages for more details: + + .. code-block:: none + + android.app.Instrumentation + android.support.test.internal.runner.listener.InstrumentationResultPrinter + + """ + + CLASS = 'class' + ERROR = 'Error' + STACK = 'stack' + TEST = 'test' + STREAM = 'stream' + + +class _InstrumentationStatusCodes(object): + """A mapping of instrumentation status codes to test method results. + + When instrumentation runs, at various points ouput is created in a serias + of blocks that terminate as follows: + + .. code-block:: none + + INSTRUMENTATION_STATUS_CODE: 1 + + These blocks typically have several status keys in them, and they indicate + the progression of a particular instrumentation test method. When the + corresponding instrumentation test method finishes, there is generally a + line which includes a status code that gives thes the test result. + + The UNKNOWN status code is not an actual status code and is only used to + represent that a status code has not yet been read for an instrumentation + block. + + Refer to the following Android Framework package for more details: + + .. 
code-block:: none + + android.support.test.internal.runner.listener.InstrumentationResultPrinter + + """ + + UNKNOWN = None + OK = '0' + START = '1' + IN_PROGRESS = '2' + ERROR = '-1' + FAILURE = '-2' + IGNORED = '-3' + ASSUMPTION_FAILURE = '-4' + + +class _InstrumentationStatusCodeCategories(object): + """A mapping of instrumentation test method results to categories. + + Aside from the TIMING category, these categories roughly map to Mobly + signals and are used for determining how a particular instrumentation test + method gets recorded. + """ + + TIMING = [ + _InstrumentationStatusCodes.START, + _InstrumentationStatusCodes.IN_PROGRESS, + ] + PASS = [ + _InstrumentationStatusCodes.OK, + ] + FAIL = [ + _InstrumentationStatusCodes.ERROR, + _InstrumentationStatusCodes.FAILURE, + ] + SKIPPED = [ + _InstrumentationStatusCodes.IGNORED, + _InstrumentationStatusCodes.ASSUMPTION_FAILURE, + ] + + +class _InstrumentationKnownResultKeys(object): + """Commonly used keys for outputting instrumentation errors. + + When instrumentation finishes running all of the instrumentation test + methods, a result line will appear as follows: + + .. code-block:: none + + INSTRUMENTATION_RESULT: + + If something wrong happened during the instrumentation run such as an + application under test crash, the the line will appear similarly as thus: + + .. code-block:: none + + INSTRUMENTATION_RESULT: shortMsg=Process crashed. + + Since these keys indicate that something wrong has happened to the + instrumentation run, they should be checked for explicitly. + + Refer to the following documentation page for more information: + + .. code-block:: none + + https://developer.android.com/reference/android/app/ActivityManager.ProcessErrorStateInfo.html + + """ + + LONGMSG = 'longMsg' + SHORTMSG = 'shortMsg' + + +class _InstrumentationResultSignals(object): + """Instrumenttion result block strings for signalling run completion. 
+ + The final section of the instrumentation output generally follows this + format: + + .. code-block:: none + + INSTRUMENTATION_RESULT: stream= + ... + INSTRUMENTATION_CODE -1 + + Inside of the ellipsed section, one of these signaling strings should be + present. If they are not present, this usually means that the + instrumentation run has failed in someway such as a crash. Because the + final instrumentation block simply summarizes information, simply roughly + checking for a particilar string should be sufficient to check to a proper + run completion as the contents of the instrumentation result block don't + really matter. + + Refer to the following JUnit package for more details: + + .. code-block:: none + + junit.textui.ResultPrinter + + """ + + FAIL = 'FAILURES!!!' + PASS = 'OK (' + + +class _InstrumentationBlockStates(Enum): + """States used for determing what the parser is currently parsing. + + The parse always starts and ends a block in the UKNOWN state, which is + used to indicate that either a method or a result block (matching the + METHOD and RESULT states respectively) are valid follow ups, which means + that parser should be checking for a structure prefix that indicates which + of those two states it should transition to. If the parser is in the + METHOD state, then the parser will be parsing input into test methods. + Otherwise, the parse can simply concatenate all the input to check for + some final run completion signals. + """ + + UNKNOWN = 0 + METHOD = 1 + RESULT = 2 + + +class _InstrumentationBlock(object): + """Container class for parsed instrumentation output for instrumentation + test methods. + + Instrumentation test methods typically follow the follwoing format: + + .. code-block:: none + + INSTRUMENTATION_STATUS: <key>=<value> + ... 
+ INSTRUMENTATION_STATUS: <key>=<value> + INSTRUMENTATION_STATUS_CODE: <status code #> + + The main issue with parsing this however is that the key-value pairs can + span multiple lines such as this: + + .. code-block:: none + + INSTRUMENTATION_STATUS: stream= + Error in ... + ... + + Or, such as this: + + .. code-block:: none + + INSTRUMENTATION_STATUS: stack=... + ... + + Because these keys are poentially very long, constant string contatention + is potentially inefficent. Instead, this class builds up a buffer to store + the raw ouput until it is processed into an actual test result by the + _InstrumentationBlockFormatter class. + + Additionally, this class also serves to store the parser state, which + means that the BaseInstrumentationTestClass does not need to keep any + potentially volatile instrumentation related state, so multiple + instrumentation runs should have completely separate parsing states. + + This class is also used for storing result blocks although very little + needs to be done for those. + + Attributes: + current_key: string, the current key that is being parsed, default to + _InstrumentationKnownStatusKeys.STREAM. + error_message: string, an error message indicating that something + unexpected happened during a instrumentatoin test method. + known_keys: dict, well known keys that are handled uniquely. + prefix: string, a prefix to add to the class name of the + instrumentation test methods. + previous_instrumentation_block: _InstrumentationBlock, the last parsed + instrumentation block. + state: _InstrumentationBlockStates, the current state of the parser. + status_code: string, the state code for an instrumentation method + block. + unknown_keys: dict, arbitrary keys that are handled generically. 
+ """ + + def __init__(self, + state=_InstrumentationBlockStates.UNKNOWN, + prefix=None, + previous_instrumentation_block=None): + self.state = state + self.prefix = prefix + self.previous_instrumentation_block = previous_instrumentation_block + + self._empty = True + self.error_message = '' + self.status_code = _InstrumentationStatusCodes.UNKNOWN + + self.current_key = _InstrumentationKnownStatusKeys.STREAM + self.known_keys = { + _InstrumentationKnownStatusKeys.STREAM: [], + _InstrumentationKnownStatusKeys.CLASS: [], + _InstrumentationKnownStatusKeys.ERROR: [], + _InstrumentationKnownStatusKeys.STACK: [], + _InstrumentationKnownStatusKeys.TEST: [], + _InstrumentationKnownResultKeys.LONGMSG: [], + _InstrumentationKnownResultKeys.SHORTMSG: [], + } + self.unknown_keys = defaultdict(list) + + @property + def is_empty(self): + """Deteremines whether or not anything has been parsed with this + instrumentation block. + + Returns: + A boolean indicating whether or not the this instrumentation block + has parsed and contains any output. + """ + return self._empty + + def set_error_message(self, error_message): + """Sets an error message on an instrumentation block. + + This method is used exclusively to indicate that a test method failed + to complete, which is usually cause by a crash of some sort such that + the test method is marked as error instead of ignored. + + Args: + error_message: string, an error message to be added to the + TestResultRecord to explain that something wrong happened. + """ + self._empty = False + self.error_message = error_message + + def _remove_structure_prefix(self, prefix, line): + """Helper function for removing the structure prefix for parsing. + + Args: + prefix: string, a _InstrumentationStructurePrefixes to remove from + the raw output. + line: string, the raw line from the instrumentation output. + + Returns: + A string containing a key value pair descripting some property + of the current instrumentation test method. 
+ """ + return line[len(prefix):].strip() + + def set_status_code(self, status_code_line): + """Sets the status code for the instrumentation test method, used in + determining the test result. + + Args: + status_code_line: string, the raw instrumentation output line that + contains the status code of the instrumentation block. + """ + self._empty = False + self.status_code = self._remove_structure_prefix( + _InstrumentationStructurePrefixes.STATUS_CODE, + status_code_line, + ) + + def set_key(self, structure_prefix, key_line): + """Sets the current key for the instrumentation block. + + For unknown keys, the key is added to the value list in order to + better contextualize the value in the output. + + Args: + structure_prefix: string, the structure prefix that was matched + and that needs to be removed. + key_line: string, the raw instrumentation ouput line that contains + the key-value pair. + """ + self._empty = False + key_value = self._remove_structure_prefix( + structure_prefix, + key_line, + ) + if '=' in key_value: + (key, value) = key_value.split('=') + self.current_key = key + if key in self.known_keys: + self.known_keys[key].append(value) + else: + self.unknown_keys[key].append(key_value) + + def add_value(self, line): + """Adds unstructured or multi-line value output to the current parsed + instrumentation block for outputting later. + + Usually, this will add extra lines to the value list for the current + key-value pair. However, sometimes, such as when instrumentation + failed to start, output does not follow the structured prefix format. + In this case, adding all of the output is still useful so that a user + can debug the issue. + + Args: + line: string, the raw instrumentation line to append to the value + list. 
+ """ + self._empty = False + if self.current_key in self.known_keys: + self.known_keys[self.current_key].append(line) + else: + self.unknown_keys[self.current_key].append(line) + + def transition_state(self, new_state): + """Transitions or sets the current instrumentation block to the new + parser state. + + Args: + new_state: _InstrumentationBlockStates, the state that the parser + should transition to. + + Returns: + A new instrumentation block set to the new state, representing + the start of parsing a new instrumentation test method. + Alternatively, if the current instrumentation block represents the + start of parsing a new instrumentation block (state UKNOWN), then + this returns the current instrumentation block set to the now + known parsing state. + """ + if self.state == _InstrumentationBlockStates.UNKNOWN: + self.state = new_state + return self + else: + return _InstrumentationBlock( + state=new_state, + prefix=self.prefix, + previous_instrumentation_block=self, + ) + + +class _InstrumentationBlockFormatter(object): + """Takes an instrumentation block and converts it into a Mobly test + result. + """ + + DEFAULT_INSTRUMENTATION_METHOD_NAME = 'instrumentation_method' + + def __init__(self, instrumentation_block): + self._prefix = instrumentation_block.prefix + self._status_code = instrumentation_block.status_code + self._error_message = instrumentation_block.error_message + self._known_keys = {} + self._unknown_keys = {} + for key, value in instrumentation_block.known_keys.items(): + self._known_keys[key] = '\n'.join( + instrumentation_block.known_keys[key]) + for key, value in instrumentation_block.unknown_keys.items(): + self._unknown_keys[key] = '\n'.join( + instrumentation_block.unknown_keys[key]) + + def _get_name(self): + """Gets the method name of the test method for the instrumentation + method block. + + Returns: + A string containing the name of the instrumentation test method's + test or a default name if no name was parsed. 
+ """ + if self._known_keys[_InstrumentationKnownStatusKeys.TEST]: + return self._known_keys[_InstrumentationKnownStatusKeys.TEST] + else: + return self.DEFAULT_INSTRUMENTATION_METHOD_NAME + + def _get_class(self): + """Gets the class name of the test method for the instrumentation + method block. + + Returns: + A string containing the class name of the instrumentation test + method's test or empty string if no name was parsed. If a prefix + was specified, then the prefix will be prepended to the class + name. + """ + class_parts = [ + self._prefix, + self._known_keys[_InstrumentationKnownStatusKeys.CLASS] + ] + return '.'.join(filter(None, class_parts)) + + def _get_full_name(self): + """Gets the qualified name of the test method corresponding to the + instrumentation block. + + Returns: + A string containing the fully qualified name of the + instrumentation test method. If parts are missing, then degrades + steadily. + """ + full_name_parts = [self._get_class(), self._get_name()] + return '#'.join(filter(None, full_name_parts)) + + def _get_details(self): + """Gets the ouput for the detail section of the TestResultRecord. + + Returns: + A string to set for a TestResultRecord's details. + """ + detail_parts = [self._get_full_name(), self._error_message] + return '\n'.join(filter(None, detail_parts)) + + def _get_extras(self): + """Gets the output for the extras section of the TestResultRecord. + + Returns: + A string to set for a TestResultRecord's extras. + """ + # Add empty line to start key-value pairs on a new line. 
+ extra_parts = [''] + + for value in self._unknown_keys.values(): + extra_parts.append(value) + + extra_parts.append( + self._known_keys[_InstrumentationKnownStatusKeys.STREAM]) + extra_parts.append( + self._known_keys[_InstrumentationKnownResultKeys.SHORTMSG]) + extra_parts.append( + self._known_keys[_InstrumentationKnownResultKeys.LONGMSG]) + extra_parts.append( + self._known_keys[_InstrumentationKnownStatusKeys.ERROR]) + + if self._known_keys[ + _InstrumentationKnownStatusKeys.STACK] not in self._known_keys[ + _InstrumentationKnownStatusKeys.STREAM]: + extra_parts.append( + self._known_keys[_InstrumentationKnownStatusKeys.STACK]) + + return '\n'.join(filter(None, extra_parts)) + + def _is_failed(self): + """Determines if the test corresponding to the instrumentation block + failed. + + This method can not be used to tell if a test method passed and + should not be used for such a purpose. + + Returns: + A boolean indicating if the test method failed. + """ + if self._status_code in _InstrumentationStatusCodeCategories.FAIL: + return True + elif (self._known_keys[_InstrumentationKnownStatusKeys.STACK] + and self._status_code != + _InstrumentationStatusCodes.ASSUMPTION_FAILURE): + return True + elif self._known_keys[_InstrumentationKnownStatusKeys.ERROR]: + return True + elif self._known_keys[_InstrumentationKnownResultKeys.SHORTMSG]: + return True + elif self._known_keys[_InstrumentationKnownResultKeys.LONGMSG]: + return True + else: + return False + + def create_test_record(self, mobly_test_class): + """Creates a TestResultRecord for the instrumentation block. + + Args: + mobly_test_class: string, the name of the Mobly test case + executing the instrumentation run. + + Returns: + A TestResultRecord with an appropriate signals exception + representing the instrumentation test method's result status. 
+ """ + details = self._get_details() + extras = self._get_extras() + + tr_record = records.TestResultRecord( + t_name=self._get_full_name(), + t_class=mobly_test_class, + ) + if self._is_failed(): + tr_record.test_fail( + e=signals.TestFailure(details=details, extras=extras)) + elif self._status_code in _InstrumentationStatusCodeCategories.SKIPPED: + tr_record.test_skip( + e=signals.TestSkip(details=details, extras=extras)) + elif self._status_code in _InstrumentationStatusCodeCategories.PASS: + tr_record.test_pass( + e=signals.TestPass(details=details, extras=extras)) + elif self._status_code in _InstrumentationStatusCodeCategories.TIMING: + if self._error_message: + tr_record.test_error( + e=signals.TestError(details=details, extras=extras)) + else: + tr_record = None + else: + tr_record.test_error( + e=signals.TestError(details=details, extras=extras)) + if self._known_keys[_InstrumentationKnownStatusKeys.STACK]: + tr_record.termination_signal.stacktrace = self._known_keys[ + _InstrumentationKnownStatusKeys.STACK] + return tr_record + + def has_completed_result_block_format(self, error_message): + """Checks the instrumentation result block for a signal indicating + normal completion. + + Args: + error_message: string, the error message to give if the + instrumentation run did not complete successfully.- + + Returns: + A boolean indicating whether or not the instrumentation run passed + or failed overall. + + Raises: + signals.TestError: Error raised if the instrumentation run did not + complete because of a crash or some other issue. + """ + extras = self._get_extras() + if _InstrumentationResultSignals.PASS in extras: + return True + elif _InstrumentationResultSignals.FAIL in extras: + return False + else: + raise signals.TestError(details=error_message, extras=extras) + + +class BaseInstrumentationTestClass(base_test.BaseTestClass): + """Base class for all instrumentation test claseses to inherit from. 
+ + This class extends the BaseTestClass to add functionality to run and parse + the output of instrumentation runs. + + Attributes: + DEFAULT_INSTRUMENTATION_OPTION_PREFIX: string, the default prefix for + instrumentation params contained within user params. + DEFAULT_INSTRUMENTATION_ERROR_MESSAGE: string, the default error + message to set if something has prevented something in the + instrumentation test run from completing properly. + """ + + DEFAULT_INSTRUMENTATION_OPTION_PREFIX = 'instrumentation_option_' + DEFAULT_INSTRUMENTATION_ERROR_MESSAGE = ('instrumentation run exited ' + 'unexpectedly') + + def _previous_block_never_completed(self, current_block, previous_block, + new_state): + """Checks if the previous instrumentation method block completed. + + Args: + current_block: _InstrumentationBlock, the current instrumentation + block to check for being a different instrumentation test + method. + previous_block: _InstrumentationBlock, rhe previous + instrumentation block to check for an incomplete status. + new_state: _InstrumentationBlockStates, the next state for the + parser, used to check for the instrumentation run ending + with an incomplete test. + + Returns: + A boolean indicating whether the previous instrumentation block + completed executing. + """ + if previous_block: + previously_timing_block = ( + previous_block.status_code in + _InstrumentationStatusCodeCategories.TIMING) + currently_new_block = ( + current_block.status_code == _InstrumentationStatusCodes.START + or new_state == _InstrumentationBlockStates.RESULT) + return all([previously_timing_block, currently_new_block]) + else: + return False + + def _create_formatters(self, instrumentation_block, new_state): + """Creates the _InstrumentationBlockFormatters for outputting the + instrumentation method block that have finished parsing. + + Args: + instrumentation_block: _InstrumentationBlock, the current + instrumentation method block to create formatters based upon. 
+ new_state: _InstrumentationBlockState, the next state that the + parser will transition to. + + Returns: + A list of the formatters tha need to create and add + TestResultRecords to the test results. + """ + formatters = [] + if self._previous_block_never_completed( + current_block=instrumentation_block, + previous_block=instrumentation_block. + previous_instrumentation_block, + new_state=new_state): + instrumentation_block.previous_instrumentation_block.set_error_message( + self.DEFAULT_INSTRUMENTATION_ERROR_MESSAGE) + formatters.append( + _InstrumentationBlockFormatter( + instrumentation_block.previous_instrumentation_block)) + + if not instrumentation_block.is_empty: + formatters.append( + _InstrumentationBlockFormatter(instrumentation_block)) + return formatters + + def _transition_instrumentation_block( + self, + instrumentation_block, + new_state=_InstrumentationBlockStates.UNKNOWN): + """Transitions and finishes the current instrumentation block. + + Args: + instrumentation_block: _InstrumentationBlock, the current + instrumentation block to finish. + new_state: _InstrumentationBlockState, the next state for the + parser to transition to. + + Returns: + The new instrumentation block to use for storing parsed + instrumentation ouput. + """ + formatters = self._create_formatters(instrumentation_block, new_state) + for formatter in formatters: + test_record = formatter.create_test_record(self.TAG) + if test_record: + self.results.add_record(test_record) + self.summary_writer.dump(test_record.to_dict(), + records.TestSummaryEntryType.RECORD) + return instrumentation_block.transition_state(new_state=new_state) + + def _parse_method_block_line(self, instrumentation_block, line): + """Parses the instrumnetation method block's line. + + Args: + instrumentation_block: _InstrumentationBlock, the current + instrumentation method block. + line: string, the raw instrumentation output line to parse. 
+ + Returns: + The next instrumentation block, which should be used to continue + parsing instrumentation output. + """ + if line.startswith(_InstrumentationStructurePrefixes.STATUS): + instrumentation_block.set_key( + _InstrumentationStructurePrefixes.STATUS, line) + return instrumentation_block + elif line.startswith(_InstrumentationStructurePrefixes.STATUS_CODE): + instrumentation_block.set_status_code(line) + return self._transition_instrumentation_block( + instrumentation_block) + elif line.startswith(_InstrumentationStructurePrefixes.RESULT): + # Unexpected transition from method block -> result block + instrumentation_block.set_key( + _InstrumentationStructurePrefixes.RESULT, line) + return self._parse_result_line( + self._transition_instrumentation_block( + instrumentation_block, + new_state=_InstrumentationBlockStates.RESULT, + ), + line, + ) + else: + instrumentation_block.add_value(line) + return instrumentation_block + + def _parse_result_block_line(self, instrumentation_block, line): + """Parses the instrumentation result block's line. + + Args: + instrumentation_block: _InstrumentationBlock, the instrumentation + result block for the instrumentation run. + line: string, the raw instrumentation output to add to the + instrumenation result block's _InstrumentationResultBlocki + object. + + Returns: + The instrumentation result block for the instrumentation run. + """ + instrumentation_block.add_value(line) + return instrumentation_block + + def _parse_unknown_block_line(self, instrumentation_block, line): + """Parses a line from the instrumentation output from the UNKNOWN + parser state. + + Args: + instrumentation_block: _InstrumentationBlock, the current + instrumenation block, where the correct categorization it noti + yet known. + line: string, the raw instrumenation output line to be used to + deteremine the correct categorization. + + Returns: + The next instrumentation block to continue parsing with. 
Usually, + this is the same instrumentation block but with the state + transitioned appropriately. + """ + if line.startswith(_InstrumentationStructurePrefixes.STATUS): + return self._parse_method_block_line( + self._transition_instrumentation_block( + instrumentation_block, + new_state=_InstrumentationBlockStates.METHOD, + ), + line, + ) + elif (line.startswith(_InstrumentationStructurePrefixes.RESULT) + or _InstrumentationStructurePrefixes.FAILED in line): + return self._parse_result_block_line( + self._transition_instrumentation_block( + instrumentation_block, + new_state=_InstrumentationBlockStates.RESULT, + ), + line, + ) + else: + # This would only really execute if instrumentation failed to start. + instrumentation_block.add_value(line) + return instrumentation_block + + def _parse_line(self, instrumentation_block, line): + """Parses an arbitary line from the instrumentation output based upon + the current parser state. + + Args: + instrumentation_block: _InstrumentationBlock, an instrumentation + block with any of the possible parser states. + line: string, the raw instrumentation output line to parse + appropriately. + + Returns: + The next instrumenation block to continue parsing with. + """ + if instrumentation_block.state == _InstrumentationBlockStates.METHOD: + return self._parse_method_block_line(instrumentation_block, line) + elif instrumentation_block.state == _InstrumentationBlockStates.RESULT: + return self._parse_result_block_line(instrumentation_block, line) + else: + return self._parse_unknown_block_line(instrumentation_block, line) + + def _finish_parsing(self, instrumentation_block): + """Finishes parsing the instrumentation result block for the final + instrumentation run status. + + Args: + instrumentation_block: _InstrumentationBlock, the instrumentation + result block for the instrumenation run. Potentially, thisi + could actually be method block if the instrumentation outputi + is malformed. 
+ + Returns: + A boolean indicating whether the instrumentation run completed + with all the tests passing. + + Raises: + signals.TestError: Error raised if the instrumentation failed to + complete with either a pass or fail status. + """ + formatter = _InstrumentationBlockFormatter(instrumentation_block) + return formatter.has_completed_result_block_format( + self.DEFAULT_INSTRUMENTATION_ERROR_MESSAGE) + + def parse_instrumentation_options(self, parameters=None): + """Returns the options for the instrumentation test from user_params. + + By default, this method assume that the correct instrumentation options + all start with DEFAULT_INSTRUMENTATION_OPTION_PREFIX. + + Args: + parameters: dict, the key value pairs representing an assortment + of parameters including instrumentation options. Usually, + this argument will be from self.user_params. + + Returns: + A dictionary of options/parameters for the instrumentation tst. + """ + if parameters is None: + return {} + + filtered_parameters = {} + for parameter_key, parameter_value in parameters.items(): + if parameter_key.startswith( + self.DEFAULT_INSTRUMENTATION_OPTION_PREFIX): + option_key = parameter_key[len( + self.DEFAULT_INSTRUMENTATION_OPTION_PREFIX):] + filtered_parameters[option_key] = parameter_value + return filtered_parameters + + def run_instrumentation_test(self, + device, + package, + options=None, + prefix=None, + runner=None): + """Runs instrumentation tests on a device and creates test records. + + Args: + device: AndroidDevice, the device to run instrumentation tests on. + package: string, the package name of the instrumentation tests. + options: dict, Instrumentation options for the instrumentation + tests. + prefix: string, an optional prefix for parser output for + distinguishing between instrumentation test runs. + runner: string, the runner to use for the instrumentation package, + default to DEFAULT_INSTRUMENTATION_RUNNER. 
+ + Returns: + A boolean indicating whether or not all the instrumentation test + methods passed. + + Raises: + TestError if the instrumentation run crashed or if parsing the + output failed. + """ + instrumentation_output = device.adb.instrument( + package=package, + options=options, + runner=runner, + ) + logging.info('Outputting instrumentation test log...') + logging.info(instrumentation_output) + + # TODO(winterfrosts): Implement online output generation and parsing. + instrumentation_block = _InstrumentationBlock(prefix=prefix) + for line in instrumentation_output.splitlines(): + instrumentation_block = self._parse_line(instrumentation_block, + line) + return self._finish_parsing(instrumentation_block) diff --git a/mobly/base_test.py b/mobly/base_test.py index a4045df..e9be08c 100644 --- a/mobly/base_test.py +++ b/mobly/base_test.py @@ -19,6 +19,7 @@ import inspect import logging import sys +from mobly import expects from mobly import records from mobly import signals from mobly import runtime_test_info @@ -345,7 +346,9 @@ class BaseTestClass(object): tr_record.test_begin() self.current_test_info = runtime_test_info.RuntimeTestInfo( test_name, self.log_path, tr_record) + expects.recorder.reset_internal_states(tr_record) logging.info('%s %s', TEST_CASE_TOKEN, test_name) + # Did teardown_test throw an error. teardown_test_failed = False try: try: @@ -366,6 +369,7 @@ class BaseTestClass(object): self.current_test_name) raise finally: + before_count = expects.recorder.error_count try: self._teardown_test(test_name) except signals.TestAbortSignal: @@ -374,6 +378,10 @@ class BaseTestClass(object): logging.exception(e) tr_record.add_error('teardown_test', e) teardown_test_failed = True + else: + # Check if anything failed by `expects`. 
+ if before_count < expects.recorder.error_count: + teardown_test_failed = True except (signals.TestFailure, AssertionError) as e: tr_record.test_fail(e) except signals.TestSkip as e: @@ -390,7 +398,12 @@ class BaseTestClass(object): # Exception happened during test. tr_record.test_error(e) else: - if not teardown_test_failed: + # No exception is thrown from test and teardown, if `expects` has + # error, the test should fail with the first error in `expects`. + if expects.recorder.has_error and not teardown_test_failed: + tr_record.test_fail() + # Otherwise the test passed. + elif not teardown_test_failed: tr_record.test_pass() finally: tr_record.update_record() diff --git a/mobly/test_runner.py b/mobly/test_runner.py index 4cebda7..cf70258 100644 --- a/mobly/test_runner.py +++ b/mobly/test_runner.py @@ -124,7 +124,7 @@ def main(argv=None): def _find_test_class(): """Finds the test class in a test script. - Walk through module memebers and find the subclass of BaseTestClass. Only + Walk through module members and find the subclass of BaseTestClass. Only one subclass is allowed in a test script. Returns: @@ -392,7 +392,8 @@ class TestRunner(object): Raises: ControllerError: * The controller module has already been registered. - * The actual number of objects instantiated is less than the `min_number`. + * The actual number of objects instantiated is less than the + * `min_number`. * `required` is True and no corresponding config can be found. * Any other error occurred in the registration process. diff --git a/tests/mobly/base_instrumentation_test_test.py b/tests/mobly/base_instrumentation_test_test.py new file mode 100755 index 0000000..32a5343 --- /dev/null +++ b/tests/mobly/base_instrumentation_test_test.py @@ -0,0 +1,991 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import mock +import shutil +import tempfile + +from future.tests.base import unittest + +from mobly.base_instrumentation_test import BaseInstrumentationTestClass +from mobly import config_parser +from mobly import signals + +# A mock test package for instrumentation. +MOCK_TEST_PACKAGE = 'com.my.package.test' +# A random prefix to test that prefixes are added properly. +MOCK_PREFIX = 'my_prefix' +# A mock name for the instrumentation test subclass. +MOCK_INSTRUMENTATION_TEST_CLASS_NAME = 'MockInstrumentationTest' + + +class MockInstrumentationTest(BaseInstrumentationTestClass): + def __init__(self, tmp_dir, user_params={}): + mock_test_run_configs = config_parser.TestRunConfig() + mock_test_run_configs.summary_writer = mock.Mock() + mock_test_run_configs.log_path = tmp_dir + mock_test_run_configs.user_params = user_params + mock_test_run_configs.reporter = mock.MagicMock() + super(MockInstrumentationTest, self).__init__(mock_test_run_configs) + + def run_mock_instrumentation_test(self, instrumentation_output, prefix): + mock_device = mock.Mock() + mock_device.adb = mock.Mock() + mock_device.adb.instrument = mock.MagicMock( + return_value=instrumentation_output) + return self.run_instrumentation_test( + mock_device, MOCK_TEST_PACKAGE, prefix=prefix) + + +class InstrumentationResult(object): + def __init__(self): + self.error = None + self.completed_and_passed = False + self.executed = [] + self.skipped = [] + + +class BaseInstrumentationTestTest(unittest.TestCase): + def setUp(self): + self.tmp_dir = tempfile.mkdtemp() + + def tearDown(self): + 
shutil.rmtree(self.tmp_dir) + + def assert_parse_instrumentation_options(self, user_params, + expected_instrumentation_options): + mit = MockInstrumentationTest(self.tmp_dir, user_params) + instrumentation_options = mit.parse_instrumentation_options( + mit.user_params) + self.assertEqual(instrumentation_options, + expected_instrumentation_options) + + def test_parse_instrumentation_options_with_no_user_params(self): + self.assert_parse_instrumentation_options({}, {}) + + def test_parse_instrumentation_options_with_no_instrumentation_params( + self): + self.assert_parse_instrumentation_options( + { + 'param1': 'val1', + 'param2': 'val2', + }, + {}, + ) + + def test_parse_instrumentation_options_with_only_instrumentation_params( + self): + self.assert_parse_instrumentation_options( + { + 'instrumentation_option_key1': 'value1', + 'instrumentation_option_key2': 'value2', + }, + {'key1': 'value1', + 'key2': 'value2'}, + ) + + def test_parse_instrumentation_options_with_mixed_user_params(self): + self.assert_parse_instrumentation_options( + { + 'param1': 'val1', + 'param2': 'val2', + 'instrumentation_option_key1': 'value1', + 'instrumentation_option_key2': 'value2', + }, + {'key1': 'value1', + 'key2': 'value2'}, + ) + + def run_instrumentation_test(self, instrumentation_output, prefix=None): + mit = MockInstrumentationTest(self.tmp_dir) + result = InstrumentationResult() + try: + result.completed_and_passed = mit.run_mock_instrumentation_test( + instrumentation_output, prefix=prefix) + except signals.TestError as e: + result.error = e + result.executed = mit.results.executed + result.skipped = mit.results.skipped + return result + + def assert_equal_test(self, actual_test, expected_test): + (expected_test_name, expected_signal) = expected_test + self.assertEqual(actual_test.test_class, + MOCK_INSTRUMENTATION_TEST_CLASS_NAME) + self.assertEqual(actual_test.test_name, expected_test_name) + self.assertIsInstance(actual_test.termination_signal.exception, + expected_signal) 
+ + def assert_run_instrumentation_test(self, + instrumentation_output, + expected_executed=[], + expected_skipped=[], + expected_completed_and_passed=False, + expected_has_error=False, + prefix=None): + result = self.run_instrumentation_test( + instrumentation_output, prefix=prefix) + if expected_has_error: + self.assertIsInstance(result.error, signals.TestError) + else: + self.assertIsNone(result.error) + self.assertEquals(result.completed_and_passed, + expected_completed_and_passed) + self.assertEqual(len(result.executed), len(expected_executed)) + for actual_test, expected_test in zip(result.executed, + expected_executed): + self.assert_equal_test(actual_test, expected_test) + self.assertEqual(len(result.skipped), len(expected_skipped)) + for actual_test, expected_test in zip(result.skipped, + expected_skipped): + self.assert_equal_test(actual_test, expected_test) + + def test_run_instrumentation_test_with_invalid_syntax(self): + instrumentation_output = """\ +usage: am [subcommand] [options] +usage: am start [-D] [-N] [-W] [-P <FILE>] [--start-profiler <FILE>] + [--sampling INTERVAL] [-R COUNT] [-S] + +am start: start an Activity. Options are: + -D: enable debugging + +am startservice: start a Service. Options are: + --user <USER_ID> | current: Specify which user to run as; if not + specified then run as the current user. + +am task lock: bring <TASK_ID> to the front and don't allow other tasks to run. + +<INTENT> specifications include these flags and arguments: + [-a <ACTION>] [-d <DATA_URI>] [-t <MIME_TYPE>] + [-c <CATEGORY> [-c <CATEGORY>] ...] 
+ +Error: Bad component name: / +""" + self.assert_run_instrumentation_test( + instrumentation_output, expected_has_error=True) + + def test_run_instrumentation_test_with_no_output(self): + instrumentation_output = """\ +""" + self.assert_run_instrumentation_test( + instrumentation_output, expected_has_error=True) + + def test_run_instrumentation_test_with_missing_test_package(self): + instrumentation_output = """\ +android.util.AndroidException: INSTRUMENTATION_FAILED: com.my.package.test/com.my.package.test.runner.MyRunner + at com.android.commands.am.Am.runInstrument(Am.java:897) + at com.android.commands.am.Am.onRun(Am.java:405) + at com.android.internal.os.BaseCommand.run(BaseCommand.java:51) + at com.android.commands.am.Am.main(Am.java:124) + at com.android.internal.os.RuntimeInit.nativeFinishInit(Native Method) + at com.android.internal.os.RuntimeInit.main(RuntimeInit.java:262) +INSTRUMENTATION_STATUS: id=ActivityManagerService +INSTRUMENTATION_STATUS: Error=Unable to find instrumentation info for: ComponentInfo{com.my.package.test/com.my.package.test.runner.MyRunner} +INSTRUMENTATION_STATUS_CODE: -1""" + self.assert_run_instrumentation_test( + instrumentation_output, expected_has_error=True) + + def test_run_instrumentation_test_with_missing_runner(self): + instrumentation_output = """\ +android.util.AndroidException: INSTRUMENTATION_FAILED: com.my.package.test/com.my.package.test.runner +INSTRUMENTATION_STATUS: id=ActivityManagerService +INSTRUMENTATION_STATUS: Error=Unable to find instrumentation info for: ComponentInfo{com.my.package.test/com.my.package.test.runner} +INSTRUMENTATION_STATUS_CODE: -1 + at com.android.commands.am.Am.runInstrument(Am.java:897) + at com.android.commands.am.Am.onRun(Am.java:405) + at com.android.internal.os.BaseCommand.run(BaseCommand.java:51) + at com.android.commands.am.Am.main(Am.java:124) + at com.android.internal.os.RuntimeInit.nativeFinishInit(Native Method) + at 
com.android.internal.os.RuntimeInit.main(RuntimeInit.java:262)""" + self.assert_run_instrumentation_test( + instrumentation_output, expected_has_error=True) + + def test_run_instrumentation_test_with_no_tests(self): + instrumentation_output = """\ +INSTRUMENTATION_RESULT: stream= + +Time: 0.001 + +OK (0 tests) + + +INSTRUMENTATION_CODE: -1 +""" + self.assert_run_instrumentation_test( + instrumentation_output, expected_completed_and_passed=True) + + def test_run_instrumentation_test_with_passing_test(self): + instrumentation_output = """\ +INSTRUMENTATION_STATUS: numtests=1 +INSTRUMENTATION_STATUS: stream= +com.my.package.test.BasicTest: +INSTRUMENTATION_STATUS: id=AndroidJUnitRunner +INSTRUMENTATION_STATUS: test=basicTest +INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest +INSTRUMENTATION_STATUS: current=1 +INSTRUMENTATION_STATUS_CODE: 1 +INSTRUMENTATION_STATUS: numtests=1 +INSTRUMENTATION_STATUS: stream=. +INSTRUMENTATION_STATUS: id=AndroidJUnitRunner +INSTRUMENTATION_STATUS: test=basicTest +INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest +INSTRUMENTATION_STATUS: current=1 +INSTRUMENTATION_STATUS_CODE: 0 +INSTRUMENTATION_RESULT: stream= + +Time: 0.214 + +OK (1 test) + + +INSTRUMENTATION_CODE: -1 +""" + expected_executed = [ + ('com.my.package.test.BasicTest#basicTest', signals.TestPass), + ] + self.assert_run_instrumentation_test( + instrumentation_output, + expected_executed=expected_executed, + expected_completed_and_passed=True) + + def test_run_instrumentation_test_with_prefix_test(self): + instrumentation_output = """\ +INSTRUMENTATION_STATUS: numtests=1 +INSTRUMENTATION_STATUS: stream= +com.my.package.test.BasicTest: +INSTRUMENTATION_STATUS: id=AndroidJUnitRunner +INSTRUMENTATION_STATUS: test=basicTest +INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest +INSTRUMENTATION_STATUS: current=1 +INSTRUMENTATION_STATUS_CODE: 1 +INSTRUMENTATION_STATUS: numtests=1 +INSTRUMENTATION_STATUS: stream=. 
+INSTRUMENTATION_STATUS: id=AndroidJUnitRunner +INSTRUMENTATION_STATUS: test=basicTest +INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest +INSTRUMENTATION_STATUS: current=1 +INSTRUMENTATION_STATUS_CODE: 0 +INSTRUMENTATION_RESULT: stream= + +Time: 0.214 + +OK (1 test) + + +INSTRUMENTATION_CODE: -1 +""" + expected_executed = [ + ('%s.com.my.package.test.BasicTest#basicTest' % MOCK_PREFIX, + signals.TestPass), + ] + self.assert_run_instrumentation_test( + instrumentation_output, + expected_executed=expected_executed, + expected_completed_and_passed=True, + prefix=MOCK_PREFIX) + + def test_run_instrumentation_test_with_failing_test(self): + instrumentation_output = """\ +INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest +INSTRUMENTATION_STATUS: current=1 +INSTRUMENTATION_STATUS: id=AndroidJUnitRunner +INSTRUMENTATION_STATUS: numtests=1 +INSTRUMENTATION_STATUS: stream= +com.my.package.test.BasicTest: +INSTRUMENTATION_STATUS: test=failingTest +INSTRUMENTATION_STATUS_CODE: 1 +INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest +INSTRUMENTATION_STATUS: current=1 +INSTRUMENTATION_STATUS: id=AndroidJUnitRunner +INSTRUMENTATION_STATUS: numtests=1 +INSTRUMENTATION_STATUS: stack=java.lang.UnsupportedOperationException: dummy failing test + at com.my.package.test.BasicTest.failingTest(BasicTest.java:38) + at java.lang.reflect.Method.invoke(Native Method) + at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:57) + at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) + at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:59) + at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) + at android.support.test.internal.runner.junit4.statement.RunBefores.evaluate(RunBefores.java:80) + at android.support.test.internal.runner.junit4.statement.RunAfters.evaluate(RunAfters.java:61) + at 
android.support.test.rule.ActivityTestRule$ActivityStatement.evaluate(ActivityTestRule.java:433) + at com.my.package.test.BaseTest$3.evaluate(BaseTest.java:96) + at com.my.package.test.BaseTest$4.evaluate(BaseTest.java:109) + at com.my.package.test.BaseTest$2.evaluate(BaseTest.java:77) + at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55) + at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55) + at org.junit.rules.RunRules.evaluate(RunRules.java:20) + at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:81) + at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:327) + at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:84) + at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:57) + at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292) + at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73) + at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290) + at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60) + at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270) + at org.junit.runners.ParentRunner.run(ParentRunner.java:370) + at android.support.test.runner.AndroidJUnit4.run(AndroidJUnit4.java:99) + at org.junit.runners.Suite.runChild(Suite.java:128) + at org.junit.runners.Suite.runChild(Suite.java:27) + at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292) + at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73) + at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290) + at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60) + at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270) + at org.junit.runners.ParentRunner.run(ParentRunner.java:370) + at org.junit.runners.Suite.runChild(Suite.java:128) + at org.junit.runners.Suite.runChild(Suite.java:27) + at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292) + at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73) + at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290) + at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60) + at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270) + at org.junit.runners.ParentRunner.run(ParentRunner.java:370) + at org.junit.runner.JUnitCore.run(JUnitCore.java:137) + at org.junit.runner.JUnitCore.run(JUnitCore.java:115) + at android.support.test.internal.runner.TestExecutor.execute(TestExecutor.java:56) + at com.my.package.test.BaseRunner.runTests(BaseRunner.java:344) + at com.my.package.test.BaseRunner.onStart(BaseRunner.java:330) + at com.my.package.test.runner.MyRunner.onStart(MyRunner.java:253) + at android.app.Instrumentation$InstrumentationThread.run(Instrumentation.java:2074) + +INSTRUMENTATION_STATUS: stream= +Error in failingTest(com.my.package.test.BasicTest): +java.lang.UnsupportedOperationException: dummy failing test + at com.my.package.test.BasicTest.failingTest(BasicTest.java:38) + at java.lang.reflect.Method.invoke(Native Method) + at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:57) + at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) + at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:59) + at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) + at android.support.test.internal.runner.junit4.statement.RunBefores.evaluate(RunBefores.java:80) + at android.support.test.internal.runner.junit4.statement.RunAfters.evaluate(RunAfters.java:61) + at android.support.test.rule.ActivityTestRule$ActivityStatement.evaluate(ActivityTestRule.java:433) + at com.my.package.test.BaseTest$3.evaluate(BaseTest.java:96) + at com.my.package.test.BaseTest$4.evaluate(BaseTest.java:109) + at com.my.package.test.BaseTest$2.evaluate(BaseTest.java:77) + at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55) + at 
org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55) + at org.junit.rules.RunRules.evaluate(RunRules.java:20) + at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:81) + at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:327) + at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:84) + at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:57) + at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292) + at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73) + at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290) + at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60) + at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270) + at org.junit.runners.ParentRunner.run(ParentRunner.java:370) + at android.support.test.runner.AndroidJUnit4.run(AndroidJUnit4.java:99) + at org.junit.runners.Suite.runChild(Suite.java:128) + at org.junit.runners.Suite.runChild(Suite.java:27) + at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292) + at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73) + at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290) + at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60) + at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270) + at org.junit.runners.ParentRunner.run(ParentRunner.java:370) + at org.junit.runners.Suite.runChild(Suite.java:128) + at org.junit.runners.Suite.runChild(Suite.java:27) + at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292) + at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73) + at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290) + at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60) + at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270) + at org.junit.runners.ParentRunner.run(ParentRunner.java:370) + at 
org.junit.runner.JUnitCore.run(JUnitCore.java:137) + at org.junit.runner.JUnitCore.run(JUnitCore.java:115) + at android.support.test.internal.runner.TestExecutor.execute(TestExecutor.java:56) + at com.my.package.test.BaseRunner.runTests(BaseRunner.java:344) + at com.my.package.test.BaseRunner.onStart(BaseRunner.java:330) + at com.my.package.test.runner.MyRunner.onStart(MyRunner.java:253) + at android.app.Instrumentation$InstrumentationThread.run(Instrumentation.java:2074) + +INSTRUMENTATION_STATUS: test=failingTest +INSTRUMENTATION_STATUS_CODE: -2 +INSTRUMENTATION_RESULT: stream= + +Time: 1.92 +There was 1 failure: +1) failingTest(com.my.package.test.BasicTest) +java.lang.UnsupportedOperationException: dummy failing test + at com.my.package.test.BasicTest.failingTest(BasicTest.java:38) + at java.lang.reflect.Method.invoke(Native Method) + at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:57) + at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) + at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:59) + at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) + at android.support.test.internal.runner.junit4.statement.RunBefores.evaluate(RunBefores.java:80) + at android.support.test.internal.runner.junit4.statement.RunAfters.evaluate(RunAfters.java:61) + at android.support.test.rule.ActivityTestRule$ActivityStatement.evaluate(ActivityTestRule.java:433) + at com.my.package.test.BaseTest$3.evaluate(BaseTest.java:96) + at com.my.package.test.BaseTest$4.evaluate(BaseTest.java:109) + at com.my.package.test.BaseTest$2.evaluate(BaseTest.java:77) + at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55) + at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55) + at org.junit.rules.RunRules.evaluate(RunRules.java:20) + at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:81) + at 
org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:327) + at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:84) + at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:57) + at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292) + at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73) + at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290) + at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60) + at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270) + at org.junit.runners.ParentRunner.run(ParentRunner.java:370) + at android.support.test.runner.AndroidJUnit4.run(AndroidJUnit4.java:99) + at org.junit.runners.Suite.runChild(Suite.java:128) + at org.junit.runners.Suite.runChild(Suite.java:27) + at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292) + at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73) + at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290) + at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60) + at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270) + at org.junit.runners.ParentRunner.run(ParentRunner.java:370) + at org.junit.runners.Suite.runChild(Suite.java:128) + at org.junit.runners.Suite.runChild(Suite.java:27) + at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292) + at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73) + at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290) + at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60) + at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270) + at org.junit.runners.ParentRunner.run(ParentRunner.java:370) + at org.junit.runner.JUnitCore.run(JUnitCore.java:137) + at org.junit.runner.JUnitCore.run(JUnitCore.java:115) + at android.support.test.internal.runner.TestExecutor.execute(TestExecutor.java:56) + at 
com.my.package.test.BaseRunner.runTests(BaseRunner.java:344) + at com.my.package.test.BaseRunner.onStart(BaseRunner.java:330) + at com.my.package.test.runner.MyRunner.onStart(MyRunner.java:253) + at android.app.Instrumentation$InstrumentationThread.run(Instrumentation.java:2074) + +FAILURES!!! +Tests run: 1, Failures: 1 + + +INSTRUMENTATION_CODE: -1""" + expected_executed = [ + ('com.my.package.test.BasicTest#failingTest', signals.TestFailure), + ] + self.assert_run_instrumentation_test( + instrumentation_output, expected_executed=expected_executed) + + def test_run_instrumentation_test_with_assumption_failure_test(self): + instrumentation_output = """\ +INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest +INSTRUMENTATION_STATUS: current=1 +INSTRUMENTATION_STATUS: id=AndroidJUnitRunner +INSTRUMENTATION_STATUS: numtests=1 +INSTRUMENTATION_STATUS: stream= +com.my.package.test.BasicTest: +INSTRUMENTATION_STATUS: test=assumptionFailureTest +INSTRUMENTATION_STATUS_CODE: 1 +INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest +INSTRUMENTATION_STATUS: current=1 +INSTRUMENTATION_STATUS: id=AndroidJUnitRunner +INSTRUMENTATION_STATUS: numtests=1 +INSTRUMENTATION_STATUS: stack=org.junit.AssumptionViolatedException: Assumption failure reason + at org.junit.Assume.assumeTrue(Assume.java:59) + at org.junit.Assume.assumeFalse(Assume.java:66) + at com.my.package.test.BasicTest.assumptionFailureTest(BasicTest.java:63) + at java.lang.reflect.Method.invoke(Native Method) + at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:57) + at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) + at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:59) + at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) + at android.support.test.internal.runner.junit4.statement.RunBefores.evaluate(RunBefores.java:80) + at 
android.support.test.internal.runner.junit4.statement.RunAfters.evaluate(RunAfters.java:61) + at android.support.test.rule.ActivityTestRule$ActivityStatement.evaluate(ActivityTestRule.java:433) + at com.my.package.test.MyBaseTest$3.evaluate(MyBaseTest.java:96) + at com.my.package.test.MyBaseTest$4.evaluate(MyBaseTest.java:109) + at com.my.package.test.MyBaseTest$2.evaluate(MyBaseTest.java:77) + at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55) + at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55) + at org.junit.rules.RunRules.evaluate(RunRules.java:20) + at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:81) + at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:327) + at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:84) + at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:57) + at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292) + at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73) + at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290) + at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60) + at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270) + at org.junit.runners.ParentRunner.run(ParentRunner.java:370) + at android.support.test.runner.AndroidJUnit4.run(AndroidJUnit4.java:99) + at org.junit.runners.Suite.runChild(Suite.java:128) + at org.junit.runners.Suite.runChild(Suite.java:27) + at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292) + at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73) + at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290) + at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60) + at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270) + at org.junit.runners.ParentRunner.run(ParentRunner.java:370) + at org.junit.runners.Suite.runChild(Suite.java:128) + at 
org.junit.runners.Suite.runChild(Suite.java:27) + at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292) + at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73) + at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290) + at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60) + at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270) + at org.junit.runners.ParentRunner.run(ParentRunner.java:370) + at org.junit.runner.JUnitCore.run(JUnitCore.java:137) + at org.junit.runner.JUnitCore.run(JUnitCore.java:115) + at android.support.test.internal.runner.TestExecutor.execute(TestExecutor.java:56) + at com.my.package.test.runner.BaseRunner.runTests(BaseRunner.java:344) + at com.my.package.test.runner.BaseRunner.onStart(BaseRunner.java:330) + at com.my.package.test.runner.BaseRunner.onStart(BaseRunner.java:253) + at android.app.Instrumentation$InstrumentationThread.run(Instrumentation.java:2074) + +INSTRUMENTATION_STATUS: stream= +com.my.package.test.BasicTest: +INSTRUMENTATION_STATUS: test=assumptionFailureTest +INSTRUMENTATION_STATUS_CODE: -4 +INSTRUMENTATION_RESULT: stream= + +Time: 3.139 + +OK (1 test) + + +INSTRUMENTATION_CODE: -1""" + expected_skipped = [ + ('com.my.package.test.BasicTest#assumptionFailureTest', + signals.TestSkip), + ] + self.assert_run_instrumentation_test( + instrumentation_output, + expected_skipped=expected_skipped, + expected_completed_and_passed=True) + + def test_run_instrumentation_test_with_ignored_test(self): + instrumentation_output = """\ +INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest +INSTRUMENTATION_STATUS: current=1 +INSTRUMENTATION_STATUS: id=AndroidJUnitRunner +INSTRUMENTATION_STATUS: numtests=1 +INSTRUMENTATION_STATUS: stream= +com.my.package.test.BasicTest: +INSTRUMENTATION_STATUS: test=ignoredTest +INSTRUMENTATION_STATUS_CODE: 1 +INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest +INSTRUMENTATION_STATUS: current=1 +INSTRUMENTATION_STATUS: 
id=AndroidJUnitRunner +INSTRUMENTATION_STATUS: numtests=1 +INSTRUMENTATION_STATUS: stream= +com.my.package.test.BasicTest: +INSTRUMENTATION_STATUS: test=ignoredTest +INSTRUMENTATION_STATUS_CODE: -3 +INSTRUMENTATION_RESULT: stream= + +Time: 0.007 + +OK (0 tests) + + +INSTRUMENTATION_CODE: -1""" + expected_skipped = [ + ('com.my.package.test.BasicTest#ignoredTest', signals.TestSkip), + ] + self.assert_run_instrumentation_test( + instrumentation_output, + expected_skipped=expected_skipped, + expected_completed_and_passed=True) + + def test_run_instrumentation_test_with_crashed_test(self): + instrumentation_output = """\ +INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest +INSTRUMENTATION_STATUS: current=1 +INSTRUMENTATION_STATUS: id=AndroidJUnitRunner +INSTRUMENTATION_STATUS: numtests=1 +INSTRUMENTATION_STATUS: stream= +com.my.package.test.BasicTest: +INSTRUMENTATION_STATUS: test=crashTest +INSTRUMENTATION_STATUS_CODE: 1 +INSTRUMENTATION_RESULT: shortMsg=Process crashed. +INSTRUMENTATION_CODE: 0""" + expected_executed = [ + ('com.my.package.test.BasicTest#crashTest', signals.TestError), + ] + self.assert_run_instrumentation_test( + instrumentation_output, + expected_executed=expected_executed, + expected_has_error=True) + + def test_run_instrumentation_test_with_crashing_test(self): + instrumentation_output = """\ +INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest +INSTRUMENTATION_STATUS: current=1 +INSTRUMENTATION_STATUS: id=AndroidJUnitRunner +INSTRUMENTATION_STATUS: numtests=2 +INSTRUMENTATION_STATUS: stream= +com.my.package.test.BasicTest: +INSTRUMENTATION_STATUS: test=crashAndRecover1Test +INSTRUMENTATION_STATUS_CODE: 1 +INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest +INSTRUMENTATION_STATUS: current=2 +INSTRUMENTATION_STATUS: id=AndroidJUnitRunner +INSTRUMENTATION_STATUS: numtests=2 +INSTRUMENTATION_STATUS: stream= +com.my.package.test.BasicTest: +INSTRUMENTATION_STATUS: test=crashAndRecover2Test +INSTRUMENTATION_STATUS_CODE: 1 
+INSTRUMENTATION_RESULT: stream= + +Time: 6.342 + +OK (2 tests) + + +INSTRUMENTATION_CODE: -1""" + expected_executed = [ + ('com.my.package.test.BasicTest#crashAndRecover1Test', + signals.TestError), + ('com.my.package.test.BasicTest#crashAndRecover2Test', + signals.TestError), + ] + self.assert_run_instrumentation_test( + instrumentation_output, + expected_executed=expected_executed, + expected_completed_and_passed=True) + + def test_run_instrumentation_test_with_runner_setup_crash(self): + instrumentation_output = """\ +INSTRUMENTATION_RESULT: shortMsg=Process crashed. +INSTRUMENTATION_CODE: 0""" + self.assert_run_instrumentation_test( + instrumentation_output, expected_has_error=True) + + def test_run_instrumentation_test_with_runner_teardown_crash(self): + instrumentation_output = """\ +INSTRUMENTATION_STATUS: numtests=1 +INSTRUMENTATION_STATUS: stream= +com.my.package.test.BasicTest: +INSTRUMENTATION_STATUS: id=AndroidJUnitRunner +INSTRUMENTATION_STATUS: test=basicTest +INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest +INSTRUMENTATION_STATUS: current=1 +INSTRUMENTATION_STATUS_CODE: 1 +INSTRUMENTATION_STATUS: numtests=1 +INSTRUMENTATION_STATUS: stream=. +INSTRUMENTATION_STATUS: id=AndroidJUnitRunner +INSTRUMENTATION_STATUS: test=basicTest +INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest +INSTRUMENTATION_STATUS: current=1 +INSTRUMENTATION_STATUS_CODE: 0 +INSTRUMENTATION_RESULT: shortMsg=Process crashed. 
+INSTRUMENTATION_CODE: 0 +""" + expected_executed = [ + ('com.my.package.test.BasicTest#basicTest', signals.TestPass), + ] + self.assert_run_instrumentation_test( + instrumentation_output, + expected_executed=expected_executed, + expected_has_error=True) + + def test_run_instrumentation_test_with_multiple_tests(self): + instrumentation_output = """\ +INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest +INSTRUMENTATION_STATUS: current=1 +INSTRUMENTATION_STATUS: id=AndroidJUnitRunner +INSTRUMENTATION_STATUS: numtests=4 +INSTRUMENTATION_STATUS: stream= +com.my.package.test.BasicTest: +INSTRUMENTATION_STATUS: test=failingTest +INSTRUMENTATION_STATUS_CODE: 1 +INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest +INSTRUMENTATION_STATUS: current=1 +INSTRUMENTATION_STATUS: id=AndroidJUnitRunner +INSTRUMENTATION_STATUS: numtests=4 +INSTRUMENTATION_STATUS: stack=java.lang.UnsupportedOperationException: dummy failing test + at com.my.package.test.BasicTest.failingTest(BasicTest.java:40) + at java.lang.reflect.Method.invoke(Native Method) + at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:57) + at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) + at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:59) + at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) + at android.support.test.internal.runner.junit4.statement.RunBefores.evaluate(RunBefores.java:80) + at android.support.test.internal.runner.junit4.statement.RunAfters.evaluate(RunAfters.java:61) + at android.support.test.rule.ActivityTestRule$ActivityStatement.evaluate(ActivityTestRule.java:433) + at com.my.package.test.BaseTest$3.evaluate(BaseTest.java:96) + at com.my.package.test.BaseTest$4.evaluate(BaseTest.java:109) + at com.my.package.test.BaseTest$2.evaluate(BaseTest.java:77) + at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55) + at 
org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55) + at org.junit.rules.RunRules.evaluate(RunRules.java:20) + at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:81) + at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:327) + at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:84) + at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:57) + at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292) + at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73) + at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290) + at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60) + at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270) + at org.junit.runners.ParentRunner.run(ParentRunner.java:370) + at android.support.test.runner.AndroidJUnit4.run(AndroidJUnit4.java:99) + at org.junit.runners.Suite.runChild(Suite.java:128) + at org.junit.runners.Suite.runChild(Suite.java:27) + at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292) + at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73) + at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290) + at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60) + at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270) + at org.junit.runners.ParentRunner.run(ParentRunner.java:370) + at org.junit.runners.Suite.runChild(Suite.java:128) + at org.junit.runners.Suite.runChild(Suite.java:27) + at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292) + at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73) + at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290) + at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60) + at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270) + at org.junit.runners.ParentRunner.run(ParentRunner.java:370) + at 
org.junit.runner.JUnitCore.run(JUnitCore.java:137) + at org.junit.runner.JUnitCore.run(JUnitCore.java:115) + at android.support.test.internal.runner.TestExecutor.execute(TestExecutor.java:56) + at com.my.package.test.BaseRunner.runTests(BaseRunner.java:344) + at com.my.package.test.BaseRunner.onStart(BaseRunner.java:330) + at com.my.package.test.runner.MyRunner.onStart(MyRunner.java:253) + at android.app.Instrumentation$InstrumentationThread.run(Instrumentation.java:2074) + +INSTRUMENTATION_STATUS: stream= +Error in failingTest(com.my.package.test.BasicTest): +java.lang.UnsupportedOperationException: dummy failing test + at com.my.package.test.BasicTest.failingTest(BasicTest.java:40) + at java.lang.reflect.Method.invoke(Native Method) + at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:57) + at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) + at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:59) + at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) + at android.support.test.internal.runner.junit4.statement.RunBefores.evaluate(RunBefores.java:80) + at android.support.test.internal.runner.junit4.statement.RunAfters.evaluate(RunAfters.java:61) + at android.support.test.rule.ActivityTestRule$ActivityStatement.evaluate(ActivityTestRule.java:433) + at com.my.package.test.BaseTest$3.evaluate(BaseTest.java:96) + at com.my.package.test.BaseTest$4.evaluate(BaseTest.java:109) + at com.my.package.test.BaseTest$2.evaluate(BaseTest.java:77) + at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55) + at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55) + at org.junit.rules.RunRules.evaluate(RunRules.java:20) + at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:81) + at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:327) + at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:84) + at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:57) + at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292) + at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73) + at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290) + at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60) + at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270) + at org.junit.runners.ParentRunner.run(ParentRunner.java:370) + at android.support.test.runner.AndroidJUnit4.run(AndroidJUnit4.java:99) + at org.junit.runners.Suite.runChild(Suite.java:128) + at org.junit.runners.Suite.runChild(Suite.java:27) + at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292) + at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73) + at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290) + at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60) + at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270) + at org.junit.runners.ParentRunner.run(ParentRunner.java:370) + at org.junit.runners.Suite.runChild(Suite.java:128) + at org.junit.runners.Suite.runChild(Suite.java:27) + at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292) + at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73) + at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290) + at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60) + at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270) + at org.junit.runners.ParentRunner.run(ParentRunner.java:370) + at org.junit.runner.JUnitCore.run(JUnitCore.java:137) + at org.junit.runner.JUnitCore.run(JUnitCore.java:115) + at android.support.test.internal.runner.TestExecutor.execute(TestExecutor.java:56) + at com.my.package.test.BaseRunner.runTests(BaseRunner.java:344) + at 
com.my.package.test.BaseRunner.onStart(BaseRunner.java:330) + at com.my.package.test.runner.MyRunner.onStart(MyRunner.java:253) + at android.app.Instrumentation$InstrumentationThread.run(Instrumentation.java:2074) + +INSTRUMENTATION_STATUS: test=failingTest +INSTRUMENTATION_STATUS_CODE: -2 +INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest +INSTRUMENTATION_STATUS: current=2 +INSTRUMENTATION_STATUS: id=AndroidJUnitRunner +INSTRUMENTATION_STATUS: numtests=4 +INSTRUMENTATION_STATUS: stream= +INSTRUMENTATION_STATUS: test=assumptionFailureTest +INSTRUMENTATION_STATUS_CODE: 1 +INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest +INSTRUMENTATION_STATUS: current=2 +INSTRUMENTATION_STATUS: id=AndroidJUnitRunner +INSTRUMENTATION_STATUS: numtests=4 +INSTRUMENTATION_STATUS: stack=org.junit.AssumptionViolatedException: Assumption failure reason + at org.junit.Assume.assumeTrue(Assume.java:59) + at com.my.package.test.BasicTest.assumptionFailureTest(BasicTest.java:61) + at java.lang.reflect.Method.invoke(Native Method) + at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:57) + at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) + at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:59) + at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) + at android.support.test.internal.runner.junit4.statement.RunBefores.evaluate(RunBefores.java:80) + at android.support.test.internal.runner.junit4.statement.RunAfters.evaluate(RunAfters.java:61) + at android.support.test.rule.ActivityTestRule$ActivityStatement.evaluate(ActivityTestRule.java:433) + at com.my.package.test.BaseTest$3.evaluate(BaseTest.java:96) + at com.my.package.test.BaseTest$4.evaluate(BaseTest.java:109) + at com.my.package.test.BaseTest$2.evaluate(BaseTest.java:77) + at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55) + at 
org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55) + at org.junit.rules.RunRules.evaluate(RunRules.java:20) + at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:81) + at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:327) + at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:84) + at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:57) + at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292) + at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73) + at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290) + at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60) + at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270) + at org.junit.runners.ParentRunner.run(ParentRunner.java:370) + at android.support.test.runner.AndroidJUnit4.run(AndroidJUnit4.java:99) + at org.junit.runners.Suite.runChild(Suite.java:128) + at org.junit.runners.Suite.runChild(Suite.java:27) + at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292) + at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73) + at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290) + at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60) + at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270) + at org.junit.runners.ParentRunner.run(ParentRunner.java:370) + at org.junit.runners.Suite.runChild(Suite.java:128) + at org.junit.runners.Suite.runChild(Suite.java:27) + at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292) + at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73) + at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290) + at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60) + at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270) + at org.junit.runners.ParentRunner.run(ParentRunner.java:370) + at 
org.junit.runner.JUnitCore.run(JUnitCore.java:137) + at org.junit.runner.JUnitCore.run(JUnitCore.java:115) + at android.support.test.internal.runner.TestExecutor.execute(TestExecutor.java:56) + at com.my.package.test.BaseRunner.runTests(BaseRunner.java:344) + at com.my.package.test.BaseRunner.onStart(BaseRunner.java:330) + at com.my.package.test.runner.MyRunner.onStart(MyRunner.java:253) + at android.app.Instrumentation$InstrumentationThread.run(Instrumentation.java:2074) + +INSTRUMENTATION_STATUS: stream= +INSTRUMENTATION_STATUS: test=assumptionFailureTest +INSTRUMENTATION_STATUS_CODE: -4 +INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest +INSTRUMENTATION_STATUS: current=3 +INSTRUMENTATION_STATUS: id=AndroidJUnitRunner +INSTRUMENTATION_STATUS: numtests=4 +INSTRUMENTATION_STATUS: stream= +INSTRUMENTATION_STATUS: test=ignoredTest +INSTRUMENTATION_STATUS_CODE: 1 +INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest +INSTRUMENTATION_STATUS: current=3 +INSTRUMENTATION_STATUS: id=AndroidJUnitRunner +INSTRUMENTATION_STATUS: numtests=4 +INSTRUMENTATION_STATUS: stream= +INSTRUMENTATION_STATUS: test=ignoredTest +INSTRUMENTATION_STATUS_CODE: -3 +INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest +INSTRUMENTATION_STATUS: current=4 +INSTRUMENTATION_STATUS: id=AndroidJUnitRunner +INSTRUMENTATION_STATUS: numtests=4 +INSTRUMENTATION_STATUS: stream= +INSTRUMENTATION_STATUS: test=passingTest +INSTRUMENTATION_STATUS_CODE: 1 +INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest +INSTRUMENTATION_STATUS: current=4 +INSTRUMENTATION_STATUS: id=AndroidJUnitRunner +INSTRUMENTATION_STATUS: numtests=4 +INSTRUMENTATION_STATUS: stream=. 
+INSTRUMENTATION_STATUS: test=passingTest +INSTRUMENTATION_STATUS_CODE: 0 +INSTRUMENTATION_RESULT: stream= + +Time: 4.131 +There was 1 failure: +1) failingTest(com.my.package.test.BasicTest) +java.lang.UnsupportedOperationException: dummy failing test + at com.my.package.test.BasicTest.failingTest(BasicTest.java:40) + at java.lang.reflect.Method.invoke(Native Method) + at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:57) + at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) + at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:59) + at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) + at android.support.test.internal.runner.junit4.statement.RunBefores.evaluate(RunBefores.java:80) + at android.support.test.internal.runner.junit4.statement.RunAfters.evaluate(RunAfters.java:61) + at android.support.test.rule.ActivityTestRule$ActivityStatement.evaluate(ActivityTestRule.java:433) + at com.my.package.test.BaseTest$3.evaluate(BaseTest.java:96) + at com.my.package.test.BaseTest$4.evaluate(BaseTest.java:109) + at com.my.package.test.BaseTest$2.evaluate(BaseTest.java:77) + at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55) + at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55) + at org.junit.rules.RunRules.evaluate(RunRules.java:20) + at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:81) + at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:327) + at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:84) + at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:57) + at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292) + at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73) + at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290) + at 
org.junit.runners.ParentRunner.access$000(ParentRunner.java:60) + at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270) + at org.junit.runners.ParentRunner.run(ParentRunner.java:370) + at android.support.test.runner.AndroidJUnit4.run(AndroidJUnit4.java:99) + at org.junit.runners.Suite.runChild(Suite.java:128) + at org.junit.runners.Suite.runChild(Suite.java:27) + at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292) + at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73) + at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290) + at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60) + at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270) + at org.junit.runners.ParentRunner.run(ParentRunner.java:370) + at org.junit.runners.Suite.runChild(Suite.java:128) + at org.junit.runners.Suite.runChild(Suite.java:27) + at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292) + at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73) + at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290) + at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60) + at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270) + at org.junit.runners.ParentRunner.run(ParentRunner.java:370) + at org.junit.runner.JUnitCore.run(JUnitCore.java:137) + at org.junit.runner.JUnitCore.run(JUnitCore.java:115) + at android.support.test.internal.runner.TestExecutor.execute(TestExecutor.java:56) + at com.my.package.test.BaseRunner.runTests(BaseRunner.java:344) + at com.my.package.test.BaseRunner.onStart(BaseRunner.java:330) + at com.my.package.test.runner.MyRunner.onStart(MyRunner.java:253) + at android.app.Instrumentation$InstrumentationThread.run(Instrumentation.java:2074) + +FAILURES!!! 
+Tests run: 3, Failures: 1 + + +INSTRUMENTATION_CODE: -1""" + expected_executed = [ + ('com.my.package.test.BasicTest#failingTest', signals.TestFailure), + ('com.my.package.test.BasicTest#passingTest', signals.TestPass), + ] + expected_skipped = [ + ('com.my.package.test.BasicTest#assumptionFailureTest', + signals.TestSkip), + ('com.my.package.test.BasicTest#ignoredTest', signals.TestSkip), + ] + self.assert_run_instrumentation_test( + instrumentation_output, + expected_executed=expected_executed, + expected_skipped=expected_skipped) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/mobly/base_test_test.py b/tests/mobly/base_test_test.py index 1e8f4b2..f7905d9 100755 --- a/tests/mobly/base_test_test.py +++ b/tests/mobly/base_test_test.py @@ -22,6 +22,7 @@ from future.tests.base import unittest from mobly import asserts from mobly import base_test from mobly import config_parser +from mobly import expects from mobly import signals from tests.lib import utils @@ -1106,6 +1107,217 @@ class BaseTestTest(unittest.TestCase): self.assertEqual(actual_record.details, MSG_EXPECTED_EXCEPTION) self.assertEqual(actual_record.extras, MOCK_EXTRA) + def test_expect_true(self): + must_call = mock.Mock() + must_call2 = mock.Mock() + + class MockBaseTest(base_test.BaseTestClass): + def test_func(self): + expects.expect_true( + False, MSG_EXPECTED_EXCEPTION, extras=MOCK_EXTRA) + must_call('ha') + + def on_fail(self, record): + must_call2('on_fail') + + bt_cls = MockBaseTest(self.mock_test_cls_configs) + bt_cls.run(test_names=['test_func']) + must_call.assert_called_once_with('ha') + must_call2.assert_called_once_with('on_fail') + actual_record = bt_cls.results.failed[0] + self.assertEqual(actual_record.test_name, 'test_func') + self.assertEqual(actual_record.details, MSG_EXPECTED_EXCEPTION) + self.assertEqual(actual_record.extras, MOCK_EXTRA) + + def test_expect_multiple_fails(self): + must_call = mock.Mock() + must_call2 = mock.Mock() + + class 
MockBaseTest(base_test.BaseTestClass): + def test_func(self): + expects.expect_true(False, 'msg 1', extras='1') + expects.expect_true(False, 'msg 2', extras='2') + must_call('ha') + + def on_fail(self, record): + must_call2('on_fail') + + bt_cls = MockBaseTest(self.mock_test_cls_configs) + bt_cls.run(test_names=['test_func']) + must_call.assert_called_once_with('ha') + must_call2.assert_called_once_with('on_fail') + actual_record = bt_cls.results.failed[0] + self.assertEqual(actual_record.test_name, 'test_func') + self.assertEqual(actual_record.details, 'msg 1') + self.assertEqual(actual_record.extras, '1') + self.assertEqual(len(actual_record.extra_errors), 1) + second_error = list(actual_record.extra_errors.values())[0] + self.assertEqual(second_error.details, 'msg 2') + self.assertEqual(second_error.extras, '2') + + def test_expect_two_tests(self): + """Errors in `expect` should not leak across tests. + """ + must_call = mock.Mock() + + class MockBaseTest(base_test.BaseTestClass): + def test_1(self): + expects.expect_true( + False, MSG_EXPECTED_EXCEPTION, extras=MOCK_EXTRA) + must_call('ha') + + def test_2(self): + pass + + bt_cls = MockBaseTest(self.mock_test_cls_configs) + bt_cls.run(test_names=['test_1', 'test_2']) + must_call.assert_called_once_with('ha') + actual_record = bt_cls.results.failed[0] + self.assertEqual(actual_record.test_name, 'test_1') + self.assertEqual(actual_record.details, MSG_EXPECTED_EXCEPTION) + self.assertEqual(actual_record.extras, MOCK_EXTRA) + another_record = bt_cls.results.passed[0] + self.assertEqual(another_record.test_name, 'test_2') + + def test_expect_no_op(self): + """Tests don't fail when expect is not triggered. 
+ """ + must_call = mock.Mock() + + class MockBaseTest(base_test.BaseTestClass): + def test_1(self): + expects.expect_true( + True, MSG_EXPECTED_EXCEPTION, extras=MOCK_EXTRA) + must_call('ha') + + def test_2(self): + expects.expect_false( + False, MSG_EXPECTED_EXCEPTION, extras=MOCK_EXTRA) + must_call('ha') + + bt_cls = MockBaseTest(self.mock_test_cls_configs) + bt_cls.run(test_names=['test_1', 'test_2']) + must_call.assert_called_with('ha') + self.assertEqual(len(bt_cls.results.passed), 2) + + def test_expect_in_teardown_test(self): + must_call = mock.Mock() + must_call2 = mock.Mock() + + class MockBaseTest(base_test.BaseTestClass): + def test_func(self): + pass + + def teardown_test(self): + expects.expect_true( + False, MSG_EXPECTED_EXCEPTION, extras=MOCK_EXTRA) + must_call('ha') + + def on_fail(self, record): + must_call2('on_fail') + + bt_cls = MockBaseTest(self.mock_test_cls_configs) + bt_cls.run(test_names=['test_func']) + must_call.assert_called_once_with('ha') + must_call2.assert_called_once_with('on_fail') + actual_record = bt_cls.results.error[0] + self.assertEqual(actual_record.test_name, 'test_func') + self.assertEqual(actual_record.details, MSG_EXPECTED_EXCEPTION) + self.assertEqual(actual_record.extras, MOCK_EXTRA) + + def test_expect_false(self): + must_call = mock.Mock() + + class MockBaseTest(base_test.BaseTestClass): + def test_func(self): + expects.expect_false( + True, MSG_EXPECTED_EXCEPTION, extras=MOCK_EXTRA) + must_call('ha') + + bt_cls = MockBaseTest(self.mock_test_cls_configs) + bt_cls.run(test_names=['test_func']) + must_call.assert_called_once_with('ha') + actual_record = bt_cls.results.failed[0] + self.assertEqual(actual_record.test_name, 'test_func') + self.assertEqual(actual_record.details, MSG_EXPECTED_EXCEPTION) + self.assertEqual(actual_record.extras, MOCK_EXTRA) + + def test_expect_equal(self): + must_call = mock.Mock() + + class MockBaseTest(base_test.BaseTestClass): + def test_func(self): + expects.expect_equal( + 1, 2, 
MSG_EXPECTED_EXCEPTION, extras=MOCK_EXTRA) + must_call('ha') + + bt_cls = MockBaseTest(self.mock_test_cls_configs) + bt_cls.run(test_names=['test_func']) + must_call.assert_called_once_with('ha') + actual_record = bt_cls.results.failed[0] + self.assertEqual(actual_record.test_name, 'test_func') + self.assertEqual(actual_record.details, + '1 != 2 ' + MSG_EXPECTED_EXCEPTION) + self.assertEqual(actual_record.extras, MOCK_EXTRA) + + def test_expect_no_raises_default_msg(self): + must_call = mock.Mock() + + class MockBaseTest(base_test.BaseTestClass): + def test_func(self): + with expects.expect_no_raises(extras=MOCK_EXTRA): + raise Exception(MSG_EXPECTED_EXCEPTION) + must_call('ha') + + bt_cls = MockBaseTest(self.mock_test_cls_configs) + bt_cls.run(test_names=['test_func']) + must_call.assert_called_once_with('ha') + actual_record = bt_cls.results.failed[0] + self.assertEqual(actual_record.test_name, 'test_func') + self.assertEqual( + actual_record.details, + 'Got an unexpected exception: %s' % MSG_EXPECTED_EXCEPTION) + self.assertEqual(actual_record.extras, MOCK_EXTRA) + + def test_expect_no_raises_custom_msg(self): + must_call = mock.Mock() + msg = 'Some step unexpected failed' + + class MockBaseTest(base_test.BaseTestClass): + def test_func(self): + with expects.expect_no_raises(message=msg, extras=MOCK_EXTRA): + raise Exception(MSG_EXPECTED_EXCEPTION) + must_call('ha') + + bt_cls = MockBaseTest(self.mock_test_cls_configs) + bt_cls.run(test_names=['test_func']) + must_call.assert_called_once_with('ha') + actual_record = bt_cls.results.failed[0] + self.assertEqual(actual_record.test_name, 'test_func') + self.assertEqual(actual_record.details, + '%s: %s' % (msg, MSG_EXPECTED_EXCEPTION)) + self.assertEqual(actual_record.extras, MOCK_EXTRA) + + def test_expect_true_and_assert_true(self): + """Error thrown by assert_true should be considered the termination. 
+ """ + must_call = mock.Mock() + + class MockBaseTest(base_test.BaseTestClass): + def test_func(self): + expects.expect_true( + False, MSG_EXPECTED_EXCEPTION, extras=MOCK_EXTRA) + must_call('ha') + asserts.assert_true(False, 'failed from assert_true') + + bt_cls = MockBaseTest(self.mock_test_cls_configs) + bt_cls.run(test_names=['test_func']) + must_call.assert_called_once_with('ha') + actual_record = bt_cls.results.failed[0] + self.assertEqual(actual_record.test_name, 'test_func') + self.assertEqual(actual_record.details, 'failed from assert_true') + self.assertIsNone(actual_record.extras) + def test_unpack_userparams_required(self): """Missing a required param should raise an error.""" required = ["some_param"] diff --git a/tests/mobly/controllers/android_device_lib/adb_test.py b/tests/mobly/controllers/android_device_lib/adb_test.py index 5c5f6a8..382ed01 100755 --- a/tests/mobly/controllers/android_device_lib/adb_test.py +++ b/tests/mobly/controllers/android_device_lib/adb_test.py @@ -14,10 +14,32 @@ import mock +from collections import OrderedDict from future.tests.base import unittest from mobly.controllers.android_device_lib import adb +# Mock parameters for instrumentation. +MOCK_INSTRUMENTATION_PACKAGE = 'com.my.instrumentation.tests' +MOCK_INSTRUMENTATION_RUNNER = 'com.my.instrumentation.runner' +MOCK_INSTRUMENTATION_OPTIONS = OrderedDict([ + ('option1', 'value1'), + ('option2', 'value2'), +]) +# Mock android instrumentation commands. 
+MOCK_BASIC_INSTRUMENTATION_COMMAND = ('am instrument -r -w com.my' + '.instrumentation.tests/com.android' + '.common.support.test.runner' + '.AndroidJUnitRunner') +MOCK_RUNNER_INSTRUMENTATION_COMMAND = ('am instrument -r -w com.my' + '.instrumentation.tests/com.my' + '.instrumentation.runner') +MOCK_OPTIONS_INSTRUMENTATION_COMMAND = ('am instrument -r -w -e option1 value1' + ' -e option2 value2 com.my' + '.instrumentation.tests/com.android' + '.common.support.test.runner' + '.AndroidJUnitRunner') + class AdbTest(unittest.TestCase): """Unit tests for mobly.controllers.android_device_lib.adb. @@ -31,8 +53,8 @@ class AdbTest(unittest.TestCase): # the created process object in adb._exec_cmd() mock_psutil_process.return_value = mock.Mock() - mock_proc.communicate = mock.Mock(return_value=("out".encode('utf-8'), - "err".encode('utf-8'))) + mock_proc.communicate = mock.Mock( + return_value=('out'.encode('utf-8'), 'err'.encode('utf-8'))) mock_proc.returncode = 0 return (mock_psutil_process, mock_popen) @@ -43,8 +65,8 @@ class AdbTest(unittest.TestCase): self._mock_process(mock_psutil_process, mock_Popen) reply = adb.AdbProxy()._exec_cmd( - ["fake_cmd"], shell=False, timeout=None) - self.assertEqual("out", reply.decode('utf-8')) + ['fake_cmd'], shell=False, timeout=None) + self.assertEqual('out', reply.decode('utf-8')) @mock.patch('mobly.controllers.android_device_lib.adb.subprocess.Popen') @mock.patch('mobly.controllers.android_device_lib.adb.psutil.Process') @@ -54,8 +76,8 @@ class AdbTest(unittest.TestCase): mock_popen.return_value.returncode = 1 with self.assertRaisesRegex(adb.AdbError, - "Error executing adb cmd .*"): - adb.AdbProxy()._exec_cmd(["fake_cmd"], shell=False, timeout=None) + 'Error executing adb cmd .*'): + adb.AdbProxy()._exec_cmd(['fake_cmd'], shell=False, timeout=None) @mock.patch('mobly.controllers.android_device_lib.adb.subprocess.Popen') @mock.patch('mobly.controllers.android_device_lib.adb.psutil.Process') @@ -63,8 +85,8 @@ class 
AdbTest(unittest.TestCase): mock_popen): self._mock_process(mock_psutil_process, mock_popen) - reply = adb.AdbProxy()._exec_cmd(["fake_cmd"], shell=False, timeout=1) - self.assertEqual("out", reply.decode('utf-8')) + reply = adb.AdbProxy()._exec_cmd(['fake_cmd'], shell=False, timeout=1) + self.assertEqual('out', reply.decode('utf-8')) @mock.patch('mobly.controllers.android_device_lib.adb.subprocess.Popen') @mock.patch('mobly.controllers.android_device_lib.adb.psutil.Process') @@ -76,8 +98,8 @@ class AdbTest(unittest.TestCase): adb.psutil.TimeoutExpired('Timed out')) with self.assertRaisesRegex(adb.AdbTimeoutError, - "Timed out Adb cmd .*"): - adb.AdbProxy()._exec_cmd(["fake_cmd"], shell=False, timeout=0.1) + 'Timed out Adb cmd .*'): + adb.AdbProxy()._exec_cmd(['fake_cmd'], shell=False, timeout=0.1) @mock.patch('mobly.controllers.android_device_lib.adb.subprocess.Popen') @mock.patch('mobly.controllers.android_device_lib.adb.psutil.Process') @@ -85,8 +107,8 @@ class AdbTest(unittest.TestCase): mock_popen): self._mock_process(mock_psutil_process, mock_popen) with self.assertRaisesRegex(adb.AdbError, - "Timeout is a negative value: .*"): - adb.AdbProxy()._exec_cmd(["fake_cmd"], shell=False, timeout=-1) + 'Timeout is a negative value: .*'): + adb.AdbProxy()._exec_cmd(['fake_cmd'], shell=False, timeout=-1) def test_exec_adb_cmd(self): with mock.patch.object(adb.AdbProxy, '_exec_cmd') as mock_exec_cmd: @@ -110,6 +132,43 @@ class AdbTest(unittest.TestCase): mock_exec_cmd.assert_called_once_with( '"adb" -s "12345" shell arg1 arg2', shell=True, timeout=None) + def test_instrument_without_parameters(self): + """Verifies the AndroidDevice object's instrument command is correct in + the basic case. 
+ """ + with mock.patch.object(adb.AdbProxy, '_exec_cmd') as mock_exec_cmd: + adb.AdbProxy().instrument(MOCK_INSTRUMENTATION_PACKAGE) + mock_exec_cmd.assert_called_once_with( + ['adb', 'shell', MOCK_BASIC_INSTRUMENTATION_COMMAND], + shell=False, + timeout=None) + + def test_instrument_with_runner(self): + """Verifies the AndroidDevice object's instrument command is correct + with a runner specified. + """ + with mock.patch.object(adb.AdbProxy, '_exec_cmd') as mock_exec_cmd: + adb.AdbProxy().instrument( + MOCK_INSTRUMENTATION_PACKAGE, + runner=MOCK_INSTRUMENTATION_RUNNER) + mock_exec_cmd.assert_called_once_with( + ['adb', 'shell', MOCK_RUNNER_INSTRUMENTATION_COMMAND], + shell=False, + timeout=None) + + def test_instrument_with_options(self): + """Verifies the AndroidDevice object's instrument command is correct + with options. + """ + with mock.patch.object(adb.AdbProxy, '_exec_cmd') as mock_exec_cmd: + adb.AdbProxy().instrument( + MOCK_INSTRUMENTATION_PACKAGE, + options=MOCK_INSTRUMENTATION_OPTIONS) + mock_exec_cmd.assert_called_once_with( + ['adb', 'shell', MOCK_OPTIONS_INSTRUMENTATION_COMMAND], + shell=False, + timeout=None) + -if __name__ == "__main__": +if __name__ == '__main__': unittest.main() diff --git a/tests/mobly/controllers/android_device_test.py b/tests/mobly/controllers/android_device_test.py index 949b745..33dd23c 100755 --- a/tests/mobly/controllers/android_device_test.py +++ b/tests/mobly/controllers/android_device_test.py @@ -17,6 +17,7 @@ import mock import os import shutil import tempfile + from future.tests.base import unittest from mobly.controllers import android_device @@ -292,6 +293,22 @@ class AndroidDeviceTest(unittest.TestCase): with self.assertRaisesRegex(android_device.Error, expected_msg): ad.take_bug_report('test_something', 'sometime') + @mock.patch( + 'mobly.controllers.android_device_lib.adb.AdbProxy', + return_value=mock_android_device.MockAdbProxy(1)) + @mock.patch( + 
'mobly.controllers.android_device_lib.fastboot.FastbootProxy', + return_value=mock_android_device.MockFastbootProxy(1)) + @mock.patch('mobly.utils.create_dir') + def test_AndroidDevice_take_bug_report_with_destination( + self, create_dir_mock, FastbootProxy, MockAdbProxy): + mock_serial = 1 + ad = android_device.AndroidDevice(serial=mock_serial) + dest = tempfile.gettempdir() + ad.take_bug_report("test_something", "sometime", destination=dest) + expected_path = os.path.join(dest) + create_dir_mock.assert_called_with(expected_path) + @mock.patch( 'mobly.controllers.android_device_lib.adb.AdbProxy', return_value=mock_android_device.MockAdbProxy(
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 3, "issue_text_score": 2, "test_score": 1 }, "num_modified_files": 6 }
1.6
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work future==1.0.0 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work -e git+https://github.com/google/mobly.git@b4bd28313361c9ce0de33c591625b675eac507e7#egg=mobly mock==1.0.1 packaging @ file:///croot/packaging_1734472117206/work pluggy @ file:///croot/pluggy_1733169602837/work portpicker==1.6.0 psutil==7.0.0 pytest @ file:///croot/pytest_1738938843180/work pytz==2025.2 PyYAML==6.0.2 timeout-decorator==0.5.0 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
name: mobly channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - future==1.0.0 - mock==1.0.1 - portpicker==1.6.0 - psutil==7.0.0 - pytz==2025.2 - pyyaml==6.0.2 - timeout-decorator==0.5.0 prefix: /opt/conda/envs/mobly
[ "tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_parse_instrumentation_options_with_mixed_user_params", "tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_parse_instrumentation_options_with_no_instrumentation_params", "tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_parse_instrumentation_options_with_no_user_params", "tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_parse_instrumentation_options_with_only_instrumentation_params", "tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_assumption_failure_test", "tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_crashed_test", "tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_crashing_test", "tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_failing_test", "tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_ignored_test", "tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_invalid_syntax", "tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_missing_runner", "tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_missing_test_package", "tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_multiple_tests", "tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_no_output", "tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_no_tests", 
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_passing_test", "tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_prefix_test", "tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_runner_setup_crash", "tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_runner_teardown_crash", "tests/mobly/base_test_test.py::BaseTestTest::test_abort_class_in_on_fail", "tests/mobly/base_test_test.py::BaseTestTest::test_abort_class_in_setup_test", "tests/mobly/base_test_test.py::BaseTestTest::test_abort_class_in_test", "tests/mobly/base_test_test.py::BaseTestTest::test_abort_setup_class", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_fail", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_fail_with_msg", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_pass", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_noop", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_wrong_error", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_wrong_regex", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_pass", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_fail_with_noop", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_fail_with_wrong_error", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_pass", "tests/mobly/base_test_test.py::BaseTestTest::test_assert_true", "tests/mobly/base_test_test.py::BaseTestTest::test_both_teardown_and_test_body_raise_exceptions", "tests/mobly/base_test_test.py::BaseTestTest::test_cli_test_selection_fail_by_convention", "tests/mobly/base_test_test.py::BaseTestTest::test_cli_test_selection_override_self_tests_list", 
"tests/mobly/base_test_test.py::BaseTestTest::test_current_test_info", "tests/mobly/base_test_test.py::BaseTestTest::test_current_test_name", "tests/mobly/base_test_test.py::BaseTestTest::test_default_execution_of_all_tests", "tests/mobly/base_test_test.py::BaseTestTest::test_exception_objects_in_record", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_equal", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_false", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_in_teardown_test", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_multiple_fails", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_no_op", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_no_raises_custom_msg", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_no_raises_default_msg", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_true", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_true_and_assert_true", "tests/mobly/base_test_test.py::BaseTestTest::test_expect_two_tests", "tests/mobly/base_test_test.py::BaseTestTest::test_explicit_pass", "tests/mobly/base_test_test.py::BaseTestTest::test_explicit_pass_but_teardown_test_raises_an_exception", "tests/mobly/base_test_test.py::BaseTestTest::test_fail", "tests/mobly/base_test_test.py::BaseTestTest::test_failure_in_procedure_functions_is_recorded", "tests/mobly/base_test_test.py::BaseTestTest::test_failure_to_call_procedure_function_is_recorded", "tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_call_outside_of_setup_generated_tests", "tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_dup_test_name", "tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_run", "tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_selected_run", "tests/mobly/base_test_test.py::BaseTestTest::test_implicit_pass", "tests/mobly/base_test_test.py::BaseTestTest::test_missing_requested_test_func", 
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_cannot_modify_original_record", "tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_both_test_and_teardown_test_fails", "tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_teardown_test_fails", "tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_test_fails", "tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_test_setup_fails_by_exception", "tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_raise_exception", "tests/mobly/base_test_test.py::BaseTestTest::test_on_pass_cannot_modify_original_record", "tests/mobly/base_test_test.py::BaseTestTest::test_on_pass_raise_exception", "tests/mobly/base_test_test.py::BaseTestTest::test_procedure_function_gets_correct_record", "tests/mobly/base_test_test.py::BaseTestTest::test_promote_extra_errors_to_termination_signal", "tests/mobly/base_test_test.py::BaseTestTest::test_self_tests_list", "tests/mobly/base_test_test.py::BaseTestTest::test_self_tests_list_fail_by_convention", "tests/mobly/base_test_test.py::BaseTestTest::test_setup_and_teardown_execution_count", "tests/mobly/base_test_test.py::BaseTestTest::test_setup_class_fail_by_exception", "tests/mobly/base_test_test.py::BaseTestTest::test_setup_test_fail_by_exception", "tests/mobly/base_test_test.py::BaseTestTest::test_setup_test_fail_by_test_signal", "tests/mobly/base_test_test.py::BaseTestTest::test_skip", "tests/mobly/base_test_test.py::BaseTestTest::test_skip_if", "tests/mobly/base_test_test.py::BaseTestTest::test_skip_in_setup_test", "tests/mobly/base_test_test.py::BaseTestTest::test_teardown_class_fail_by_exception", "tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_assert_fail", "tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_setup_test_fails", "tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_test_fails", 
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_test_pass", "tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_raise_exception", "tests/mobly/base_test_test.py::BaseTestTest::test_uncaught_exception", "tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_basic", "tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_None", "tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite", "tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite_by_optional_param_list", "tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite_by_required_param_list", "tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional", "tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional_missing", "tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional_with_default", "tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_required", "tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_required_missing", "tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_adb_cmd", "tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_adb_cmd_with_shell_true", "tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_error_no_timeout", "tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_no_timeout_success", "tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_timed_out", "tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_with_negative_timeout_value", "tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_with_timeout_success", "tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_instrument_with_options", 
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_instrument_with_runner", "tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_instrument_without_parameters", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_build_info", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_cat_adb_log", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_debug_tag", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_instantiation", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_dup_attribute_name", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_dup_package", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_dup_snippet_name", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_snippet_cleanup", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_bug_report", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_bug_report_fail", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_bug_report_fallback", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_bug_report_with_destination", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_logcat", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_logcat_with_user_param", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_dict_list", 
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_empty_config", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_no_valid_config", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_not_list_config", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_pickup_all", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_string_list", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_usb_id", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_device_no_match", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_device_success_with_serial", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_device_success_with_serial_and_extra_field", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_device_too_many_matches", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_devices_no_match", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_devices_success_with_extra_field", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_start_services_on_ads" ]
[]
[]
[]
Apache License 2.0
1,777
[ "docs/conf.py", "mobly/asserts.py", "mobly/controllers/android_device.py", "mobly/controllers/android_device_lib/adb.py", "mobly/expects.py", "mobly/records.py", "README.md", "docs/instrumentation_tutorial.md" ]
[ "docs/conf.py", "mobly/asserts.py", "mobly/controllers/android_device.py", "mobly/controllers/android_device_lib/adb.py", "mobly/expects.py", "mobly/records.py", "README.md", "docs/instrumentation_tutorial.md" ]
stfc__fparser-47
108c0f201abbabbde3ceb9fcc3ff7ffa4b0c3dbd
2017-10-18 12:31:33
d2feb470be5b707937c384b074f92762df0a2481
coveralls: [![Coverage Status](https://coveralls.io/builds/13772596/badge)](https://coveralls.io/builds/13772596) Coverage increased (+0.04%) to 82.144% when pulling **39d093df3ba0182d045aea2f651a512d9e8f59aa on open_wo_named_unit** into **3cc5c52f00654edb58b6a7f8bfba6e7225818045 on master**. arporter: I've just tested this branch with the ORCA2_LIM configuration of NEMO. Modulo the problem with INCLUDE statements (they get removed), fparser2 successfully parses and re-generates the whole code base which I've then compiled (with the Intel compiler). arporter: In trying to get test coverage for the missed lines highlighted in the review I discovered that the parser failed if a keyword is misspelt (e.g "aunit=23" instead of "unit=23"). I've fixed that bug and obtained coverage of the modified lines. It's probable that this bug will exist in other places in the code but I haven't checked. This possibly points to the need to have a KeywordValueList base class that would cut down on code duplication.
diff --git a/src/fparser/Fortran2003.py b/src/fparser/Fortran2003.py index 429ca80..20f3dca 100644 --- a/src/fparser/Fortran2003.py +++ b/src/fparser/Fortran2003.py @@ -161,8 +161,16 @@ class Base(ComparableMixin): subclasses = {} @show_result - def __new__(cls, string, parent_cls = None): + def __new__(cls, string, parent_cls=None): """ + Create a new instance of this object. + + :param cls: the class of object to create + :type cls: :py:type:`type` + :param string: (source of) Fortran string to parse + :type string: str or :py:class:`FortranReaderBase` + :param parent_cls: the parent class of this object + :type parent_cls: :py:type:`type` """ if parent_cls is None: parent_cls = [cls] @@ -218,12 +226,9 @@ class Base(ComparableMixin): obj = None if obj is not None: return obj - else: raise AssertionError(repr(result)) errmsg = '%s: %r' % (cls.__name__, string) - #if isinstance(string, FortranReaderBase) and string.fifo_item: - # errmsg += ' while reaching %s' % (string.fifo_item[-1]) raise NoMatchError(errmsg) ## def restore_reader(self): @@ -5379,6 +5384,7 @@ class Internal_File_Variable(Base): # R903 """ subclass_names = ['Char_Variable'] + class Open_Stmt(StmtBase, CALLBase): # R904 """ <open-stmt> = OPEN ( <connect-spec-list> ) @@ -5387,10 +5393,15 @@ class Open_Stmt(StmtBase, CALLBase): # R904 use_names = ['Connect_Spec_List'] @staticmethod def match(string): - return CALLBase.match('OPEN', Connect_Spec_List, string, require_rhs=True) + # The Connect_Spec_List class is generated automatically + # by code at the end of this module + return CALLBase.match('OPEN', Connect_Spec_List, string, + require_rhs=True) -class Connect_Spec(KeywordValueBase): # R905 + +class Connect_Spec(KeywordValueBase): """ + R905 <connect-spec> = [ UNIT = ] <file-unit-number> | ACCESS = <scalar-default-char-expr> | ACTION = <scalar-default-char-expr> @@ -5412,26 +5423,40 @@ class Connect_Spec(KeywordValueBase): # R905 | STATUS = <scalar-default-char-expr> """ subclass_names = [] - 
use_names = ['File_Unit_Number', 'Scalar_Default_Char_Expr', 'Label', 'File_Name_Expr', 'Iomsg_Variable', + use_names = ['File_Unit_Number', 'Scalar_Default_Char_Expr', 'Label', + 'File_Name_Expr', 'Iomsg_Variable', 'Scalar_Int_Expr', 'Scalar_Int_Variable'] + + @staticmethod def match(string): - for (k,v) in [\ - (['ACCESS','ACTION','ASYNCHRONOUS','BLANK','DECIMAL','DELIM','ENCODING', - 'FORM','PAD','POSITION','ROUND','SIGN','STATUS'], Scalar_Default_Char_Expr), - ('ERR', Label), - ('FILE',File_Name_Expr), - ('IOSTAT', Scalar_Int_Variable), - ('IOMSG', Iomsg_Variable), - ('RECL', Scalar_Int_Expr), - ('UNIT', File_Unit_Number), - ]: + ''' + :param str string: Fortran code to check for a match + :return: 2-tuple containing the keyword and value or None if the + supplied string is not a match + :rtype: 2-tuple containing keyword (e.g. "UNIT") and associated value + ''' + if "=" not in string: + # The only argument which need not be named is the unit number + return 'UNIT', File_Unit_Number(string) + # We have a keyword-value pair. Check whether it is valid... 
+ for (keyword, value) in [ + (['ACCESS', 'ACTION', 'ASYNCHRONOUS', 'BLANK', 'DECIMAL', + 'DELIM', 'ENCODING', 'FORM', 'PAD', 'POSITION', 'ROUND', + 'SIGN', 'STATUS'], Scalar_Default_Char_Expr), + ('ERR', Label), + ('FILE', File_Name_Expr), + ('IOSTAT', Scalar_Int_Variable), + ('IOMSG', Iomsg_Variable), + ('RECL', Scalar_Int_Expr), + ('UNIT', File_Unit_Number)]: try: - obj = KeywordValueBase.match(k, v, string, upper_lhs = True) + obj = KeywordValueBase.match(keyword, value, string, + upper_lhs=True) except NoMatchError: obj = None - if obj is not None: return obj - return 'UNIT', File_Unit_Number - match = staticmethod(match) + if obj is not None: + return obj + return None class File_Name_Expr(Base): # R906 @@ -6027,7 +6052,7 @@ items : (Inquire_Spec_List, Scalar_Int_Variable, Output_Item_List) class Inquire_Spec(KeywordValueBase): # R930 """ -:F03R:`930`:: + :F03R:`930`:: <inquire-spec> = [ UNIT = ] <file-unit-number> | FILE = <file-name-expr> | ACCESS = <scalar-default-char-variable> @@ -6065,9 +6090,9 @@ class Inquire_Spec(KeywordValueBase): # R930 | UNFORMATTED = <scalar-default-char-variable> | WRITE = <scalar-default-char-variable> -Attributes ----------- -items : (str, instance) + Attributes + ---------- + items : (str, instance) """ subclass_names = [] use_names = ['File_Unit_Number', 'File_Name_Expr', @@ -6077,6 +6102,18 @@ items : (str, instance) @staticmethod def match(string): + ''' + :param str string: The string to check for conformance with an + Inquire_Spec + :return: 2-tuple of name (e.g. "UNIT") and value or None if + string is not a valid Inquire_Spec + :rtype: 2-tuple where first object represents the name and the + second the value. + ''' + if "=" not in string: + # The only argument which need not be named is the unit number + return 'UNIT', File_Unit_Number(string) + # We have a keyword-value pair. Check whether it is valid... 
for (keyword, value) in [ (['ACCESS', 'ACTION', 'ASYNCHRONOUS', 'BLANK', 'DECIMAL', 'DELIM', 'DIRECT', 'ENCODING', 'FORM', 'NAME', 'PAD', @@ -6092,11 +6129,14 @@ items : (str, instance) ('IOMSG', Iomsg_Variable), ('FILE', File_Name_Expr), ('UNIT', File_Unit_Number)]: - obj = KeywordValueBase.match(keyword, value, string, - upper_lhs=True) + try: + obj = KeywordValueBase.match(keyword, value, string, + upper_lhs=True) + except NoMatchError: + obj = None if obj is not None: return obj - return 'UNIT', File_Unit_Number(string) + return None ############################################################################### ############################### SECTION 10 #################################### @@ -7561,14 +7601,16 @@ ClassType = type(Base) _names = dir() for clsname in _names: cls = eval(clsname) - if not (isinstance(cls, ClassType) and issubclass(cls, Base) and not cls.__name__.endswith('Base')): continue + if not (isinstance(cls, ClassType) and issubclass(cls, Base) and + not cls.__name__.endswith('Base')): + continue names = getattr(cls, 'subclass_names', []) + getattr(cls, 'use_names', []) for n in names: if n in _names: continue if n.endswith('_List'): _names.append(n) n = n[:-5] - #print 'Generating %s_List' % (n) + # Generate 'list' class exec('''\ class %s_List(SequenceBase): subclass_names = [\'%s\']
fparser2: generate correct OPEN call when unit number argument is not named When parsing the following OPEN call: OPEN( idrst, FILE = TRIM(cdname), FORM = 'unformatted', ACCESS = 'direct' & & , RECL = 8, STATUS = 'old', ACTION = 'read', IOSTAT = ios, ERR = 987 ) fparser2 generates: OPEN(UNIT = <class 'fparser.Fortran2003.File_Unit_Number'>, FILE = TRIM(cdname), FORM = 'unformatted', ACCESS = 'direct', RECL = 8, STATUS = 'old', ACTION = 'read', IOSTAT = ios, ERR = 987) i.e. the fact that the unit number argument isn't named in the original call appears to cause problems.
stfc/fparser
diff --git a/src/fparser/tests/fparser2/test_Fortran2003.py b/src/fparser/tests/fparser2/test_Fortran2003.py index 9b33289..e373b81 100644 --- a/src/fparser/tests/fparser2/test_Fortran2003.py +++ b/src/fparser/tests/fparser2/test_Fortran2003.py @@ -2811,7 +2811,7 @@ def test_Inquire_Stmt(): # R929 def test_Inquire_Spec(): # R930 ''' Test that we recognise the various possible forms of - inquire list ''' + entries in an inquire list ''' cls = Inquire_Spec obj = cls('1') assert isinstance(obj, cls), repr(obj) @@ -2837,6 +2837,128 @@ def test_Inquire_Spec(): # R930 assert_equal(str(obj), 'DIRECT = a') +def test_Inquire_Spec_List(): # pylint: disable=invalid-name + ''' Test that we recognise the various possible forms of + inquire list - R930 + ''' + # Inquire_Spec_List is generated at runtime in Fortran2003.py + cls = Inquire_Spec_List + + obj = cls('unit=23, file="a_file.dat"') + assert isinstance(obj, cls) + assert str(obj) == 'UNIT = 23, FILE = "a_file.dat"' + + # Invalid list (afile= instead of file=) + with pytest.raises(NoMatchError) as excinfo: + _ = cls('unit=23, afile="a_file.dat"') + assert "NoMatchError: Inquire_Spec_List: 'unit=23, afile=" in str(excinfo) + + +def test_Open_Stmt(): + ''' Check that we correctly parse and re-generate the various forms + of OPEN statement (R904)''' + cls = Open_Stmt + obj = cls("open(23, file='some_file.txt')") + assert isinstance(obj, cls) + assert str(obj) == "OPEN(UNIT = 23, FILE = 'some_file.txt')" + obj = cls("open(unit=23, file='some_file.txt')") + assert isinstance(obj, cls) + assert str(obj) == "OPEN(UNIT = 23, FILE = 'some_file.txt')" + + +def test_Connect_Spec(): + ''' Tests for individual elements of Connect_Spec (R905) ''' + cls = Connect_Spec + # Incorrect name for a member of the list + with pytest.raises(NoMatchError) as excinfo: + _ = cls("afile='a_file.dat'") + assert 'NoMatchError: Connect_Spec: "afile=' in str(excinfo) + + +def test_Connect_Spec_List(): # pylint: disable=invalid-name + ''' + Check that we 
correctly parse the various valid forms of + connect specification (R905) + ''' + cls = Connect_Spec_List + obj = cls("22, access='direct'") + assert isinstance(obj, cls) + assert str(obj) == "UNIT = 22, ACCESS = 'direct'" + + obj = cls("22, action='read'") + assert isinstance(obj, cls) + assert str(obj) == "UNIT = 22, ACTION = 'read'" + + obj = cls("22, asynchronous='YES'") + assert isinstance(obj, cls) + assert str(obj) == "UNIT = 22, ASYNCHRONOUS = 'YES'" + + obj = cls("22, blank='NULL'") + assert isinstance(obj, cls) + assert str(obj) == "UNIT = 22, BLANK = 'NULL'" + + obj = cls("22, decimal='COMMA'") + assert isinstance(obj, cls) + assert str(obj) == "UNIT = 22, DECIMAL = 'COMMA'" + + obj = cls("22, delim='APOSTROPHE'") + assert isinstance(obj, cls) + assert str(obj) == "UNIT = 22, DELIM = 'APOSTROPHE'" + + obj = cls("22, err=109") + assert isinstance(obj, cls) + assert str(obj) == "UNIT = 22, ERR = 109" + + obj = cls("22, encoding='DEFAULT'") + assert isinstance(obj, cls) + assert str(obj) == "UNIT = 22, ENCODING = 'DEFAULT'" + + obj = cls("22, file='a_file.dat'") + assert isinstance(obj, cls) + assert str(obj) == "UNIT = 22, FILE = 'a_file.dat'" + + obj = cls("22, file='a_file.dat', form='FORMATTED'") + assert isinstance(obj, cls) + assert str(obj) == "UNIT = 22, FILE = 'a_file.dat', FORM = 'FORMATTED'" + + obj = cls("22, file='a_file.dat', iomsg=my_string") + assert isinstance(obj, cls) + assert str(obj) == "UNIT = 22, FILE = 'a_file.dat', IOMSG = my_string" + + obj = cls("22, file='a_file.dat', iostat=ierr") + assert isinstance(obj, cls) + assert str(obj) == "UNIT = 22, FILE = 'a_file.dat', IOSTAT = ierr" + + obj = cls("22, file='a_file.dat', pad='YES'") + assert isinstance(obj, cls) + assert str(obj) == "UNIT = 22, FILE = 'a_file.dat', PAD = 'YES'" + + obj = cls("22, file='a_file.dat', position='APPEND'") + assert isinstance(obj, cls) + assert str(obj) == "UNIT = 22, FILE = 'a_file.dat', POSITION = 'APPEND'" + + obj = cls("22, file='a_file.dat', 
recl=100") + assert isinstance(obj, cls) + assert str(obj) == "UNIT = 22, FILE = 'a_file.dat', RECL = 100" + + obj = cls("22, file='a_file.dat', round='UP'") + assert isinstance(obj, cls) + assert str(obj) == "UNIT = 22, FILE = 'a_file.dat', ROUND = 'UP'" + + obj = cls("22, file='a_file.dat', sign='PLUS'") + assert isinstance(obj, cls) + assert str(obj) == "UNIT = 22, FILE = 'a_file.dat', SIGN = 'PLUS'" + + obj = cls("22, file='a_file.dat', sign='PLUS', status='OLD'") + assert isinstance(obj, cls) + assert str(obj) == ("UNIT = 22, FILE = 'a_file.dat', SIGN = 'PLUS', " + "STATUS = 'OLD'") + + # Incorrect name for a member of the list + with pytest.raises(NoMatchError) as excinfo: + _ = cls("unit=22, afile='a_file.dat', sign='PLUS', status='OLD'") + assert 'NoMatchError: Connect_Spec_List: "unit=22, afile=' in str(excinfo) + ############################################################################### ############################### SECTION 10 #################################### @@ -3664,42 +3786,43 @@ def test_Contains(): # R1237 if 0: - nof_needed_tests = 0 - nof_needed_match = 0 - total_needs = 0 - total_classes = 0 - for name in dir(): - obj = eval(name) - if not isinstance(obj, ClassType): continue - if not issubclass(obj, Base): continue - clsname = obj.__name__ - if clsname.endswith('Base'): continue - total_classes += 1 - subclass_names = obj.__dict__.get('subclass_names',None) - use_names = obj.__dict__.get('use_names',None) - if not use_names: continue - match = obj.__dict__.get('match',None) + NOF_NEEDED_TESTS = 0 + NOF_NEEDED_MATCH = 0 + TOTAL_NEEDS = 0 + TOTAL_CLASSES = 0 + for NAME in dir(): + OBJ = eval(NAME) + if not isinstance(OBJ, ClassType): continue + if not issubclass(OBJ, Base): continue + CLSNAME = OBJ.__name__ + if CLSNAME.endswith('Base'): continue + TOTAL_CLASSES += 1 + SUBCLASS_NAMES = OBJ.__dict__.get('subclass_names', None) + USE_NAMES = OBJ.__dict__.get('use_names', None) + if not USE_NAMES: continue + MATCH = 
OBJ.__dict__.get('match', None) try: - test_cls = eval('test_%s' % (clsname)) + TEST_CLS = eval('test_{0}'.format(CLSNAME)) except NameError: - test_cls = None - total_needs += 1 - if match is None: - if test_cls is None: - print('Needs tests:', clsname) - print('Needs match implementation:', clsname) - nof_needed_tests += 1 - nof_needed_match += 1 + TEST_CLS = None + TOTAL_NEEDS += 1 + if MATCH is None: + if TEST_CLS is None: + print('Needs tests:', CLSNAME) + print('Needs match implementation:', CLSNAME) + NOF_NEEDED_TESTS += 1 + NOF_NEEDED_MATCH += 1 else: - print('Needs match implementation:', clsname) - nof_needed_match += 1 + print('Needs match implementation:', CLSNAME) + NOF_NEEDED_MATCH += 1 else: - if test_cls is None: - print('Needs tests:', clsname) - nof_needed_tests += 1 + if TEST_CLS is None: + print('Needs tests:', CLSNAME) + NOF_NEEDED_TESTS += 1 continue print('-----') - print('Nof match implementation needs:',nof_needed_match,'out of',total_needs) - print('Nof tests needs:',nof_needed_tests,'out of',total_needs) - print('Total number of classes:',total_classes) + print('Nof match implementation needs:', NOF_NEEDED_MATCH, + 'out of', TOTAL_NEEDS) + print('Nof tests needs:', NOF_NEEDED_TESTS, 'out of', TOTAL_NEEDS) + print('Total number of classes:', TOTAL_CLASSES) print('-----')
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 3 }, "num_modified_files": 1 }
0.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "numpy>=1.16.0", "pandas>=1.0.0", "six", "pytest-asyncio" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
exceptiongroup==1.2.2 -e git+https://github.com/stfc/fparser.git@108c0f201abbabbde3ceb9fcc3ff7ffa4b0c3dbd#egg=fparser iniconfig==2.1.0 nose==1.3.7 numpy==2.0.2 packaging==24.2 pandas==2.2.3 pluggy==1.5.0 pytest==8.3.5 pytest-asyncio==0.26.0 python-dateutil==2.9.0.post0 pytz==2025.2 six==1.17.0 tomli==2.2.1 typing_extensions==4.13.0 tzdata==2025.2
name: fparser channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - exceptiongroup==1.2.2 - iniconfig==2.1.0 - nose==1.3.7 - numpy==2.0.2 - packaging==24.2 - pandas==2.2.3 - pluggy==1.5.0 - pytest==8.3.5 - pytest-asyncio==0.26.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - six==1.17.0 - tomli==2.2.1 - typing-extensions==4.13.0 - tzdata==2025.2 prefix: /opt/conda/envs/fparser
[ "src/fparser/tests/fparser2/test_Fortran2003.py::test_Open_Stmt" ]
[ "src/fparser/tests/fparser2/test_Fortran2003.py::test_Inquire_Spec_List", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Connect_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Connect_Spec_List", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Edit_Desc" ]
[ "src/fparser/tests/fparser2/test_Fortran2003.py::test_Program", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Specification_Part", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Name", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Literal_Constant", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Type_Param_Value", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Intrinsic_Type_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Kind_Selector", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Signed_Int_Literal_Constant", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Int_Literal_Constant", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Binary_Constant", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Octal_Constant", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Hex_Constant", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Signed_Real_Literal_Constant", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Real_Literal_Constant", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Char_Selector", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Complex_Literal_Constant", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Type_Name", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Length_Selector", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Char_Length", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Char_Literal_Constant", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Logical_Literal_Constant", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Derived_Type_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Type_Attr_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_End_Type_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Sequence_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Type_Param_Def_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Type_Param_Decl", 
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Type_Param_Attr_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Component_Attr_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Component_Decl", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Proc_Component_Def_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Type_Bound_Procedure_Part", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Proc_Binding_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Specific_Binding", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Generic_Binding", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Final_Binding", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Derived_Type_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Type_Param_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Type_Param_Spec_List", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Structure_Constructor_2", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Structure_Constructor", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Component_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Component_Spec_List", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Enum_Def", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Enum_Def_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Array_Constructor", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Ac_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Ac_Value_List", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Ac_Implied_Do", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Ac_Implied_Do_Control", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Type_Declaration_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Declaration_Type_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Attr_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Dimension_Attr_Spec", 
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Intent_Attr_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Entity_Decl", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Target_Entity_Decl", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Access_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Language_Binding_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Explicit_Shape_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Upper_Bound", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Assumed_Shape_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Deferred_Shape_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Assumed_Size_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Access_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Data_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Data_Stmt_Set", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Data_Implied_Do", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Dimension_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Intent_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Optional_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Parameter_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Named_Constant_Def", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Pointer_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Pointer_Decl", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Protected_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Save_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Saved_Entity", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Target_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Value_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Volatile_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Implicit_Stmt", 
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Implicit_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Letter_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Namelist_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Equivalence_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Common_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Common_Block_Object", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Substring", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Substring_Range", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Data_Ref", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Part_Ref", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Type_Param_Inquiry", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Array_Section", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Section_Subscript", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Section_Subscript_List", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Subscript_Triplet", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Allocate_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Alloc_Opt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Nullify_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Deallocate_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Primary", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Parenthesis", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Level_1_Expr", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Mult_Operand", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Add_Operand", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Level_2_Expr", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Level_2_Unary_Expr", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Level_3_Expr", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Level_4_Expr", 
"src/fparser/tests/fparser2/test_Fortran2003.py::test_And_Operand", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Or_Operand", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Equiv_Operand", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Level_5_Expr", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Expr", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Logical_Expr", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Logical_Initialization_Expr", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Assignment_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Pointer_Assignment_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Proc_Component_Ref", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Where_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Where_Construct", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Where_Construct_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Forall_Construct", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Forall_Header", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Forall_Triplet_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_If_Construct", "src/fparser/tests/fparser2/test_Fortran2003.py::test_if_nonblock_do", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Case_Construct", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Case_Selector", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Associate_Construct", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Select_Type_Construct", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Select_Type_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Type_Guard_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Block_Label_Do_Construct", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Block_Nonlabel_Do_Construct", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Label_Do_Stmt", 
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Nonblock_Do_Construct", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Continue_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Stop_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Io_Unit", "src/fparser/tests/fparser2/test_Fortran2003.py::test_read_stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_write_stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Print_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Io_Control_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Io_Control_Spec_List", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Format", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Io_Implied_Do", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Io_Implied_Do_Control", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Wait_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Wait_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Backspace_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Endfile_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Rewind_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Position_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Flush_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Flush_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Inquire_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Inquire_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Format_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Format_Specification", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Format_Item", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Format_Item_List", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Main_Program", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Module", 
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Module_Subprogram_Part", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Use_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Module_Nature", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Rename", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Block_Data", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Interface_Block", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Interface_Specification", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Interface_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_End_Interface_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Interface_Body", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Subroutine_Body", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Function_Body", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Procedure_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Generic_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Dtio_Generic_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Import_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_External_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Procedure_Declaration_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Proc_Attr_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Proc_Decl", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Intrinsic_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Function_Reference", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Call_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Procedure_Designator", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Actual_Arg_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Actual_Arg_Spec_List", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Alt_Return_Spec", 
"src/fparser/tests/fparser2/test_Fortran2003.py::test_Function_Subprogram", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Function_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Dummy_Arg_Name", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Prefix", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Prefix_Spec", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Suffix", "src/fparser/tests/fparser2/test_Fortran2003.py::test_End_Function_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Subroutine_Subprogram", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Subroutine_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Dummy_Arg", "src/fparser/tests/fparser2/test_Fortran2003.py::test_End_Subroutine_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Entry_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Return_Stmt", "src/fparser/tests/fparser2/test_Fortran2003.py::test_Contains" ]
[]
BSD License
1,778
[ "src/fparser/Fortran2003.py" ]
[ "src/fparser/Fortran2003.py" ]
jupyter__nbgrader-895
fc13b045cc085bb2a5355131b8e0f20dd7607884
2017-10-18 20:34:17
5bc6f37c39c8b10b8f60440b2e6d9487e63ef3f1
mpacer: Also, my tests pass on both py2 and py3, not sure why they would be failing on Appveyor. The errors also seem unreleated to what I touched. mpacer: So much code sharing led me to think this should be genericised. The method for defining the gradebook update method seemed a little hacky, but I couldn't think of any other way to do pass through the gradebook instance from within the context manager. jhamrick: Unfortunately it does look like replacing the traitlets with properties causes issues when building the docs 😞 mpacer: I can use traitlet's dynamic defaults instead :). mpacer: I actually don't think that name was ever a traitlet, or we wouldn't be able to run into this problem. mpacer: also… looking at the doc generation code, it needs to be a class attribute, not an instance attribute, so traitlets couldn't work. ```python name = cls.name.replace(" ", "-") ```
diff --git a/nbgrader/apps/dbapp.py b/nbgrader/apps/dbapp.py index 8bab3443..ee9a543c 100644 --- a/nbgrader/apps/dbapp.py +++ b/nbgrader/apps/dbapp.py @@ -6,11 +6,11 @@ import os import shutil from textwrap import dedent -from traitlets import default, Unicode, Bool +from traitlets import default, Unicode, Bool, List from datetime import datetime from . import NbGrader -from ..api import Gradebook, MissingEntry +from ..api import Gradebook, MissingEntry, Student, Assignment from .. import dbutil aliases = { @@ -116,16 +116,43 @@ class DbStudentRemoveApp(NbGrader): gb.remove_student(student_id) -class DbStudentImportApp(NbGrader): - - name = 'nbgrader-db-student-import' - description = 'Import students into the nbgrader database from a CSV file' +class DbGenericImportApp(NbGrader): aliases = aliases flags = flags + expected_keys = List(help="These are the keys expected by the database") + + def db_update_method_name(self): + """ + Name of the update method used on the Gradebook for this import app. + It is expected to have the signature: + * instance_id : string, identifies which instance you are updating based on self.primary_key + * instance : dictionary, contents for the update from the parsed csv rows; unpacked as kwargs + """ + raise NotImplementedError + + name = "" + description = "" + + + @property + def table_class(self): + raise NotImplementedError + + @property + def primary_key_default(self): + """ + The key for the instance_id passed to the get_db_update_method. 
+ """ + raise NotImplementedError + + @default("expected_keys") + def expected_keys_default(self): + return self.table_class.__table__.c.keys() + def start(self): - super(DbStudentImportApp, self).start() + super(DbGenericImportApp, self).start() if len(self.extra_args) != 1: self.fail("Path to CSV file not provided.") @@ -133,31 +160,69 @@ class DbStudentImportApp(NbGrader): path = self.extra_args[0] if not os.path.exists(path): self.fail("No such file: '%s'", path) - self.log.info("Importing students from: '%s'", path) + self.log.info("Importing from: '%s'", path) - allowed_keys = ["last_name", "first_name", "email", "id"] with Gradebook(self.coursedir.db_url) as gb: with open(path, 'r') as fh: reader = csv.DictReader(fh) + reader.fieldnames = self._preprocess_keys(reader.fieldnames) for row in reader: - if "id" not in row: - self.fail("Malformatted CSV file: must contain a column for 'id'") + if self.primary_key not in row: + self.fail("Malformatted CSV file: must contain a column for '%s'" % self.primary_key) # make sure all the keys are actually allowed in the database, # and that any empty strings are parsed as None - student = {} + instance = {} for key, val in row.items(): - if key not in allowed_keys: + if key not in self.expected_keys: continue if val == '': - student[key] = None + instance[key] = None else: - student[key] = val - student_id = student.pop("id") + instance[key] = val + instance_primary_key = instance.pop(self.primary_key) + + + self.log.info("Creating/updating %s with %s '%s': %s", + self.table_class.__name__, + self.primary_key, + instance_primary_key, + instance) + db_update_method = getattr(gb, self.db_update_method_name) + db_update_method(instance_primary_key, **instance) - self.log.info("Creating/updating student with ID '%s': %s", student_id, student) - gb.update_or_create_student(student_id, **student) + + def _preprocess_keys(self, keys): + """ + Helper function for preprocessing keys + """ + proposed_keys = [key.strip() for key 
in keys] + unknown_keys = [k for k in proposed_keys if k not in self.expected_keys] + if unknown_keys: + self.log.info("Unknown keys in csv: '%s'", + (', '.join(unknown_keys[:-1]) + + 'and ' + + unknown_keys[-1])) + return proposed_keys + + +class DbStudentImportApp(DbGenericImportApp): + + name = 'nbgrader-db-student-import' + description = 'Import students into the nbgrader database from a CSV file' + + @property + def table_class(self): + return Student + + @property + def primary_key(self): + return "id" + + @property + def db_update_method_name(self): + return "update_or_create_student" class DbStudentListApp(NbGrader): @@ -258,49 +323,22 @@ class DbAssignmentRemoveApp(NbGrader): gb.remove_assignment(assignment_id) -class DbAssignmentImportApp(NbGrader): +class DbAssignmentImportApp(DbGenericImportApp): name = 'nbgrader-db-assignment-import' description = 'Import assignments into the nbgrader database from a CSV file' - aliases = aliases - flags = flags - - def start(self): - super(DbAssignmentImportApp, self).start() - - if len(self.extra_args) != 1: - self.fail("Path to CSV file not provided.") - - path = self.extra_args[0] - if not os.path.exists(path): - self.fail("No such file: '%s'", path) - self.log.info("Importing assignments from: '%s'", path) - - allowed_keys = ["duedate", "name"] - - with Gradebook(self.coursedir.db_url) as gb: - with open(path, 'r') as fh: - reader = csv.DictReader(fh) - for row in reader: - if "name" not in row: - self.fail("Malformatted CSV file: must contain a column for 'name'") - - # make sure all the keys are actually allowed in the database, - # and that any empty strings are parsed as None - assignment = {} - for key, val in row.items(): - if key not in allowed_keys: - continue - if val == '': - assignment[key] = None - else: - assignment[key] = val - assignment_id = assignment.pop("name") + @property + def table_class(self): + return Assignment - self.log.info("Creating/updating assignment with name '%s': %s", 
assignment_id, assignment) - gb.update_or_create_assignment(assignment_id, **assignment) + @property + def primary_key(self): + return "name" + @property + def db_update_method_name(self): + return "update_or_create_assignment" class DbAssignmentListApp(NbGrader):
Not importing emails when Importing students from .csv ### Operating system `Ubuntu 16.04` ### `nbgrader --version` `nbgrader version 0.5.2` ### Expected behavior I'm creating the students database from a `csv` file. My csv files has: `id,last_name,first_name,email` so I expect when I do `nbgrader db student list` to have all the fields for each student. ### Actual behavior For some reason the field corresponding to email it's not being import. For example in the csv file I have: `id,last_name,first_name,email` `stud1, smith, blabla, [email protected]` and when creating the database I got: `Creating/updating student with ID 'stud1': {'last_name': 'smith', 'first_name': 'blabla'}` when listing students I see: `stud1 (smith, blabla) -- None` Any clue what is going on?
jupyter/nbgrader
diff --git a/nbgrader/tests/apps/test_nbgrader_db.py b/nbgrader/tests/apps/test_nbgrader_db.py index fd0803cb..5b7789da 100644 --- a/nbgrader/tests/apps/test_nbgrader_db.py +++ b/nbgrader/tests/apps/test_nbgrader_db.py @@ -176,6 +176,28 @@ class TestNbGraderDb(BaseTestApp): assert student.first_name is None assert student.email is None + + def test_student_import_csv_spaces(self, db, temp_cwd): + with open("students.csv", "w") as fh: + fh.write(dedent( + """ + id,first_name,last_name, email + foo,abc,xyz,[email protected] + bar,,, + """ + ).strip()) + + run_nbgrader(["db", "student", "import", "students.csv", "--db", db]) + with Gradebook(db) as gb: + student = gb.find_student("foo") + assert student.last_name == "xyz" + assert student.first_name == "abc" + assert student.email == "[email protected]" + student = gb.find_student("bar") + assert student.last_name is None + assert student.first_name is None + assert student.email is None + def test_assignment_add(self, db): run_nbgrader(["db", "assignment", "add", "foo", "--db", db]) with Gradebook(db) as gb: @@ -256,6 +278,24 @@ class TestNbGraderDb(BaseTestApp): assignment = gb.find_assignment("bar") assert assignment.duedate is None + + def test_assignment_import_csv_spaces(self, db, temp_cwd): + with open("assignments.csv", "w") as fh: + fh.write(dedent( + """ + name, duedate + foo,Sun Jan 8 2017 4:31:22 PM + bar, + """ + ).strip()) + + run_nbgrader(["db", "assignment", "import", "assignments.csv", "--db", db]) + with Gradebook(db) as gb: + assignment = gb.find_assignment("foo") + assert assignment.duedate == datetime.datetime(2017, 1, 8, 16, 31, 22) + assignment = gb.find_assignment("bar") + assert assignment.duedate is None + # check that it fails when no id column is given with open("assignments.csv", "w") as fh: fh.write(dedent( @@ -284,7 +324,6 @@ class TestNbGraderDb(BaseTestApp): assert assignment.duedate == datetime.datetime(2017, 1, 8, 16, 31, 22) assignment = gb.find_assignment("bar") assert 
assignment.duedate is None - def test_upgrade_nodb(self, temp_cwd): # test upgrading without a database run_nbgrader(["db", "upgrade"])
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 1 }, "num_modified_files": 1 }
0.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -r dev-requirements.txt -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-rerunfailures", "coverage", "selenium", "invoke", "sphinx", "codecov", "cov-core", "nbval" ], "pre_install": [ "pip install -U pip wheel setuptools" ], "python": "3.5", "reqs_path": [ "dev-requirements.txt", "dev-requirements-windows.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 alembic==1.7.7 anyio==3.6.2 argon2-cffi==21.3.0 argon2-cffi-bindings==21.2.0 async-generator==1.10 attrs==22.2.0 Babel==2.11.0 backcall==0.2.0 bleach==4.1.0 certifi==2021.5.30 cffi==1.15.1 charset-normalizer==2.0.12 codecov==2.1.13 comm==0.1.4 contextvars==2.4 cov-core==1.15.0 coverage==6.2 dataclasses==0.8 decorator==5.1.1 defusedxml==0.7.1 docutils==0.18.1 entrypoints==0.4 greenlet==2.0.2 idna==3.10 imagesize==1.4.1 immutables==0.19 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 invoke==2.2.0 ipykernel==5.5.6 ipython==7.16.3 ipython-genutils==0.2.0 ipywidgets==7.8.5 jedi==0.17.2 Jinja2==3.0.3 json5==0.9.16 jsonschema==3.2.0 jupyter==1.1.1 jupyter-client==7.1.2 jupyter-console==6.4.3 jupyter-core==4.9.2 jupyter-server==1.13.1 jupyterlab==3.2.9 jupyterlab-pygments==0.1.2 jupyterlab-server==2.10.3 jupyterlab_widgets==1.1.11 Mako==1.1.6 MarkupSafe==2.0.1 mistune==0.8.4 nbclassic==0.3.5 nbclient==0.5.9 nbconvert==6.0.7 nbformat==5.1.3 -e git+https://github.com/jupyter/nbgrader.git@fc13b045cc085bb2a5355131b8e0f20dd7607884#egg=nbgrader nbval==0.10.0 nest-asyncio==1.6.0 notebook==6.4.10 packaging==21.3 pandocfilters==1.5.1 parso==0.7.1 pexpect==4.9.0 pickleshare==0.7.5 pluggy==1.0.0 prometheus-client==0.17.1 prompt-toolkit==3.0.36 ptyprocess==0.7.0 py==1.11.0 pycparser==2.21 pyenchant==3.2.2 Pygments==2.14.0 pyparsing==3.1.4 pyrsistent==0.18.0 pytest==7.0.1 pytest-cov==4.0.0 pytest-rerunfailures==10.3 python-dateutil==2.9.0.post0 pytz==2025.2 pyzmq==25.1.2 requests==2.27.1 selenium==3.141.0 Send2Trash==1.8.3 six==1.17.0 sniffio==1.2.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinx-rtd-theme==2.0.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 sphinxcontrib-spelling==7.7.0 SQLAlchemy==1.4.54 terminado==0.12.1 testpath==0.6.0 tomli==1.2.3 tornado==6.1 traitlets==4.3.3 
typing_extensions==4.1.1 urllib3==1.26.20 wcwidth==0.2.13 webencodings==0.5.1 websocket-client==1.3.1 widgetsnbextension==3.6.10 zipp==3.6.0
name: nbgrader channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - alembic==1.7.7 - anyio==3.6.2 - argon2-cffi==21.3.0 - argon2-cffi-bindings==21.2.0 - async-generator==1.10 - attrs==22.2.0 - babel==2.11.0 - backcall==0.2.0 - bleach==4.1.0 - cffi==1.15.1 - charset-normalizer==2.0.12 - codecov==2.1.13 - comm==0.1.4 - contextvars==2.4 - cov-core==1.15.0 - coverage==6.2 - dataclasses==0.8 - decorator==5.1.1 - defusedxml==0.7.1 - docutils==0.18.1 - entrypoints==0.4 - greenlet==2.0.2 - idna==3.10 - imagesize==1.4.1 - immutables==0.19 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - invoke==2.2.0 - ipykernel==5.5.6 - ipython==7.16.3 - ipython-genutils==0.2.0 - ipywidgets==7.8.5 - jedi==0.17.2 - jinja2==3.0.3 - json5==0.9.16 - jsonschema==3.2.0 - jupyter==1.1.1 - jupyter-client==7.1.2 - jupyter-console==6.4.3 - jupyter-core==4.9.2 - jupyter-server==1.13.1 - jupyterlab==3.2.9 - jupyterlab-pygments==0.1.2 - jupyterlab-server==2.10.3 - jupyterlab-widgets==1.1.11 - mako==1.1.6 - markupsafe==2.0.1 - mistune==0.8.4 - nbclassic==0.3.5 - nbclient==0.5.9 - nbconvert==6.0.7 - nbformat==5.1.3 - nbval==0.10.0 - nest-asyncio==1.6.0 - notebook==6.4.10 - packaging==21.3 - pandocfilters==1.5.1 - parso==0.7.1 - pexpect==4.9.0 - pickleshare==0.7.5 - pip==21.3.1 - pluggy==1.0.0 - prometheus-client==0.17.1 - prompt-toolkit==3.0.36 - ptyprocess==0.7.0 - py==1.11.0 - 
pycparser==2.21 - pyenchant==3.2.2 - pygments==2.14.0 - pyparsing==3.1.4 - pyrsistent==0.18.0 - pytest==7.0.1 - pytest-cov==4.0.0 - pytest-rerunfailures==10.3 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyzmq==25.1.2 - requests==2.27.1 - selenium==3.141.0 - send2trash==1.8.3 - setuptools==59.6.0 - six==1.17.0 - sniffio==1.2.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinx-rtd-theme==2.0.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jquery==4.1 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - sphinxcontrib-spelling==7.7.0 - sqlalchemy==1.4.54 - terminado==0.12.1 - testpath==0.6.0 - tomli==1.2.3 - tornado==6.1 - traitlets==4.3.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - wcwidth==0.2.13 - webencodings==0.5.1 - websocket-client==1.3.1 - widgetsnbextension==3.6.10 - zipp==3.6.0 prefix: /opt/conda/envs/nbgrader
[ "nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_student_import_csv_spaces", "nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_assignment_import_csv_spaces" ]
[]
[ "nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_help", "nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_no_args", "nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_student_add", "nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_student_remove", "nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_student_remove_with_submissions", "nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_student_list", "nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_student_import", "nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_assignment_add", "nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_assignment_remove", "nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_assignment_remove_with_submissions", "nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_assignment_list", "nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_assignment_import", "nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_upgrade_nodb", "nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_upgrade_current_db", "nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_upgrade_old_db" ]
[]
BSD 3-Clause "New" or "Revised" License
1,779
[ "nbgrader/apps/dbapp.py" ]
[ "nbgrader/apps/dbapp.py" ]
oasis-open__cti-python-stix2-88
482135465bb9c7bb8cd1ec5eb600d3990b65e092
2017-10-18 22:39:51
ef6dade6f6773edd14aa16a2e4566e50bf74cbb4
diff --git a/stix2/base.py b/stix2/base.py index 5307393..b0cf6ff 100644 --- a/stix2/base.py +++ b/stix2/base.py @@ -40,7 +40,14 @@ class _STIXBase(collections.Mapping): """Base class for STIX object types""" def object_properties(self): - return list(self._properties.keys()) + props = set(self._properties.keys()) + custom_props = list(set(self._inner.keys()) - props) + custom_props.sort() + + all_properties = list(self._properties.keys()) + all_properties.extend(custom_props) # Any custom properties to the bottom + + return all_properties def _check_property(self, prop_name, prop, kwargs): if prop_name not in kwargs: diff --git a/stix2/core.py b/stix2/core.py index 8ee11f5..20bd187 100644 --- a/stix2/core.py +++ b/stix2/core.py @@ -7,9 +7,9 @@ from .base import _STIXBase from .common import MarkingDefinition from .properties import IDProperty, ListProperty, Property, TypeProperty from .sdo import (AttackPattern, Campaign, CourseOfAction, Identity, Indicator, - IntrusionSet, Malware, ObservedData, Report, ThreatActor, - Tool, Vulnerability) -from .sro import Relationship, Sighting + IntrusionSet, Malware, ObservedData, Report, + STIXDomainObject, ThreatActor, Tool, Vulnerability) +from .sro import Relationship, Sighting, STIXRelationshipObject from .utils import get_dict @@ -20,6 +20,11 @@ class STIXObjectProperty(Property): super(STIXObjectProperty, self).__init__() def clean(self, value): + # Any STIX Object (SDO, SRO, or Marking Definition) can be added to + # a bundle with no further checks. 
+ if isinstance(value, (STIXDomainObject, STIXRelationshipObject, + MarkingDefinition)): + return value try: dictified = get_dict(value) except ValueError: diff --git a/stix2/sources/__init__.py b/stix2/sources/__init__.py index 49cb3f3..1fe9391 100644 --- a/stix2/sources/__init__.py +++ b/stix2/sources/__init__.py @@ -44,36 +44,40 @@ class DataStore(object): self.source = source self.sink = sink - def get(self, stix_id): + def get(self, stix_id, allow_custom=False): """Retrieve the most recent version of a single STIX object by ID. Translate get() call to the appropriate DataSource call. Args: stix_id (str): the id of the STIX object to retrieve. + allow_custom (bool): whether to retrieve custom objects/properties + or not. Default: False. Returns: stix_obj: the single most recent version of the STIX object specified by the "id". """ - return self.source.get(stix_id) + return self.source.get(stix_id, allow_custom=allow_custom) - def all_versions(self, stix_id): + def all_versions(self, stix_id, allow_custom=False): """Retrieve all versions of a single STIX object by ID. Implement: Translate all_versions() call to the appropriate DataSource call Args: stix_id (str): the id of the STIX object to retrieve. + allow_custom (bool): whether to retrieve custom objects/properties + or not. Default: False. Returns: stix_objs (list): a list of STIX objects """ - return self.source.all_versions(stix_id) + return self.source.all_versions(stix_id, allow_custom=allow_custom) - def query(self, query): + def query(self, query=None, allow_custom=False): """Retrieve STIX objects matching a set of filters. Implement: Specific data source API calls, processing, @@ -82,6 +86,8 @@ class DataStore(object): Args: query (list): a list of filters (which collectively are the query) to conduct search on. + allow_custom (bool): whether to retrieve custom objects/properties + or not. Default: False. 
Returns: stix_objs (list): a list of STIX objects @@ -89,15 +95,17 @@ class DataStore(object): """ return self.source.query(query=query) - def add(self, stix_objs): + def add(self, stix_objs, allow_custom=False): """Store STIX objects. Translates add() to the appropriate DataSink call. Args: stix_objs (list): a list of STIX objects + allow_custom (bool): whether to allow custom objects/properties or + not. Default: False. """ - return self.sink.add(stix_objs) + return self.sink.add(stix_objs, allow_custom=allow_custom) class DataSink(object): @@ -111,7 +119,7 @@ class DataSink(object): def __init__(self): self.id = make_id() - def add(self, stix_objs): + def add(self, stix_objs, allow_custom=False): """Store STIX objects. Implement: Specific data sink API calls, processing, @@ -120,6 +128,8 @@ class DataSink(object): Args: stix_objs (list): a list of STIX objects (where each object is a STIX object) + allow_custom (bool): whether to allow custom objects/properties or + not. Default: False. """ raise NotImplementedError() @@ -139,7 +149,7 @@ class DataSource(object): self.id = make_id() self.filters = set() - def get(self, stix_id, _composite_filters=None): + def get(self, stix_id, _composite_filters=None, allow_custom=False): """ Implement: Specific data source API calls, processing, functionality required for retrieving data from the data source @@ -148,9 +158,10 @@ class DataSource(object): stix_id (str): the id of the STIX 2.0 object to retrieve. Should return a single object, the most recent version of the object specified by the "id". - _composite_filters (set): set of filters passed from the parent the CompositeDataSource, not user supplied + allow_custom (bool): whether to retrieve custom objects/properties + or not. Default: False. 
Returns: stix_obj: the STIX object @@ -158,7 +169,7 @@ class DataSource(object): """ raise NotImplementedError() - def all_versions(self, stix_id, _composite_filters=None): + def all_versions(self, stix_id, _composite_filters=None, allow_custom=False): """ Implement: Similar to get() except returns list of all object versions of the specified "id". In addition, implement the specific data @@ -169,9 +180,10 @@ class DataSource(object): stix_id (str): The id of the STIX 2.0 object to retrieve. Should return a list of objects, all the versions of the object specified by the "id". - _composite_filters (set): set of filters passed from the parent CompositeDataSource, not user supplied + allow_custom (bool): whether to retrieve custom objects/properties + or not. Default: False. Returns: stix_objs (list): a list of STIX objects @@ -179,7 +191,7 @@ class DataSource(object): """ raise NotImplementedError() - def query(self, query, _composite_filters=None): + def query(self, query=None, _composite_filters=None, allow_custom=False): """ Implement:Implement the specific data source API calls, processing, functionality required for retrieving query from the data source @@ -187,9 +199,10 @@ class DataSource(object): Args: query (list): a list of filters (which collectively are the query) to conduct search on - _composite_filters (set): a set of filters passed from the parent CompositeDataSource, not user supplied + allow_custom (bool): whether to retrieve custom objects/properties + or not. Default: False. 
Returns: stix_objs (list): a list of STIX objects @@ -224,7 +237,7 @@ class CompositeDataSource(DataSource): super(CompositeDataSource, self).__init__() self.data_sources = [] - def get(self, stix_id, _composite_filters=None): + def get(self, stix_id, _composite_filters=None, allow_custom=False): """Retrieve STIX object by STIX ID Federated retrieve method, iterates through all DataSources @@ -238,10 +251,11 @@ class CompositeDataSource(DataSource): Args: stix_id (str): the id of the STIX object to retrieve. - _composite_filters (list): a list of filters passed from a CompositeDataSource (i.e. if this CompositeDataSource is attached to another parent CompositeDataSource), not user supplied + allow_custom (bool): whether to retrieve custom objects/properties + or not. Default: False. Returns: stix_obj: the STIX object to be returned. @@ -259,7 +273,7 @@ class CompositeDataSource(DataSource): # for every configured Data Source, call its retrieve handler for ds in self.data_sources: - data = ds.get(stix_id=stix_id, _composite_filters=all_filters) + data = ds.get(stix_id=stix_id, _composite_filters=all_filters, allow_custom=allow_custom) if data: all_data.append(data) @@ -274,7 +288,7 @@ class CompositeDataSource(DataSource): return stix_obj - def all_versions(self, stix_id, _composite_filters=None): + def all_versions(self, stix_id, _composite_filters=None, allow_custom=False): """Retrieve STIX objects by STIX ID Federated all_versions retrieve method - iterates through all DataSources @@ -285,10 +299,11 @@ class CompositeDataSource(DataSource): Args: stix_id (str): id of the STIX objects to retrieve - _composite_filters (list): a list of filters passed from a CompositeDataSource (i.e. if this CompositeDataSource is attached to a parent CompositeDataSource), not user supplied + allow_custom (bool): whether to retrieve custom objects/properties + or not. Default: False. 
Returns: all_data (list): list of STIX objects that have the specified id @@ -307,7 +322,7 @@ class CompositeDataSource(DataSource): # retrieve STIX objects from all configured data sources for ds in self.data_sources: - data = ds.all_versions(stix_id=stix_id, _composite_filters=all_filters) + data = ds.all_versions(stix_id=stix_id, _composite_filters=all_filters, allow_custom=allow_custom) all_data.extend(data) # remove exact duplicates (where duplicates are STIX 2.0 objects @@ -317,7 +332,7 @@ class CompositeDataSource(DataSource): return all_data - def query(self, query=None, _composite_filters=None): + def query(self, query=None, _composite_filters=None, allow_custom=False): """Retrieve STIX objects that match query Federate the query to all DataSources attached to the @@ -325,10 +340,11 @@ class CompositeDataSource(DataSource): Args: query (list): list of filters to search on - _composite_filters (list): a list of filters passed from a CompositeDataSource (i.e. if this CompositeDataSource is attached to a parent CompositeDataSource), not user supplied + allow_custom (bool): whether to retrieve custom objects/properties + or not. Default: False. 
Returns: all_data (list): list of STIX objects to be returned @@ -353,7 +369,7 @@ class CompositeDataSource(DataSource): # federate query to all attached data sources, # pass composite filters to id for ds in self.data_sources: - data = ds.query(query=query, _composite_filters=all_filters) + data = ds.query(query=query, _composite_filters=all_filters, allow_custom=allow_custom) all_data.extend(data) # remove exact duplicates (where duplicates are STIX 2.0 diff --git a/stix2/sources/filesystem.py b/stix2/sources/filesystem.py index 103b882..34dbcf0 100644 --- a/stix2/sources/filesystem.py +++ b/stix2/sources/filesystem.py @@ -8,51 +8,54 @@ TODO: import json import os -from stix2.base import _STIXBase +from stix2.common import MarkingDefinition from stix2.core import Bundle, parse +from stix2.sdo import STIXDomainObject from stix2.sources import DataSink, DataSource, DataStore from stix2.sources.filters import Filter, apply_common_filters +from stix2.sro import STIXRelationshipObject from stix2.utils import deduplicate class FileSystemStore(DataStore): - """FileSystemStore + """Interface to a file directory of STIX objects. - Provides an interface to an file directory of STIX objects. FileSystemStore is a wrapper around a paired FileSystemSink and FileSystemSource. Args: stix_dir (str): path to directory of STIX objects + bundlify (bool): Whether to wrap objects in bundles when saving them. + Default: False. Attributes: source (FileSystemSource): FuleSystemSource - sink (FileSystemSink): FileSystemSink """ - def __init__(self, stix_dir): + def __init__(self, stix_dir, bundlify=False): super(FileSystemStore, self).__init__() self.source = FileSystemSource(stix_dir=stix_dir) - self.sink = FileSystemSink(stix_dir=stix_dir) + self.sink = FileSystemSink(stix_dir=stix_dir, bundlify=bundlify) class FileSystemSink(DataSink): - """FileSystemSink - - Provides an interface for adding/pushing STIX objects - to file directory of STIX objects. 
+ """Interface for adding/pushing STIX objects to file directory of STIX + objects. Can be paired with a FileSystemSource, together as the two components of a FileSystemStore. Args: - stix_dir (str): path to directory of STIX objects + stix_dir (str): path to directory of STIX objects. + bundlify (bool): Whether to wrap objects in bundles when saving them. + Default: False. """ - def __init__(self, stix_dir): + def __init__(self, stix_dir, bundlify=False): super(FileSystemSink, self).__init__() self._stix_dir = os.path.abspath(stix_dir) + self.bundlify = bundlify if not os.path.exists(self._stix_dir): raise ValueError("directory path for STIX data does not exist") @@ -61,62 +64,69 @@ class FileSystemSink(DataSink): def stix_dir(self): return self._stix_dir - def add(self, stix_data=None): - """add STIX objects to file directory + def _check_path_and_write(self, stix_obj): + """Write the given STIX object to a file in the STIX file directory. + """ + path = os.path.join(self._stix_dir, stix_obj["type"], stix_obj["id"] + ".json") - Args: - stix_data (STIX object OR dict OR str OR list): valid STIX 2.0 content - in a STIX object(or list of), dict (or list of), or a STIX 2.0 - json encoded string + if not os.path.exists(os.path.dirname(path)): + os.makedirs(os.path.dirname(path)) - TODO: Bundlify STIX content or no? When dumping to disk. - """ - def _check_path_and_write(stix_dir, stix_obj): - path = os.path.join(stix_dir, stix_obj["type"], stix_obj["id"] + ".json") + if self.bundlify: + stix_obj = Bundle(stix_obj) + + with open(path, "w") as f: + f.write(str(stix_obj)) - if not os.path.exists(os.path.dirname(path)): - os.makedirs(os.path.dirname(path)) + def add(self, stix_data=None, allow_custom=False): + """Add STIX objects to file directory. + + Args: + stix_data (STIX object OR dict OR str OR list): valid STIX 2.0 content + in a STIX object (or list of), dict (or list of), or a STIX 2.0 + json encoded string. 
+ allow_custom (bool): whether to allow custom objects/properties or + not. Default: False. - with open(path, "w") as f: - # Bundle() can take dict or STIX obj as argument - f.write(str(Bundle(stix_obj))) + Note: + ``stix_data`` can be a Bundle object, but each object in it will be + saved separately; you will be able to retrieve any of the objects + the Bundle contained, but not the Bundle itself. - if isinstance(stix_data, _STIXBase): + """ + if isinstance(stix_data, (STIXDomainObject, STIXRelationshipObject, MarkingDefinition)): # adding python STIX object - _check_path_and_write(self._stix_dir, stix_data) + self._check_path_and_write(stix_data) - elif isinstance(stix_data, dict): + elif isinstance(stix_data, (str, dict)): + stix_data = parse(stix_data, allow_custom) if stix_data["type"] == "bundle": - # adding json-formatted Bundle - extracting STIX objects - for stix_obj in stix_data["objects"]: + # extract STIX objects + for stix_obj in stix_data.get("objects", []): self.add(stix_obj) else: # adding json-formatted STIX - _check_path_and_write(self._stix_dir, stix_data) + self._check_path_and_write(stix_data) - elif isinstance(stix_data, str): - # adding json encoded string of STIX content - stix_data = parse(stix_data) - if stix_data["type"] == "bundle": - for stix_obj in stix_data["objects"]: - self.add(stix_obj) - else: - self.add(stix_data) + elif isinstance(stix_data, Bundle): + # recursively add individual STIX objects + for stix_obj in stix_data.get("objects", []): + self.add(stix_obj) elif isinstance(stix_data, list): - # if list, recurse call on individual STIX objects + # recursively add individual STIX objects for stix_obj in stix_data: self.add(stix_obj) else: - raise ValueError("stix_data must be a STIX object(or list of), json formatted STIX(or list of) or a json formatted STIX bundle") + raise TypeError("stix_data must be a STIX object (or list of), " + "JSON formatted STIX (or list of), " + "or a JSON formatted STIX bundle") class 
FileSystemSource(DataSource): - """FileSystemSource - - Provides an interface for searching/retrieving - STIX objects from a STIX object file directory. + """Interface for searching/retrieving STIX objects from a STIX object file + directory. Can be paired with a FileSystemSink, together as the two components of a FileSystemStore. @@ -136,14 +146,15 @@ class FileSystemSource(DataSource): def stix_dir(self): return self._stix_dir - def get(self, stix_id, _composite_filters=None): - """retrieve STIX object from file directory via STIX ID + def get(self, stix_id, _composite_filters=None, allow_custom=False): + """Retrieve STIX object from file directory via STIX ID. Args: stix_id (str): The STIX ID of the STIX object to be retrieved. - composite_filters (set): set of filters passed from the parent CompositeDataSource, not user supplied + allow_custom (bool): whether to retrieve custom objects/properties + or not. Default: False. Returns: (STIX object): STIX object that has the supplied STIX ID. @@ -153,47 +164,49 @@ class FileSystemSource(DataSource): """ query = [Filter("id", "=", stix_id)] - all_data = self.query(query=query, _composite_filters=_composite_filters) + all_data = self.query(query=query, _composite_filters=_composite_filters, allow_custom=allow_custom) if all_data: stix_obj = sorted(all_data, key=lambda k: k['modified'])[0] - stix_obj = parse(stix_obj) else: stix_obj = None return stix_obj - def all_versions(self, stix_id, _composite_filters=None): - """retrieve STIX object from file directory via STIX ID, all versions + def all_versions(self, stix_id, _composite_filters=None, allow_custom=False): + """Retrieve STIX object from file directory via STIX ID, all versions. Note: Since FileSystem sources/sinks don't handle multiple versions of a STIX object, this operation is unnecessary. Pass call to get(). Args: stix_id (str): The STIX ID of the STIX objects to be retrieved. 
- composite_filters (set): set of filters passed from the parent CompositeDataSource, not user supplied + allow_custom (bool): whether to retrieve custom objects/properties + or not. Default: False. Returns: (list): of STIX objects that has the supplied STIX ID. The STIX objects are loaded from their json files, parsed into a python STIX objects and then returned + """ - return [self.get(stix_id=stix_id, _composite_filters=_composite_filters)] + return [self.get(stix_id=stix_id, _composite_filters=_composite_filters, allow_custom=allow_custom)] - def query(self, query=None, _composite_filters=None): - """search and retrieve STIX objects based on the complete query + def query(self, query=None, _composite_filters=None, allow_custom=False): + """Search and retrieve STIX objects based on the complete query. A "complete query" includes the filters from the query, the filters - attached to MemorySource, and any filters passed from a - CompositeDataSource (i.e. _composite_filters) + attached to this FileSystemSource, and any filters passed from a + CompositeDataSource (i.e. _composite_filters). Args: query (list): list of filters to search on - composite_filters (set): set of filters passed from the CompositeDataSource, not user supplied + allow_custom (bool): whether to retrieve custom objects/properties + or not. Default: False. Returns: (list): list of STIX objects that matches the supplied @@ -209,7 +222,7 @@ class FileSystemSource(DataSource): if not isinstance(query, list): # make sure dont make set from a Filter object, # need to make a set from a list of Filter objects (even if just one Filter) - query = list(query) + query = [query] query = set(query) # combine all query filters @@ -254,8 +267,8 @@ class FileSystemSource(DataSource): # so query will look in all STIX directories that are not # the specified type. 
Compile correct dir paths for dir in os.listdir(self._stix_dir): - if os.path.abspath(dir) not in declude_paths: - include_paths.append(os.path.abspath(dir)) + if os.path.abspath(os.path.join(self._stix_dir, dir)) not in declude_paths: + include_paths.append(os.path.abspath(os.path.join(self._stix_dir, dir))) # grab stix object ID as well - if present in filters, as # may forgo the loading of STIX content into memory @@ -273,34 +286,32 @@ class FileSystemSource(DataSource): for path in include_paths: for root, dirs, files in os.walk(path): for file_ in files: - if id_: - if id_ == file_.split(".")[0]: - # since ID is specified in one of filters, can evaluate against filename first without loading - stix_obj = json.load(open(os.path.join(root, file_)))["objects"][0] - # check against other filters, add if match - all_data.extend(apply_common_filters([stix_obj], query)) - else: + if not id_ or id_ == file_.split(".")[0]: # have to load into memory regardless to evaluate other filters - stix_obj = json.load(open(os.path.join(root, file_)))["objects"][0] + stix_obj = json.load(open(os.path.join(root, file_))) + if stix_obj.get('type', '') == 'bundle': + stix_obj = stix_obj['objects'][0] + # check against other filters, add if match all_data.extend(apply_common_filters([stix_obj], query)) all_data = deduplicate(all_data) # parse python STIX objects from the STIX object dicts - stix_objs = [parse(stix_obj_dict) for stix_obj_dict in all_data] + stix_objs = [parse(stix_obj_dict, allow_custom) for stix_obj_dict in all_data] return stix_objs def _parse_file_filters(self, query): - """utility method to extract STIX common filters - that can used to possibly speed up querying STIX objects - from the file system + """Extract STIX common filters. + + Possibly speeds up querying STIX objects from the file system. Extracts filters that are for the "id" and "type" field of a STIX object. 
As the file directory is organized by STIX object type with filenames that are equivalent to the STIX object ID, these filters can be used first to reduce the - search space of a FileSystemStore(or FileSystemSink) + search space of a FileSystemStore (or FileSystemSink). + """ file_filters = set() for filter_ in query: diff --git a/stix2/sources/memory.py b/stix2/sources/memory.py index 9dc7062..2d1705d 100644 --- a/stix2/sources/memory.py +++ b/stix2/sources/memory.py @@ -24,16 +24,18 @@ from stix2.sources import DataSink, DataSource, DataStore from stix2.sources.filters import Filter, apply_common_filters -def _add(store, stix_data=None): - """Adds STIX objects to MemoryStore/Sink. +def _add(store, stix_data=None, allow_custom=False): + """Add STIX objects to MemoryStore/Sink. Adds STIX objects to an in-memory dictionary for fast lookup. Recursive function, breaks down STIX Bundles and lists. Args: stix_data (list OR dict OR STIX object): STIX objects to be added - """ + allow_custom (bool): whether to allow custom objects/properties or + not. Default: False. 
+ """ if isinstance(stix_data, _STIXBase): # adding a python STIX object store._data[stix_data["id"]] = stix_data @@ -41,35 +43,35 @@ def _add(store, stix_data=None): elif isinstance(stix_data, dict): if stix_data["type"] == "bundle": # adding a json bundle - so just grab STIX objects - for stix_obj in stix_data["objects"]: - _add(store, stix_obj) + for stix_obj in stix_data.get("objects", []): + _add(store, stix_obj, allow_custom=allow_custom) else: # adding a json STIX object store._data[stix_data["id"]] = stix_data elif isinstance(stix_data, str): # adding json encoded string of STIX content - stix_data = parse(stix_data) + stix_data = parse(stix_data, allow_custom=allow_custom) if stix_data["type"] == "bundle": # recurse on each STIX object in bundle - for stix_obj in stix_data: - _add(store, stix_obj) + for stix_obj in stix_data.get("objects", []): + _add(store, stix_obj, allow_custom=allow_custom) else: _add(store, stix_data) elif isinstance(stix_data, list): # STIX objects are in a list- recurse on each object for stix_obj in stix_data: - _add(store, stix_obj) + _add(store, stix_obj, allow_custom=allow_custom) else: - raise TypeError("stix_data must be as STIX object(or list of),json formatted STIX (or list of), or a json formatted STIX bundle") + raise TypeError("stix_data must be a STIX object (or list of), JSON formatted STIX (or list of), or a JSON formatted STIX bundle") class MemoryStore(DataStore): - """Provides an interface to an in-memory dictionary - of STIX objects. MemoryStore is a wrapper around a paired - MemorySink and MemorySource + """Interface to an in-memory dictionary of STIX objects. + + MemoryStore is a wrapper around a paired MemorySink and MemorySource. 
Note: It doesn't make sense to create a MemoryStore by passing in existing MemorySource and MemorySink because there could @@ -77,36 +79,54 @@ class MemoryStore(DataStore): Args: stix_data (list OR dict OR STIX object): STIX content to be added + allow_custom (bool): whether to allow custom objects/properties or + not. Default: False. Attributes: _data (dict): the in-memory dict that holds STIX objects - source (MemorySource): MemorySource - sink (MemorySink): MemorySink """ - - def __init__(self, stix_data=None): + def __init__(self, stix_data=None, allow_custom=False): super(MemoryStore, self).__init__() self._data = {} if stix_data: - _add(self, stix_data) + _add(self, stix_data, allow_custom=allow_custom) - self.source = MemorySource(stix_data=self._data, _store=True) - self.sink = MemorySink(stix_data=self._data, _store=True) + self.source = MemorySource(stix_data=self._data, _store=True, allow_custom=allow_custom) + self.sink = MemorySink(stix_data=self._data, _store=True, allow_custom=allow_custom) - def save_to_file(self, file_path): - return self.sink.save_to_file(file_path=file_path) + def save_to_file(self, file_path, allow_custom=False): + """Write SITX objects from in-memory dictionary to JSON file, as a STIX + Bundle. + + Args: + file_path (str): file path to write STIX data to + allow_custom (bool): whether to allow custom objects/properties or + not. Default: False. + + """ + return self.sink.save_to_file(file_path=file_path, allow_custom=allow_custom) - def load_from_file(self, file_path): - return self.source.load_from_file(file_path=file_path) + def load_from_file(self, file_path, allow_custom=False): + """Load STIX data from JSON file. + + File format is expected to be a single JSON + STIX object or JSON STIX bundle. + + Args: + file_path (str): file path to load STIX data from + allow_custom (bool): whether to allow custom objects/properties or + not. Default: False. 
+ + """ + return self.source.load_from_file(file_path=file_path, allow_custom=allow_custom) class MemorySink(DataSink): - """Provides an interface for adding/pushing STIX objects - to an in-memory dictionary. + """Interface for adding/pushing STIX objects to an in-memory dictionary. Designed to be paired with a MemorySource, together as the two components of a MemoryStore. @@ -114,51 +134,43 @@ class MemorySink(DataSink): Args: stix_data (dict OR list): valid STIX 2.0 content in bundle or a list. - _store (bool): if the MemorySink is a part of a DataStore, in which case "stix_data" is a direct reference to shared memory with DataSource. Not user supplied + allow_custom (bool): whether to allow custom objects/properties or + not. Default: False. Attributes: _data (dict): the in-memory dict that holds STIX objects. If apart of a MemoryStore, dict is shared between with a MemorySource - """ - def __init__(self, stix_data=None, _store=False): + """ + def __init__(self, stix_data=None, _store=False, allow_custom=False): super(MemorySink, self).__init__() self._data = {} if _store: self._data = stix_data elif stix_data: - _add(self, stix_data) + _add(self, stix_data, allow_custom=allow_custom) - def add(self, stix_data): - """add STIX objects to in-memory dictionary maintained by - the MemorySink (MemoryStore) + def add(self, stix_data, allow_custom=False): + _add(self, stix_data, allow_custom=allow_custom) + add.__doc__ = _add.__doc__ - see "_add()" for args documentation - """ - _add(self, stix_data) - - def save_to_file(self, file_path): - """write SITX objects in in-memory dictionary to json file, as a STIX Bundle - - Args: - file_path (str): file path to write STIX data to - - """ + def save_to_file(self, file_path, allow_custom=False): file_path = os.path.abspath(file_path) if not os.path.exists(os.path.dirname(file_path)): os.makedirs(os.path.dirname(file_path)) with open(file_path, "w") as f: - f.write(str(Bundle(self._data.values()))) + 
f.write(str(Bundle(self._data.values(), allow_custom=allow_custom))) + save_to_file.__doc__ = MemoryStore.save_to_file.__doc__ class MemorySource(DataSource): - """Provides an interface for searching/retrieving - STIX objects from an in-memory dictionary. + """Interface for searching/retrieving STIX objects from an in-memory + dictionary. Designed to be paired with a MemorySink, together as the two components of a MemoryStore. @@ -166,42 +178,44 @@ class MemorySource(DataSource): Args: stix_data (dict OR list OR STIX object): valid STIX 2.0 content in bundle or list. - _store (bool): if the MemorySource is a part of a DataStore, in which case "stix_data" is a direct reference to shared memory with DataSink. Not user supplied + allow_custom (bool): whether to allow custom objects/properties or + not. Default: False. Attributes: _data (dict): the in-memory dict that holds STIX objects. If apart of a MemoryStore, dict is shared between with a MemorySink - """ - def __init__(self, stix_data=None, _store=False): + """ + def __init__(self, stix_data=None, _store=False, allow_custom=False): super(MemorySource, self).__init__() self._data = {} if _store: self._data = stix_data elif stix_data: - _add(self, stix_data) + _add(self, stix_data, allow_custom=allow_custom) - def get(self, stix_id, _composite_filters=None): - """retrieve STIX object from in-memory dict via STIX ID + def get(self, stix_id, _composite_filters=None, allow_custom=False): + """Retrieve STIX object from in-memory dict via STIX ID. Args: stix_id (str): The STIX ID of the STIX object to be retrieved. - composite_filters (set): set of filters passed from the parent CompositeDataSource, not user supplied + allow_custom (bool): whether to retrieve custom objects/properties + or not. Default: False. Returns: (dict OR STIX object): STIX object that has the supplied ID. As the MemoryStore(i.e. 
MemorySink) adds STIX objects to memory as they are supplied (either as python dictionary or STIX object), it is returned in the same form as it as added - """ + """ if _composite_filters is None: # if get call is only based on 'id', no need to search, just retrieve from dict try: @@ -213,7 +227,7 @@ class MemorySource(DataSource): # if there are filters from the composite level, process full query query = [Filter("id", "=", stix_id)] - all_data = self.query(query=query, _composite_filters=_composite_filters) + all_data = self.query(query=query, _composite_filters=_composite_filters, allow_custom=allow_custom) if all_data: # reduce to most recent version @@ -223,17 +237,18 @@ class MemorySource(DataSource): else: return None - def all_versions(self, stix_id, _composite_filters=None): - """retrieve STIX objects from in-memory dict via STIX ID, all versions of it + def all_versions(self, stix_id, _composite_filters=None, allow_custom=False): + """Retrieve STIX objects from in-memory dict via STIX ID, all versions of it Note: Since Memory sources/sinks don't handle multiple versions of a STIX object, this operation is unnecessary. Translate call to get(). Args: stix_id (str): The STIX ID of the STIX 2 object to retrieve. - composite_filters (set): set of filters passed from the parent CompositeDataSource, not user supplied + allow_custom (bool): whether to retrieve custom objects/properties + or not. Default: False. Returns: (list): list of STIX objects that has the supplied ID. 
As the @@ -242,26 +257,27 @@ class MemorySource(DataSource): is returned in the same form as it as added """ - return [self.get(stix_id=stix_id, _composite_filters=_composite_filters)] + return [self.get(stix_id=stix_id, _composite_filters=_composite_filters, allow_custom=allow_custom)] - def query(self, query=None, _composite_filters=None): - """search and retrieve STIX objects based on the complete query + def query(self, query=None, _composite_filters=None, allow_custom=False): + """Search and retrieve STIX objects based on the complete query. A "complete query" includes the filters from the query, the filters - attached to MemorySource, and any filters passed from a - CompositeDataSource (i.e. _composite_filters) + attached to this MemorySource, and any filters passed from a + CompositeDataSource (i.e. _composite_filters). Args: query (list): list of filters to search on - composite_filters (set): set of filters passed from the CompositeDataSource, not user supplied + allow_custom (bool): whether to retrieve custom objects/properties + or not. Default: False. Returns: (list): list of STIX objects that matches the supplied query. As the MemoryStore(i.e. MemorySink) adds STIX objects to memory as they are supplied (either as python dictionary or STIX object), it - is returned in the same form as it as added + is returned in the same form as it as added. 
""" if query is None: @@ -270,7 +286,7 @@ class MemorySource(DataSource): if not isinstance(query, list): # make sure dont make set from a Filter object, # need to make a set from a list of Filter objects (even if just one Filter) - query = list(query) + query = [query] query = set(query) # combine all query filters @@ -284,15 +300,8 @@ class MemorySource(DataSource): return all_data - def load_from_file(self, file_path): - """load STIX data from json file - - File format is expected to be a single json - STIX object or json STIX bundle - - Args: - file_path (str): file path to load STIX data from - """ + def load_from_file(self, file_path, allow_custom=False): file_path = os.path.abspath(file_path) stix_data = json.load(open(file_path, "r")) - _add(self, stix_data) + _add(self, stix_data, allow_custom=allow_custom) + load_from_file.__doc__ = MemoryStore.load_from_file.__doc__ diff --git a/stix2/sources/taxii.py b/stix2/sources/taxii.py index 4c659ed..0bd42d8 100644 --- a/stix2/sources/taxii.py +++ b/stix2/sources/taxii.py @@ -17,7 +17,7 @@ class TAXIICollectionStore(DataStore): around a paired TAXIICollectionSink and TAXIICollectionSource. Args: - collection (taxii2.Collection): TAXII Collection instance + collection (taxii2.Collection): TAXII Collection instance """ def __init__(self, collection): super(TAXIICollectionStore, self).__init__() @@ -37,39 +37,40 @@ class TAXIICollectionSink(DataSink): super(TAXIICollectionSink, self).__init__() self.collection = collection - def add(self, stix_data): - """add/push STIX content to TAXII Collection endpoint + def add(self, stix_data, allow_custom=False): + """Add/push STIX content to TAXII Collection endpoint Args: stix_data (STIX object OR dict OR str OR list): valid STIX 2.0 content in a STIX object (or Bundle), STIX onject dict (or Bundle dict), or a STIX 2.0 json encoded string, or list of any of the following + allow_custom (bool): whether to allow custom objects/properties or + not. Default: False. 
""" - if isinstance(stix_data, _STIXBase): # adding python STIX object - bundle = dict(Bundle(stix_data)) + bundle = dict(Bundle(stix_data, allow_custom=allow_custom)) elif isinstance(stix_data, dict): # adding python dict (of either Bundle or STIX obj) if stix_data["type"] == "bundle": bundle = stix_data else: - bundle = dict(Bundle(stix_data)) + bundle = dict(Bundle(stix_data, allow_custom=allow_custom)) elif isinstance(stix_data, list): # adding list of something - recurse on each for obj in stix_data: - self.add(obj) + self.add(obj, allow_custom=allow_custom) elif isinstance(stix_data, str): # adding json encoded string of STIX content - stix_data = parse(stix_data) + stix_data = parse(stix_data, allow_custom=allow_custom) if stix_data["type"] == "bundle": bundle = dict(stix_data) else: - bundle = dict(Bundle(stix_data)) + bundle = dict(Bundle(stix_data, allow_custom=allow_custom)) else: raise TypeError("stix_data must be as STIX object(or list of),json formatted STIX (or list of), or a json formatted STIX bundle") @@ -89,22 +90,22 @@ class TAXIICollectionSource(DataSource): super(TAXIICollectionSource, self).__init__() self.collection = collection - def get(self, stix_id, _composite_filters=None): - """retrieve STIX object from local/remote STIX Collection + def get(self, stix_id, _composite_filters=None, allow_custom=False): + """Retrieve STIX object from local/remote STIX Collection endpoint. Args: stix_id (str): The STIX ID of the STIX object to be retrieved. - composite_filters (set): set of filters passed from the parent CompositeDataSource, not user supplied + allow_custom (bool): whether to retrieve custom objects/properties + or not. Default: False. Returns: (STIX object): STIX object that has the supplied STIX ID. 
The STIX object is received from TAXII has dict, parsed into a python STIX object and then returned - """ # combine all query filters query = set() @@ -120,7 +121,7 @@ class TAXIICollectionSource(DataSource): stix_obj = list(apply_common_filters(stix_objs, query)) if len(stix_obj): - stix_obj = parse(stix_obj[0]) + stix_obj = parse(stix_obj[0], allow_custom=allow_custom) if stix_obj.id != stix_id: # check - was added to handle erroneous TAXII servers stix_obj = None @@ -129,15 +130,16 @@ class TAXIICollectionSource(DataSource): return stix_obj - def all_versions(self, stix_id, _composite_filters=None): - """retrieve STIX object from local/remote TAXII Collection + def all_versions(self, stix_id, _composite_filters=None, allow_custom=False): + """Retrieve STIX object from local/remote TAXII Collection endpoint, all versions of it Args: stix_id (str): The STIX ID of the STIX objects to be retrieved. - composite_filters (set): set of filters passed from the parent CompositeDataSource, not user supplied + allow_custom (bool): whether to retrieve custom objects/properties + or not. Default: False. 
Returns: (see query() as all_versions() is just a wrapper) @@ -149,7 +151,7 @@ class TAXIICollectionSource(DataSource): Filter("match[version]", "=", "all") ] - all_data = self.query(query=query, _composite_filters=_composite_filters) + all_data = self.query(query=query, _composite_filters=_composite_filters, allow_custom=allow_custom) # parse STIX objects from TAXII returned json all_data = [parse(stix_obj) for stix_obj in all_data] @@ -159,8 +161,8 @@ class TAXIICollectionSource(DataSource): return all_data_clean - def query(self, query=None, _composite_filters=None): - """search and retreive STIX objects based on the complete query + def query(self, query=None, _composite_filters=None, allow_custom=False): + """Search and retreive STIX objects based on the complete query A "complete query" includes the filters from the query, the filters attached to MemorySource, and any filters passed from a @@ -168,9 +170,10 @@ class TAXIICollectionSource(DataSource): Args: query (list): list of filters to search on - composite_filters (set): set of filters passed from the CompositeDataSource, not user supplied + allow_custom (bool): whether to retrieve custom objects/properties + or not. Default: False. Returns: (list): list of STIX objects that matches the supplied @@ -178,7 +181,6 @@ class TAXIICollectionSource(DataSource): parsed into python STIX objects and then returned. 
""" - if query is None: query = set() else: @@ -198,7 +200,7 @@ class TAXIICollectionSource(DataSource): taxii_filters = self._parse_taxii_filters(query) # query TAXII collection - all_data = self.collection.get_objects(filters=taxii_filters)["objects"] + all_data = self.collection.get_objects(filters=taxii_filters, allow_custom=allow_custom)["objects"] # deduplicate data (before filtering as reduces wasted filtering) all_data = deduplicate(all_data) @@ -207,7 +209,7 @@ class TAXIICollectionSource(DataSource): all_data = list(apply_common_filters(all_data, query)) # parse python STIX objects from the STIX object dicts - stix_objs = [parse(stix_obj_dict) for stix_obj_dict in all_data] + stix_objs = [parse(stix_obj_dict, allow_custom=allow_custom) for stix_obj_dict in all_data] return stix_objs @@ -229,7 +231,6 @@ class TAXIICollectionSource(DataSource): for 'requests.get()'. """ - params = {} for filter_ in query: diff --git a/stix2/utils.py b/stix2/utils.py index 8df4323..4623f28 100644 --- a/stix2/utils.py +++ b/stix2/utils.py @@ -34,7 +34,7 @@ class STIXdatetime(dt.datetime): def deduplicate(stix_obj_list): - """Deduplicate a list of STIX objects to a unique set + """Deduplicate a list of STIX objects to a unique set. Reduces a set of STIX objects to unique set by looking at 'id' and 'modified' fields - as a unique object version @@ -44,7 +44,6 @@ def deduplicate(stix_obj_list): of deduplicate(),that if the "stix_obj_list" argument has multiple STIX objects of the same version, the last object version found in the list will be the one that is returned. - () Args: stix_obj_list (list): list of STIX objects (dicts) @@ -56,7 +55,11 @@ def deduplicate(stix_obj_list): unique_objs = {} for obj in stix_obj_list: - unique_objs[(obj['id'], obj['modified'])] = obj + try: + unique_objs[(obj['id'], obj['modified'])] = obj + except KeyError: + # Handle objects with no `modified` property, e.g. 
marking-definition + unique_objs[(obj['id'], obj['created'])] = obj return list(unique_objs.values())
Unable to add() Bundle. example usage: ``` bundle = Bundle(*bArgs, allow_custom=True) store.add(bundle) ``` ``` Traceback (most recent call last): File "stix.py", line 166, in <module> store.add(bundle) File "/usr/local/lib/python2.7/dist-packages/stix2/environment.py", line 147, in add return self.sink.add(*args, **kwargs) File "/usr/local/lib/python2.7/dist-packages/stix2/sources/filesystem.py", line 86, in add _check_path_and_write(self._stix_dir, stix_data) File "/usr/local/lib/python2.7/dist-packages/stix2/sources/filesystem.py", line 82, in _check_path_and_write f.write(str(Bundle(stix_obj))) File "/usr/local/lib/python2.7/dist-packages/stix2/core.py", line 62, in __init__ super(Bundle, self).__init__(**kwargs) File "/usr/local/lib/python2.7/dist-packages/stix2/base.py", line 121, in __init__ self._check_property(prop_name, prop_metadata, setting_kwargs) File "/usr/local/lib/python2.7/dist-packages/stix2/base.py", line 57, in _check_property raise InvalidValueError(self.__class__, prop_name, reason=str(exc)) stix2.exceptions.InvalidValueError: Invalid value for Bundle 'objects': This property may not contain a Bundle object ``` FYI i'm using the patched code submitted in #73
oasis-open/cti-python-stix2
diff --git a/stix2/test/stix2_data/course-of-action/course-of-action--d9727aee-48b8-4fdb-89e2-4c49746ba4dd.json b/stix2/test/stix2_data/course-of-action/course-of-action--d9727aee-48b8-4fdb-89e2-4c49746ba4dd.json index 5bfb8bb..cb9cfe2 100755 --- a/stix2/test/stix2_data/course-of-action/course-of-action--d9727aee-48b8-4fdb-89e2-4c49746ba4dd.json +++ b/stix2/test/stix2_data/course-of-action/course-of-action--d9727aee-48b8-4fdb-89e2-4c49746ba4dd.json @@ -1,16 +1,9 @@ { - "id": "bundle--2ed6ab6a-ca68-414f-8493-e4db8b75dd51", - "objects": [ - { - "created": "2017-05-31T21:30:41.022744Z", - "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5", - "description": "Identify unnecessary system utilities or potentially malicious software that may be used to collect data from a network share, and audit and/or block them by using whitelisting[[CiteRef::Beechey 2010]] tools, like AppLocker,[[CiteRef::Windows Commands JPCERT]][[CiteRef::NSA MS AppLocker]] or Software Restriction Policies[[CiteRef::Corio 2008]] where appropriate.[[CiteRef::TechNet Applocker vs SRP]]", - "id": "course-of-action--d9727aee-48b8-4fdb-89e2-4c49746ba4dd", - "modified": "2017-05-31T21:30:41.022744Z", - "name": "Data from Network Shared Drive Mitigation", - "type": "course-of-action" - } - ], - "spec_version": "2.0", - "type": "bundle" + "created": "2017-05-31T21:30:41.022744Z", + "created_by_ref": "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5", + "description": "Identify unnecessary system utilities or potentially malicious software that may be used to collect data from a network share, and audit and/or block them by using whitelisting[[CiteRef::Beechey 2010]] tools, like AppLocker,[[CiteRef::Windows Commands JPCERT]][[CiteRef::NSA MS AppLocker]] or Software Restriction Policies[[CiteRef::Corio 2008]] where appropriate.[[CiteRef::TechNet Applocker vs SRP]]", + "id": "course-of-action--d9727aee-48b8-4fdb-89e2-4c49746ba4dd", + "modified": "2017-05-31T21:30:41.022744Z", + "name": "Data from 
Network Shared Drive Mitigation", + "type": "course-of-action" } \ No newline at end of file diff --git a/stix2/test/test_bundle.py b/stix2/test/test_bundle.py index c7c95a8..24bbd43 100644 --- a/stix2/test/test_bundle.py +++ b/stix2/test/test_bundle.py @@ -158,3 +158,10 @@ def test_parse_unknown_type(): with pytest.raises(stix2.exceptions.ParseError) as excinfo: stix2.parse(unknown) assert str(excinfo.value) == "Can't parse unknown object type 'other'! For custom types, use the CustomObject decorator." + + +def test_stix_object_property(): + prop = stix2.core.STIXObjectProperty() + + identity = stix2.Identity(name="test", identity_class="individual") + assert prop.clean(identity) is identity diff --git a/stix2/test/test_custom.py b/stix2/test/test_custom.py index 48529b9..c5726b8 100644 --- a/stix2/test/test_custom.py +++ b/stix2/test/test_custom.py @@ -91,6 +91,7 @@ def test_custom_property_in_bundled_object(): bundle = stix2.Bundle(identity, allow_custom=True) assert bundle.objects[0].x_foo == "bar" + assert '"x_foo": "bar"' in str(bundle) @stix2.sdo.CustomObject('x-new-type', [ diff --git a/stix2/test/test_data_sources.py b/stix2/test/test_data_sources.py index 689fe8c..6f47de8 100644 --- a/stix2/test/test_data_sources.py +++ b/stix2/test/test_data_sources.py @@ -1,17 +1,13 @@ -import os - import pytest from taxii2client import Collection -from stix2 import (Campaign, FileSystemSink, FileSystemSource, FileSystemStore, - Filter, MemorySource, MemoryStore) +from stix2 import Filter, MemorySource from stix2.sources import (CompositeDataSource, DataSink, DataSource, DataStore, make_id, taxii) from stix2.sources.filters import apply_common_filters from stix2.utils import deduplicate COLLECTION_URL = 'https://example.com/api1/collections/91a7b528-80eb-42ed-a74d-c6fbd5a26116/' -FS_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "stix2_data") class MockTAXIIClient(object): @@ -148,28 +144,6 @@ def test_ds_abstract_class_smoke(): ds3.query([Filter("id", 
"=", "malware--fdd60b30-b67c-11e3-b0b9-f01faf20d111")]) -def test_memory_store_smoke(): - # Initialize MemoryStore with dict - ms = MemoryStore(STIX_OBJS1) - - # Add item to sink - ms.add(dict(id="bundle--%s" % make_id(), - objects=STIX_OBJS2, - spec_version="2.0", - type="bundle")) - - resp = ms.all_versions("indicator--d81f86b9-975b-bc0b-775e-810c5ad45a4f") - assert len(resp) == 1 - - resp = ms.get("indicator--d81f86b8-975b-bc0b-775e-810c5ad45a4f") - assert resp["id"] == "indicator--d81f86b8-975b-bc0b-775e-810c5ad45a4f" - - query = [Filter('type', '=', 'malware')] - - resp = ms.query(query) - assert len(resp) == 0 - - def test_ds_taxii(collection): ds = taxii.TAXIICollectionSource(collection) assert ds.collection is not None @@ -512,207 +486,3 @@ def test_composite_datasource_operations(): # STIX_OBJS2 has indicator with later time, one with different id, one with # original time in STIX_OBJS1 assert len(results) == 3 - - -def test_filesytem_source(): - # creation - fs_source = FileSystemSource(FS_PATH) - assert fs_source.stix_dir == FS_PATH - - # get object - mal = fs_source.get("malware--6b616fc1-1505-48e3-8b2c-0d19337bff38") - assert mal.id == "malware--6b616fc1-1505-48e3-8b2c-0d19337bff38" - assert mal.name == "Rover" - - # all versions - (currently not a true all versions call as FileSystem cant have multiple versions) - id_ = fs_source.get("identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5") - assert id_.id == "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5" - assert id_.name == "The MITRE Corporation" - assert id_.type == "identity" - - # query - intrusion_sets = fs_source.query([Filter("type", '=', "intrusion-set")]) - assert len(intrusion_sets) == 2 - assert "intrusion-set--a653431d-6a5e-4600-8ad3-609b5af57064" in [is_.id for is_ in intrusion_sets] - assert "intrusion-set--f3bdec95-3d62-42d9-a840-29630f6cdc1a" in [is_.id for is_ in intrusion_sets] - - is_1 = [is_ for is_ in intrusion_sets if is_.id == 
"intrusion-set--f3bdec95-3d62-42d9-a840-29630f6cdc1a"][0] - assert "DragonOK" in is_1.aliases - assert len(is_1.external_references) == 4 - - # query2 - is_2 = fs_source.query([Filter("external_references.external_id", '=', "T1027")]) - assert len(is_2) == 1 - - is_2 = is_2[0] - assert is_2.id == "attack-pattern--b3d682b6-98f2-4fb0-aa3b-b4df007ca70a" - assert is_2.type == "attack-pattern" - - -def test_filesystem_sink(): - # creation - fs_sink = FileSystemSink(FS_PATH) - assert fs_sink.stix_dir == FS_PATH - - fs_source = FileSystemSource(FS_PATH) - - # Test all the ways stix objects can be added (via different supplied forms) - - # add python stix object - camp1 = Campaign(name="Hannibal", - objective="Targeting Italian and Spanish Diplomat internet accounts", - aliases=["War Elephant"]) - - fs_sink.add(camp1) - - assert os.path.exists(os.path.join(FS_PATH, "campaign", camp1.id + ".json")) - - camp1_r = fs_source.get(camp1.id) - assert camp1_r.id == camp1.id - assert camp1_r.name == "Hannibal" - assert "War Elephant" in camp1_r.aliases - - # add stix object dict - camp2 = { - "name": "Aurelius", - "type": "campaign", - "objective": "German and French Intelligence Services", - "aliases": ["Purple Robes"], - "id": "campaign--111111b6-1112-4fb0-111b-b111107ca70a", - "created": "2017-05-31T21:31:53.197755Z" - } - - fs_sink.add(camp2) - - assert os.path.exists(os.path.join(FS_PATH, "campaign", camp2["id"] + ".json")) - - camp2_r = fs_source.get(camp2["id"]) - assert camp2_r.id == camp2["id"] - assert camp2_r.name == camp2["name"] - assert "Purple Robes" in camp2_r.aliases - - # add stix bundle dict - bund = { - "type": "bundle", - "id": "bundle--112211b6-1112-4fb0-111b-b111107ca70a", - "spec_version": "2.0", - "objects": [ - { - "name": "Atilla", - "type": "campaign", - "objective": "Bulgarian, Albanian and Romanian Intelligence Services", - "aliases": ["Huns"], - "id": "campaign--133111b6-1112-4fb0-111b-b111107ca70a", - "created": "2017-05-31T21:31:53.197755Z" - } - ] 
- } - - fs_sink.add(bund) - - assert os.path.exists(os.path.join(FS_PATH, "campaign", bund["objects"][0]["id"] + ".json")) - - camp3_r = fs_source.get(bund["objects"][0]["id"]) - assert camp3_r.id == bund["objects"][0]["id"] - assert camp3_r.name == bund["objects"][0]["name"] - assert "Huns" in camp3_r.aliases - - # add json-encoded stix obj - camp4 = '{"type": "campaign", "id":"campaign--144111b6-1112-4fb0-111b-b111107ca70a",'\ - ' "created":"2017-05-31T21:31:53.197755Z", "name": "Ghengis Khan", "objective": "China and Russian infrastructure"}' - - fs_sink.add(camp4) - - assert os.path.exists(os.path.join(FS_PATH, "campaign", "campaign--144111b6-1112-4fb0-111b-b111107ca70a" + ".json")) - - camp4_r = fs_source.get("campaign--144111b6-1112-4fb0-111b-b111107ca70a") - assert camp4_r.id == "campaign--144111b6-1112-4fb0-111b-b111107ca70a" - assert camp4_r.name == "Ghengis Khan" - - # add json-encoded stix bundle - bund2 = '{"type": "bundle", "id": "bundle--332211b6-1132-4fb0-111b-b111107ca70a",' \ - ' "spec_version": "2.0", "objects": [{"type": "campaign", "id": "campaign--155155b6-1112-4fb0-111b-b111107ca70a",' \ - ' "created":"2017-05-31T21:31:53.197755Z", "name": "Spartacus", "objective": "Oppressive regimes of Africa and Middle East"}]}' - fs_sink.add(bund2) - - assert os.path.exists(os.path.join(FS_PATH, "campaign", "campaign--155155b6-1112-4fb0-111b-b111107ca70a" + ".json")) - - camp5_r = fs_source.get("campaign--155155b6-1112-4fb0-111b-b111107ca70a") - assert camp5_r.id == "campaign--155155b6-1112-4fb0-111b-b111107ca70a" - assert camp5_r.name == "Spartacus" - - # add list of objects - camp6 = Campaign(name="Comanche", - objective="US Midwest manufacturing firms, oil refineries, and businesses", - aliases=["Horse Warrior"]) - - camp7 = { - "name": "Napolean", - "type": "campaign", - "objective": "Central and Eastern Europe military commands and departments", - "aliases": ["The Frenchmen"], - "id": "campaign--122818b6-1112-4fb0-111b-b111107ca70a", - "created": 
"2017-05-31T21:31:53.197755Z" - } - - fs_sink.add([camp6, camp7]) - - assert os.path.exists(os.path.join(FS_PATH, "campaign", camp6.id + ".json")) - assert os.path.exists(os.path.join(FS_PATH, "campaign", "campaign--122818b6-1112-4fb0-111b-b111107ca70a" + ".json")) - - camp6_r = fs_source.get(camp6.id) - assert camp6_r.id == camp6.id - assert "Horse Warrior" in camp6_r.aliases - - camp7_r = fs_source.get(camp7["id"]) - assert camp7_r.id == camp7["id"] - assert "The Frenchmen" in camp7_r.aliases - - # remove all added objects - os.remove(os.path.join(FS_PATH, "campaign", camp1_r.id + ".json")) - os.remove(os.path.join(FS_PATH, "campaign", camp2_r.id + ".json")) - os.remove(os.path.join(FS_PATH, "campaign", camp3_r.id + ".json")) - os.remove(os.path.join(FS_PATH, "campaign", camp4_r.id + ".json")) - os.remove(os.path.join(FS_PATH, "campaign", camp5_r.id + ".json")) - os.remove(os.path.join(FS_PATH, "campaign", camp6_r.id + ".json")) - os.remove(os.path.join(FS_PATH, "campaign", camp7_r.id + ".json")) - - # remove campaign dir (that was added in course of testing) - os.rmdir(os.path.join(FS_PATH, "campaign")) - - -def test_filesystem_store(): - # creation - fs_store = FileSystemStore(FS_PATH) - - # get() - coa = fs_store.get("course-of-action--d9727aee-48b8-4fdb-89e2-4c49746ba4dd") - assert coa.id == "course-of-action--d9727aee-48b8-4fdb-89e2-4c49746ba4dd" - assert coa.type == "course-of-action" - - # all versions() - (note at this time, all_versions() is still not applicable to FileSystem, as only one version is ever stored) - rel = fs_store.all_versions("relationship--70dc6b5c-c524-429e-a6ab-0dd40f0482c1")[0] - assert rel.id == "relationship--70dc6b5c-c524-429e-a6ab-0dd40f0482c1" - assert rel.type == "relationship" - - # query() - tools = fs_store.query([Filter("labels", "in", "tool")]) - assert len(tools) == 2 - assert "tool--242f3da3-4425-4d11-8f5c-b842886da966" in [tool.id for tool in tools] - assert "tool--03342581-f790-4f03-ba41-e82e67392e23" in [tool.id for 
tool in tools] - - # add() - camp1 = Campaign(name="Great Heathen Army", - objective="Targeting the government of United Kingdom and insitutions affiliated with the Church Of England", - aliases=["Ragnar"]) - fs_store.add(camp1) - - camp1_r = fs_store.get(camp1.id) - assert camp1_r.id == camp1.id - assert camp1_r.name == camp1.name - - # remove - os.remove(os.path.join(FS_PATH, "campaign", camp1_r.id + ".json")) - - # remove campaign dir - os.rmdir(os.path.join(FS_PATH, "campaign")) diff --git a/stix2/test/test_filesystem.py b/stix2/test/test_filesystem.py new file mode 100644 index 0000000..7aaa3f5 --- /dev/null +++ b/stix2/test/test_filesystem.py @@ -0,0 +1,377 @@ +import os +import shutil + +import pytest + +from stix2 import (Bundle, Campaign, CustomObject, FileSystemSink, + FileSystemSource, FileSystemStore, Filter, properties) + +FS_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "stix2_data") + + [email protected] +def fs_store(): + # create + yield FileSystemStore(FS_PATH) + + # remove campaign dir + shutil.rmtree(os.path.join(FS_PATH, "campaign"), True) + + [email protected] +def fs_source(): + # create + fs = FileSystemSource(FS_PATH) + assert fs.stix_dir == FS_PATH + yield fs + + # remove campaign dir + shutil.rmtree(os.path.join(FS_PATH, "campaign"), True) + + [email protected] +def fs_sink(): + # create + fs = FileSystemSink(FS_PATH) + assert fs.stix_dir == FS_PATH + yield fs + + # remove campaign dir + shutil.rmtree(os.path.join(FS_PATH, "campaign"), True) + + +def test_filesystem_source_nonexistent_folder(): + with pytest.raises(ValueError) as excinfo: + FileSystemSource('nonexistent-folder') + assert "for STIX data does not exist" in str(excinfo) + + +def test_filesystem_sink_nonexistent_folder(): + with pytest.raises(ValueError) as excinfo: + FileSystemSink('nonexistent-folder') + assert "for STIX data does not exist" in str(excinfo) + + +def test_filesytem_source_get_object(fs_source): + # get object + mal = 
fs_source.get("malware--6b616fc1-1505-48e3-8b2c-0d19337bff38") + assert mal.id == "malware--6b616fc1-1505-48e3-8b2c-0d19337bff38" + assert mal.name == "Rover" + + +def test_filesytem_source_get_nonexistent_object(fs_source): + ind = fs_source.get("indicator--6b616fc1-1505-48e3-8b2c-0d19337bff38") + assert ind is None + + +def test_filesytem_source_all_versions(fs_source): + # all versions - (currently not a true all versions call as FileSystem cant have multiple versions) + id_ = fs_source.get("identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5") + assert id_.id == "identity--c78cb6e5-0c4b-4611-8297-d1b8b55e40b5" + assert id_.name == "The MITRE Corporation" + assert id_.type == "identity" + + +def test_filesytem_source_query_single(fs_source): + # query2 + is_2 = fs_source.query([Filter("external_references.external_id", '=', "T1027")]) + assert len(is_2) == 1 + + is_2 = is_2[0] + assert is_2.id == "attack-pattern--b3d682b6-98f2-4fb0-aa3b-b4df007ca70a" + assert is_2.type == "attack-pattern" + + +def test_filesytem_source_query_multiple(fs_source): + # query + intrusion_sets = fs_source.query([Filter("type", '=', "intrusion-set")]) + assert len(intrusion_sets) == 2 + assert "intrusion-set--a653431d-6a5e-4600-8ad3-609b5af57064" in [is_.id for is_ in intrusion_sets] + assert "intrusion-set--f3bdec95-3d62-42d9-a840-29630f6cdc1a" in [is_.id for is_ in intrusion_sets] + + is_1 = [is_ for is_ in intrusion_sets if is_.id == "intrusion-set--f3bdec95-3d62-42d9-a840-29630f6cdc1a"][0] + assert "DragonOK" in is_1.aliases + assert len(is_1.external_references) == 4 + + +def test_filesystem_sink_add_python_stix_object(fs_sink, fs_source): + # add python stix object + camp1 = Campaign(name="Hannibal", + objective="Targeting Italian and Spanish Diplomat internet accounts", + aliases=["War Elephant"]) + + fs_sink.add(camp1) + + assert os.path.exists(os.path.join(FS_PATH, "campaign", camp1.id + ".json")) + + camp1_r = fs_source.get(camp1.id) + assert camp1_r.id == camp1.id + assert 
camp1_r.name == "Hannibal" + assert "War Elephant" in camp1_r.aliases + + os.remove(os.path.join(FS_PATH, "campaign", camp1_r.id + ".json")) + + +def test_filesystem_sink_add_stix_object_dict(fs_sink, fs_source): + # add stix object dict + camp2 = { + "name": "Aurelius", + "type": "campaign", + "objective": "German and French Intelligence Services", + "aliases": ["Purple Robes"], + "id": "campaign--111111b6-1112-4fb0-111b-b111107ca70a", + "created": "2017-05-31T21:31:53.197755Z" + } + + fs_sink.add(camp2) + + assert os.path.exists(os.path.join(FS_PATH, "campaign", camp2["id"] + ".json")) + + camp2_r = fs_source.get(camp2["id"]) + assert camp2_r.id == camp2["id"] + assert camp2_r.name == camp2["name"] + assert "Purple Robes" in camp2_r.aliases + + os.remove(os.path.join(FS_PATH, "campaign", camp2_r.id + ".json")) + + +def test_filesystem_sink_add_stix_bundle_dict(fs_sink, fs_source): + # add stix bundle dict + bund = { + "type": "bundle", + "id": "bundle--112211b6-1112-4fb0-111b-b111107ca70a", + "spec_version": "2.0", + "objects": [ + { + "name": "Atilla", + "type": "campaign", + "objective": "Bulgarian, Albanian and Romanian Intelligence Services", + "aliases": ["Huns"], + "id": "campaign--133111b6-1112-4fb0-111b-b111107ca70a", + "created": "2017-05-31T21:31:53.197755Z" + } + ] + } + + fs_sink.add(bund) + + assert os.path.exists(os.path.join(FS_PATH, "campaign", bund["objects"][0]["id"] + ".json")) + + camp3_r = fs_source.get(bund["objects"][0]["id"]) + assert camp3_r.id == bund["objects"][0]["id"] + assert camp3_r.name == bund["objects"][0]["name"] + assert "Huns" in camp3_r.aliases + + os.remove(os.path.join(FS_PATH, "campaign", camp3_r.id + ".json")) + + +def test_filesystem_sink_add_json_stix_object(fs_sink, fs_source): + # add json-encoded stix obj + camp4 = '{"type": "campaign", "id":"campaign--144111b6-1112-4fb0-111b-b111107ca70a",'\ + ' "created":"2017-05-31T21:31:53.197755Z", "name": "Ghengis Khan", "objective": "China and Russian infrastructure"}' + + 
fs_sink.add(camp4) + + assert os.path.exists(os.path.join(FS_PATH, "campaign", "campaign--144111b6-1112-4fb0-111b-b111107ca70a" + ".json")) + + camp4_r = fs_source.get("campaign--144111b6-1112-4fb0-111b-b111107ca70a") + assert camp4_r.id == "campaign--144111b6-1112-4fb0-111b-b111107ca70a" + assert camp4_r.name == "Ghengis Khan" + + os.remove(os.path.join(FS_PATH, "campaign", camp4_r.id + ".json")) + + +def test_filesystem_sink_json_stix_bundle(fs_sink, fs_source): + # add json-encoded stix bundle + bund2 = '{"type": "bundle", "id": "bundle--332211b6-1132-4fb0-111b-b111107ca70a",' \ + ' "spec_version": "2.0", "objects": [{"type": "campaign", "id": "campaign--155155b6-1112-4fb0-111b-b111107ca70a",' \ + ' "created":"2017-05-31T21:31:53.197755Z", "name": "Spartacus", "objective": "Oppressive regimes of Africa and Middle East"}]}' + fs_sink.add(bund2) + + assert os.path.exists(os.path.join(FS_PATH, "campaign", "campaign--155155b6-1112-4fb0-111b-b111107ca70a" + ".json")) + + camp5_r = fs_source.get("campaign--155155b6-1112-4fb0-111b-b111107ca70a") + assert camp5_r.id == "campaign--155155b6-1112-4fb0-111b-b111107ca70a" + assert camp5_r.name == "Spartacus" + + os.remove(os.path.join(FS_PATH, "campaign", camp5_r.id + ".json")) + + +def test_filesystem_sink_add_objects_list(fs_sink, fs_source): + # add list of objects + camp6 = Campaign(name="Comanche", + objective="US Midwest manufacturing firms, oil refineries, and businesses", + aliases=["Horse Warrior"]) + + camp7 = { + "name": "Napolean", + "type": "campaign", + "objective": "Central and Eastern Europe military commands and departments", + "aliases": ["The Frenchmen"], + "id": "campaign--122818b6-1112-4fb0-111b-b111107ca70a", + "created": "2017-05-31T21:31:53.197755Z" + } + + fs_sink.add([camp6, camp7]) + + assert os.path.exists(os.path.join(FS_PATH, "campaign", camp6.id + ".json")) + assert os.path.exists(os.path.join(FS_PATH, "campaign", "campaign--122818b6-1112-4fb0-111b-b111107ca70a" + ".json")) + + camp6_r = 
fs_source.get(camp6.id) + assert camp6_r.id == camp6.id + assert "Horse Warrior" in camp6_r.aliases + + camp7_r = fs_source.get(camp7["id"]) + assert camp7_r.id == camp7["id"] + assert "The Frenchmen" in camp7_r.aliases + + # remove all added objects + os.remove(os.path.join(FS_PATH, "campaign", camp6_r.id + ".json")) + os.remove(os.path.join(FS_PATH, "campaign", camp7_r.id + ".json")) + + +def test_filesystem_store_get_stored_as_bundle(fs_store): + coa = fs_store.get("course-of-action--95ddb356-7ba0-4bd9-a889-247262b8946f") + assert coa.id == "course-of-action--95ddb356-7ba0-4bd9-a889-247262b8946f" + assert coa.type == "course-of-action" + + +def test_filesystem_store_get_stored_as_object(fs_store): + coa = fs_store.get("course-of-action--d9727aee-48b8-4fdb-89e2-4c49746ba4dd") + assert coa.id == "course-of-action--d9727aee-48b8-4fdb-89e2-4c49746ba4dd" + assert coa.type == "course-of-action" + + +def test_filesystem_store_all_versions(fs_store): + # all versions() - (note at this time, all_versions() is still not applicable to FileSystem, as only one version is ever stored) + rel = fs_store.all_versions("relationship--70dc6b5c-c524-429e-a6ab-0dd40f0482c1")[0] + assert rel.id == "relationship--70dc6b5c-c524-429e-a6ab-0dd40f0482c1" + assert rel.type == "relationship" + + +def test_filesystem_store_query(fs_store): + # query() + tools = fs_store.query([Filter("labels", "in", "tool")]) + assert len(tools) == 2 + assert "tool--242f3da3-4425-4d11-8f5c-b842886da966" in [tool.id for tool in tools] + assert "tool--03342581-f790-4f03-ba41-e82e67392e23" in [tool.id for tool in tools] + + +def test_filesystem_store_query_single_filter(fs_store): + query = Filter("labels", "in", "tool") + tools = fs_store.query(query) + assert len(tools) == 2 + assert "tool--242f3da3-4425-4d11-8f5c-b842886da966" in [tool.id for tool in tools] + assert "tool--03342581-f790-4f03-ba41-e82e67392e23" in [tool.id for tool in tools] + + +def test_filesystem_store_empty_query(fs_store): + results = 
fs_store.query() # returns all + assert len(results) == 26 + assert "tool--242f3da3-4425-4d11-8f5c-b842886da966" in [obj.id for obj in results] + assert "marking-definition--fa42a846-8d90-4e51-bc29-71d5b4802168" in [obj.id for obj in results] + + +def test_filesystem_store_query_multiple_filters(fs_store): + fs_store.source.filters.add(Filter("labels", "in", "tool")) + tools = fs_store.query(Filter("id", "=", "tool--242f3da3-4425-4d11-8f5c-b842886da966")) + assert len(tools) == 1 + assert tools[0].id == "tool--242f3da3-4425-4d11-8f5c-b842886da966" + + +def test_filesystem_store_query_dont_include_type_folder(fs_store): + results = fs_store.query(Filter("type", "!=", "tool")) + assert len(results) == 24 + + +def test_filesystem_store_add(fs_store): + # add() + camp1 = Campaign(name="Great Heathen Army", + objective="Targeting the government of United Kingdom and insitutions affiliated with the Church Of England", + aliases=["Ragnar"]) + fs_store.add(camp1) + + camp1_r = fs_store.get(camp1.id) + assert camp1_r.id == camp1.id + assert camp1_r.name == camp1.name + + # remove + os.remove(os.path.join(FS_PATH, "campaign", camp1_r.id + ".json")) + + +def test_filesystem_store_add_as_bundle(): + fs_store = FileSystemStore(FS_PATH, bundlify=True) + + camp1 = Campaign(name="Great Heathen Army", + objective="Targeting the government of United Kingdom and insitutions affiliated with the Church Of England", + aliases=["Ragnar"]) + fs_store.add(camp1) + + with open(os.path.join(FS_PATH, "campaign", camp1.id + ".json")) as bundle_file: + assert '"type": "bundle"' in bundle_file.read() + + camp1_r = fs_store.get(camp1.id) + assert camp1_r.id == camp1.id + assert camp1_r.name == camp1.name + + shutil.rmtree(os.path.join(FS_PATH, "campaign"), True) + + +def test_filesystem_add_bundle_object(fs_store): + bundle = Bundle() + fs_store.add(bundle) + + +def test_filesystem_store_add_invalid_object(fs_store): + ind = ('campaign', 'campaign--111111b6-1112-4fb0-111b-b111107ca70a') # tuple 
isn't valid + with pytest.raises(TypeError) as excinfo: + fs_store.add(ind) + assert 'stix_data must be' in str(excinfo.value) + assert 'a STIX object' in str(excinfo.value) + assert 'JSON formatted STIX' in str(excinfo.value) + assert 'JSON formatted STIX bundle' in str(excinfo.value) + + +def test_filesystem_object_with_custom_property(fs_store): + camp = Campaign(name="Scipio Africanus", + objective="Defeat the Carthaginians", + x_empire="Roman", + allow_custom=True) + + fs_store.add(camp, True) + + camp_r = fs_store.get(camp.id, True) + assert camp_r.id == camp.id + assert camp_r.x_empire == camp.x_empire + + +def test_filesystem_object_with_custom_property_in_bundle(fs_store): + camp = Campaign(name="Scipio Africanus", + objective="Defeat the Carthaginians", + x_empire="Roman", + allow_custom=True) + + bundle = Bundle(camp, allow_custom=True) + fs_store.add(bundle, True) + + camp_r = fs_store.get(camp.id, True) + assert camp_r.id == camp.id + assert camp_r.x_empire == camp.x_empire + + +def test_filesystem_custom_object(fs_store): + @CustomObject('x-new-obj', [ + ('property1', properties.StringProperty(required=True)), + ]) + class NewObj(): + pass + + newobj = NewObj(property1='something') + fs_store.add(newobj, True) + + newobj_r = fs_store.get(newobj.id, True) + assert newobj_r.id == newobj.id + assert newobj_r.property1 == 'something' + + # remove dir + shutil.rmtree(os.path.join(FS_PATH, "x-new-obj"), True) diff --git a/stix2/test/test_memory.py b/stix2/test/test_memory.py new file mode 100644 index 0000000..0603bf7 --- /dev/null +++ b/stix2/test/test_memory.py @@ -0,0 +1,270 @@ +import pytest + +from stix2 import (Bundle, Campaign, CustomObject, Filter, MemorySource, + MemoryStore, properties) +from stix2.sources import make_id + +IND1 = { + "created": "2017-01-27T13:49:53.935Z", + "id": "indicator--d81f86b9-975b-bc0b-775e-810c5ad45a4f", + "labels": [ + "url-watchlist" + ], + "modified": "2017-01-27T13:49:53.935Z", + "name": "Malicious site hosting 
downloader", + "pattern": "[url:value = 'http://x4z9arb.cn/4712']", + "type": "indicator", + "valid_from": "2017-01-27T13:49:53.935382Z" +} +IND2 = { + "created": "2017-01-27T13:49:53.935Z", + "id": "indicator--d81f86b9-975b-bc0b-775e-810c5ad45a4f", + "labels": [ + "url-watchlist" + ], + "modified": "2017-01-27T13:49:53.935Z", + "name": "Malicious site hosting downloader", + "pattern": "[url:value = 'http://x4z9arb.cn/4712']", + "type": "indicator", + "valid_from": "2017-01-27T13:49:53.935382Z" +} +IND3 = { + "created": "2017-01-27T13:49:53.935Z", + "id": "indicator--d81f86b9-975b-bc0b-775e-810c5ad45a4f", + "labels": [ + "url-watchlist" + ], + "modified": "2017-01-27T13:49:53.936Z", + "name": "Malicious site hosting downloader", + "pattern": "[url:value = 'http://x4z9arb.cn/4712']", + "type": "indicator", + "valid_from": "2017-01-27T13:49:53.935382Z" +} +IND4 = { + "created": "2017-01-27T13:49:53.935Z", + "id": "indicator--d81f86b8-975b-bc0b-775e-810c5ad45a4f", + "labels": [ + "url-watchlist" + ], + "modified": "2017-01-27T13:49:53.935Z", + "name": "Malicious site hosting downloader", + "pattern": "[url:value = 'http://x4z9arb.cn/4712']", + "type": "indicator", + "valid_from": "2017-01-27T13:49:53.935382Z" +} +IND5 = { + "created": "2017-01-27T13:49:53.935Z", + "id": "indicator--d81f86b8-975b-bc0b-775e-810c5ad45a4f", + "labels": [ + "url-watchlist" + ], + "modified": "2017-01-27T13:49:53.935Z", + "name": "Malicious site hosting downloader", + "pattern": "[url:value = 'http://x4z9arb.cn/4712']", + "type": "indicator", + "valid_from": "2017-01-27T13:49:53.935382Z" +} +IND6 = { + "created": "2017-01-27T13:49:53.935Z", + "id": "indicator--d81f86b9-975b-bc0b-775e-810c5ad45a4f", + "labels": [ + "url-watchlist" + ], + "modified": "2017-01-31T13:49:53.935Z", + "name": "Malicious site hosting downloader", + "pattern": "[url:value = 'http://x4z9arb.cn/4712']", + "type": "indicator", + "valid_from": "2017-01-27T13:49:53.935382Z" +} +IND7 = { + "created": 
"2017-01-27T13:49:53.935Z", + "id": "indicator--d81f86b8-975b-bc0b-775e-810c5ad45a4f", + "labels": [ + "url-watchlist" + ], + "modified": "2017-01-27T13:49:53.935Z", + "name": "Malicious site hosting downloader", + "pattern": "[url:value = 'http://x4z9arb.cn/4712']", + "type": "indicator", + "valid_from": "2017-01-27T13:49:53.935382Z" +} +IND8 = { + "created": "2017-01-27T13:49:53.935Z", + "id": "indicator--d81f86b8-975b-bc0b-775e-810c5ad45a4f", + "labels": [ + "url-watchlist" + ], + "modified": "2017-01-27T13:49:53.935Z", + "name": "Malicious site hosting downloader", + "pattern": "[url:value = 'http://x4z9arb.cn/4712']", + "type": "indicator", + "valid_from": "2017-01-27T13:49:53.935382Z" +} + +STIX_OBJS2 = [IND6, IND7, IND8] +STIX_OBJS1 = [IND1, IND2, IND3, IND4, IND5] + + [email protected] +def mem_store(): + yield MemoryStore(STIX_OBJS1) + + [email protected] +def mem_source(): + yield MemorySource(STIX_OBJS1) + + +def test_memory_source_get(mem_source): + resp = mem_source.get("indicator--d81f86b8-975b-bc0b-775e-810c5ad45a4f") + assert resp["id"] == "indicator--d81f86b8-975b-bc0b-775e-810c5ad45a4f" + + +def test_memory_source_get_nonexistant_object(mem_source): + resp = mem_source.get("tool--d81f86b8-975b-bc0b-775e-810c5ad45a4f") + assert resp is None + + +def test_memory_store_all_versions(mem_store): + # Add bundle of items to sink + mem_store.add(dict(id="bundle--%s" % make_id(), + objects=STIX_OBJS2, + spec_version="2.0", + type="bundle")) + + resp = mem_store.all_versions("indicator--d81f86b9-975b-bc0b-775e-810c5ad45a4f") + assert len(resp) == 1 # MemoryStore can only store 1 version of each object + + +def test_memory_store_query(mem_store): + query = [Filter('type', '=', 'malware')] + resp = mem_store.query(query) + assert len(resp) == 0 + + +def test_memory_store_query_single_filter(mem_store): + query = Filter('id', '=', 'indicator--d81f86b8-975b-bc0b-775e-810c5ad45a4f') + resp = mem_store.query(query) + assert len(resp) == 1 + + +def 
test_memory_store_query_empty_query(mem_store): + resp = mem_store.query() + # sort since returned in random order + resp = sorted(resp, key=lambda k: k['id']) + assert len(resp) == 2 + assert resp[0]['id'] == 'indicator--d81f86b8-975b-bc0b-775e-810c5ad45a4f' + assert resp[0]['modified'] == '2017-01-27T13:49:53.935Z' + assert resp[1]['id'] == 'indicator--d81f86b9-975b-bc0b-775e-810c5ad45a4f' + assert resp[1]['modified'] == '2017-01-27T13:49:53.936Z' + + +def test_memory_store_query_multiple_filters(mem_store): + mem_store.source.filters.add(Filter('type', '=', 'indicator')) + query = Filter('id', '=', 'indicator--d81f86b8-975b-bc0b-775e-810c5ad45a4f') + resp = mem_store.query(query) + assert len(resp) == 1 + + +def test_memory_store_add_stix_object_str(mem_store): + # add stix object string + camp_id = "campaign--111111b6-1112-4fb0-111b-b111107ca70a" + camp_name = "Aurelius" + camp_alias = "Purple Robes" + camp = """{ + "name": "%s", + "type": "campaign", + "objective": "German and French Intelligence Services", + "aliases": ["%s"], + "id": "%s", + "created": "2017-05-31T21:31:53.197755Z" + }""" % (camp_name, camp_alias, camp_id) + + mem_store.add(camp) + + camp_r = mem_store.get(camp_id) + assert camp_r["id"] == camp_id + assert camp_r["name"] == camp_name + assert camp_alias in camp_r["aliases"] + + +def test_memory_store_add_stix_bundle_str(mem_store): + # add stix bundle string + camp_id = "campaign--133111b6-1112-4fb0-111b-b111107ca70a" + camp_name = "Atilla" + camp_alias = "Huns" + bund = """{ + "type": "bundle", + "id": "bundle--112211b6-1112-4fb0-111b-b111107ca70a", + "spec_version": "2.0", + "objects": [ + { + "name": "%s", + "type": "campaign", + "objective": "Bulgarian, Albanian and Romanian Intelligence Services", + "aliases": ["%s"], + "id": "%s", + "created": "2017-05-31T21:31:53.197755Z" + } + ] + }""" % (camp_name, camp_alias, camp_id) + + mem_store.add(bund) + + camp_r = mem_store.get(camp_id) + assert camp_r["id"] == camp_id + assert 
camp_r["name"] == camp_name + assert camp_alias in camp_r["aliases"] + + +def test_memory_store_add_invalid_object(mem_store): + ind = ('indicator', IND1) # tuple isn't valid + with pytest.raises(TypeError) as excinfo: + mem_store.add(ind) + assert 'stix_data must be' in str(excinfo.value) + assert 'a STIX object' in str(excinfo.value) + assert 'JSON formatted STIX' in str(excinfo.value) + assert 'JSON formatted STIX bundle' in str(excinfo.value) + + +def test_memory_store_object_with_custom_property(mem_store): + camp = Campaign(name="Scipio Africanus", + objective="Defeat the Carthaginians", + x_empire="Roman", + allow_custom=True) + + mem_store.add(camp, True) + + camp_r = mem_store.get(camp.id, True) + assert camp_r.id == camp.id + assert camp_r.x_empire == camp.x_empire + + +def test_memory_store_object_with_custom_property_in_bundle(mem_store): + camp = Campaign(name="Scipio Africanus", + objective="Defeat the Carthaginians", + x_empire="Roman", + allow_custom=True) + + bundle = Bundle(camp, allow_custom=True) + mem_store.add(bundle, True) + + bundle_r = mem_store.get(bundle.id, True) + camp_r = bundle_r['objects'][0] + assert camp_r.id == camp.id + assert camp_r.x_empire == camp.x_empire + + +def test_memory_store_custom_object(mem_store): + @CustomObject('x-new-obj', [ + ('property1', properties.StringProperty(required=True)), + ]) + class NewObj(): + pass + + newobj = NewObj(property1='something') + mem_store.add(newobj, True) + + newobj_r = mem_store.get(newobj.id, True) + assert newobj_r.id == newobj.id + assert newobj_r.property1 == 'something'
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_issue_reference", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 0, "test_score": 3 }, "num_modified_files": 7 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "coverage" ], "pre_install": null, "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 antlr4-python3-runtime==4.9.3 async-generator==1.10 attrs==22.2.0 Babel==2.11.0 bleach==4.1.0 bump2version==1.0.1 bumpversion==0.6.0 certifi==2021.5.30 cfgv==3.3.1 charset-normalizer==2.0.12 coverage==6.2 decorator==5.1.1 defusedxml==0.7.1 distlib==0.3.9 docutils==0.18.1 entrypoints==0.4 filelock==3.4.1 identify==2.4.4 idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 importlib-resources==5.2.3 iniconfig==1.1.1 ipython-genutils==0.2.0 Jinja2==3.0.3 jsonschema==3.2.0 jupyter-client==7.1.2 jupyter-core==4.9.2 jupyterlab-pygments==0.1.2 MarkupSafe==2.0.1 mistune==0.8.4 nbclient==0.5.9 nbconvert==6.0.7 nbformat==5.1.3 nbsphinx==0.8.8 nest-asyncio==1.6.0 nodeenv==1.6.0 packaging==21.3 pandocfilters==1.5.1 platformdirs==2.4.0 pluggy==1.0.0 pre-commit==2.17.0 py==1.11.0 Pygments==2.14.0 pyparsing==3.1.4 pyrsistent==0.18.0 pytest==7.0.1 pytest-cov==4.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==6.0.1 pyzmq==25.1.2 requests==2.27.1 simplejson==3.20.1 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinx-prompt==1.5.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 -e git+https://github.com/oasis-open/cti-python-stix2.git@482135465bb9c7bb8cd1ec5eb600d3990b65e092#egg=stix2 stix2-patterns==2.0.0 taxii2-client==2.3.0 testpath==0.6.0 toml==0.10.2 tomli==1.2.3 tornado==6.1 tox==3.28.0 traitlets==4.3.3 typing_extensions==4.1.1 urllib3==1.26.20 virtualenv==20.16.2 webencodings==0.5.1 zipp==3.6.0
name: cti-python-stix2 channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - antlr4-python3-runtime==4.9.3 - async-generator==1.10 - attrs==22.2.0 - babel==2.11.0 - bleach==4.1.0 - bump2version==1.0.1 - bumpversion==0.6.0 - cfgv==3.3.1 - charset-normalizer==2.0.12 - coverage==6.2 - decorator==5.1.1 - defusedxml==0.7.1 - distlib==0.3.9 - docutils==0.18.1 - entrypoints==0.4 - filelock==3.4.1 - identify==2.4.4 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - importlib-resources==5.2.3 - iniconfig==1.1.1 - ipython-genutils==0.2.0 - jinja2==3.0.3 - jsonschema==3.2.0 - jupyter-client==7.1.2 - jupyter-core==4.9.2 - jupyterlab-pygments==0.1.2 - markupsafe==2.0.1 - mistune==0.8.4 - nbclient==0.5.9 - nbconvert==6.0.7 - nbformat==5.1.3 - nbsphinx==0.8.8 - nest-asyncio==1.6.0 - nodeenv==1.6.0 - packaging==21.3 - pandocfilters==1.5.1 - platformdirs==2.4.0 - pluggy==1.0.0 - pre-commit==2.17.0 - py==1.11.0 - pygments==2.14.0 - pyparsing==3.1.4 - pyrsistent==0.18.0 - pytest==7.0.1 - pytest-cov==4.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==6.0.1 - pyzmq==25.1.2 - requests==2.27.1 - simplejson==3.20.1 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinx-prompt==1.5.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - 
sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - stix2-patterns==2.0.0 - taxii2-client==2.3.0 - testpath==0.6.0 - toml==0.10.2 - tomli==1.2.3 - tornado==6.1 - tox==3.28.0 - traitlets==4.3.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - virtualenv==20.16.2 - webencodings==0.5.1 - zipp==3.6.0 prefix: /opt/conda/envs/cti-python-stix2
[ "stix2/test/test_bundle.py::test_stix_object_property", "stix2/test/test_custom.py::test_custom_property_in_bundled_object", "stix2/test/test_filesystem.py::test_filesystem_store_get_stored_as_object", "stix2/test/test_filesystem.py::test_filesystem_store_query_multiple_filters", "stix2/test/test_filesystem.py::test_filesystem_store_add_as_bundle", "stix2/test/test_filesystem.py::test_filesystem_add_bundle_object", "stix2/test/test_filesystem.py::test_filesystem_store_add_invalid_object", "stix2/test/test_filesystem.py::test_filesystem_object_with_custom_property", "stix2/test/test_filesystem.py::test_filesystem_object_with_custom_property_in_bundle", "stix2/test/test_filesystem.py::test_filesystem_custom_object", "stix2/test/test_memory.py::test_memory_store_query_single_filter", "stix2/test/test_memory.py::test_memory_store_query_empty_query", "stix2/test/test_memory.py::test_memory_store_query_multiple_filters", "stix2/test/test_memory.py::test_memory_store_add_stix_bundle_str", "stix2/test/test_memory.py::test_memory_store_add_invalid_object", "stix2/test/test_memory.py::test_memory_store_object_with_custom_property", "stix2/test/test_memory.py::test_memory_store_object_with_custom_property_in_bundle", "stix2/test/test_memory.py::test_memory_store_custom_object" ]
[ "stix2/test/test_filesystem.py::test_filesytem_source_query_single", "stix2/test/test_filesystem.py::test_filesystem_store_query", "stix2/test/test_filesystem.py::test_filesystem_store_query_single_filter", "stix2/test/test_filesystem.py::test_filesystem_store_empty_query", "stix2/test/test_filesystem.py::test_filesystem_store_query_dont_include_type_folder" ]
[ "stix2/test/test_bundle.py::test_empty_bundle", "stix2/test/test_bundle.py::test_bundle_with_wrong_type", "stix2/test/test_bundle.py::test_bundle_id_must_start_with_bundle", "stix2/test/test_bundle.py::test_bundle_with_wrong_spec_version", "stix2/test/test_bundle.py::test_create_bundle", "stix2/test/test_bundle.py::test_create_bundle_with_positional_args", "stix2/test/test_bundle.py::test_create_bundle_with_positional_listarg", "stix2/test/test_bundle.py::test_create_bundle_with_listarg_and_positional_arg", "stix2/test/test_bundle.py::test_create_bundle_with_listarg_and_kwarg", "stix2/test/test_bundle.py::test_create_bundle_with_arg_listarg_and_kwarg", "stix2/test/test_bundle.py::test_create_bundle_invalid", "stix2/test/test_bundle.py::test_parse_bundle", "stix2/test/test_bundle.py::test_parse_unknown_type", "stix2/test/test_custom.py::test_identity_custom_property", "stix2/test/test_custom.py::test_identity_custom_property_invalid", "stix2/test/test_custom.py::test_identity_custom_property_allowed", "stix2/test/test_custom.py::test_parse_identity_custom_property[{\\n", "stix2/test/test_custom.py::test_custom_object_type", "stix2/test/test_custom.py::test_custom_object_no_init", "stix2/test/test_custom.py::test_parse_custom_object_type", "stix2/test/test_custom.py::test_parse_unregistered_custom_object_type", "stix2/test/test_custom.py::test_custom_observable_object", "stix2/test/test_custom.py::test_custom_observable_object_no_init", "stix2/test/test_custom.py::test_custom_observable_object_invalid_ref_property", "stix2/test/test_custom.py::test_custom_observable_object_invalid_refs_property", "stix2/test/test_custom.py::test_custom_observable_object_invalid_refs_list_property", "stix2/test/test_custom.py::test_custom_observable_object_invalid_valid_refs", "stix2/test/test_custom.py::test_custom_no_properties_raises_exception", "stix2/test/test_custom.py::test_custom_wrong_properties_arg_raises_exception", 
"stix2/test/test_custom.py::test_parse_custom_observable_object", "stix2/test/test_custom.py::test_parse_unregistered_custom_observable_object", "stix2/test/test_custom.py::test_parse_invalid_custom_observable_object", "stix2/test/test_custom.py::test_observable_custom_property", "stix2/test/test_custom.py::test_observable_custom_property_invalid", "stix2/test/test_custom.py::test_observable_custom_property_allowed", "stix2/test/test_custom.py::test_observed_data_with_custom_observable_object", "stix2/test/test_custom.py::test_custom_extension", "stix2/test/test_custom.py::test_custom_extension_wrong_observable_type", "stix2/test/test_custom.py::test_custom_extension_invalid_observable", "stix2/test/test_custom.py::test_custom_extension_no_properties", "stix2/test/test_custom.py::test_custom_extension_empty_properties", "stix2/test/test_custom.py::test_custom_extension_no_init", "stix2/test/test_custom.py::test_parse_observable_with_custom_extension", "stix2/test/test_custom.py::test_parse_observable_with_unregistered_custom_extension", "stix2/test/test_data_sources.py::test_ds_abstract_class_smoke", "stix2/test/test_data_sources.py::test_ds_taxii", "stix2/test/test_data_sources.py::test_ds_taxii_name", "stix2/test/test_data_sources.py::test_parse_taxii_filters", "stix2/test/test_data_sources.py::test_add_get_remove_filter", "stix2/test/test_data_sources.py::test_apply_common_filters", "stix2/test/test_data_sources.py::test_filters0", "stix2/test/test_data_sources.py::test_filters1", "stix2/test/test_data_sources.py::test_filters2", "stix2/test/test_data_sources.py::test_filters3", "stix2/test/test_data_sources.py::test_filters4", "stix2/test/test_data_sources.py::test_filters5", "stix2/test/test_data_sources.py::test_deduplicate", "stix2/test/test_data_sources.py::test_add_remove_composite_datasource", "stix2/test/test_data_sources.py::test_composite_datasource_operations", "stix2/test/test_filesystem.py::test_filesystem_source_nonexistent_folder", 
"stix2/test/test_filesystem.py::test_filesystem_sink_nonexistent_folder", "stix2/test/test_filesystem.py::test_filesytem_source_get_object", "stix2/test/test_filesystem.py::test_filesytem_source_get_nonexistent_object", "stix2/test/test_filesystem.py::test_filesytem_source_all_versions", "stix2/test/test_filesystem.py::test_filesytem_source_query_multiple", "stix2/test/test_filesystem.py::test_filesystem_sink_add_python_stix_object", "stix2/test/test_filesystem.py::test_filesystem_sink_add_stix_object_dict", "stix2/test/test_filesystem.py::test_filesystem_sink_add_stix_bundle_dict", "stix2/test/test_filesystem.py::test_filesystem_sink_add_json_stix_object", "stix2/test/test_filesystem.py::test_filesystem_sink_json_stix_bundle", "stix2/test/test_filesystem.py::test_filesystem_sink_add_objects_list", "stix2/test/test_filesystem.py::test_filesystem_store_get_stored_as_bundle", "stix2/test/test_filesystem.py::test_filesystem_store_all_versions", "stix2/test/test_filesystem.py::test_filesystem_store_add", "stix2/test/test_memory.py::test_memory_source_get", "stix2/test/test_memory.py::test_memory_source_get_nonexistant_object", "stix2/test/test_memory.py::test_memory_store_all_versions", "stix2/test/test_memory.py::test_memory_store_query", "stix2/test/test_memory.py::test_memory_store_add_stix_object_str" ]
[]
BSD 3-Clause "New" or "Revised" License
1,780
[ "stix2/base.py", "stix2/sources/__init__.py", "stix2/sources/taxii.py", "stix2/sources/memory.py", "stix2/core.py", "stix2/sources/filesystem.py", "stix2/utils.py" ]
[ "stix2/base.py", "stix2/sources/__init__.py", "stix2/sources/taxii.py", "stix2/sources/memory.py", "stix2/core.py", "stix2/sources/filesystem.py", "stix2/utils.py" ]
CORE-GATECH-GROUP__serpent-tools-40
ebd70724628e6108a10b47338c31d3fcb3af7d03
2017-10-19 21:23:42
224ef748f519903554f346d48071e58b43dcf902
diff --git a/.travis.yml b/.travis.yml index 19f06e3..98b5729 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,3 +1,5 @@ +env: + - ONTRAVIS=True language: python python: - "3.6" diff --git a/serpentTools/objects/__init__.py b/serpentTools/objects/__init__.py index 2ef74d4..53949d9 100644 --- a/serpentTools/objects/__init__.py +++ b/serpentTools/objects/__init__.py @@ -1,7 +1,7 @@ """Objects used to support the parsing.""" -class _SupportingObject(object): +class SupportingObject(object): """ Base supporting object. @@ -9,27 +9,15 @@ class _SupportingObject(object): ---------- container: Some parser from serpentTools.parsers Container that created this object - name: str - Name of this specific object, e.g. material name, detector name, etc. """ def __init__(self, container): - self._container = container - self._filePath = container.filePath + self.origin = container.filePath - def __repr__(self): - return '<{} from {}>'.format(self.whatAmI(), self._filePath) + def __str__(self): + return '<{} from {}>'.format(self.__class__.__name__, self.origin) - def whatAmI(self): - return type(self).__name__ - - def __getattr__(self, item): - """Search for the item in the containers metadata.""" - if item in self._container.metadata: - return self._container.metadata[item] - raise AttributeError('{} has object has no attribute \'{}\'' - .format(self, item)) @staticmethod def _convertVariableName(variable): @@ -42,13 +30,13 @@ class _SupportingObject(object): for item in lowerSplits[1:]]) -class _NamedObject(_SupportingObject): +class NamedObject(SupportingObject): """Class for named objects like materials and detectors.""" def __init__(self, container, name): - _SupportingObject.__init__(self, container) + SupportingObject.__init__(self, container) self.name = name - def __repr__(self): - return '<{} {} from {}>'.format(self.whatAmI(), - self.name, self._filePath) \ No newline at end of file + def __str__(self): + return '<{} {} from {}>'.format(self.__class__.__name__, + 
self.name, self.origin) diff --git a/serpentTools/objects/materials.py b/serpentTools/objects/materials.py index 49e2ff4..ac65c6a 100644 --- a/serpentTools/objects/materials.py +++ b/serpentTools/objects/materials.py @@ -4,11 +4,12 @@ import numpy from matplotlib import pyplot from serpentTools.settings import messages -from serpentTools.objects import _NamedObject +from serpentTools.objects import NamedObject -class DepletedMaterial(_NamedObject): - """Class for storing material data from ``_dep.m`` files. +class DepletedMaterial(NamedObject): + """ + Class for storing material data from ``_dep.m`` files. Parameters ---------- @@ -20,39 +21,67 @@ class DepletedMaterial(_NamedObject): Attributes ---------- - zai: numpy.array + zai: numpy.array or None Isotope id's - names: numpy.array + names: numpy.array or None Names of isotopes - days: numpy.array + days: numpy.array or None Days overwhich the material was depleted - adens: numpy.array + adens: numpy.array or None Atomic density over time for each nuclide + mdens: numpy.array or None + Mass density over time for each nuclide + burnup: numpy.array or None + Burnup of the material over time """ def __init__(self, parser, name): - _NamedObject.__init__(self, parser, name) - self._varData = {} - - def __getattr__(self, item): - """ - Allows the user to get items like ``zai`` and ``adens`` - with ``self.zai`` and ``self.adens``, respectively. 
- """ - if item in self._varData: - return self._varData[item] - return _NamedObject.__getattr__(self, item) + NamedObject.__init__(self, parser, name) + self.data = {} + self.zai = parser.metadata.get('zai', None) + self.names = parser.metadata.get('names', None) + self.days = parser.metadata.get('days', None) + self.__burnup__ = None + self.__adens__ = None + self.__mdens__ = None def __getitem__(self, item): - if item not in self._varData: - if item not in self._container.metadata: - raise KeyError('{} has no item {}'.format(self, item)) - return self._container.metadata[item] - return self._varData[item] + if item not in self.data: + raise KeyError('Key {} not found on material {}' + .format(item, self.name)) + return self.data[item] + + @property + def burnup(self): + if 'burnup' not in self.data: + raise AttributeError('Burnup for material {} has not been loaded' + .format(self.name)) + if self.__burnup__ is None: + self.__burnup__ = self.data['burnup'] + return self.__burnup__ + + @property + def adens(self): + if 'adens' not in self.data: + raise AttributeError('Atomic densities for material {} have not ' + 'been loaded'.format(self.name)) + if self.__adens__ is None: + self.__adens__ = self.data['adens'] + return self.__adens__ + + @property + def mdens(self): + if 'mdens' not in self.data: + raise AttributeError('Mass densities for material {} has not been ' + 'loaded'.format(self.name)) + if self.__mdens__ is None: + self.__mdens__ = self.data['mdens'] + return self.__mdens__ def addData(self, variable, rawData): - """Add data straight from the file onto a variable. + """ + Add data straight from the file onto a variable. 
Parameters ---------- @@ -70,10 +99,20 @@ class DepletedMaterial(_NamedObject): for line in rawData: if line: scratch.append([float(item) for item in line.split()]) - self._varData[newName] = numpy.array(scratch) + self.data[newName] = numpy.array(scratch) + @messages.depreciated def getXY(self, xUnits, yUnits, timePoints=None, names=None): - """Return x values for given time, and corresponding isotope values. + """Depreciated. Use getValues instead""" + if timePoints is None: + timePoints = self.days + return self.getValues(xUnits, yUnits, timePoints, names), self.days + else: + return self.getValues(xUnits, yUnits, timePoints, names) + + def getValues(self, xUnits, yUnits, timePoints=None, names=None): + """ + Return x values for given time, and corresponding isotope values. Parameters ---------- @@ -92,8 +131,6 @@ class DepletedMaterial(_NamedObject): ------- numpy.array Array of values. - numpy.array - Vector of time points only if ``timePoints`` is ``None`` Raises ------ @@ -104,20 +141,18 @@ class DepletedMaterial(_NamedObject): If at least one of the days requested is not present """ if timePoints is not None: - returnX = False timeCheck = self._checkTimePoints(xUnits, timePoints) if any(timeCheck): raise KeyError('The following times were not present in file {}' - '\n{}'.format(self._container.filePath, + '\n{}'.format(self.origin, ', '.join(timeCheck))) - else: - returnX = True - if names and 'names' not in self._container.metadata: - raise AttributeError('Parser {} has not stored the isotope names.' - .format(self._container)) + if names and self.names is None: + raise AttributeError( + 'Isotope names not stored on DepletedMaterial {}.' 
+ .format(self.name)) xVals, colIndices = self._getXSlice(xUnits, timePoints) rowIndices = self._getIsoID(names) - allY = self[yUnits] + allY = self.data[yUnits] if allY.shape[0] == 1 or len(allY.shape) == 1: # vector yVals = allY[colIndices] if colIndices else allY else: @@ -125,17 +160,15 @@ class DepletedMaterial(_NamedObject): for isoID, rowId in enumerate(rowIndices): yVals[isoID, :] = (allY[rowId][colIndices] if colIndices else allY[rowId][:]) - if returnX: - return yVals, xVals return yVals def _checkTimePoints(self, xUnits, timePoints): - valid = self[xUnits] + valid = self.days if xUnits == 'days' else self.data[xUnits] badPoints = [str(time) for time in timePoints if time not in valid] return badPoints def _getXSlice(self, xUnits, timePoints): - allX = self[xUnits] + allX = self.days if xUnits == 'days' else self.data[xUnits] if timePoints is not None: colIndices = [indx for indx, xx in enumerate(allX) if xx in timePoints] @@ -157,7 +190,8 @@ class DepletedMaterial(_NamedObject): return rowIDs def plot(self, xUnits, yUnits, timePoints=None, names=None, ax=None): - """Plot some data as a function of time for some or all isotopes. + """ + Plot some data as a function of time for some or all isotopes. 
Parameters ---------- @@ -185,9 +219,10 @@ class DepletedMaterial(_NamedObject): getXY """ - xVals, yVals = self.getXY(xUnits, yUnits, timePoints, names) - ax = ax or pyplot.subplots(1, 1)[1] - labels = names or [None] + xVals = timePoints or self.days + yVals = self.getValues(xUnits, yUnits, xVals, names) + ax = ax or pyplot.axes() + labels = names or [''] for row in range(yVals.shape[0]): ax.plot(xVals, yVals[row], label=labels[row]) return ax diff --git a/serpentTools/objects/readers.py b/serpentTools/objects/readers.py index b26ef67..b67b9d6 100644 --- a/serpentTools/objects/readers.py +++ b/serpentTools/objects/readers.py @@ -17,7 +17,7 @@ class BaseReader(object): self.metadata = {} self.settings = rc.getReaderSettings(readerSettingsLevel) - def __repr__(self): + def __str__(self): return '<{} reading {}>'.format(self.__class__.__name__, self.filePath) def read(self): diff --git a/serpentTools/parsers/depletion.py b/serpentTools/parsers/depletion.py index b6f0dda..87355bc 100644 --- a/serpentTools/parsers/depletion.py +++ b/serpentTools/parsers/depletion.py @@ -69,6 +69,9 @@ class DepletionReader(MaterialReader): self._addTotal(chunk) else: self._addMetadata(chunk) + if 'days' in self.metadata: + for mKey in self.materials: + self.materials[mKey].days = self.metadata['days'] messages.debug('Done reading depletion file') messages.debug(' found {} materials'.format(len(self.materials)))
Explicit attributes for supporting objects Currently, supporting objects look for their attributes first in their own metadata, then in the metadata of the metadata of the parent. This poses some introspection ugliness, as it is difficult to determine what attributes are on the object. Each supporting object should have its attributes explicitly stated and stored. This should improve performance and usability. Maybe use `__slots__` as well?
CORE-GATECH-GROUP/serpent-tools
diff --git a/serpentTools/tests/test_depletion.py b/serpentTools/tests/test_depletion.py index 2e00eec..6e8e0d5 100644 --- a/serpentTools/tests/test_depletion.py +++ b/serpentTools/tests/test_depletion.py @@ -120,18 +120,18 @@ class DepletedMaterialTester(_DepletionTestHelper): self.assertListEqual(self.material.zai, self.reader.metadata['zai']) numpy.testing.assert_equal(self.material.adens, expectedAdens) - numpy.testing.assert_equal(self.material.ingTox, expectedIngTox) + numpy.testing.assert_equal(self.material['ingTox'], expectedIngTox) def test_getXY_burnup_full(self): """ Verify the material can produce the full burnup vector through getXY. """ - actual, _days = self.material.getXY('days', 'burnup', ) + actual = self.material.getValues('days', 'burnup', ) numpy.testing.assert_equal(actual, self.fuelBU) def test_getXY_burnup_slice(self): """Verify depletedMaterial getXY correctly slices a vector.""" - actual = self.material.getXY('days', 'burnup', self.requestedDays) + actual = self.material.getValues('days', 'burnup', self.requestedDays) expected = [0.0E0, 1.90317E-2, 3.60163E-2, 1.74880E-1, 3.45353E-01, 8.49693E-01, 1.66071E0] numpy.testing.assert_equal(actual, expected) @@ -147,27 +147,28 @@ class DepletedMaterialTester(_DepletionTestHelper): [0.00000E+00, 2.90880E-14, 5.57897E-14, 2.75249E-13, 5.46031E-13, 1.35027E-12, 2.64702E-12], ], float) - actual = self.material.getXY('days', 'adens', names=names, - timePoints=self.requestedDays) + actual = self.material.getValues('days', 'adens', names=names, + timePoints=self.requestedDays) numpy.testing.assert_allclose(actual, expected, rtol=1E-4) - def test_getXY_adensAndTime(self): - """Verify correct atomic density and time slice are returned.""" - actualAdens, actualDays = self.material.getXY('days', 'adens', - names=['Xe135']) - numpy.testing.assert_equal(actualDays, self.reader.metadata['days']) - def test_getXY_raisesError_badTime(self): """Verify that a ValueError is raised for non-present requested 
days.""" badDays = [-1, 0, 50] with self.assertRaises(KeyError): - self.material.getXY('days', 'adens', timePoints=badDays) + self.material.getValues('days', 'adens', timePoints=badDays) def test_fetchData(self): """Verify that key errors are raised when bad data are requested.""" with self.assertRaises(KeyError): _ = self.material['fake units'] + @unittest.skipIf(os.getenv('ONTRAVIS') is not None, + "Plotting doesn't play well with Travis") + def test_plotter(self): + """Verify the plotting functionality is operational.""" + self.material.plot('days', 'adens', timePoints=self.requestedDays, + names=['Xe135', 'U235']) + if __name__ == '__main__': unittest.main()
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 5 }
1.00
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
contourpy==1.3.0 cycler==0.12.1 drewtils==0.1.9 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work fonttools==4.56.0 importlib_resources==6.5.2 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work kiwisolver==1.4.7 matplotlib==3.9.4 numpy==2.0.2 packaging @ file:///croot/packaging_1734472117206/work pillow==11.1.0 pluggy @ file:///croot/pluggy_1733169602837/work pyparsing==3.2.3 pytest @ file:///croot/pytest_1738938843180/work python-dateutil==2.9.0.post0 PyYAML==6.0.2 -e git+https://github.com/CORE-GATECH-GROUP/serpent-tools.git@ebd70724628e6108a10b47338c31d3fcb3af7d03#egg=serpentTools six==1.17.0 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work versioneer==0.29 zipp==3.21.0
name: serpent-tools channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - contourpy==1.3.0 - cycler==0.12.1 - drewtils==0.1.9 - fonttools==4.56.0 - importlib-resources==6.5.2 - kiwisolver==1.4.7 - matplotlib==3.9.4 - numpy==2.0.2 - pillow==11.1.0 - pyparsing==3.2.3 - python-dateutil==2.9.0.post0 - pyyaml==6.0.2 - six==1.17.0 - versioneer==0.29 - zipp==3.21.0 prefix: /opt/conda/envs/serpent-tools
[ "serpentTools/tests/test_depletion.py::DepletedMaterialTester::test_getXY_adens", "serpentTools/tests/test_depletion.py::DepletedMaterialTester::test_getXY_burnup_full", "serpentTools/tests/test_depletion.py::DepletedMaterialTester::test_getXY_burnup_slice", "serpentTools/tests/test_depletion.py::DepletedMaterialTester::test_getXY_raisesError_badTime", "serpentTools/tests/test_depletion.py::DepletedMaterialTester::test_plotter" ]
[]
[ "serpentTools/tests/test_depletion.py::DepletionTester::test_ReadMaterials", "serpentTools/tests/test_depletion.py::DepletionTester::test_metadata", "serpentTools/tests/test_depletion.py::DepletedMaterialTester::test_fetchData", "serpentTools/tests/test_depletion.py::DepletedMaterialTester::test_materials" ]
[]
MIT License
1,781
[ "serpentTools/objects/readers.py", "serpentTools/objects/materials.py", ".travis.yml", "serpentTools/objects/__init__.py", "serpentTools/parsers/depletion.py" ]
[ "serpentTools/objects/readers.py", "serpentTools/objects/materials.py", ".travis.yml", "serpentTools/objects/__init__.py", "serpentTools/parsers/depletion.py" ]
peter-wangxu__persist-queue-28
8cd900781aa449d2e921bf5db953d02815110646
2017-10-20 14:55:36
7a2c4d3768dfd6528cc8c1599ef773ebf310697b
codecov[bot]: # [Codecov](https://codecov.io/gh/peter-wangxu/persist-queue/pull/28?src=pr&el=h1) Report > Merging [#28](https://codecov.io/gh/peter-wangxu/persist-queue/pull/28?src=pr&el=desc) into [master](https://codecov.io/gh/peter-wangxu/persist-queue/commit/8cd900781aa449d2e921bf5db953d02815110646?src=pr&el=desc) will **increase** coverage by `0.04%`. > The diff coverage is `100%`. [![Impacted file tree graph](https://codecov.io/gh/peter-wangxu/persist-queue/pull/28/graphs/tree.svg?width=650&height=150&src=pr&token=ixVCphJIrs)](https://codecov.io/gh/peter-wangxu/persist-queue/pull/28?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #28 +/- ## ========================================== + Coverage 93.89% 93.94% +0.04% ========================================== Files 6 6 Lines 377 380 +3 Branches 44 44 ========================================== + Hits 354 357 +3 Misses 11 11 Partials 12 12 ``` | Flag | Coverage Δ | | |---|---|---| | #python | `93.94% <100%> (+0.04%)` | :arrow_up: | | [Impacted Files](https://codecov.io/gh/peter-wangxu/persist-queue/pull/28?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [persistqueue/sqlbase.py](https://codecov.io/gh/peter-wangxu/persist-queue/pull/28?src=pr&el=tree#diff-cGVyc2lzdHF1ZXVlL3NxbGJhc2UucHk=) | `94.5% <100%> (+0.18%)` | :arrow_up: | ------ [Continue to review full report at Codecov](https://codecov.io/gh/peter-wangxu/persist-queue/pull/28?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/peter-wangxu/persist-queue/pull/28?src=pr&el=footer). Last update [8cd9007...b5fa7cf](https://codecov.io/gh/peter-wangxu/persist-queue/pull/28?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
diff --git a/persistqueue/sqlbase.py b/persistqueue/sqlbase.py index 48955f7..e0a7672 100644 --- a/persistqueue/sqlbase.py +++ b/persistqueue/sqlbase.py @@ -79,6 +79,7 @@ class SQLiteBase(object): def _init(self): """Initialize the tables in DB.""" + if self.path == self._MEMORY: self.memory_sql = True log.debug("Initializing Sqlite3 Queue in memory.") @@ -99,19 +100,26 @@ class SQLiteBase(object): if not self.memory_sql: self._putter = self._new_db_connection( self.path, self.multithreading, self.timeout) - + if self.auto_commit is False: + log.warning('auto_commit=False is still experimental,' + 'only use it with care.') + self._getter.isolation_level = "DEFERRED" + self._putter.isolation_level = "DEFERRED" # SQLite3 transaction lock self.tran_lock = threading.Lock() self.put_event = threading.Event() def _new_db_connection(self, path, multithreading, timeout): + conn = None if path == self._MEMORY: - return sqlite3.connect(path, + conn = sqlite3.connect(path, check_same_thread=not multithreading) else: - return sqlite3.connect('{}/data.db'.format(path), + conn = sqlite3.connect('{}/data.db'.format(path), timeout=timeout, check_same_thread=not multithreading) + conn.execute('PRAGMA journal_mode=WAL;') + return conn @with_conditional_transaction def _insert_into(self, *record): @@ -134,7 +142,7 @@ class SQLiteBase(object): def _count(self): sql = 'SELECT COUNT({}) FROM {}'.format(self._key_column, self._table_name) - row = self._putter.execute(sql).fetchone() + row = self._getter.execute(sql).fetchone() return row[0] if row else 0 def _task_done(self): diff --git a/persistqueue/sqlqueue.py b/persistqueue/sqlqueue.py index 6c86f2f..2a53cfe 100644 --- a/persistqueue/sqlqueue.py +++ b/persistqueue/sqlqueue.py @@ -15,6 +15,9 @@ sqlite3.enable_callback_tracebacks(True) log = logging.getLogger(__name__) +# 10 seconds internal for `wait` of event +TICK_FOR_WAIT = 10 + class SQLiteQueue(sqlbase.SQLiteBase): """SQLite3 based FIFO queue.""" @@ -44,7 +47,7 @@ class 
SQLiteQueue(sqlbase.SQLiteBase): def _pop(self): with self.action_lock: row = self._select() - # Perhaps a sqilite bug, sometimes (None, None) is returned + # Perhaps a sqlite3 bug, sometimes (None, None) is returned # by select, below can avoid these invalid records. if row and row[0] is not None: self._delete(row[0]) @@ -54,23 +57,31 @@ class SQLiteQueue(sqlbase.SQLiteBase): return row[1] # pickled data return None - def get(self, block=False): - unpickled = self._pop() - item = None - if unpickled: - item = pickle.loads(unpickled) + def get(self, block=True, timeout=None): + if not block: + pickled = self._pop() + if not pickled: + raise Empty + elif timeout is None: + # block until a put event. + pickled = self._pop() + while not pickled: + self.put_event.wait(TICK_FOR_WAIT) + pickled = self._pop() + elif timeout < 0: + raise ValueError("'timeout' must be a non-negative number") else: - if block: - end = _time.time() + 10.0 - while not unpickled: - remaining = end - _time.time() - if remaining <= 0.0: - raise Empty - # wait for no more than 10 seconds - self.put_event.wait(remaining) - unpickled = self._pop() - item = pickle.loads(unpickled) - + # block until the timeout reached + endtime = _time.time() + timeout + pickled = self._pop() + while not pickled: + remaining = endtime - _time.time() + if remaining <= 0.0: + raise Empty + self.put_event.wait( + TICK_FOR_WAIT if TICK_FOR_WAIT < remaining else remaining) + pickled = self._pop() + item = pickle.loads(pickled) return item def task_done(self):
FIFOSQLiteQueue: the get() method returns None instead of blocking and if I specify get(block=True) it raises the empty exception
peter-wangxu/persist-queue
diff --git a/tests/test_sqlqueue.py b/tests/test_sqlqueue.py index 1e63431..fe00f42 100644 --- a/tests/test_sqlqueue.py +++ b/tests/test_sqlqueue.py @@ -18,7 +18,7 @@ def task_done_if_required(queue): class SQLite3QueueTest(unittest.TestCase): def setUp(self): self.path = tempfile.mkdtemp(suffix='sqlqueue') - self.auto_commit = False + self.auto_commit = True def tearDown(self): shutil.rmtree(self.path, ignore_errors=True) @@ -30,7 +30,12 @@ class SQLite3QueueTest(unittest.TestCase): task_done_if_required(q) d = q.get() self.assertEqual('first', d) - self.assertRaises(Empty, q.get, block=True) + self.assertRaises(Empty, q.get, block=False) + + # assert with timeout + self.assertRaises(Empty, q.get, block=True, timeout=1.0) + # assert with negative timeout + self.assertRaises(ValueError, q.get, block=True, timeout=-1.0) def test_open_close_single(self): """Write 1 item, close, reopen checking if same item is there""" @@ -75,7 +80,7 @@ class SQLite3QueueTest(unittest.TestCase): q.get() n -= 1 else: - self.assertEqual(None, q.get()) + self.assertRaises(Empty, q.get, block=False) else: q.put('var%d' % random.getrandbits(16)) task_done_if_required(q) @@ -108,7 +113,7 @@ class SQLite3QueueTest(unittest.TestCase): c.join() self.assertEqual(0, m_queue.size) self.assertEqual(0, len(m_queue)) - self.assertIsNone(m_queue.get(block=False)) + self.assertRaises(Empty, m_queue.get, block=False) def test_multi_threaded_multi_producer(self): """Test sqlqueue can be used by multiple producers.""" @@ -175,19 +180,35 @@ class SQLite3QueueTest(unittest.TestCase): self.assertEqual(0, queue.qsize()) for x in range(1000): - self.assertNotEqual(0, counter[x], "0 for counter's index %s" % x) + self.assertNotEqual(0, counter[x], + "not 0 for counter's index %s" % x) -class SQLite3QueueAutoCommitTest(SQLite3QueueTest): +class SQLite3QueueNoAutoCommitTest(SQLite3QueueTest): def setUp(self): self.path = tempfile.mkdtemp(suffix='sqlqueue_auto_commit') - self.auto_commit = True + self.auto_commit 
= False + + def test_multiple_consumers(self): + """ + FAIL: test_multiple_consumers ( + -tests.test_sqlqueue.SQLite3QueueNoAutoCommitTest) + Test sqlqueue can be used by multiple consumers. + ---------------------------------------------------------------------- + Traceback (most recent call last): + File "persist-queue\tests\test_sqlqueue.py", line 183, + -in test_multiple_consumers + self.assertEqual(0, queue.qsize()) + AssertionError: 0 != 72 + :return: + """ + self.skipTest('Skipped due to a known bug above.') class SQLite3QueueInMemory(SQLite3QueueTest): def setUp(self): self.path = ":memory:" - self.auto_commit = False + self.auto_commit = True def test_open_close_1000(self): self.skipTest('Memory based sqlite is not persistent.') @@ -196,16 +217,22 @@ class SQLite3QueueInMemory(SQLite3QueueTest): self.skipTest('Memory based sqlite is not persistent.') def test_multiple_consumers(self): - # TODO(peter) when the shared-cache feature is available in default - # Python of most Linux distros, this should be easy:). 
- self.skipTest('In-memory based sqlite needs the support ' - 'of shared-cache') + self.skipTest('Skipped due to occasional crash during ' + 'multithreading mode.') + + def test_multi_threaded_multi_producer(self): + self.skipTest('Skipped due to occasional crash during ' + 'multithreading mode.') + + def test_multi_threaded_parallel(self): + self.skipTest('Skipped due to occasional crash during ' + 'multithreading mode.') class FILOSQLite3QueueTest(unittest.TestCase): def setUp(self): self.path = tempfile.mkdtemp(suffix='filo_sqlqueue') - self.auto_commit = False + self.auto_commit = True def tearDown(self): shutil.rmtree(self.path, ignore_errors=True) @@ -230,7 +257,7 @@ class FILOSQLite3QueueTest(unittest.TestCase): self.assertEqual('foobar', data) -class FILOSQLite3QueueAutoCommitTest(FILOSQLite3QueueTest): +class FILOSQLite3QueueNoAutoCommitTest(FILOSQLite3QueueTest): def setUp(self): self.path = tempfile.mkdtemp(suffix='filo_sqlqueue_auto_commit') - self.auto_commit = True + self.auto_commit = False
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 1 }, "num_modified_files": 2 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "nose2", "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": [ "requirements.txt", "test-requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 cov-core==1.15.0 coverage==6.2 distlib==0.3.9 dnspython==2.2.1 eventlet==0.33.3 filelock==3.4.1 flake8==5.0.4 greenlet==2.0.2 importlib-metadata==4.2.0 importlib-resources==5.4.0 iniconfig==1.1.1 mccabe==0.7.0 mock==5.2.0 nose2==0.13.0 packaging==21.3 -e git+https://github.com/peter-wangxu/persist-queue.git@8cd900781aa449d2e921bf5db953d02815110646#egg=persist_queue platformdirs==2.4.0 pluggy==1.0.0 py==1.11.0 pycodestyle==2.9.1 pyflakes==2.5.0 pyparsing==3.1.4 pytest==7.0.1 six==1.17.0 tomli==1.2.3 typing_extensions==4.1.1 virtualenv==20.16.2 zipp==3.6.0
name: persist-queue channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - cov-core==1.15.0 - coverage==6.2 - distlib==0.3.9 - dnspython==2.2.1 - eventlet==0.33.3 - filelock==3.4.1 - flake8==5.0.4 - greenlet==2.0.2 - importlib-metadata==4.2.0 - importlib-resources==5.4.0 - iniconfig==1.1.1 - mccabe==0.7.0 - mock==5.2.0 - nose2==0.13.0 - packaging==21.3 - platformdirs==2.4.0 - pluggy==1.0.0 - py==1.11.0 - pycodestyle==2.9.1 - pyflakes==2.5.0 - pyparsing==3.1.4 - pytest==7.0.1 - six==1.17.0 - tomli==1.2.3 - typing-extensions==4.1.1 - virtualenv==20.16.2 - zipp==3.6.0 prefix: /opt/conda/envs/persist-queue
[ "tests/test_sqlqueue.py::SQLite3QueueTest::test_multi_threaded_parallel", "tests/test_sqlqueue.py::SQLite3QueueTest::test_raise_empty", "tests/test_sqlqueue.py::SQLite3QueueTest::test_random_read_write", "tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_multi_threaded_parallel", "tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_raise_empty", "tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_random_read_write", "tests/test_sqlqueue.py::SQLite3QueueInMemory::test_raise_empty", "tests/test_sqlqueue.py::SQLite3QueueInMemory::test_random_read_write" ]
[]
[ "tests/test_sqlqueue.py::SQLite3QueueTest::test_multi_threaded_multi_producer", "tests/test_sqlqueue.py::SQLite3QueueTest::test_multiple_consumers", "tests/test_sqlqueue.py::SQLite3QueueTest::test_open_close_1000", "tests/test_sqlqueue.py::SQLite3QueueTest::test_open_close_single", "tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_multi_threaded_multi_producer", "tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_open_close_1000", "tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_open_close_single", "tests/test_sqlqueue.py::FILOSQLite3QueueTest::test_open_close_1000", "tests/test_sqlqueue.py::FILOSQLite3QueueNoAutoCommitTest::test_open_close_1000" ]
[]
BSD 3-Clause "New" or "Revised" License
1,783
[ "persistqueue/sqlbase.py", "persistqueue/sqlqueue.py" ]
[ "persistqueue/sqlbase.py", "persistqueue/sqlqueue.py" ]
joblib__joblib-565
2f85cc02f806943119a63ed0ceb4331010af242b
2017-10-20 18:12:37
902fb6bbcf75c461d1b6703e5a01605fc592f214
elsander: Possible considerations: - if we're hashing the object id, do we need to hash any of the other properties at all? - does the object id need to be added for non-numpy-array objects as well? (I could not replicate this problem for a list of lists, for example, but there might be other cases that have this same issue) codecov[bot]: # [Codecov](https://codecov.io/gh/joblib/joblib/pull/565?src=pr&el=h1) Report > Merging [#565](https://codecov.io/gh/joblib/joblib/pull/565?src=pr&el=desc) into [master](https://codecov.io/gh/joblib/joblib/commit/8892839cb883b95fd2de3656bf719488eacb851f?src=pr&el=desc) will **decrease** coverage by `0.04%`. > The diff coverage is `100%`. [![Impacted file tree graph](https://codecov.io/gh/joblib/joblib/pull/565/graphs/tree.svg?token=gA6LF5DGTW&height=150&width=650&src=pr)](https://codecov.io/gh/joblib/joblib/pull/565?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #565 +/- ## ========================================== - Coverage 94.28% 94.24% -0.05% ========================================== Files 39 39 Lines 5008 5004 -4 ========================================== - Hits 4722 4716 -6 - Misses 286 288 +2 ``` | [Impacted Files](https://codecov.io/gh/joblib/joblib/pull/565?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [joblib/test/test\_hashing.py](https://codecov.io/gh/joblib/joblib/pull/565?src=pr&el=tree#diff-am9ibGliL3Rlc3QvdGVzdF9oYXNoaW5nLnB5) | `98.85% <100%> (-0.03%)` | :arrow_down: | | [joblib/hashing.py](https://codecov.io/gh/joblib/joblib/pull/565?src=pr&el=tree#diff-am9ibGliL2hhc2hpbmcucHk=) | `92.98% <100%> (ø)` | :arrow_up: | | [joblib/\_parallel\_backends.py](https://codecov.io/gh/joblib/joblib/pull/565?src=pr&el=tree#diff-am9ibGliL19wYXJhbGxlbF9iYWNrZW5kcy5weQ==) | `94.85% <0%> (-1.41%)` | :arrow_down: | | [joblib/test/test\_parallel.py](https://codecov.io/gh/joblib/joblib/pull/565?src=pr&el=tree#diff-am9ibGliL3Rlc3QvdGVzdF9wYXJhbGxlbC5weQ==) | `95.92% <0%> (+0.21%)` | :arrow_up: | ------ [Continue to review 
full report at Codecov](https://codecov.io/gh/joblib/joblib/pull/565?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/joblib/joblib/pull/565?src=pr&el=footer). Last update [8892839...c5f300c](https://codecov.io/gh/joblib/joblib/pull/565?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments). lesteve: Thanks a lot for your PR, I am afraid this is not the right fix. What you need to change is not the hashing but the auto memmapping mechanism. You need to change the lines around here: https://github.com/joblib/joblib/blob/8892839cb883b95fd2de3656bf719488eacb851f/joblib/_memmapping_reducer.py#L252-L256 and add the id of the object to the filename. You probably want to add a test too (the original bug report may provide a good test). elsander: Thanks for the feedback-- this fix is much better, and didn't involve updating any other tests. lesteve: The Windows failure looks like a glitch ... it would be great to push an empty commit and see whether it goes away. The Python 3.4 failure looks weird ... probably you need to use @with_numpy for your test. But why the error only happens with Python 3.4, no clue ... ogrisel: numpy is not a mandatory dependency of joblib and tests that use numpy should be marked as such (see other examples in the test suite). This is the cause of the failed test on travis. Please also add an entry in the changelog and +1 for merge. elsander: I did add an entry in the changelog, unless there's another place to add the change other than `CHANGES.rst`. Just added the decorator, hopefully that will resolve the testing issue. elsander: I'm not sure how to proceed with the Windows error. It failed after pushing a commit, so if it's a glitch, it's a consistent one. 
It seems to be failing on the Parallel call itself, not on assertion. Unfortunately I know very little about development for Windows... does anyone have thoughts on how to resolve the error? lesteve: I can reproduce the Windows problem so it needs some investigation. You don't happen to have a Windows machine easily accessible with Python installed? Otherwise I'll try to look at it, but I can not promise it will happen before next week. Maybe @ogrisel has a suggestion off the top of his head, because I seem to remember he fixed similar issues about memmap permissions errors on Windows in the past. elsander: Unfortunately I don't, but I won't have time to work on this more until sometime next week anyway. ogrisel: Unfortunately, I don't know why it fails in this case and why it did not fail before. Maybe we can ignore the error under windows and only rely on the `atexit` callback. ogrisel: In particular I do not understand why this new test fails while `test_auto_memmap_on_arrays_from_generator` does not. elsander: Agreed, I don't understand why this test fails on Windows. Is there a flag I can add to ignore the error? I can add that if the maintainers are comfortable with ignoring the error, otherwise I'm not sure how to move forward with this PR, unfortunately. lesteve: I think this is fine to skip the tests on Windows (add a comment explaining why we are skipping it). There is an example of how do this in test_hashing.py: ```py @skipif(sys.platform == 'win32', reason='This test is not stable under windows' ' for some reason') def test_hash_numpy_performance(): ... ``` elsander: Got it! I'll implement this either later this week or early next week. elsander: Sorry for the long delay on this. This fix should do it. GaelVaroquaux: This seems like a PR that we should merge. @lesteve : any specific reason that we did not? GaelVaroquaux: Cc @ogrisel, @tomMoral lesteve: The reason we did not merge it is: https://github.com/joblib/joblib/pull/565#discussion_r157519117. 
Basically if we pass `X` and `X.copy()` as arguments, we want them to be memmapped to the same array when `mmap_mode='r'`, which is what is used in 99% of the cases. Also IIRC, talking with @ogrisel, it seems like a bad idea in general to allow mmap in write mode or at least there was not an obvious real use case for it. To sum up this is a bug fix in an edge case of a use case we are not entirely sure whether it actually exists for a good reason ... GaelVaroquaux: Cc @ogrisel, @tomMoral GaelVaroquaux: OK, so I now understand the logic: if mmap_mode="r" or mmap_mode="c", the data on the disk is never modified, so the bug cannot happen. Hence it is useful to have the original behavior to save resources. So, the right fix is probably to add "id" in the filename, as @elsander did, but only if not mmap_mode in ('r', 'c'). We should probably also update the test to convince ourselves that the solution is the correct one. @elsander, does this make sense? And do you want to go ahead with the proposed solution? elsander: @GaelVaroquaux Yeah, this makes more sense to me. I'll make a fix today or tomorrow. GaelVaroquaux: Thanks Elizabeth. This is terribly useful. And sorry for the communication delay that left this PR pending. elsander: Ok, I added the logic to special-case `r+` and `w+`. I updated the test, but I couldn't think of a good way to test the behavior of `r` and `c` in the test. It's trivially true that the bug can't show up in these modes, since we don't write to disk, so I couldn't think of a good test to write that was actually testing anything relevant (as in, testing joblib behavior rather than numpy's memmapping). How does this look? GaelVaroquaux: This looks great. Thank you so much. Merging!
diff --git a/CHANGES.rst b/CHANGES.rst index 278586b..ffc1c0c 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -4,6 +4,11 @@ Latest changes Development ----------- +Elizabeth Sander + Prevent numpy arrays with the same shape and data from hashing to + the same memmap, to prevent jobs with preallocated arrays from + writing over each other. + Olivier Grisel Make ``Memory.cache`` robust to ``PermissionError (errno 13)`` under diff --git a/joblib/_memmapping_reducer.py b/joblib/_memmapping_reducer.py index 6153c67..d825b4a 100644 --- a/joblib/_memmapping_reducer.py +++ b/joblib/_memmapping_reducer.py @@ -261,8 +261,13 @@ class ArrayMemmapReducer(object): # Find a unique, concurrent safe filename for writing the # content of this array only once. - basename = "{}-{}-{}.pkl".format( - os.getpid(), id(threading.current_thread()), hash(a)) + if self._mmap_mode in ['r', 'c']: + basename = "{}-{}-{}.pkl".format( + os.getpid(), id(threading.current_thread()), hash(a)) + else: + basename = "{}-{}-{}-{}.pkl".format( + os.getpid(), id(threading.current_thread()), + hash(a), id(a)) filename = os.path.join(self._temp_folder, basename) # In case the same array with the same content is passed several
Writable output numpy arrays hashed to the same memmap I'm using `Parallel` to run jobs that all write to (large) output numpy arrays. I allocate these in advance (as zeros), then pass into the functions that are executing in parallel to be populated. In my case, these are distinct arrays of zeros of the same shape and dtype; for that reason they are all hashed to the same value by the Joblib hashers (https://github.com/joblib/joblib/blob/e82b5d6684fcb61b0f778df3e29d2985a1c54208/joblib/hashing.py#L52), and so end up sharing the memory map objects Joblib creates. As a consequence my jobs, instead of producing distinct outputs, end up writing over each other. This behaviour is perfectly fine when the memmap mode is read-only, but is wrong when the memmap mode allows writing. One solution is to include the object identity in the hash whenever the memmap mode allows writing.
joblib/joblib
diff --git a/joblib/test/test_memmapping.py b/joblib/test/test_memmapping.py index df50b4a..16f7954 100644 --- a/joblib/test/test_memmapping.py +++ b/joblib/test/test_memmapping.py @@ -7,8 +7,9 @@ from joblib.test.common import setup_autokill from joblib.test.common import teardown_autokill from joblib.test.common import with_multiprocessing from joblib.test.common import with_dev_shm -from joblib.testing import raises, parametrize +from joblib.testing import raises, parametrize, skipif from joblib.backports import make_memmap +from joblib.parallel import Parallel, delayed from joblib.pool import MemmappingPool from joblib.executor import _TestingMemmappingExecutor @@ -545,3 +546,21 @@ def test_pool_get_temp_dir(tmpdir): if sys.platform.startswith('win'): assert shared_mem is False assert pool_folder.endswith(pool_folder_name) + + +@with_numpy +@skipif(sys.platform == 'win32', reason='This test fails with a ' + 'PermissionError on Windows') +@parametrize("mmap_mode", ["r+", "w+"]) +def test_numpy_arrays_use_different_memory(mmap_mode): + def func(arr, value): + arr[:] = value + return arr + + arrays = [np.zeros((10, 10), dtype='float64') for i in range(10)] + + results = Parallel(mmap_mode=mmap_mode, max_nbytes=0, n_jobs=2)( + delayed(func)(arr, i) for i, arr in enumerate(arrays)) + + for i, arr in enumerate(results): + np.testing.assert_array_equal(arr, i)
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 2 }
0.11
{ "env_vars": null, "env_yml_path": [], "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-timeout", "codecov" ], "pre_install": [], "python": "3.6", "reqs_path": [ "continuous_integration/appveyor/requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 charset-normalizer==2.0.12 codecov==2.1.13 coverage==6.2 idna==3.10 importlib-metadata==4.8.3 iniconfig==1.1.1 -e git+https://github.com/joblib/joblib.git@2f85cc02f806943119a63ed0ceb4331010af242b#egg=joblib numpy==1.19.5 packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 pytest-timeout==2.1.0 requests==2.27.1 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 zipp==3.6.0
name: joblib channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - charset-normalizer==2.0.12 - codecov==2.1.13 - coverage==6.2 - idna==3.10 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - numpy==1.19.5 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - pytest-timeout==2.1.0 - requests==2.27.1 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - zipp==3.6.0 prefix: /opt/conda/envs/joblib
[ "joblib/test/test_memmapping.py::test_numpy_arrays_use_different_memory[r+]", "joblib/test/test_memmapping.py::test_numpy_arrays_use_different_memory[w+]" ]
[]
[ "joblib/test/test_memmapping.py::test_memmap_based_array_reducing", "joblib/test/test_memmapping.py::test_high_dimension_memmap_array_reducing", "joblib/test/test_memmapping.py::test__strided_from_memmap", "joblib/test/test_memmapping.py::test_pool_with_memmap[multiprocessing]", "joblib/test/test_memmapping.py::test_pool_with_memmap[loky]", "joblib/test/test_memmapping.py::test_pool_with_memmap_array_view[multiprocessing]", "joblib/test/test_memmapping.py::test_pool_with_memmap_array_view[loky]", "joblib/test/test_memmapping.py::test_memmapping_pool_for_large_arrays[multiprocessing]", "joblib/test/test_memmapping.py::test_memmapping_pool_for_large_arrays[loky]", "joblib/test/test_memmapping.py::test_memmapping_pool_for_large_arrays_disabled[multiprocessing]", "joblib/test/test_memmapping.py::test_memmapping_pool_for_large_arrays_disabled[loky]", "joblib/test/test_memmapping.py::test_memmapping_on_large_enough_dev_shm[multiprocessing]", "joblib/test/test_memmapping.py::test_memmapping_on_large_enough_dev_shm[loky]", "joblib/test/test_memmapping.py::test_memmapping_on_too_small_dev_shm[multiprocessing]", "joblib/test/test_memmapping.py::test_memmapping_on_too_small_dev_shm[loky]", "joblib/test/test_memmapping.py::test_memmapping_pool_for_large_arrays_in_return[multiprocessing]", "joblib/test/test_memmapping.py::test_memmapping_pool_for_large_arrays_in_return[loky]", "joblib/test/test_memmapping.py::test_workaround_against_bad_memmap_with_copied_buffers[multiprocessing]", "joblib/test/test_memmapping.py::test_workaround_against_bad_memmap_with_copied_buffers[loky]", "joblib/test/test_memmapping.py::test_pool_memmap_with_big_offset[multiprocessing]", "joblib/test/test_memmapping.py::test_pool_memmap_with_big_offset[loky]", "joblib/test/test_memmapping.py::test_pool_get_temp_dir" ]
[]
BSD 3-Clause "New" or "Revised" License
1,785
[ "joblib/_memmapping_reducer.py", "CHANGES.rst" ]
[ "joblib/_memmapping_reducer.py", "CHANGES.rst" ]
vertexproject__synapse-478
ea418d756e773ad480b93d4de31c094954d53d48
2017-10-20 19:06:59
6f5fc661a88b8cc3f4befb2c9c7ddcebf0b89ba0
vEpiphyte: https://upload.wikimedia.org/wikipedia/commons/4/47/River_terrapin.jpg codecov[bot]: # [Codecov](https://codecov.io/gh/vertexproject/synapse/pull/478?src=pr&el=h1) Report > Merging [#478](https://codecov.io/gh/vertexproject/synapse/pull/478?src=pr&el=desc) into [master](https://codecov.io/gh/vertexproject/synapse/commit/75b638425eb31e5a7b448f225f5883599eed7fab?src=pr&el=desc) will **decrease** coverage by `0.06%`. > The diff coverage is `100%`. [![Impacted file tree graph](https://codecov.io/gh/vertexproject/synapse/pull/478/graphs/tree.svg?width=650&src=pr&token=c1t4BsMAUL&height=150)](https://codecov.io/gh/vertexproject/synapse/pull/478?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #478 +/- ## ========================================== - Coverage 91.22% 91.16% -0.07% ========================================== Files 123 122 -1 Lines 14621 14460 -161 ========================================== - Hits 13338 13182 -156 + Misses 1283 1278 -5 ``` | [Impacted Files](https://codecov.io/gh/vertexproject/synapse/pull/478?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [synapse/exc.py](https://codecov.io/gh/vertexproject/synapse/pull/478?src=pr&el=tree#diff-c3luYXBzZS9leGMucHk=) | `94.26% <ø> (-0.05%)` | :arrow_down: | | [synapse/cores/common.py](https://codecov.io/gh/vertexproject/synapse/pull/478?src=pr&el=tree#diff-c3luYXBzZS9jb3Jlcy9jb21tb24ucHk=) | `93.75% <100%> (+0.04%)` | :arrow_up: | | [synapse/lib/types.py](https://codecov.io/gh/vertexproject/synapse/pull/478?src=pr&el=tree#diff-c3luYXBzZS9saWIvdHlwZXMucHk=) | `90.33% <100%> (+0.04%)` | :arrow_up: | | [synapse/lib/persist.py](https://codecov.io/gh/vertexproject/synapse/pull/478?src=pr&el=tree#diff-c3luYXBzZS9saWIvcGVyc2lzdC5weQ==) | `90.42% <0%> (-1.07%)` | :arrow_down: | | [synapse/axon.py](https://codecov.io/gh/vertexproject/synapse/pull/478?src=pr&el=tree#diff-c3luYXBzZS9heG9uLnB5) | `96.87% <0%> (ø)` | :arrow_up: | | 
[synapse/models/infotech.py](https://codecov.io/gh/vertexproject/synapse/pull/478?src=pr&el=tree#diff-c3luYXBzZS9tb2RlbHMvaW5mb3RlY2gucHk=) | `100% <0%> (ø)` | :arrow_up: | | [synapse/lib/iq.py](https://codecov.io/gh/vertexproject/synapse/pull/478?src=pr&el=tree#diff-c3luYXBzZS9saWIvaXEucHk=) | | | | [synapse/lib/socket.py](https://codecov.io/gh/vertexproject/synapse/pull/478?src=pr&el=tree#diff-c3luYXBzZS9saWIvc29ja2V0LnB5) | `83.33% <0%> (+0.32%)` | :arrow_up: | | [synapse/lib/threads.py](https://codecov.io/gh/vertexproject/synapse/pull/478?src=pr&el=tree#diff-c3luYXBzZS9saWIvdGhyZWFkcy5weQ==) | `69.11% <0%> (+0.98%)` | :arrow_up: | | [synapse/eventbus.py](https://codecov.io/gh/vertexproject/synapse/pull/478?src=pr&el=tree#diff-c3luYXBzZS9ldmVudGJ1cy5weQ==) | `90.9% <0%> (+1.29%)` | :arrow_up: | ------ [Continue to review full report at Codecov](https://codecov.io/gh/vertexproject/synapse/pull/478?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/vertexproject/synapse/pull/478?src=pr&el=footer). Last update [75b6384...ca8bee2](https://codecov.io/gh/vertexproject/synapse/pull/478?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
diff --git a/synapse/cores/common.py b/synapse/cores/common.py index 097756bef..c9dfca297 100644 --- a/synapse/cores/common.py +++ b/synapse/cores/common.py @@ -261,7 +261,7 @@ class Cortex(EventBus, DataModel, Runtime, s_ingest.IngestApi): node[1][form] = norm node[1]['tufo:form'] = form - node[1]['tufo:formed'] = s_common.now() + node[1]['node:created'] = s_common.now() self.runt_props[(form, None)].append(node) self.runt_props[(form, norm)].append(node) @@ -2118,22 +2118,6 @@ class Cortex(EventBus, DataModel, Runtime, s_ingest.IngestApi): return tufo - def addTufoEvent(self, form, **props): - ''' - Add a "non-deconflicted" tufo by generating a guid - - Example: - - tufo = core.addTufoEvent('foo',bar=baz) - - Notes: - - If props contains a key "time" it will be used for - the cortex timestamp column in the row storage. - - ''' - return self.addTufoEvents(form, (props,))[0] - def addJsonText(self, form, text): ''' Add and fully index a blob of JSON text. @@ -2290,82 +2274,6 @@ class Cortex(EventBus, DataModel, Runtime, s_ingest.IngestApi): return props - def addTufoEvents(self, form, propss): - ''' - Add a list of tufo events in bulk. 
- - Example: - - propss = [ - {'foo':10,'bar':30}, - {'foo':11,'bar':99}, - ] - - core.addTufoEvents('woot',propss) - - ''' - tname = self.getPropTypeName(form) - if tname is None and self.enforce: - raise s_common.NoSuchForm(name=form) - - if tname and not self.isSubType(tname, 'guid'): - raise s_common.NotGuidForm(name=form) - - with self.getCoreXact() as xact: - - ret = [] - - for chunk in chunked(1000, propss): - - adds = [] - rows = [] - allfulls = [] - - for props in chunk: - - iden = s_common.guid() - - fulls = self._normTufoProps(form, props, isadd=True) - - self._addDefProps(form, fulls) - - fulls[form] = iden - fulls['tufo:form'] = form - - if self.autoadd: - allfulls.append(fulls) - - # fire these immediately since we need them to potentially fill - # in values before we generate rows for the new tufo - self.fire('node:form', form=form, valu=iden, props=fulls) - - # Ensure we have ALL the required props after node:form is fired. - self._reqProps(form, fulls) - - rows.extend([(iden, p, v, xact.tick) for (p, v) in fulls.items()]) - - # sneaky ephemeral/hidden prop to identify newly created tufos - fulls['.new'] = 1 - - node = (iden, fulls) - - ret.append(node) - adds.append((form, iden, props, node)) - - self.addRows(rows) - - # fire splice events - for form, valu, props, node in adds: - xact.fire('node:add', form=form, valu=valu, node=node) - xact.spliced('node:add', form=form, valu=valu, props=props) - xact.trigger(node, 'node:add', form=form) - - if self.autoadd: - for fulls in allfulls: - self._runToAdd(fulls) - - return ret - def _reqProps(self, form, fulls): if not self.enforce: return @@ -2450,7 +2358,7 @@ class Cortex(EventBus, DataModel, Runtime, s_ingest.IngestApi): def formTufoByProp(self, prop, valu, **props): ''' Form an (iden,info) tuple by atomically deconflicting - the existance of prop=valu and creating it if not present. + the existence of prop=valu and creating it if not present. 
Args: @@ -2459,9 +2367,25 @@ class Cortex(EventBus, DataModel, Runtime, s_ingest.IngestApi): **props: Additional secondary properties for the node Example: + Make a node for the FQDN woot.com:: + + tufo = core.formTufoByProp('inet:fqdn','woot.com') - tufo = core.formTufoByProp('inet:fqdn','woot.com') + Notes: + When forming nodes whose primary property is derived from the + GuidType, deconfliction can be skipped if the value is set to + None. This allows for high-speed ingest of event type data which + does note require deconfliction. + + This API will fire a ``node:form`` event prior to creating rows, + allowing callbacks to populate any additional properties on the + node. After node creation is finished, ``node:add`` events are + fired on for the Cortex event bus, splices and triggers. + Returns: + ((str, dict)): The newly formed tufo, or the existing tufo if + the node already exists. The ephemeral property ".new" can be + checked to see if the node was newly created or not. ''' ctor = self.seedctors.get(prop) if ctor is not None: @@ -2499,20 +2423,31 @@ class Cortex(EventBus, DataModel, Runtime, s_ingest.IngestApi): props.update(subs) - fulls = self._normTufoProps(prop, props, isadd=True) - # create a "full" props dict which includes defaults + fulls = self._normTufoProps(prop, props) self._addDefProps(prop, fulls) fulls[prop] = valu fulls['tufo:form'] = prop - fulls['tufo:formed'] = s_common.now() + fulls['node:created'] = s_common.now() + + # Examine the fulls dictionary and identify any props which are + # themselves forms, and extract the form/valu/subs from the fulls + # dictionary so we can later make those nodes + toadds = None + if self.autoadd: + toadds = self._formToAdd(prop, fulls) + + # Remove any non-model props present in the props and fulls + # dictionary which may have been added during _normTufoProps + self._pruneFulls(prop, fulls, props, isadd=True) # update our runtime form counters self.formed[prop] += 1 - # fire these immediately since we 
need them to potentially fill - # in values before we generate rows for the new tufo + # Fire these immediately since we need them to potentially fill + # in values before we generate rows for the new tufo. It is + # possible this callback may generate extra-model values. self.fire('node:form', form=prop, valu=valu, props=fulls) # Ensure we have ALL the required props after node:form is fired. @@ -2524,23 +2459,84 @@ class Cortex(EventBus, DataModel, Runtime, s_ingest.IngestApi): tufo = (iden, fulls) + # Cache the tufo data now so we can avoid having a .new ephemeral + # property in the cache if self.caching: - # avoid .new in cache cachefo = (iden, dict(fulls)) for p, v in fulls.items(): self._bumpTufoCache(cachefo, p, None, v) - # fire notification events + # Run any autoadd nodes we may have. In the event of autoadds being + # present, our subsequent node:add events are fired depth-first + if self.autoadd and toadds is not None: + self._runToAdd(toadds) + + # fire the node:add events from the xact xact.fire('node:add', form=prop, valu=valu, node=tufo) xact.spliced('node:add', form=prop, valu=valu, props=props) xact.trigger(tufo, 'node:add', form=prop) - if self.autoadd: - self._runToAdd(fulls) - tufo[1]['.new'] = True return tufo + def _pruneFulls(self, form, fulls, props, isadd=False): + ''' + Modify fulls and props dicts in place to remove non-model props. + + Args: + form (str): Form of the dictionary being operated on. + fulls (dict): Dictionary of full property name & valu pairs. + props (dict): Dictionary of property name & value pairs. + isadd (bool): Bool indicating if the data is newly being added or not. 
+ + Returns: + None + ''' + splitp = form + ':' + for name in list(fulls.keys()): + if name in ('tufo:form', 'node:created', 'node:loc'): + continue + if not self.isSetPropOk(name, isadd): + prop = name.split(splitp)[1] + props.pop(prop, None) + fulls.pop(name, None) + + def _formToAdd(self, form, fulls): + ''' + Build a list of property, valu, **props from a dictionary of fulls. + + Args: + form (str): Form of fulls + fulls (dict): + + Returns: + list: List of tuples (prop,valu,**props) for consumption by formTufoByProp. + ''' + ret = [] + skips = ('tufo:form', 'node:created', 'node:loc') + valu = fulls.get(form) + for fprop, fvalu in fulls.items(): + if fprop in skips: + continue + ptype = self.getPropTypeName(fprop) + prefix = fprop + ':' + plen = len(prefix) + for stype in self.getTypeOfs(ptype): + if self.isRuntForm(stype): + continue + if self.isTufoForm(stype): + # We don't want to recurse on forming ourself with all our same properties + if stype == form and valu == fvalu: + continue + subs = {} + + for _fprop, _fvalu in fulls.items(): + if _fprop.startswith(prefix): + k = _fprop[plen:] + subs[k] = _fvalu + ret.append((stype, fvalu, subs)) + return ret + def delTufo(self, tufo): ''' Delete a tufo and it's associated props/lists/etc. 
@@ -2720,47 +2716,36 @@ class Cortex(EventBus, DataModel, Runtime, s_ingest.IngestApi): for k, v in self.getFormDefs(form): fulls.setdefault(k, v) - def _runToAdd(self, fulls): - toadd = set() + def _runToAdd(self, toadds): + for form, valu, props in toadds: + self.formTufoByProp(form, valu, **props) - for prop, valu in fulls.items(): - ptype = self.getPropTypeName(prop) - for stype in self.getTypeOfs(ptype): - - if self.isRuntForm(stype): - continue - - if self.isTufoForm(stype): - toadd.add((stype, valu)) - - for form, valu in toadd: - self.formTufoByProp(form, valu) - - def _normTufoProps(self, form, props, tufo=None, isadd=False): + def _normTufoProps(self, form, props, tufo=None): ''' This will both return a dict of fully qualified props as well as modify the given props dict inband to normalize the values. ''' - fulls = {} for name in list(props.keys()): valu = props.get(name) prop = form + ':' + name - if not self.isSetPropOk(prop, isadd=isadd): - props.pop(name, None) - continue oldv = None if tufo is not None: oldv = tufo[1].get(prop) valu, subs = self.getPropNorm(prop, valu, oldval=oldv) - if tufo is not None and tufo[1].get(prop) == valu: - props.pop(name, None) - continue + if tufo is not None: + if tufo[1].get(prop) == valu: + props.pop(name, None) + continue + _isadd = not bool(oldv) + if not self.isSetPropOk(prop, isadd=_isadd): + props.pop(name, None) + continue # any sub-properties to populate? for sname, svalu in subs.items(): @@ -2870,6 +2855,8 @@ class Cortex(EventBus, DataModel, Runtime, s_ingest.IngestApi): Args: prop (str): Full property name to check. + isadd (bool): Bool indicating that the property check is being + done on a property which has not yet been set on a node. 
Examples: Check if a value is valid before calling a function.:: @@ -2970,10 +2957,14 @@ class Cortex(EventBus, DataModel, Runtime, s_ingest.IngestApi): iden = tufo[0] valu = tufo[1].get(form) + toadds = None + if self.autoadd: + toadds = self._formToAdd(form, fulls) + self._pruneFulls(form, fulls, props, isadd=True) + with self.getCoreXact() as xact: for p, v in fulls.items(): - oldv = tufo[1].get(p) self.setRowsByIdProp(iden, p, v) @@ -2992,8 +2983,8 @@ class Cortex(EventBus, DataModel, Runtime, s_ingest.IngestApi): xact.trigger(tufo, 'node:prop:set', form=form, prop=p) - if self.autoadd: - self._runToAdd(fulls) + if self.autoadd and toadds is not None: + self._runToAdd(toadds) return tufo diff --git a/synapse/exc.py b/synapse/exc.py index bf310f973..f7edb42a9 100644 --- a/synapse/exc.py +++ b/synapse/exc.py @@ -82,8 +82,6 @@ class BadCoreStore(SynErr): class CantDelProp(SynErr): pass class CantSetProp(SynErr): pass -class NotGuidForm(SynErr): pass - class MustBeLocal(SynErr): pass class MustBeProxy(SynErr): pass diff --git a/synapse/lib/types.py b/synapse/lib/types.py index 47bab12be..ae83de7e1 100644 --- a/synapse/lib/types.py +++ b/synapse/lib/types.py @@ -553,6 +553,10 @@ class XrefType(DataType): 'xref': pvval, } + for k, v in vsub.items(): + k = self._sorc_name + ':' + k + subs[k] = v + for k, v in pvsub.items(): k = 'xref:' + k subs[k] = v diff --git a/synapse/models/inet.py b/synapse/models/inet.py index 6f072b249..bde40731a 100644 --- a/synapse/models/inet.py +++ b/synapse/models/inet.py @@ -634,7 +634,7 @@ class InetMod(CoreModule): break for tufo in tufos: - i, p, v, t = self.core.getRowsByIdProp(tufo[0], xref_prop_prop)[0] # unavoidable until we have `tufo:formed` prop + i, p, v, t = self.core.getRowsByIdProp(tufo[0], xref_prop_prop)[0] # unavoidable until we have `node:created` prop adds, dels = [], [] # modify :xref:prop diff --git a/synapse/models/syn.py b/synapse/models/syn.py index 2260e0168..d05a4847e 100644 --- a/synapse/models/syn.py +++ 
b/synapse/models/syn.py @@ -184,16 +184,16 @@ class SynMod(CoreModule): adds = [] logger.debug('Lifting tufo:form rows') for i, _, v, t in self.core.store.getRowsByProp('tufo:form'): - adds.append((i, 'tufo:formed', t, now),) - logger.debug('Deleting existing tufo:formed rows') - self.core.store.delRowsByProp('tufo:formed') + adds.append((i, 'node:created', t, now),) + logger.debug('Deleting existing node:created rows') + self.core.store.delRowsByProp('node:created') if adds: tot = len(adds) - logger.debug('Adding {:,d} tufo:formed rows'.format(tot)) + logger.debug('Adding {:,d} node:created rows'.format(tot)) i = 0 n = 100000 for chunk in s_common.chunks(adds, n): self.core.store.addRows(chunk) i = i + len(chunk) logger.debug('Loading {:,d} [{}%] rows into transaction'.format(i, int((i / tot) * 100))) - logger.debug('Finished adding tufo:formed rows to the Cortex') + logger.debug('Finished adding node:created rows to the Cortex')
Creating nodes from secondary properties using autoadds may result in missing properties When nodes are made from autoadds, those nodes are made using the system normalized primary property for the node and are missing all subs which may have been applicable for that node. This primarily applies to nodes which have a primary property as a guid (either a GuidType or something that is a computed guid). This leads to a issue where its possible to create meaningful nodes from API or Storm queries which lack properties, and in some cases cannot have those properties set because of ro flags. An example of this behavior is the following block - an XREF node is used to directly make a inet:web:post node, however the inet:web:post node only would contain the primary property and would be missing the ``:acct`` and ``:text`` properties. ``` def test_model_inet_postref_postmissingprops(self): with self.getRamCore() as core: postref_tufo = core.formTufoByProp('inet:web:postref', (('vertex.link/user', 'mypost 0.0.0.0'), ('inet:ipv4', 0))) post_tufo = core.formTufoByProp('inet:web:post', ('vertex.link/user', 'mypost 0.0.0.0')) # This feels wrong... self.eq(post_tufo[1].get('inet:web:post:acct'), 'vertex.link/user') self.eq(post_tufo[1].get('inet:web:post:text'), 'mypost 0.0.0.0') ``` Credit goes to @vertexmc for finding this issue.
vertexproject/synapse
diff --git a/synapse/tests/test_axon.py b/synapse/tests/test_axon.py index f49dff25c..8f567436a 100644 --- a/synapse/tests/test_axon.py +++ b/synapse/tests/test_axon.py @@ -595,6 +595,7 @@ class AxonClusterTest(SynTest): blobs = axcluster.find('md5', craphash) self.eq(len(blobs), 0) + time.sleep(0.2) # Yield to axon threads blobs = axcluster.find('md5', asdfhash) # We have two blobs for the same hash since the clone of axfo0 is up on host1/host2 self.eq(len(blobs), 2) diff --git a/synapse/tests/test_cortex.py b/synapse/tests/test_cortex.py index c36c7be0a..565e4e211 100644 --- a/synapse/tests/test_cortex.py +++ b/synapse/tests/test_cortex.py @@ -695,7 +695,7 @@ class CortexBaseTest(SynTest): unodes = core.getTufosByProp('inet:tcp4:gatenumber') self.len(10, unodes) for node in unodes: - self.isin('tufo:formed', node[1]) + self.isin('node:created', node[1]) self.isin('inet:tcp4', node[1]) self.isin('inet:tcp4:ipv4', node[1]) self.isin('inet:tcp4:gatenumber', node[1]) @@ -1043,14 +1043,34 @@ class CortexTest(SynTest): self.eq(len(core.getTufosByProp('strform:foo', valu='zap')), 1) self.eq(len(core.getTufosByProp('strform:bar', valu='zap')), 1) - # Try using setprops with an built-in model which type subprops - t0 = core.formTufoByProp('inet:web:acct', 'vertex.link/pennywise') - self.notin('inet:web:acct:email', t0[1]) - props = {'email': '[email protected]'} - core.setTufoProps(t0, **props) - self.isin('inet:web:acct:email', t0[1]) - t1 = core.getTufoByProp('inet:email', '[email protected]') - self.nn(t1) + # Try using setprops with an built-in model which type subprops + t0 = core.formTufoByProp('inet:web:acct', 'vertex.link/pennywise') + self.notin('inet:web:acct:email', t0[1]) + props = {'email': '[email protected]'} + core.setTufoProps(t0, **props) + self.isin('inet:web:acct:email', t0[1]) + t1 = core.getTufoByProp('inet:email', '[email protected]') + self.nn(t1) + + # Trying settufoprops on a ro prop doens't change anything + 
self.eq(t0[1].get('inet:web:acct:user'), 'pennywise') + t0 = core.setTufoProps(t0, user='ninja') + self.eq(t0[1].get('inet:web:acct:user'), 'pennywise') + + # Try forming a node from its normalize value and then setting + # ro props after the fact. Also ensure those secondary props which + # may trigger autoadds are generating the autoadds and do not retain + # those non-model seconadry props. + valu, subs = core.getTypeNorm('inet:web:post', '(vertex.ninja/ninja,"Just ninja things.")') + t0 = core.formTufoByProp('inet:web:post', valu) + self.eq(t0[1].get('inet:web:post'), valu) + self.none(t0[1].get('inet:web:post:acct')) + self.none(t0[1].get('inet:web:post:text')) + t0 = core.setTufoProps(t0, **subs) + self.eq(t0[1].get('inet:web:post:acct'), 'vertex.ninja/ninja') + self.eq(t0[1].get('inet:web:post:text'), 'Just ninja things.') + t0 = core.setTufoProps(t0, text='Throwing stars are cool!') + self.eq(t0[1].get('inet:web:post:text'), 'Just ninja things.') def test_cortex_tufo_pop(self): with self.getRamCore() as core: @@ -1339,8 +1359,8 @@ class CortexTest(SynTest): # we're also doing the same via storm self.eq(len(core.eval('[inet:fqdn=w00t.com +#some.tag]')), 1) self.eq(len(core.eval('inet:fqdn=w00t.com')), 1) - self.eq(len(core.eval('inet:fqdn=w00t.com +tufo:formed<1')), 0) - self.eq(len(core.eval('inet:fqdn=w00t.com +tufo:formed>1')), 1) + self.eq(len(core.eval('inet:fqdn=w00t.com +node:created<1')), 0) + self.eq(len(core.eval('inet:fqdn=w00t.com +node:created>1')), 1) self.eq(len(core.eval('inet:fqdn=w00t.com totags(leaf=0)')), 2) self.eq(len(core.eval('syn:tag=some')), 1) self.eq(len(core.eval('syn:tag=some.tag')), 1) @@ -1403,6 +1423,50 @@ class CortexTest(SynTest): self.false(s_tags.tufoHasTag(tufo1, 'hehe')) self.false(s_tags.tufoHasTag(tufo1, 'hoho')) + # Add a complicated node which fires a bunch of autoadd nodes and + # ensure they are populated in the second core + postref_tufo = core0.formTufoByProp('inet:web:postref', + (('vertex.link/user', 'mypost 
0.0.0.0'), + ('inet:ipv4', 0))) + self.nn(core1.getTufoByProp('inet:web:post', + ('vertex.link/user', 'mypost 0.0.0.0'))) + self.eq(postref_tufo[1]['tufo:form'], 'inet:web:postref') + self.eq(postref_tufo[1]['inet:web:postref'], '804ec63392f4ea031bb3fd004dee209d') + self.eq(postref_tufo[1]['inet:web:postref:post'], '68bc4607f0518963165536921d6e86fa') + self.eq(postref_tufo[1]['inet:web:postref:xref'], 'inet:ipv4=0.0.0.0') + self.eq(postref_tufo[1]['inet:web:postref:xref:prop'], 'inet:ipv4') + self.eq(postref_tufo[1]['inet:web:postref:xref:intval'], 0) + + # Ensure we got the deconflicted node that was already made, not a new node + post_tufo = core1.formTufoByProp('inet:web:post', + ('vertex.link/user', 'mypost 0.0.0.0')) + self.notin('.new', post_tufo[1]) + self.eq(post_tufo[1]['inet:web:post'], postref_tufo[1]['inet:web:postref:post']) + # Ensure that subs on the autoadd node are formed properly + self.eq(post_tufo[1].get('inet:web:post:acct'), 'vertex.link/user') + self.eq(post_tufo[1].get('inet:web:post:text'), 'mypost 0.0.0.0') + # Ensure multiple subs were made into nodes + self.nn(core1.getTufoByProp('inet:web:acct', 'vertex.link/user')) + self.nn(core1.getTufoByProp('inet:user', 'user')) + self.nn(core1.getTufoByProp('inet:fqdn', 'vertex.link')) + self.nn(core1.getTufoByProp('inet:fqdn', 'link')) + + # Ensure that splices for changes on ro properties on a node are reflected + valu, subs = core0.getTypeNorm('inet:web:post', + '(vertex.ninja/ninja,"Just ninja things.")') + t0 = core0.formTufoByProp('inet:web:post', valu) + self.eq(t0[1].get('inet:web:post'), valu) + self.none(t0[1].get('inet:web:post:acct')) + self.none(t0[1].get('inet:web:post:text')) + core0.setTufoProps(t0, **subs) + t0 = core1.getTufoByProp('inet:web:post', valu) + self.eq(t0[1].get('inet:web:post:acct'), 'vertex.ninja/ninja') + self.eq(t0[1].get('inet:web:post:text'), 'Just ninja things.') + self.nn(core1.getTufoByProp('inet:web:acct', 'vertex.ninja/ninja')) + 
self.nn(core1.getTufoByProp('inet:user', 'ninja')) + self.nn(core1.getTufoByProp('inet:fqdn', 'vertex.ninja')) + self.nn(core1.getTufoByProp('inet:fqdn', 'ninja')) + def test_cortex_dict(self): core = s_cortex.openurl('ram://') core.addTufoForm('foo:bar', ptype='int') @@ -1884,22 +1948,6 @@ class CortexTest(SynTest): core.setConfOpt('enforce', 0) self.raises(BadPropValu, core.formTufoByProp, 'foo:bar', True) - def test_cortex_events(self): - with self.getRamCore() as core: - - tick = now() - - tufo0 = core.addTufoEvent('guidform', baz=10, foo='thing') - - tock = now() - - id0 = tufo0[0] - rows = core.getRowsById(id0) - - self.eq(len(rows), 4) - self.true(rows[0][-1] >= tick) - self.true(rows[0][-1] <= tock) - def test_cortex_splicefd(self): with self.getTestDir() as path: with genfile(path, 'savefile.mpk') as fd: @@ -2000,8 +2048,8 @@ class CortexTest(SynTest): tufo1 = core1.getTufoByProp('inet:fqdn', 'woot.com') self.nn(tufo1) - # tufo:formed rows are not sent with the splice and will be created by the target core - self.gt(tufo1[1]['tufo:formed'], tufo0[1]['tufo:formed']) + # node:created rows are not sent with the splice and will be created by the target core + self.gt(tufo1[1]['node:created'], tufo0[1]['node:created']) def test_cortex_xact_deadlock(self): N = 100 @@ -2232,12 +2280,6 @@ class CortexTest(SynTest): with s_cortex.openurl('sqlite:///%s' % (path,)) as core: self.false(core.isnew) - def test_cortex_notguidform(self): - - with self.getRamCore() as core: - - self.raises(NotGuidForm, core.addTufoEvents, 'inet:fqdn', [{}]) - def test_cortex_getbytag(self): with self.getRamCore() as core: @@ -2301,7 +2343,7 @@ class CortexTest(SynTest): node = core.formTufoByProp('foo:bar', 'I am a bar foo.') self.eq(node[1].get('tufo:form'), 'foo:bar') - self.gt(node[1].get('tufo:formed'), 1483228800000) + self.gt(node[1].get('node:created'), 1483228800000) self.eq(node[1].get('foo:bar'), 'I am a bar foo.') self.none(node[1].get('foo:bar:duck')) @@ -2351,7 +2393,7 @@ 
class CortexTest(SynTest): node = core.formTufoByProp('foo:bar', 'I am a bar foo.') self.eq(node[1].get('tufo:form'), 'foo:bar') - self.gt(node[1].get('tufo:formed'), 1483228800000) + self.gt(node[1].get('node:created'), 1483228800000) self.eq(node[1].get('foo:bar'), 'I am a bar foo.') self.none(node[1].get('foo:bar:duck')) @@ -2396,7 +2438,7 @@ class CortexTest(SynTest): node = core.formTufoByProp('foo:bar', 'I am a bar foo.') self.eq(node[1].get('tufo:form'), 'foo:bar') - self.gt(node[1].get('tufo:formed'), 1483228800000) + self.gt(node[1].get('node:created'), 1483228800000) self.eq(node[1].get('foo:bar'), 'I am a bar foo.') self.none(node[1].get('foo:bar:duck')) @@ -2518,14 +2560,14 @@ class CortexTest(SynTest): self.isinstance(actual[0], tuple) self.eq(len(actual[0]), 2) self.eq(actual[0][1]['tufo:form'], 'inet:fqdn') - self.gt(actual[0][1]['tufo:formed'], 1483228800000) + self.gt(actual[0][1]['node:created'], 1483228800000) self.eq(actual[0][1]['inet:fqdn'], 'vertex.link') self.eq(actual[0][1]['inet:fqdn:zone'], 1) self.isinstance(actual[1], tuple) self.eq(actual[1][0], None) self.eq(actual[1][1]['tufo:form'], 'syn:err') - # NOTE: ephemeral data does not get tufo:formed + # NOTE: ephemeral data does not get node:created self.eq(actual[1][1]['syn:err'], 'BadTypeValu') for s in ['BadTypeValu', 'name=', 'inet:url', 'valu=', 'bad']: self.isin(s, actual[1][1]['syn:err:errmsg']) @@ -2533,7 +2575,7 @@ class CortexTest(SynTest): self.isinstance(actual[2], tuple) self.eq(actual[2][0], None) self.eq(actual[2][1]['tufo:form'], 'syn:err') - # NOTE: ephemeral data does not get tufo:formed + # NOTE: ephemeral data does not get node:created self.eq(actual[2][1]['syn:err'], 'NoSuchForm') for s in ['NoSuchForm', 'name=', 'bad']: self.isin(s, actual[2][1]['syn:err:errmsg']) @@ -2707,7 +2749,7 @@ class CortexTest(SynTest): 'strform:haha': 1234, 'strform:foo': 'sup'})) form, valu = s_tufo.ndef(t1) - self.gt(t1[1]['tufo:formed'], 1483228800000) + self.gt(t1[1]['node:created'], 
1483228800000) self.eq(form, 'strform') self.eq(valu, 'oh hai') self.eq(t1[1].get('strform:foo'), 'sup') @@ -2756,7 +2798,7 @@ class StorageTest(SynTest): tick = s_common.now() rows.append(('1234', 'foo:bar:baz', 'yes', tick)) rows.append(('1234', 'tufo:form', 'foo:bar', tick)) - rows.append(('1234', 'tufo:formed', 1483228800000, tick)) + rows.append(('1234', 'node:created', 1483228800000, tick)) store.addRows(rows) # Retrieve the node via the Cortex interface @@ -2764,7 +2806,7 @@ class StorageTest(SynTest): node = core.getTufoByIden('1234') self.nn(node) self.eq(node[1].get('tufo:form'), 'foo:bar') - self.eq(node[1].get('tufo:formed'), 1483228800000) + self.eq(node[1].get('node:created'), 1483228800000) self.eq(node[1].get('foo:bar:baz'), 'yes') def test_storage_row_manipulation(self): @@ -2780,7 +2822,7 @@ class StorageTest(SynTest): tick = s_common.now() rows.append(('1234', 'foo:bar:baz', 'yes', tick)) rows.append(('1234', 'tufo:form', 'foo:bar', tick)) - rows.append(('1234', 'tufo:formed', 1483228800000, tick)) + rows.append(('1234', 'node:created', 1483228800000, tick)) store.addRows(rows) # Retrieve the node via the Cortex interface @@ -2788,7 +2830,7 @@ class StorageTest(SynTest): node = core.getTufoByIden('1234') self.nn(node) self.eq(node[1].get('tufo:form'), 'foo:bar') - self.eq(node[1].get('tufo:formed'), 1483228800000) + self.eq(node[1].get('node:created'), 1483228800000) self.eq(node[1].get('foo:bar:baz'), 'yes') def test_storage_handler_misses(self): diff --git a/synapse/tests/test_model_dns.py b/synapse/tests/test_model_dns.py index 6317f3f0c..7dfdc1bb2 100644 --- a/synapse/tests/test_model_dns.py +++ b/synapse/tests/test_model_dns.py @@ -43,25 +43,25 @@ class DnsModelTest(SynTest): tick = now() - t0 = core.addTufoEvent('inet:dns:look', a='WOOT.com/1.002.3.4', time=tick) + t0 = core.formTufoByProp('inet:dns:look', '*', a='WOOT.com/1.002.3.4', time=tick) self.eq(t0[1].get('inet:dns:look:time'), tick) self.eq(t0[1].get('inet:dns:look:a'), 
'woot.com/1.2.3.4') self.eq(t0[1].get('inet:dns:look:a:fqdn'), 'woot.com') self.eq(t0[1].get('inet:dns:look:a:ipv4'), 0x01020304) - t0 = core.addTufoEvent('inet:dns:look', ns='WOOT.com/ns.yermom.com', time=tick) + t0 = core.formTufoByProp('inet:dns:look', '*', ns='WOOT.com/ns.yermom.com', time=tick) self.eq(t0[1].get('inet:dns:look:time'), tick) self.eq(t0[1].get('inet:dns:look:ns'), 'woot.com/ns.yermom.com') self.eq(t0[1].get('inet:dns:look:ns:ns'), 'ns.yermom.com') self.eq(t0[1].get('inet:dns:look:ns:zone'), 'woot.com') - t0 = core.addTufoEvent('inet:dns:look', rev='1.2.3.4/WOOT.com', time=tick) + t0 = core.formTufoByProp('inet:dns:look', '*', rev='1.2.3.4/WOOT.com', time=tick) self.eq(t0[1].get('inet:dns:look:time'), tick) self.eq(t0[1].get('inet:dns:look:rev'), '1.2.3.4/woot.com') self.eq(t0[1].get('inet:dns:look:rev:fqdn'), 'woot.com') self.eq(t0[1].get('inet:dns:look:rev:ipv4'), 0x01020304) - t0 = core.addTufoEvent('inet:dns:look', aaaa='WOOT.com/FF::56', time=tick) + t0 = core.formTufoByProp('inet:dns:look', '*', aaaa='WOOT.com/FF::56', time=tick) self.eq(t0[1].get('inet:dns:look:time'), tick) self.eq(t0[1].get('inet:dns:look:aaaa'), 'woot.com/ff::56') self.eq(t0[1].get('inet:dns:look:aaaa:fqdn'), 'woot.com') diff --git a/synapse/tests/test_model_files.py b/synapse/tests/test_model_files.py index 534b8ba7f..aa5b025ca 100644 --- a/synapse/tests/test_model_files.py +++ b/synapse/tests/test_model_files.py @@ -58,6 +58,45 @@ class FileModelTest(SynTest): self.eq(n2def[1], stable_guid) self.eq(n1[0], n2[0]) + def test_model_filepath_complex(self): + with self.getRamCore() as core: + + node = core.formTufoByProp('file:path', '/Foo/Bar/Baz.exe') + + self.nn(node) + self.eq(node[1].get('file:path:dir'), '/foo/bar') + self.eq(node[1].get('file:path:ext'), 'exe') + self.eq(node[1].get('file:path:base'), 'baz.exe') + + node = core.getTufoByProp('file:path', '/foo') + + self.nn(node) + self.none(node[1].get('file:path:ext')) + + self.eq(node[1].get('file:path:dir'), '') 
+ self.eq(node[1].get('file:path:base'), 'foo') + + node = core.formTufoByProp('file:path', r'c:\Windows\system32\Kernel32.dll') + + self.nn(node) + self.eq(node[1].get('file:path:dir'), 'c:/windows/system32') + self.eq(node[1].get('file:path:ext'), 'dll') + self.eq(node[1].get('file:path:base'), 'kernel32.dll') + + self.nn(core.getTufoByProp('file:base', 'kernel32.dll')) + + node = core.getTufoByProp('file:path', 'c:') + + self.nn(node) + self.none(node[1].get('file:path:ext')) + self.eq(node[1].get('file:path:dir'), '') + self.eq(node[1].get('file:path:base'), 'c:') + + node = core.formTufoByProp('file:path', r'/foo////bar/.././baz.json') + + self.nn(node) + self.eq(node[1].get('file:path'), '/foo/baz.json') + def test_filepath(self): with self.getRamCore() as core: diff --git a/synapse/tests/test_model_inet.py b/synapse/tests/test_model_inet.py index a00a97985..f40ca950f 100644 --- a/synapse/tests/test_model_inet.py +++ b/synapse/tests/test_model_inet.py @@ -594,6 +594,32 @@ class InetModelTest(SynTest): # We require the account to be present self.raises(PropNotFound, core.formTufoByProp, 'inet:web:chprofile', '*') + def test_model_inet_postref_postmissingprops(self): + with self.getRamCore() as core: + + postref_tufo = core.formTufoByProp('inet:web:postref', (('vertex.link/user', 'mypost 0.0.0.0'), ('inet:ipv4', 0))) + self.nn(core.getTufoByProp('inet:web:post', ('vertex.link/user', 'mypost 0.0.0.0'))) + + self.eq(postref_tufo[1]['tufo:form'], 'inet:web:postref') + self.eq(postref_tufo[1]['inet:web:postref'], '804ec63392f4ea031bb3fd004dee209d') + self.eq(postref_tufo[1]['inet:web:postref:post'], '68bc4607f0518963165536921d6e86fa') + self.eq(postref_tufo[1]['inet:web:postref:xref'], 'inet:ipv4=0.0.0.0') + self.eq(postref_tufo[1]['inet:web:postref:xref:prop'], 'inet:ipv4') + self.eq(postref_tufo[1]['inet:web:postref:xref:intval'], 0) + + post_tufo = core.formTufoByProp('inet:web:post', ('vertex.link/user', 'mypost 0.0.0.0')) + # Ensure we got the deconflicted 
node that was already made, not a new node + self.notin('.new', post_tufo[1]) + self.eq(post_tufo[1]['inet:web:post'], postref_tufo[1]['inet:web:postref:post']) + # Ensure that subs on the autoadd node are formed properly + self.eq(post_tufo[1].get('inet:web:post:acct'), 'vertex.link/user') + self.eq(post_tufo[1].get('inet:web:post:text'), 'mypost 0.0.0.0') + # Ensure multiple subs were made into nodes + self.nn(core.getTufoByProp('inet:web:acct', 'vertex.link/user')) + self.nn(core.getTufoByProp('inet:user', 'user')) + self.nn(core.getTufoByProp('inet:fqdn', 'vertex.link')) + self.nn(core.getTufoByProp('inet:fqdn', 'link')) + def test_model_inet_201706121318(self): iden0 = guid() diff --git a/synapse/tests/test_model_infotech.py b/synapse/tests/test_model_infotech.py index 9175efb92..2786adbe1 100644 --- a/synapse/tests/test_model_infotech.py +++ b/synapse/tests/test_model_infotech.py @@ -59,45 +59,6 @@ class InfoTechTest(SynTest): node = core.getTufoByProp('it:hostname', 'hehehaha') self.nn(node) - def test_model_infotech_filepath(self): - with self.getRamCore() as core: - - node = core.formTufoByProp('file:path', '/Foo/Bar/Baz.exe') - - self.nn(node) - self.eq(node[1].get('file:path:dir'), '/foo/bar') - self.eq(node[1].get('file:path:ext'), 'exe') - self.eq(node[1].get('file:path:base'), 'baz.exe') - - node = core.getTufoByProp('file:path', '/foo') - - self.nn(node) - self.none(node[1].get('file:path:ext')) - - self.eq(node[1].get('file:path:dir'), '') - self.eq(node[1].get('file:path:base'), 'foo') - - node = core.formTufoByProp('file:path', r'c:\Windows\system32\Kernel32.dll') - - self.nn(node) - self.eq(node[1].get('file:path:dir'), 'c:/windows/system32') - self.eq(node[1].get('file:path:ext'), 'dll') - self.eq(node[1].get('file:path:base'), 'kernel32.dll') - - self.nn(core.getTufoByProp('file:base', 'kernel32.dll')) - - node = core.getTufoByProp('file:path', 'c:') - - self.nn(node) - self.none(node[1].get('file:path:ext')) - 
self.eq(node[1].get('file:path:dir'), '') - self.eq(node[1].get('file:path:base'), 'c:') - - node = core.formTufoByProp('file:path', r'/foo////bar/.././baz.json') - - self.nn(node) - self.eq(node[1].get('file:path'), '/foo/baz.json') - def test_model_infotech_itdev(self): with self.getRamCore() as core: diff --git a/synapse/tests/test_model_syn.py b/synapse/tests/test_model_syn.py index ae697374c..af8307a84 100644 --- a/synapse/tests/test_model_syn.py +++ b/synapse/tests/test_model_syn.py @@ -119,7 +119,7 @@ class SynModelTest(SynTest): data = {} old_formed = 12345 iden0, iden1 = guid(), guid() - tick = now() - 10000 # putting tick in the past to show that preexisting tufo:formed rows will have their stamps removed + tick = now() - 10000 # putting tick in the past to show that preexisting node:created rows will have their stamps removed rows = [ (iden0, 'inet:ipv4:type', '??', tick), (iden0, 'inet:ipv4', 16909060, tick), @@ -131,7 +131,7 @@ class SynModelTest(SynTest): (iden1, 'file:bytes:mime', '??', tick), (iden1, 'file:bytes:md5', 'd41d8cd98f00b204e9800998ecf8427e', tick), (iden1, 'tufo:form', 'file:bytes', tick), - (iden1, 'tufo:formed', tick, tick), # NOTE: this row should not exist pre-migration + (iden1, 'node:created', tick, tick), # NOTE: this row should not exist pre-migration ] with s_cortex.openstore('ram:///') as stor: @@ -147,28 +147,28 @@ class SynModelTest(SynTest): with s_cortex.fromstore(stor) as core: # 1 file:bytes, 1 inet:ipv4 - self.ge(len(core.eval('tufo:formed')), 3) + self.ge(len(core.eval('node:created')), 3) - tufos = core.eval('tufo:formed +tufo:form=inet:ipv4') + tufos = core.eval('node:created +tufo:form=inet:ipv4') self.eq(len(tufos), 1) iden, props = tufos[0] self.eq(props['tufo:form'], 'inet:ipv4') - self.eq(props['tufo:formed'], tick) + self.eq(props['node:created'], tick) self.eq(props['inet:ipv4'], 16909060) self.eq(props['inet:ipv4:asn'], -1) self.eq(props['inet:ipv4:cc'], '??') - rows = core.getRowsByIdProp(iden, 'tufo:formed') 
+ rows = core.getRowsByIdProp(iden, 'node:created') _, _, valu, stamp = rows[0] - self.gt(stamp, valu) # tufo:formed row's stamp will be higher than its valu + self.gt(stamp, valu) # node:created row's stamp will be higher than its valu - tufos = core.eval('tufo:formed +tufo:form=file:bytes') + tufos = core.eval('node:created +tufo:form=file:bytes') self.eq(len(tufos), 1) props = tufos[0][1] self.eq(props['tufo:form'], 'file:bytes') - self.eq(props['tufo:formed'], tick) + self.eq(props['node:created'], tick) self.eq(props['file:bytes'], 'd41d8cd98f00b204e9800998ecf8427e') self.eq(props['file:bytes:mime'], '??') self.eq(props['file:bytes:md5'], 'd41d8cd98f00b204e9800998ecf8427e') - rows = core.getRowsByIdProp(iden, 'tufo:formed') + rows = core.getRowsByIdProp(iden, 'node:created') _, _, valu, stamp = rows[0] - self.gt(stamp, valu) # tufo:formed row's stamp will be higher than its valu + self.gt(stamp, valu) # node:created row's stamp will be higher than its valu
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 5 }
0.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 cffi==1.15.1 coverage==6.2 cryptography==40.0.2 execnet==1.9.0 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work lmdb==1.6.2 more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work msgpack-python==0.5.6 packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pycparser==2.21 pyOpenSSL==23.2.0 pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 pytest-cov==4.0.0 pytest-xdist==3.0.2 -e git+https://github.com/vertexproject/synapse.git@ea418d756e773ad480b93d4de31c094954d53d48#egg=synapse toml @ file:///tmp/build/80754af9/toml_1616166611790/work tomli==1.2.3 tornado==6.1 typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work xxhash==3.2.0 zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: synapse channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - cffi==1.15.1 - coverage==6.2 - cryptography==40.0.2 - execnet==1.9.0 - lmdb==1.6.2 - msgpack-python==0.5.6 - pycparser==2.21 - pyopenssl==23.2.0 - pytest-cov==4.0.0 - pytest-xdist==3.0.2 - tomli==1.2.3 - tornado==6.1 - xxhash==3.2.0 prefix: /opt/conda/envs/synapse
[ "synapse/tests/test_cortex.py::CortexBaseTest::test_cortex_lmdb", "synapse/tests/test_cortex.py::CortexBaseTest::test_cortex_ram", "synapse/tests/test_cortex.py::CortexBaseTest::test_cortex_sqlite3", "synapse/tests/test_cortex.py::CortexTest::test_cortex_formtufobytufo", "synapse/tests/test_cortex.py::CortexTest::test_cortex_formtufosbyprops", "synapse/tests/test_cortex.py::CortexTest::test_cortex_module_datamodel_migration_persistent", "synapse/tests/test_cortex.py::CortexTest::test_cortex_splicepump", "synapse/tests/test_cortex.py::CortexTest::test_cortex_splices", "synapse/tests/test_cortex.py::CortexTest::test_cortex_tags", "synapse/tests/test_cortex.py::CortexTest::test_cortex_tufo_setprops", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_postref_postmissingprops", "synapse/tests/test_model_syn.py::SynModelTest::test_model_syn_201710191144" ]
[]
[ "synapse/tests/test_axon.py::AxonTest::test_axon_basics", "synapse/tests/test_axon.py::AxonTest::test_axon_bytesize", "synapse/tests/test_axon.py::AxonTest::test_axon_eatbytes", "synapse/tests/test_axon.py::AxonTest::test_axon_restrictions", "synapse/tests/test_axon.py::AxonTest::test_axon_telepath", "synapse/tests/test_axon.py::AxonHostTest::test_axon_autorun", "synapse/tests/test_axon.py::AxonHostTest::test_axon_clone_large", "synapse/tests/test_axon.py::AxonHostTest::test_axon_host", "synapse/tests/test_axon.py::AxonHostTest::test_axon_host_clone", "synapse/tests/test_axon.py::AxonHostTest::test_axon_host_free_limit", "synapse/tests/test_axon.py::AxonHostTest::test_axon_host_maxsize_limit", "synapse/tests/test_axon.py::AxonHostTest::test_axon_host_spinbackup", "synapse/tests/test_axon.py::AxonClusterTest::test_axon_cluster", "synapse/tests/test_axon.py::AxonClusterTest::test_axon_cluster_cortex", "synapse/tests/test_axon.py::AxonFSTest::test_axon__fs_isdir", "synapse/tests/test_axon.py::AxonFSTest::test_axon__fs_isfile", "synapse/tests/test_axon.py::AxonFSTest::test_axon_fs_create", "synapse/tests/test_axon.py::AxonFSTest::test_axon_fs_getattr", "synapse/tests/test_axon.py::AxonFSTest::test_axon_fs_getxattr", "synapse/tests/test_axon.py::AxonFSTest::test_axon_fs_mkdir", "synapse/tests/test_axon.py::AxonFSTest::test_axon_fs_read", "synapse/tests/test_axon.py::AxonFSTest::test_axon_fs_readdir", "synapse/tests/test_axon.py::AxonFSTest::test_axon_fs_rename", "synapse/tests/test_axon.py::AxonFSTest::test_axon_fs_rmdir", "synapse/tests/test_axon.py::AxonFSTest::test_axon_fs_truncate", "synapse/tests/test_axon.py::AxonFSTest::test_axon_fs_unlink", "synapse/tests/test_axon.py::AxonFSTest::test_axon_fs_utimens", "synapse/tests/test_axon.py::AxonFSTest::test_axon_get_renameprops", "synapse/tests/test_cortex.py::CortexTest::test_cortex_addmodel", "synapse/tests/test_cortex.py::CortexTest::test_cortex_auth", "synapse/tests/test_cortex.py::CortexTest::test_cortex_by_type", 
"synapse/tests/test_cortex.py::CortexTest::test_cortex_bytype", "synapse/tests/test_cortex.py::CortexTest::test_cortex_caching", "synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_add_tufo", "synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_atlimit", "synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_del_tufo", "synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_del_tufo_prop", "synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_disable", "synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_new", "synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_oneref", "synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_set", "synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_tags", "synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_under_limit", "synapse/tests/test_cortex.py::CortexTest::test_cortex_choptag", "synapse/tests/test_cortex.py::CortexTest::test_cortex_comp", "synapse/tests/test_cortex.py::CortexTest::test_cortex_datamodel_runt_consistency", "synapse/tests/test_cortex.py::CortexTest::test_cortex_dict", "synapse/tests/test_cortex.py::CortexTest::test_cortex_enforce", "synapse/tests/test_cortex.py::CortexTest::test_cortex_fire_set", "synapse/tests/test_cortex.py::CortexTest::test_cortex_getbytag", "synapse/tests/test_cortex.py::CortexTest::test_cortex_ingest", "synapse/tests/test_cortex.py::CortexTest::test_cortex_isnew", "synapse/tests/test_cortex.py::CortexTest::test_cortex_lift_by_cidr", "synapse/tests/test_cortex.py::CortexTest::test_cortex_local", "synapse/tests/test_cortex.py::CortexTest::test_cortex_minmax", "synapse/tests/test_cortex.py::CortexTest::test_cortex_minmax_epoch", "synapse/tests/test_cortex.py::CortexTest::test_cortex_modlrevs", "synapse/tests/test_cortex.py::CortexTest::test_cortex_modlvers", "synapse/tests/test_cortex.py::CortexTest::test_cortex_module", "synapse/tests/test_cortex.py::CortexTest::test_cortex_norm_fail", 
"synapse/tests/test_cortex.py::CortexTest::test_cortex_ramhost", "synapse/tests/test_cortex.py::CortexTest::test_cortex_reqprops", "synapse/tests/test_cortex.py::CortexTest::test_cortex_reqstor", "synapse/tests/test_cortex.py::CortexTest::test_cortex_runts", "synapse/tests/test_cortex.py::CortexTest::test_cortex_savefd", "synapse/tests/test_cortex.py::CortexTest::test_cortex_seed", "synapse/tests/test_cortex.py::CortexTest::test_cortex_seq", "synapse/tests/test_cortex.py::CortexTest::test_cortex_splice_propdel", "synapse/tests/test_cortex.py::CortexTest::test_cortex_splicefd", "synapse/tests/test_cortex.py::CortexTest::test_cortex_splices_errs", "synapse/tests/test_cortex.py::CortexTest::test_cortex_stats", "synapse/tests/test_cortex.py::CortexTest::test_cortex_tag_ival", "synapse/tests/test_cortex.py::CortexTest::test_cortex_tagform", "synapse/tests/test_cortex.py::CortexTest::test_cortex_trigger", "synapse/tests/test_cortex.py::CortexTest::test_cortex_tufo_del", "synapse/tests/test_cortex.py::CortexTest::test_cortex_tufo_list", "synapse/tests/test_cortex.py::CortexTest::test_cortex_tufo_pop", "synapse/tests/test_cortex.py::CortexTest::test_cortex_tufo_setprop", "synapse/tests/test_cortex.py::CortexTest::test_cortex_tufo_tag", "synapse/tests/test_cortex.py::CortexTest::test_cortex_xact_deadlock", "synapse/tests/test_cortex.py::StorageTest::test_nonexist_ctor", "synapse/tests/test_cortex.py::StorageTest::test_storage_confopts", "synapse/tests/test_cortex.py::StorageTest::test_storage_handler_defaults", "synapse/tests/test_cortex.py::StorageTest::test_storage_handler_misses", "synapse/tests/test_cortex.py::StorageTest::test_storage_row_manipulation", "synapse/tests/test_cortex.py::StorageTest::test_storage_rowmanipulation", "synapse/tests/test_cortex.py::StorageTest::test_storage_xact_spliced", "synapse/tests/test_model_dns.py::DnsModelTest::test_model_dns_a", "synapse/tests/test_model_dns.py::DnsModelTest::test_model_dns_aaaa", 
"synapse/tests/test_model_dns.py::DnsModelTest::test_model_dns_look", "synapse/tests/test_model_dns.py::DnsModelTest::test_model_dns_ns", "synapse/tests/test_model_dns.py::DnsModelTest::test_model_dns_rev", "synapse/tests/test_model_files.py::FileModelTest::test_filebase", "synapse/tests/test_model_files.py::FileModelTest::test_filepath", "synapse/tests/test_model_files.py::FileModelTest::test_model_file_bytes", "synapse/tests/test_model_files.py::FileModelTest::test_model_file_bytes_axon", "synapse/tests/test_model_files.py::FileModelTest::test_model_file_seeds", "synapse/tests/test_model_files.py::FileModelTest::test_model_file_seeds_capitalization", "synapse/tests/test_model_files.py::FileModelTest::test_model_filepath_complex", "synapse/tests/test_model_files.py::FileModelTest::test_model_files_imgof", "synapse/tests/test_model_files.py::FileModelTest::test_model_files_txtref", "synapse/tests/test_model_inet.py::InetModelTest::test_model_fqdn_punycode", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_201706121318", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_201706201837", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_201709181501", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_201709271521", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_201710111553", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_asnet4", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_cast_defang", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_chprofile", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_cidr4", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_email", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_fqdn", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_fqdn_set_sfx", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_fqdn_unicode", 
"synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_iface", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_ipv4", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_ipv4_raise", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_ipv6", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_mac", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_passwd", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_postref", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_srv4_types", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_srv6_types", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_url_fields", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_urlfile", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_web_acct", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_web_action", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_web_actref", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_web_follows", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_web_logon", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_web_memb", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_web_mesg", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_web_post", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_whois_recns", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_whoisemail", "synapse/tests/test_model_inet.py::InetModelTest::test_model_inet_wifi", "synapse/tests/test_model_inet.py::InetModelTest::test_model_whois_contact", "synapse/tests/test_model_infotech.py::InfoTechTest::test_model_infotech_av", "synapse/tests/test_model_infotech.py::InfoTechTest::test_model_infotech_brutecast", 
"synapse/tests/test_model_infotech.py::InfoTechTest::test_model_infotech_cve", "synapse/tests/test_model_infotech.py::InfoTechTest::test_model_infotech_host", "synapse/tests/test_model_infotech.py::InfoTechTest::test_model_infotech_hostexec", "synapse/tests/test_model_infotech.py::InfoTechTest::test_model_infotech_hostname", "synapse/tests/test_model_infotech.py::InfoTechTest::test_model_infotech_itdev", "synapse/tests/test_model_infotech.py::InfoTechTest::test_model_infotech_semvertype", "synapse/tests/test_model_infotech.py::InfoTechTest::test_model_infotech_software", "synapse/tests/test_model_syn.py::SynModelTest::test_model_syn_201709051630", "synapse/tests/test_model_syn.py::SynModelTest::test_model_syn_201709191412" ]
[]
Apache License 2.0
1,786
[ "synapse/cores/common.py", "synapse/lib/types.py", "synapse/models/syn.py", "synapse/models/inet.py", "synapse/exc.py" ]
[ "synapse/cores/common.py", "synapse/lib/types.py", "synapse/models/syn.py", "synapse/models/inet.py", "synapse/exc.py" ]
ARMmbed__greentea-250
b8bcffbb7aaced094f252a4ddfe930e8237fb484
2017-10-20 19:13:58
68508c5f4d7cf0635c75399d0ff7cfa896fdf2cc
diff --git a/mbed_greentea/mbed_target_info.py b/mbed_greentea/mbed_target_info.py index 356676b..c825bcf 100644 --- a/mbed_greentea/mbed_target_info.py +++ b/mbed_greentea/mbed_target_info.py @@ -20,6 +20,17 @@ Author: Przemyslaw Wirkus <[email protected]> import os import re import json +from os import walk +try: + from contextlib import suppress +except ImportError: + from contextlib import contextmanager + @contextmanager + def suppress(*excs): + try: + yield + except excs: + pass from mbed_greentea.mbed_common_api import run_cli_process from mbed_greentea.mbed_greentea_log import gt_logger @@ -381,82 +392,65 @@ def get_platform_property(platform, property): :return: property value, None if property not found """ - # First load from targets.json if available - value_from_targets_json = get_platform_property_from_targets(platform, property) - if value_from_targets_json: - return value_from_targets_json - - # Check if info is available for a specific platform - if platform in TARGET_INFO_MAPPING: - if property in TARGET_INFO_MAPPING[platform]['properties']: - return TARGET_INFO_MAPPING[platform]['properties'][property] + default = _get_platform_property_from_default(property) + from_targets_json = _get_platform_property_from_targets( + platform, property, default) + if from_targets_json: + return from_targets_json + from_info_mapping = _get_platform_property_from_info_mapping(platform, property) + if from_info_mapping: + return from_info_mapping + return default + +def _get_platform_property_from_default(property): + with suppress(KeyError): + return TARGET_INFO_MAPPING['default'][property] + +def _get_platform_property_from_info_mapping(platform, property): + with suppress(KeyError): + return TARGET_INFO_MAPPING[platform]['properties'][property] + +def _platform_property_from_targets_json(targets, platform, property, default): + """! Get a platforms's property from the target data structure in + targets.json. Takes into account target inheritance. 
+ @param targets Data structure parsed from targets.json + @param platform Name of the platform + @param property Name of the property + @param default the fallback value if none is found, but the target exists + @return property value, None if property not found - # Check if default data is available - if 'default' in TARGET_INFO_MAPPING: - if property in TARGET_INFO_MAPPING['default']: - return TARGET_INFO_MAPPING['default'][property] - - return None + """ + with suppress(KeyError): + return targets[platform][property] + with suppress(KeyError): + for inherited_target in targets[platform]['inherits']: + result = _platform_property_from_targets_json(targets, inherited_target, property, None) + if result: + return result + if platform in targets: + return default + +IGNORED_DIRS = ['.build', 'BUILD', 'tools'] + +def _find_targets_json(path): + for root, dirs, files in walk(path, followlinks=True): + for ignored_dir in IGNORED_DIRS: + if ignored_dir in dirs: + dirs.remove(ignored_dir) + if 'targets.json' in files: + yield os.path.join(root, 'targets.json') -def get_platform_property_from_targets(platform, property): +def _get_platform_property_from_targets(platform, property, default): """ Load properties from targets.json file somewhere in the project structure :param platform: :return: property value, None if property not found """ - - def get_platform_property_from_targets(targets, platform, property): - """! Get a platforms's property from the target data structure in - targets.json. Takes into account target inheritance. 
- @param targets Data structure parsed from targets.json - @param platform Name of the platform - @param property Name of the property - @return property value, None if property not found - - """ - - result = None - if platform in targets: - if property in targets[platform]: - result = targets[platform][property] - elif 'inherits' in targets[platform]: - result = None - for inherited_target in targets[platform]['inherits']: - result = get_platform_property_from_targets(targets, inherited_target, property) - - # Stop searching after finding the first value for the property - if result: - break - - return result - - result = None - targets_json_path = [] - for root, dirs, files in os.walk(os.getcwd(), followlinks=True): - ignored_dirs = ['.build', 'BUILD', 'tools'] - - for ignored_dir in ignored_dirs: - if ignored_dir in dirs: - dirs.remove(ignored_dir) - - if 'targets.json' in files: - targets_json_path.append(os.path.join(root, 'targets.json')) - - if not targets_json_path: - gt_logger.gt_log_warn("No targets.json files found, using default target properties") - - for targets_path in targets_json_path: - try: + for targets_path in _find_targets_json(os.getcwd()): + with suppress(IOError, ValueError): with open(targets_path, 'r') as f: targets = json.load(f) - - # Load property from targets.json - result = get_platform_property_from_targets(targets, platform, property) - - # If a valid property was found, stop looking + result = _platform_property_from_targets_json(targets, platform, property, default) if result: - break - except Exception: - continue - return result + return result diff --git a/setup.py b/setup.py index e98e109..0734dfe 100644 --- a/setup.py +++ b/setup.py @@ -50,13 +50,15 @@ setup(name='mbed-greentea', license=LICENSE, test_suite = 'test', entry_points={ - "console_scripts": ["mbedgt=mbed_greentea.mbed_greentea_cli:main",], + "console_scripts": ["mbedgt=mbed_greentea.mbed_greentea_cli:main",], }, install_requires=["PrettyTable>=0.7.2", - 
"PySerial>=3.0", - "mbed-host-tests>=1.2.0", - "mbed-ls>=1.2.15", - "junit-xml", - "lockfile", - "mock", - "colorama>=0.3,<0.4"]) + "PySerial>=3.0", + "mbed-host-tests>=1.2.0", + "mbed-ls>=1.2.15", + "junit-xml", + "lockfile", + "mock", + "six", + "colorama>=0.3,<0.4"]) +
Target property priority incorrect Currently we have priority as follows: ``` internal yotta blob > targets.json > tool default ``` This is a bug. Instead the priority should be: ``` targets.json /w default > internal yotta blob > tool delaut ``` This implies a few test cases: In targets.json | In yotta blob | property used | Currently Works ---------------------- | ------------- | ---------------- | --------------- Yes, with property | No | `targets.json` | Yes Yes, without property| No | default | Yes Yes, with property | Yes | `targets.json` | No Yes, without property | Yes | default | No No | No | default | Yes No | Yes | yotta blob | Yes @bridadan Is this the issue masked by #248?
ARMmbed/greentea
diff --git a/test/mbed_gt_target_info.py b/test/mbed_gt_target_info.py index e3f0a6a..96cd1db 100644 --- a/test/mbed_gt_target_info.py +++ b/test/mbed_gt_target_info.py @@ -21,6 +21,8 @@ import shutil import tempfile import unittest +from six import StringIO + from mock import patch from mbed_greentea import mbed_target_info @@ -338,8 +340,168 @@ mbed-gcc 1.1.0 result = mbed_target_info.add_target_info_mapping("null") - def test_get_platform_property_from_targets(self): - result = mbed_target_info.get_platform_property_from_targets({}, {}) + def test_get_platform_property_from_targets_no_json(self): + with patch("mbed_greentea.mbed_target_info._find_targets_json") as _find: + _find.return_value = iter([]) + result = mbed_target_info._get_platform_property_from_targets("not_a_platform", "not_a_property", "default") + self.assertIsNone(result) + + def test_get_platform_property_from_targets_no_file(self): + with patch("mbed_greentea.mbed_target_info._find_targets_json") as _find,\ + patch("mbed_greentea.mbed_target_info.open") as _open: + _find.return_value = iter(["foo"]) + _open.side_effect = IOError + result = mbed_target_info._get_platform_property_from_targets("not_a_platform", "not_a_property", "default") + self.assertIsNone(result) + + def test_get_platform_property_from_targets_invalid_json(self): + with patch("mbed_greentea.mbed_target_info._find_targets_json") as _find,\ + patch("mbed_greentea.mbed_target_info.open") as _open: + _find.return_value = iter(["foo"]) + _open.return_value.__enter__.return_value = StringIO("{") + result = mbed_target_info._get_platform_property_from_targets("not_a_platform", "not_a_property", "default") + self.assertIsNone(result) + + def test_get_platform_property_from_targets_empty_json(self): + with patch("mbed_greentea.mbed_target_info._find_targets_json") as _find,\ + patch("mbed_greentea.mbed_target_info.open") as _open: + _find.return_value = iter(["foo"]) + _open.return_value.__enter__.return_value = StringIO("{}") + 
result = mbed_target_info._get_platform_property_from_targets("not_a_platform", "not_a_property", "default") + self.assertIsNone(result) + + def test_get_platform_property_from_targets_no_value(self): + with patch("mbed_greentea.mbed_target_info._find_targets_json") as _find,\ + patch("mbed_greentea.mbed_target_info.open") as _open: + _find.return_value = iter(["foo"]) + _open.return_value.__enter__.return_value = StringIO("{\"K64F\": {}}") + result = mbed_target_info._get_platform_property_from_targets("K64F", "not_a_property", "default") + self.assertEqual(result, "default") + + def test_get_platform_property_from_targets_in_json(self): + with patch("mbed_greentea.mbed_target_info._find_targets_json") as _find,\ + patch("mbed_greentea.mbed_target_info.open") as _open: + _find.return_value = iter(["foo"]) + _open.return_value.__enter__.return_value = StringIO("{\"K64F\": {\"copy_method\": \"cp\"}}") + result = mbed_target_info._get_platform_property_from_targets("K64F", "copy_method", "default") + self.assertEqual("cp", result) + + def test_find_targets_json(self): + with patch("mbed_greentea.mbed_target_info.walk") as _walk: + _walk.return_value = iter([("", ["foo"], []), ("foo", [], ["targets.json"])]) + result = list(mbed_target_info._find_targets_json("bogus_path")) + self.assertEqual(result, ["foo/targets.json"]) + + def test_find_targets_json_ignored(self): + with patch("mbed_greentea.mbed_target_info.walk") as _walk: + walk_result =[("", [".build"], [])] + _walk.return_value = iter(walk_result) + result = list(mbed_target_info._find_targets_json("bogus_path")) + self.assertEqual(result, []) + self.assertEqual(walk_result, [("", [], [])]) + + def test_platform_property_from_targets_json_empty(self): + result = mbed_target_info._platform_property_from_targets_json( + {}, "not_a_target", "not_a_property", "default" + ) + self.assertIsNone(result) + + def test_platform_property_from_targets_json_base_target(self): + result = 
mbed_target_info._platform_property_from_targets_json( + {"K64F": {"copy_method": "cp"}}, "K64F", "copy_method", "default" + ) + self.assertEqual(result, "cp") + + def test_platform_property_from_targets_json_inherits(self): + result = mbed_target_info._platform_property_from_targets_json( + {"K64F": {"inherits": ["Target"]}, "Target": {"copy_method": "cp"}}, + "K64F", "copy_method", "default" + ) + self.assertEqual(result, "cp") + + def test_platform_property_from_default_missing(self): + result = mbed_target_info._get_platform_property_from_default("not_a_property") + self.assertIsNone(result) + + def test_platform_property_from_default(self): + result = mbed_target_info._get_platform_property_from_default("copy_method") + self.assertEqual(result, "default") + + def test_platform_property_from_info_mapping_bad_platform(self): + result = mbed_target_info._get_platform_property_from_info_mapping("not_a_platform", "not_a_property") + self.assertIsNone(result) + + def test_platform_property_from_info_mapping_missing(self): + result = mbed_target_info._get_platform_property_from_info_mapping("K64F", "not_a_property") + self.assertIsNone(result) + + def test_platform_property_from_info_mapping(self): + result = mbed_target_info._get_platform_property_from_info_mapping("K64F", "copy_method") + self.assertEqual(result, "default") + + + # The following test cases are taken from this table: + # + # Num | In targets.json | In yotta blob | In Default | property used + # --- | --------------- | ------------- | ---------- | -------------- + # 1 | Yes | No | Yes |`targets.json` + # 2 | Yes | Yes | Yes |`targets.json` + # 3 | No | Yes | Yes | yotta blob + # 4 | No | No | Yes | default + # 5 | No | No | No | None + # 6 | Yes | No | No |`targets.json` + # 7 | Yes | Yes | No |`targets.json` + # 8 | No | Yes | No | yotta blob + def test_platform_property(self): + """Test that platform_property picks the property value preserving + the following priority relationship: + targets.json 
> yotta blob > default + """ + with patch("mbed_greentea.mbed_target_info._get_platform_property_from_targets") as _targets,\ + patch("mbed_greentea.mbed_target_info._get_platform_property_from_info_mapping") as _info_mapping,\ + patch("mbed_greentea.mbed_target_info._get_platform_property_from_default") as _default: + # 1 + _targets.return_value = "targets" + _info_mapping.return_value = None + _default.return_value = "default" + self.assertEqual( + mbed_target_info.get_platform_property("K64F", "copy_method"), + "targets") + # 2 + _info_mapping.return_value = "yotta" + self.assertEqual( + mbed_target_info.get_platform_property("K64F", "copy_method"), + "targets") + # 3 + _targets.return_value = None + self.assertEqual( + mbed_target_info.get_platform_property("K64F", "copy_method"), + "yotta") + # 4 + _info_mapping.return_value = None + self.assertEqual( + mbed_target_info.get_platform_property("K64F", "copy_method"), + "default") + # 5 + _default.return_value = None + self.assertEqual( + mbed_target_info.get_platform_property("K64F", "copy_method"), + None) + # 6 + _targets.return_value = "targets" + self.assertEqual( + mbed_target_info.get_platform_property("K64F", "copy_method"), + "targets") + # 7 + _info_mapping.return_value = "yotta" + self.assertEqual( + mbed_target_info.get_platform_property("K64F", "copy_method"), + "targets") + # 8 + _targets.return_value = None + self.assertEqual( + mbed_target_info.get_platform_property("K64F", "copy_method"), + "yotta") def test_parse_yotta_json_for_build_name(self):
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 3, "test_score": 2 }, "num_modified_files": 2 }
1.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
appdirs==1.4.4 beautifulsoup4==4.13.3 certifi==2025.1.31 charset-normalizer==3.4.1 colorama==0.3.9 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work fasteners==0.19 future==1.0.0 idna==3.10 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work intelhex==2.3.0 junit-xml==1.9 lockfile==0.12.2 -e git+https://github.com/ARMmbed/greentea.git@b8bcffbb7aaced094f252a4ddfe930e8237fb484#egg=mbed_greentea mbed-host-tests==1.8.15 mbed-ls==1.8.15 mbed-os-tools==1.8.15 mock==5.2.0 packaging @ file:///croot/packaging_1734472117206/work pluggy @ file:///croot/pluggy_1733169602837/work prettytable==2.5.0 pyserial==3.5 pytest @ file:///croot/pytest_1738938843180/work requests==2.32.3 six==1.17.0 soupsieve==2.6 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work typing_extensions==4.13.0 urllib3==2.3.0 wcwidth==0.2.13
name: greentea channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - appdirs==1.4.4 - beautifulsoup4==4.13.3 - certifi==2025.1.31 - charset-normalizer==3.4.1 - colorama==0.3.9 - fasteners==0.19 - future==1.0.0 - idna==3.10 - intelhex==2.3.0 - junit-xml==1.9 - lockfile==0.12.2 - mbed-host-tests==1.8.15 - mbed-ls==1.8.15 - mbed-os-tools==1.8.15 - mock==5.2.0 - prettytable==2.5.0 - pyserial==3.5 - requests==2.32.3 - six==1.17.0 - soupsieve==2.6 - typing-extensions==4.13.0 - urllib3==2.3.0 - wcwidth==0.2.13 prefix: /opt/conda/envs/greentea
[ "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_find_targets_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_find_targets_json_ignored", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_empty_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_in_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_invalid_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_no_file", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_no_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_no_value", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_default", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_default_missing", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_info_mapping", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_info_mapping_bad_platform", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_info_mapping_missing", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_targets_json_base_target", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_targets_json_empty", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_targets_json_inherits" ]
[]
[ "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_mbed_target_from_current_dir_ok", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_mbed_targets_from_yotta_local_module_invalid_path", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_mbed_targets_from_yotta_local_module_invalid_target", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_mbed_targets_from_yotta_local_module_valid", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_yotta_target_from_local_config_failed_open", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_yotta_target_from_local_config_invalid_path", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_yotta_target_from_local_config_valid_path", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_add_target_info_mapping", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_missing_json_data", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_missing_keywords", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_missing_name", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_missing_target", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_multiple", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_json_for_build_name", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_new_style", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_new_style_text", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_new_style_text_2", 
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_text", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_with_ssl_errors", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_fail", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_chars", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_nl", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_nl_whitechars", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_rcnl", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_rcnl_whitechars", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_version", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_whitechars" ]
[]
Apache License 2.0
1,787
[ "mbed_greentea/mbed_target_info.py", "setup.py" ]
[ "mbed_greentea/mbed_target_info.py", "setup.py" ]
marshmallow-code__marshmallow-691
48e36fa35c8019c811b0281f7b358f11ddc55173
2017-10-21 00:45:27
8e217c8d6fefb7049ab3389f31a8d35824fa2d96
diff --git a/AUTHORS.rst b/AUTHORS.rst index 002b8bea..594a6d4a 100644 --- a/AUTHORS.rst +++ b/AUTHORS.rst @@ -90,3 +90,4 @@ Contributors (chronological) - Yoichi NAKAYAMA `@yoichi <https://github.com/yoichi>`_ - Bernhard M. Wiedemann `@bmwiedemann <https://github.com/bmwiedemann>`_ - Scott Werner `@scottwernervt <https://github.com/scottwernervt>`_ +- Leonardo Fedalto `@Fedalto <https://github.com/Fedalto>`_ diff --git a/marshmallow/fields.py b/marshmallow/fields.py index 1c3a5e5a..8a70091e 100755 --- a/marshmallow/fields.py +++ b/marshmallow/fields.py @@ -9,7 +9,6 @@ import numbers import uuid import warnings import decimal -from operator import attrgetter from marshmallow import validate, utils, class_registry from marshmallow.base import FieldABC, SchemaABC @@ -119,7 +118,7 @@ class Field(FieldABC): #: :exc:`marshmallow.ValidationError`. default_error_messages = { 'required': 'Missing data for required field.', - 'type': 'Invalid input type.', # used by Unmarshaller + 'type': 'Invalid input type.', # used by Unmarshaller 'null': 'Field may not be null.', 'validator_failed': 'Invalid value.' 
} @@ -667,7 +666,7 @@ class Number(Field): """Format the value or raise a :exc:`ValidationError` if an error occurs.""" try: return self._format_num(value) - except (TypeError, ValueError) as err: + except (TypeError, ValueError): self.fail('invalid') def serialize(self, attr, obj, accessor=None): @@ -882,7 +881,7 @@ class FormattedString(Field): try: data = utils.to_marshallable_type(obj) return self.src_str.format(**data) - except (TypeError, IndexError) as error: + except (TypeError, IndexError): self.fail('format') @@ -954,7 +953,7 @@ class DateTime(Field): if format_func: try: return format_func(value, localtime=self.localtime) - except (AttributeError, ValueError) as err: + except (AttributeError, ValueError): self.fail('format', input=value) else: return value.strftime(self.dateformat) @@ -1020,7 +1019,6 @@ class Time(Field): """Deserialize an ISO8601-formatted time to a :class:`datetime.time` object.""" if not value: # falsy values are invalid self.fail('invalid') - raise err try: return utils.from_iso_time(value) except (AttributeError, TypeError, ValueError): @@ -1063,7 +1061,8 @@ class TimeDelta(Field): seconds or microseconds. :param str precision: Influences how the integer is interpreted during - (de)serialization. Must be 'days', 'seconds' or 'microseconds'. + (de)serialization. Must be 'days', 'seconds', 'microseconds', + 'milliseconds', 'minutes', 'hours' or 'weeks'. :param str error: Error message stored upon validation failure. :param kwargs: The same keyword arguments that :class:`Field` receives. 
@@ -1075,6 +1074,10 @@ class TimeDelta(Field): DAYS = 'days' SECONDS = 'seconds' MICROSECONDS = 'microseconds' + MILLISECONDS = 'milliseconds' + MINUTES = 'minutes' + HOURS = 'hours' + WEEKS = 'weeks' default_error_messages = { 'invalid': 'Not a valid period of time.', @@ -1083,10 +1086,12 @@ class TimeDelta(Field): def __init__(self, precision='seconds', error=None, **kwargs): precision = precision.lower() - units = (self.DAYS, self.SECONDS, self.MICROSECONDS) + units = (self.DAYS, self.SECONDS, self.MICROSECONDS, self.MILLISECONDS, + self.MINUTES, self.HOURS, self.WEEKS) if precision not in units: - msg = 'The precision must be "{0}", "{1}" or "{2}".'.format(*units) + msg = 'The precision must be "{0}" or "{1}".'.format( + ', '.join(units[:-1]), units[-1]) raise ValueError(msg) self.precision = precision @@ -1096,15 +1101,8 @@ class TimeDelta(Field): if value is None: return None try: - days = value.days - if self.precision == self.DAYS: - return days - else: - seconds = days * 86400 + value.seconds - if self.precision == self.SECONDS: - return seconds - else: # microseconds - return seconds * 10**6 + value.microseconds # flake8: noqa + base_unit = dt.timedelta(**{self.precision: 1}) + return int(value.total_seconds() / base_unit.total_seconds()) except AttributeError: self.fail('format', input=value) @@ -1196,6 +1194,7 @@ class Email(ValidatedField, String): :param kwargs: The same keyword arguments that :class:`String` receives. """ default_error_messages = {'invalid': 'Not a valid email address.'} + def __init__(self, *args, **kwargs): String.__init__(self, *args, **kwargs) # Insert validation into self.validators so that multiple errors can be @@ -1305,7 +1304,6 @@ class Function(Field): return func(value) - class Constant(Field): """A field that (de)serializes to a preset constant. If you only want the constant added for serialization or deserialization, you should use
fields.TimeDelta precision Hi all, I'm working on a project that uses marshmallow and we're using a subclass of TimeDelta with `hour` precision. Would you be interested in a PR with support for other precisions?
marshmallow-code/marshmallow
diff --git a/tests/test_deserialization.py b/tests/test_deserialization.py index 9549ec55..43eb0b89 100644 --- a/tests/test_deserialization.py +++ b/tests/test_deserialization.py @@ -468,6 +468,34 @@ class TestFieldDeserialization: assert result.seconds == 12 assert result.microseconds == 0 + field = fields.TimeDelta(fields.TimeDelta.WEEKS) + result = field.deserialize(1) + assert isinstance(result, dt.timedelta) + assert result.days == 7 + assert result.seconds == 0 + assert result.microseconds == 0 + + field = fields.TimeDelta(fields.TimeDelta.HOURS) + result = field.deserialize(25) + assert isinstance(result, dt.timedelta) + assert result.days == 1 + assert result.seconds == 3600 + assert result.microseconds == 0 + + field = fields.TimeDelta(fields.TimeDelta.MINUTES) + result = field.deserialize(1441) + assert isinstance(result, dt.timedelta) + assert result.days == 1 + assert result.seconds == 60 + assert result.microseconds == 0 + + field = fields.TimeDelta(fields.TimeDelta.MILLISECONDS) + result = field.deserialize(123456) + assert isinstance(result, dt.timedelta) + assert result.days == 0 + assert result.seconds == 123 + assert result.microseconds == 456000 + @pytest.mark.parametrize('in_value', [ '', diff --git a/tests/test_serialization.py b/tests/test_serialization.py index b7ebb9a4..3f3c290d 100644 --- a/tests/test_serialization.py +++ b/tests/test_serialization.py @@ -594,6 +594,8 @@ class TestFieldSerialization: user.d3 = dt.timedelta(days=0, seconds=0, microseconds=86401000001) user.d4 = dt.timedelta(days=0, seconds=0, microseconds=0) user.d5 = dt.timedelta(days=-1, seconds=0, microseconds=0) + user.d6 = dt.timedelta(days=1, seconds=1, microseconds=1, + milliseconds=1, minutes=1, hours=1, weeks=1) field = fields.TimeDelta(fields.TimeDelta.DAYS) assert field.serialize('d1', user) == 1 @@ -601,6 +603,8 @@ class TestFieldSerialization: assert field.serialize('d1', user) == 86401 field = fields.TimeDelta(fields.TimeDelta.MICROSECONDS) assert 
field.serialize('d1', user) == 86401000001 + field = fields.TimeDelta(fields.TimeDelta.HOURS) + assert field.serialize('d1', user) == 24 field = fields.TimeDelta(fields.TimeDelta.DAYS) assert field.serialize('d2', user) == 1 @@ -630,8 +634,28 @@ class TestFieldSerialization: field = fields.TimeDelta(fields.TimeDelta.MICROSECONDS) assert field.serialize('d5', user) == -86400000000 - user.d6 = None - assert field.serialize('d6', user) is None + field = fields.TimeDelta(fields.TimeDelta.WEEKS) + assert field.serialize('d6', user) == 1 + field = fields.TimeDelta(fields.TimeDelta.DAYS) + assert field.serialize('d6', user) == 7 + 1 + field = fields.TimeDelta(fields.TimeDelta.HOURS) + assert field.serialize('d6', user) == 7 * 24 + 24 + 1 + field = fields.TimeDelta(fields.TimeDelta.MINUTES) + assert field.serialize('d6', user) == 7 * 24 * 60 + 24 * 60 + 60 + 1 + d6_seconds = (7 * 24 * 60 * 60 + # 1 week + 24 * 60 * 60 + # 1 day + 60 * 60 + # 1 hour + 60 + # 1 minute + 1) + field = fields.TimeDelta(fields.TimeDelta.SECONDS) + assert field.serialize('d6', user) == d6_seconds + field = fields.TimeDelta(fields.TimeDelta.MILLISECONDS) + assert field.serialize('d6', user) == d6_seconds * 1000 + 1 + field = fields.TimeDelta(fields.TimeDelta.MICROSECONDS) + assert field.serialize('d6', user) == d6_seconds * 10**6 + 1000 + 1 + + user.d7 = None + assert field.serialize('d7', user) is None def test_datetime_list_field(self): obj = DateTimeList([dt.datetime.utcnow(), dt.datetime.now()])
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 2 }
3.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[reco]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio" ], "pre_install": null, "python": "3.9", "reqs_path": [ "dev-requirements.txt", "docs/requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster @ git+https://github.com/sloria/alabaster.git@667b1b676c6bf7226db057f098ec826d84d3ae40 babel==2.17.0 backports.tarfile==1.2.0 cachetools==5.5.2 certifi==2025.1.31 cffi==1.17.1 chardet==5.2.0 charset-normalizer==3.4.1 colorama==0.4.6 coverage==7.8.0 cryptography==44.0.2 distlib==0.3.9 docutils==0.20.1 exceptiongroup==1.2.2 execnet==2.1.1 filelock==3.18.0 flake8==2.4.1 id==1.5.0 idna==3.10 imagesize==1.4.1 importlib_metadata==8.6.1 iniconfig==2.1.0 invoke==2.2.0 jaraco.classes==3.4.0 jaraco.context==6.0.1 jaraco.functools==4.1.0 jeepney==0.9.0 Jinja2==3.1.6 keyring==25.6.0 markdown-it-py==3.0.0 MarkupSafe==3.0.2 -e git+https://github.com/marshmallow-code/marshmallow.git@48e36fa35c8019c811b0281f7b358f11ddc55173#egg=marshmallow mccabe==0.3.1 mdurl==0.1.2 more-itertools==10.6.0 nh3==0.2.21 packaging==24.2 pep8==1.7.1 platformdirs==4.3.7 pluggy==1.5.0 py==1.11.0 pycparser==2.22 pyflakes==0.8.1 Pygments==2.19.1 pyproject-api==1.9.0 pytest==8.3.5 pytest-asyncio==0.26.0 pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-xdist==3.6.1 python-dateutil==2.9.0.post0 pytz==2025.2 readme_renderer==43.0 requests==2.32.3 requests-toolbelt==1.0.0 rfc3986==2.0.0 rich==14.0.0 SecretStorage==3.3.3 simplejson==3.20.1 six==1.17.0 snowballstemmer==2.2.0 Sphinx==7.2.6 sphinx-issues==0.2.0 sphinxcontrib-applehelp==2.0.0 sphinxcontrib-devhelp==2.0.0 sphinxcontrib-htmlhelp==2.1.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==2.0.0 sphinxcontrib-serializinghtml==2.0.0 tomli==2.2.1 tox==4.25.0 twine==6.1.0 typing_extensions==4.13.0 urllib3==2.3.0 virtualenv==20.29.3 zipp==3.21.0
name: marshmallow channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.11+sloria0 - babel==2.17.0 - backports-tarfile==1.2.0 - cachetools==5.5.2 - certifi==2025.1.31 - cffi==1.17.1 - chardet==5.2.0 - charset-normalizer==3.4.1 - colorama==0.4.6 - coverage==7.8.0 - cryptography==44.0.2 - distlib==0.3.9 - docutils==0.20.1 - exceptiongroup==1.2.2 - execnet==2.1.1 - filelock==3.18.0 - flake8==2.4.1 - id==1.5.0 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - invoke==2.2.0 - jaraco-classes==3.4.0 - jaraco-context==6.0.1 - jaraco-functools==4.1.0 - jeepney==0.9.0 - jinja2==3.1.6 - keyring==25.6.0 - markdown-it-py==3.0.0 - markupsafe==3.0.2 - mccabe==0.3.1 - mdurl==0.1.2 - more-itertools==10.6.0 - nh3==0.2.21 - packaging==24.2 - pep8==1.7.1 - platformdirs==4.3.7 - pluggy==1.5.0 - py==1.11.0 - pycparser==2.22 - pyflakes==0.8.1 - pygments==2.19.1 - pyproject-api==1.9.0 - pytest==8.3.5 - pytest-asyncio==0.26.0 - pytest-cov==6.0.0 - pytest-mock==3.14.0 - pytest-xdist==3.6.1 - python-dateutil==2.9.0.post0 - pytz==2025.2 - readme-renderer==43.0 - requests==2.32.3 - requests-toolbelt==1.0.0 - rfc3986==2.0.0 - rich==14.0.0 - secretstorage==3.3.3 - simplejson==3.20.1 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==7.2.6 - sphinx-issues==0.2.0 - sphinxcontrib-applehelp==2.0.0 - sphinxcontrib-devhelp==2.0.0 - 
sphinxcontrib-htmlhelp==2.1.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==2.0.0 - sphinxcontrib-serializinghtml==2.0.0 - tomli==2.2.1 - tox==4.25.0 - twine==6.1.0 - typing-extensions==4.13.0 - urllib3==2.3.0 - virtualenv==20.29.3 - zipp==3.21.0 prefix: /opt/conda/envs/marshmallow
[ "tests/test_deserialization.py::TestFieldDeserialization::test_timedelta_field_deserialization", "tests/test_serialization.py::TestFieldSerialization::test_timedelta_field" ]
[]
[ "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[String]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Integer]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Boolean]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Float]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Number]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[DateTime]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[LocalDateTime]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Time]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Date]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[TimeDelta]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Dict]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Url]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Email]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[FormattedString]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[UUID]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Decimal]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[String]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Integer]", 
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Boolean]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Float]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Number]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[DateTime]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[LocalDateTime]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Time]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Date]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[TimeDelta]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Dict]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Url]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Email]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[FormattedString]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[UUID]", "tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Decimal]", "tests/test_deserialization.py::TestDeserializingNone::test_allow_none_is_true_if_missing_is_true", "tests/test_deserialization.py::TestDeserializingNone::test_list_field_deserialize_none_to_empty_list", "tests/test_deserialization.py::TestFieldDeserialization::test_float_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_float_field_deserialization[bad]", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_float_field_deserialization[]", 
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_float_field_deserialization[in_val2]", "tests/test_deserialization.py::TestFieldDeserialization::test_integer_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_strict_integer_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_with_places", "tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_with_places_and_rounding", "tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_deserialization_string", "tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_special_values", "tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_special_values_not_permitted", "tests/test_deserialization.py::TestFieldDeserialization::test_string_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization_with_custom_truthy_values", "tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization_with_custom_truthy_values_invalid[notvalid]", "tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization_with_custom_truthy_values_invalid[123]", "tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization_with_empty_truthy", "tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization_with_custom_falsy_values", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_datetime_deserialization[not-a-datetime]", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_datetime_deserialization[42]", 
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_datetime_deserialization[]", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_datetime_deserialization[in_value3]", "tests/test_deserialization.py::TestFieldDeserialization::test_datetime_passed_year_is_invalid", "tests/test_deserialization.py::TestFieldDeserialization::test_datetime_passed_date_is_invalid", "tests/test_deserialization.py::TestFieldDeserialization::test_custom_date_format_datetime_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_rfc_datetime_field_deserialization[rfc]", "tests/test_deserialization.py::TestFieldDeserialization::test_rfc_datetime_field_deserialization[rfc822]", "tests/test_deserialization.py::TestFieldDeserialization::test_iso_datetime_field_deserialization[iso]", "tests/test_deserialization.py::TestFieldDeserialization::test_iso_datetime_field_deserialization[iso8601]", "tests/test_deserialization.py::TestFieldDeserialization::test_localdatetime_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_time_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_time_field_deserialization[badvalue]", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_time_field_deserialization[]", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_time_field_deserialization[in_data2]", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_time_field_deserialization[42]", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_timedelta_field_deserialization[]", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_timedelta_field_deserialization[badvalue]", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_timedelta_field_deserialization[in_value2]", 
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_timedelta_field_deserialization[9999999999]", "tests/test_deserialization.py::TestFieldDeserialization::test_date_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_date_field_deserialization[]", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_date_field_deserialization[123]", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_date_field_deserialization[in_value2]", "tests/test_deserialization.py::TestFieldDeserialization::test_dict_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_url_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_relative_url_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_url_field_schemes_argument", "tests/test_deserialization.py::TestFieldDeserialization::test_email_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_function_field_deserialization_is_noop_by_default", "tests/test_deserialization.py::TestFieldDeserialization::test_function_field_deserialization_with_callable", "tests/test_deserialization.py::TestFieldDeserialization::test_function_field_deserialization_with_context", "tests/test_deserialization.py::TestFieldDeserialization::test_function_field_passed_deserialize_only_is_load_only", "tests/test_deserialization.py::TestFieldDeserialization::test_function_field_passed_deserialize_and_serialize_is_not_load_only", "tests/test_deserialization.py::TestFieldDeserialization::test_uuid_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_uuid_deserialization[malformed]", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_uuid_deserialization[123]", "tests/test_deserialization.py::TestFieldDeserialization::test_invalid_uuid_deserialization[in_value2]", 
"tests/test_deserialization.py::TestFieldDeserialization::test_deserialization_function_must_be_callable", "tests/test_deserialization.py::TestFieldDeserialization::test_method_field_deserialization_is_noop_by_default", "tests/test_deserialization.py::TestFieldDeserialization::test_deserialization_method", "tests/test_deserialization.py::TestFieldDeserialization::test_deserialization_method_must_be_a_method", "tests/test_deserialization.py::TestFieldDeserialization::test_method_field_deserialize_only", "tests/test_deserialization.py::TestFieldDeserialization::test_datetime_list_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_list_field_deserialize_invalid_item", "tests/test_deserialization.py::TestFieldDeserialization::test_list_field_deserialize_multiple_invalid_items", "tests/test_deserialization.py::TestFieldDeserialization::test_list_field_deserialize_value_that_is_not_a_list[notalist]", "tests/test_deserialization.py::TestFieldDeserialization::test_list_field_deserialize_value_that_is_not_a_list[42]", "tests/test_deserialization.py::TestFieldDeserialization::test_list_field_deserialize_value_that_is_not_a_list[value2]", "tests/test_deserialization.py::TestFieldDeserialization::test_constant_field_deserialization", "tests/test_deserialization.py::TestFieldDeserialization::test_constant_is_always_included_in_deserialized_data", "tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_user_validator_function", "tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_user_validator_class_that_returns_bool", "tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_user_validator_that_raises_error_with_list", "tests/test_deserialization.py::TestFieldDeserialization::test_validator_must_return_false_to_raise_error", 
"tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_validator_with_nonascii_input", "tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_user_validators", "tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_custom_error_message", "tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_to_dict", "tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_missing_values", "tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_many", "tests/test_deserialization.py::TestSchemaDeserialization::test_exclude", "tests/test_deserialization.py::TestSchemaDeserialization::test_nested_single_deserialization_to_dict", "tests/test_deserialization.py::TestSchemaDeserialization::test_nested_list_deserialization_to_dict", "tests/test_deserialization.py::TestSchemaDeserialization::test_nested_single_none_not_allowed", "tests/test_deserialization.py::TestSchemaDeserialization::test_nested_many_non_not_allowed", "tests/test_deserialization.py::TestSchemaDeserialization::test_nested_single_required_missing", "tests/test_deserialization.py::TestSchemaDeserialization::test_nested_many_required_missing", "tests/test_deserialization.py::TestSchemaDeserialization::test_nested_only_basestring", "tests/test_deserialization.py::TestSchemaDeserialization::test_nested_only_basestring_with_list_data", "tests/test_deserialization.py::TestSchemaDeserialization::test_none_deserialization", "tests/test_deserialization.py::TestSchemaDeserialization::test_nested_none_deserialization", "tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_attribute_param", "tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_attribute_param_symmetry", "tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_attribute_param_error_returns_field_name_not_attribute_name", 
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_attribute_param_error_returns_load_from_not_attribute_name", "tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_load_from_param", "tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_dump_only_param", "tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_missing_param_value", "tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_missing_param_callable", "tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_missing_param_none", "tests/test_deserialization.py::TestSchemaDeserialization::test_deserialization_returns_errors", "tests/test_deserialization.py::TestSchemaDeserialization::test_deserialization_returns_errors_with_multiple_validators", "tests/test_deserialization.py::TestSchemaDeserialization::test_strict_mode_deserialization", "tests/test_deserialization.py::TestSchemaDeserialization::test_strict_mode_many", "tests/test_deserialization.py::TestSchemaDeserialization::test_strict_mode_deserialization_with_multiple_validators", "tests/test_deserialization.py::TestSchemaDeserialization::test_uncaught_validation_errors_are_stored", "tests/test_deserialization.py::TestSchemaDeserialization::test_multiple_errors_can_be_stored_for_a_field", "tests/test_deserialization.py::TestSchemaDeserialization::test_multiple_errors_can_be_stored_for_an_email_field", "tests/test_deserialization.py::TestSchemaDeserialization::test_multiple_errors_can_be_stored_for_a_url_field", "tests/test_deserialization.py::TestSchemaDeserialization::test_required_value_only_passed_to_validators_if_provided", "tests/test_deserialization.py::TestSchemaDeserialization::test_partial_deserialization[True]", "tests/test_deserialization.py::TestSchemaDeserialization::test_partial_deserialization[False]", 
"tests/test_deserialization.py::TestSchemaDeserialization::test_partial_fields_deserialization", "tests/test_deserialization.py::TestSchemaDeserialization::test_partial_fields_validation", "tests/test_deserialization.py::TestValidation::test_integer_with_validator", "tests/test_deserialization.py::TestValidation::test_integer_with_validators[field0]", "tests/test_deserialization.py::TestValidation::test_integer_with_validators[field1]", "tests/test_deserialization.py::TestValidation::test_integer_with_validators[field2]", "tests/test_deserialization.py::TestValidation::test_float_with_validators[field0]", "tests/test_deserialization.py::TestValidation::test_float_with_validators[field1]", "tests/test_deserialization.py::TestValidation::test_float_with_validators[field2]", "tests/test_deserialization.py::TestValidation::test_string_validator", "tests/test_deserialization.py::TestValidation::test_function_validator", "tests/test_deserialization.py::TestValidation::test_function_validators[field0]", "tests/test_deserialization.py::TestValidation::test_function_validators[field1]", "tests/test_deserialization.py::TestValidation::test_function_validators[field2]", "tests/test_deserialization.py::TestValidation::test_method_validator", "tests/test_deserialization.py::TestValidation::test_nested_data_is_stored_when_validation_fails", "tests/test_deserialization.py::TestValidation::test_false_value_validation", "tests/test_deserialization.py::test_required_field_failure[String]", "tests/test_deserialization.py::test_required_field_failure[Integer]", "tests/test_deserialization.py::test_required_field_failure[Boolean]", "tests/test_deserialization.py::test_required_field_failure[Float]", "tests/test_deserialization.py::test_required_field_failure[Number]", "tests/test_deserialization.py::test_required_field_failure[DateTime]", "tests/test_deserialization.py::test_required_field_failure[LocalDateTime]", "tests/test_deserialization.py::test_required_field_failure[Time]", 
"tests/test_deserialization.py::test_required_field_failure[Date]", "tests/test_deserialization.py::test_required_field_failure[TimeDelta]", "tests/test_deserialization.py::test_required_field_failure[Dict]", "tests/test_deserialization.py::test_required_field_failure[Url]", "tests/test_deserialization.py::test_required_field_failure[Email]", "tests/test_deserialization.py::test_required_field_failure[UUID]", "tests/test_deserialization.py::test_required_field_failure[Decimal]", "tests/test_deserialization.py::test_required_message_can_be_changed[My", "tests/test_deserialization.py::test_required_message_can_be_changed[message1]", "tests/test_deserialization.py::test_required_message_can_be_changed[message2]", "tests/test_deserialization.py::test_deserialize_doesnt_raise_exception_if_strict_is_false_and_input_type_is_incorrect", "tests/test_deserialization.py::test_deserialize_raises_exception_if_strict_is_true_and_input_type_is_incorrect", "tests/test_serialization.py::TestFieldSerialization::test_default", "tests/test_serialization.py::TestFieldSerialization::test_number[42-42.0]", "tests/test_serialization.py::TestFieldSerialization::test_number[0-0.0]", "tests/test_serialization.py::TestFieldSerialization::test_number[None-None]", "tests/test_serialization.py::TestFieldSerialization::test_number_as_string", "tests/test_serialization.py::TestFieldSerialization::test_number_as_string_passed_none", "tests/test_serialization.py::TestFieldSerialization::test_callable_default", "tests/test_serialization.py::TestFieldSerialization::test_function_field_passed_func", "tests/test_serialization.py::TestFieldSerialization::test_function_field_passed_serialize_only_is_dump_only", "tests/test_serialization.py::TestFieldSerialization::test_function_field_passed_deserialize_and_serialize_is_not_dump_only", "tests/test_serialization.py::TestFieldSerialization::test_function_field_passed_serialize", 
"tests/test_serialization.py::TestFieldSerialization::test_function_field_does_not_swallow_attribute_error", "tests/test_serialization.py::TestFieldSerialization::test_function_field_load_only", "tests/test_serialization.py::TestFieldSerialization::test_function_field_passed_serialize_with_context", "tests/test_serialization.py::TestFieldSerialization::test_function_field_passed_uncallable_object", "tests/test_serialization.py::TestFieldSerialization::test_integer_field", "tests/test_serialization.py::TestFieldSerialization::test_integer_as_string_field", "tests/test_serialization.py::TestFieldSerialization::test_integer_field_default", "tests/test_serialization.py::TestFieldSerialization::test_integer_field_default_set_to_none", "tests/test_serialization.py::TestFieldSerialization::test_uuid_field", "tests/test_serialization.py::TestFieldSerialization::test_decimal_field", "tests/test_serialization.py::TestFieldSerialization::test_decimal_field_string", "tests/test_serialization.py::TestFieldSerialization::test_decimal_field_special_values", "tests/test_serialization.py::TestFieldSerialization::test_decimal_field_special_values_not_permitted", "tests/test_serialization.py::TestFieldSerialization::test_decimal_field_fixed_point_representation", "tests/test_serialization.py::TestFieldSerialization::test_boolean_field_serialization", "tests/test_serialization.py::TestFieldSerialization::test_function_with_uncallable_param", "tests/test_serialization.py::TestFieldSerialization::test_email_field_validates", "tests/test_serialization.py::TestFieldSerialization::test_email_field_serialize_none", "tests/test_serialization.py::TestFieldSerialization::test_dict_field_serialize_none", "tests/test_serialization.py::TestFieldSerialization::test_dict_field_invalid_dict_but_okay", "tests/test_serialization.py::TestFieldSerialization::test_dict_field_serialize", "tests/test_serialization.py::TestFieldSerialization::test_dict_field_serialize_ordereddict", 
"tests/test_serialization.py::TestFieldSerialization::test_url_field_serialize_none", "tests/test_serialization.py::TestFieldSerialization::test_url_field_validates", "tests/test_serialization.py::TestFieldSerialization::test_method_field_with_method_missing", "tests/test_serialization.py::TestFieldSerialization::test_method_field_passed_serialize_only_is_dump_only", "tests/test_serialization.py::TestFieldSerialization::test_method_field_passed_deserialize_only_is_load_only", "tests/test_serialization.py::TestFieldSerialization::test_method_field_with_uncallable_attribute", "tests/test_serialization.py::TestFieldSerialization::test_method_field_does_not_swallow_attribute_error", "tests/test_serialization.py::TestFieldSerialization::test_method_with_no_serialize_is_missing", "tests/test_serialization.py::TestFieldSerialization::test_serialize_with_dump_to_param", "tests/test_serialization.py::TestFieldSerialization::test_serialize_with_attribute_and_dump_to_uses_dump_to", "tests/test_serialization.py::TestFieldSerialization::test_datetime_serializes_to_iso_by_default", "tests/test_serialization.py::TestFieldSerialization::test_datetime_invalid_serialization[invalid]", "tests/test_serialization.py::TestFieldSerialization::test_datetime_invalid_serialization[value1]", "tests/test_serialization.py::TestFieldSerialization::test_datetime_invalid_serialization[24]", "tests/test_serialization.py::TestFieldSerialization::test_datetime_field_rfc822[rfc]", "tests/test_serialization.py::TestFieldSerialization::test_datetime_field_rfc822[rfc822]", "tests/test_serialization.py::TestFieldSerialization::test_localdatetime_rfc_field", "tests/test_serialization.py::TestFieldSerialization::test_datetime_iso8601[iso]", "tests/test_serialization.py::TestFieldSerialization::test_datetime_iso8601[iso8601]", "tests/test_serialization.py::TestFieldSerialization::test_localdatetime_iso", "tests/test_serialization.py::TestFieldSerialization::test_datetime_format", 
"tests/test_serialization.py::TestFieldSerialization::test_string_field", "tests/test_serialization.py::TestFieldSerialization::test_formattedstring_field", "tests/test_serialization.py::TestFieldSerialization::test_formattedstring_field_on_schema", "tests/test_serialization.py::TestFieldSerialization::test_string_field_default_to_empty_string", "tests/test_serialization.py::TestFieldSerialization::test_time_field", "tests/test_serialization.py::TestFieldSerialization::test_invalid_time_field_serialization[badvalue]", "tests/test_serialization.py::TestFieldSerialization::test_invalid_time_field_serialization[]", "tests/test_serialization.py::TestFieldSerialization::test_invalid_time_field_serialization[in_data2]", "tests/test_serialization.py::TestFieldSerialization::test_invalid_time_field_serialization[42]", "tests/test_serialization.py::TestFieldSerialization::test_date_field", "tests/test_serialization.py::TestFieldSerialization::test_invalid_date_field_serialization[badvalue]", "tests/test_serialization.py::TestFieldSerialization::test_invalid_date_field_serialization[]", "tests/test_serialization.py::TestFieldSerialization::test_invalid_date_field_serialization[in_data2]", "tests/test_serialization.py::TestFieldSerialization::test_invalid_date_field_serialization[42]", "tests/test_serialization.py::TestFieldSerialization::test_datetime_list_field", "tests/test_serialization.py::TestFieldSerialization::test_list_field_with_error", "tests/test_serialization.py::TestFieldSerialization::test_datetime_list_serialize_single_value", "tests/test_serialization.py::TestFieldSerialization::test_list_field_serialize_none_returns_none", "tests/test_serialization.py::TestFieldSerialization::test_list_field_respect_inner_attribute", "tests/test_serialization.py::TestFieldSerialization::test_list_field_respect_inner_attribute_single_value", "tests/test_serialization.py::TestFieldSerialization::test_list_field_work_with_generator_single_value", 
"tests/test_serialization.py::TestFieldSerialization::test_list_field_work_with_generators_multiple_values", "tests/test_serialization.py::TestFieldSerialization::test_list_field_work_with_generators_error", "tests/test_serialization.py::TestFieldSerialization::test_list_field_work_with_generators_empty_generator_returns_none_for_every_non_returning_yield_statement", "tests/test_serialization.py::TestFieldSerialization::test_list_field_work_with_set", "tests/test_serialization.py::TestFieldSerialization::test_list_field_work_with_custom_class_with_iterator_protocol", "tests/test_serialization.py::TestFieldSerialization::test_bad_list_field", "tests/test_serialization.py::TestFieldSerialization::test_serialize_does_not_apply_validators", "tests/test_serialization.py::TestFieldSerialization::test_constant_field_serialization", "tests/test_serialization.py::TestFieldSerialization::test_constant_is_always_included_in_serialized_data", "tests/test_serialization.py::TestFieldSerialization::test_constant_field_serialize_when_omitted", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[String]", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Integer]", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Boolean]", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Float]", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Number]", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[DateTime]", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[LocalDateTime]", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Time]", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Date]", 
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[TimeDelta]", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Dict]", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Url]", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Email]", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[FormattedString]", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[UUID]", "tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Decimal]", "tests/test_serialization.py::test_serializing_named_tuple", "tests/test_serialization.py::test_serializing_named_tuple_with_meta", "tests/test_serialization.py::test_serializing_slice" ]
[]
MIT License
1,788
[ "AUTHORS.rst", "marshmallow/fields.py" ]
[ "AUTHORS.rst", "marshmallow/fields.py" ]
MechanicalSoup__MechanicalSoup-140
6e45a67591ec9bf204eb1323477fb40289d615ae
2017-10-21 11:18:50
0d147eb88b3bffb4853b841b418924cbb569c4ea
hemberger: Yep! This will always work since `Form.set` is now a direct wrapper around `Form.set_*`. (The only difference is that `set` can only take one name-value pair, but value can be as many elements as the `set_*` method allows. I can certainly change the test from ``` form.set_select({'instrument': options) ``` to ``` browser['instrument'] = options ``` I only chose the former because it made it more explicit that it was testing `set_select`. Which would you prefer?
diff --git a/docs/ChangeLog.rst b/docs/ChangeLog.rst index 63d2685..7ba5011 100644 --- a/docs/ChangeLog.rst +++ b/docs/ChangeLog.rst @@ -40,6 +40,9 @@ Main changes: ``browser.get_current_form().print_summary()`` to get a summary of the fields you need to fill-in (and which ones are already filled-in). +* The ``Form`` class now supports selecting multiple options in + a ``<select multiple>`` element. + Bug fixes --------- diff --git a/mechanicalsoup/form.py b/mechanicalsoup/form.py index 258b1e2..8fb7440 100644 --- a/mechanicalsoup/form.py +++ b/mechanicalsoup/form.py @@ -185,17 +185,30 @@ class Form(object): :param data: Dict of ``{name: value, ...}``. Find the select element whose *name*-attribute is ``name``. Then select from among its children the option element whose - *value*-attribute is ``value``. + *value*-attribute is ``value``. If the select element's + *multiple*-attribute is set, then ``value`` can be a list + or tuple to select multiple options. """ for (name, value) in data.items(): select = self.form.find("select", {"name": name}) if not select: raise InvalidFormMethod("No select named " + name) + + # Deselect all options first for option in select.find_all("option"): if "selected" in option.attrs: del option.attrs["selected"] - o = select.find("option", {"value": value}) - o.attrs["selected"] = "selected" + + # Wrap individual values in a 1-element tuple. + # If value is a list/tuple, select must be a <select multiple>. + if not isinstance(value, list) and not isinstance(value, tuple): + value = (value,) + elif "multiple" not in select.attrs: + raise LinkNotFoundError("Cannot select multiple options!") + + for choice in value: + option = select.find("option", {"value": choice}) + option.attrs["selected"] = "selected" def __setitem__(self, name, value): """Forwards arguments to :func:`~Form.set`. For example,
Manage <select multiple> `<select multiple>` should behave essentially like multiple checkboxes with the same name. For now, they don't: selecting one option deselects the previous ones. @hemberger: while you're working on form, you may want to fix this too.
MechanicalSoup/MechanicalSoup
diff --git a/tests/test_form.py b/tests/test_form.py index 29530b3..6e9e765 100644 --- a/tests/test_form.py +++ b/tests/test_form.py @@ -253,6 +253,41 @@ def test_set_select(option): assert(res.status_code == 200 and res.text == 'Success!') browser.close() + +set_select_multiple_form = ''' +<form method="post" action="mock://form.com/post"> + <select name="instrument" multiple> + <option value="piano">Piano</option> + <option value="bass">Bass</option> + <option value="violin">Violin</option> + </select> + <input type="submit" value="Select Multiple" /> +</form> +''' + [email protected]("options", [ + pytest.param('bass', id='select one (str)'), + pytest.param(('bass',), id='select one (tuple)'), + pytest.param(('piano', 'violin'), id='select two'), +]) +def test_set_select_multiple(options): + """Test a <select multiple> element.""" + # When a browser submits multiple selections, the qsl looks like: + # name=option1&name=option2 + if not isinstance(options, list) and not isinstance(options, tuple): + expected = (('instrument', options),) + else: + expected = (('instrument', option) for option in options) + browser, url = setup_mock_browser(expected_post=expected, + text=set_select_multiple_form) + browser.open(url) + form = browser.select_form('form') + form.set_select({'instrument': options}) + res = browser.submit_selected() + assert(res.status_code == 200 and res.text == 'Success!') + browser.close() + + page_with_missing_elements = ''' <html> <form method="post"> @@ -294,6 +329,8 @@ def test_form_not_found(): form.textarea({'bar': 'value', 'foo': 'nosuchval'}) with pytest.raises(mechanicalsoup.utils.LinkNotFoundError): form.set_radio({'size': 'tiny'}) + with pytest.raises(mechanicalsoup.utils.LinkNotFoundError): + form.set_select({'entree': ('no_multiple', 'no_multiple')}) browser.close() page_with_radio = '''
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 2 }
0.8
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-flake8", "pytest-mock", "requests_mock" ], "pre_install": null, "python": "3.6", "reqs_path": [ "requirements.txt", "tests/requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 beautifulsoup4==4.12.3 certifi==2021.5.30 charset-normalizer==2.0.12 coverage==6.2 flake8==5.0.4 idna==3.10 importlib-metadata==4.2.0 iniconfig==1.1.1 mccabe==0.7.0 -e git+https://github.com/MechanicalSoup/MechanicalSoup.git@6e45a67591ec9bf204eb1323477fb40289d615ae#egg=MechanicalSoup packaging==21.3 pluggy==1.0.0 py==1.11.0 pycodestyle==2.9.1 pyflakes==2.5.0 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 pytest-flake8==1.1.1 pytest-mock==3.6.1 requests==2.27.1 requests-mock==1.12.1 six==1.17.0 soupsieve==2.3.2.post1 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 zipp==3.6.0
name: MechanicalSoup channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - beautifulsoup4==4.12.3 - charset-normalizer==2.0.12 - coverage==6.2 - flake8==5.0.4 - idna==3.10 - importlib-metadata==4.2.0 - iniconfig==1.1.1 - mccabe==0.7.0 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pycodestyle==2.9.1 - pyflakes==2.5.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - pytest-flake8==1.1.1 - pytest-mock==3.6.1 - requests==2.27.1 - requests-mock==1.12.1 - six==1.17.0 - soupsieve==2.3.2.post1 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - zipp==3.6.0 prefix: /opt/conda/envs/MechanicalSoup
[ "tests/test_form.py::test_set_select_multiple[select", "tests/test_form.py::test_form_not_found" ]
[]
[ "tests/test_form.py::test_submit_online", "tests/test_form.py::test_submit_set", "tests/test_form.py::test_choose_submit[preview]", "tests/test_form.py::test_choose_submit[save]", "tests/test_form.py::test_choose_submit[cancel]", "tests/test_form.py::test_choose_submit_fail[not", "tests/test_form.py::test_choose_submit_fail[found]", "tests/test_form.py::test_choose_submit_multiple_match", "tests/test_form.py::test_form_noaction", "tests/test_form.py::test_form_action", "tests/test_form.py::test_set_select[default]", "tests/test_form.py::test_set_select[selected]", "tests/test_form.py::test_form_check_uncheck", "tests/test_form.py::test_form_print_summary" ]
[]
MIT License
1,790
[ "docs/ChangeLog.rst", "mechanicalsoup/form.py" ]
[ "docs/ChangeLog.rst", "mechanicalsoup/form.py" ]
asottile__all-repos-25
0c1622739a951e5904debb622386e0840c751164
2017-10-21 15:48:17
6835d50a1d65e98a44a21386ce6ec37703ce8f93
diff --git a/all_repos/find_files.py b/all_repos/find_files.py index c6826b8..e230264 100644 --- a/all_repos/find_files.py +++ b/all_repos/find_files.py @@ -7,6 +7,7 @@ import sys from all_repos import cli from all_repos import color from all_repos.config import load_config +from all_repos.util import zsplit def ls_files(config, repo): @@ -15,7 +16,7 @@ def ls_files(config, repo): ('git', '-C', path, 'ls-files', '-z'), stdout=subprocess.PIPE, check=True, ) - return path, ret.stdout.rstrip(b'\0').split(b'\0') + return path, zsplit(ret.stdout) def find_files(config, regex): diff --git a/all_repos/sed.py b/all_repos/sed.py new file mode 100644 index 0000000..b6b685d --- /dev/null +++ b/all_repos/sed.py @@ -0,0 +1,69 @@ +import argparse +import functools +import os.path +import shlex +import subprocess + +from all_repos import autofix_lib +from all_repos import cli +from all_repos.util import zsplit + + +def find_repos(config, *, ls_files_cmd): + for repo in config.get_cloned_repos(): + repo_dir = os.path.join(config.output_dir, repo) + if subprocess.run( + ('git', '-C', repo_dir, *ls_files_cmd[1:]), + check=True, stdout=subprocess.PIPE, + ).stdout: + yield repo_dir + + +def apply_fix(*, ls_files_cmd, sed_cmd): + filenames = zsplit(subprocess.check_output(ls_files_cmd)) + filenames = [f.decode() for f in filenames] + autofix_lib.run(*sed_cmd, *filenames) + + +def _quote_cmd(cmd): + return ' '.join(shlex.quote(arg) for arg in cmd) + + +def main(argv=None): + parser = argparse.ArgumentParser() + cli.add_fixer_args(parser) + parser.add_argument( + '-r', '--regexp-extended', + action='store_true', + help='use extended regular expressions in the script.', + ) + parser.add_argument('--branch-name', default='all-repos-sed') + parser.add_argument('--commit-msg') + parser.add_argument('pattern') + parser.add_argument('filenames_glob', help='(passed to ls-files)') + args = parser.parse_args(argv) + + dash_r = ('-r',) if args.regexp_extended else () + sed_cmd = ('sed', '-i', 
*dash_r, args.pattern) + ls_files_cmd = ('git', 'ls-files', '-z', '--', args.filenames_glob) + + msg = f'{_quote_cmd(ls_files_cmd)} | xargs -0 {_quote_cmd(sed_cmd)}' + msg = args.commit_msg or msg + + repos, config, commit, autofix_settings = autofix_lib.from_cli( + args, + find_repos=functools.partial(find_repos, ls_files_cmd=ls_files_cmd), + msg=msg, branch_name=args.branch_name, + ) + + autofix_lib.fix( + repos, + apply_fix=functools.partial( + apply_fix, ls_files_cmd=ls_files_cmd, sed_cmd=sed_cmd, + ), + config=config, commit=commit, autofix_settings=autofix_settings, + ) + + +if __name__ == '__main__': + exit(main()) diff --git a/all_repos/util.py b/all_repos/util.py new file mode 100644 index 0000000..a689acb --- /dev/null +++ b/all_repos/util.py @@ -0,0 +1,5 @@ +def zsplit(bs): + if bs: + return bs.rstrip(b'\0').split(b'\0') + else: + return [] diff --git a/setup.py b/setup.py index 5b79eb7..51c20ae 100644 --- a/setup.py +++ b/setup.py @@ -20,6 +20,7 @@ setup( 'all-repos-clone=all_repos.clone:main', 'all-repos-find-files=all_repos.find_files:main', 'all-repos-grep=all_repos.grep:main', + 'all-repos-sed=all_repos.sed:main', ], }, )
Make an `all-repos-sed` Sometimes I don't want to write the full gamut of a fixer and such and really just want some nice `sed -i`. I think the interface would be something like: ``` $ all-repos-sed 's/foo/bar/g' -- baz.f ``` And this tool would make a fixer which does essentially (for each repository): ``` git ls-files -- baz.f | xargs sed -i 's/foo/bar/g' ```
asottile/all-repos
diff --git a/tests/sed_test.py b/tests/sed_test.py new file mode 100644 index 0000000..bee579c --- /dev/null +++ b/tests/sed_test.py @@ -0,0 +1,25 @@ +from all_repos import clone +from all_repos.sed import main +from testing.git import write_file_commit + + +def test_main(file_config_files): + clone.main(('--config-filename', str(file_config_files.cfg))) + assert not main(( + '--config-filename', str(file_config_files.cfg), + 's/HAI/BAI/g', '*', + )) + assert file_config_files.dir1.join('f').read() == 'OBAI\n' + assert file_config_files.dir2.join('f').read() == 'OHELLO\n' + + +def test_main_custom_file_pattern(file_config_files): + write_file_commit(file_config_files.dir1, 'g', 'OHAI\n') + clone.main(('--config-filename', str(file_config_files.cfg))) + assert not main(( + '--config-filename', str(file_config_files.cfg), + 's/AI/IE/g', 'g', + )) + assert file_config_files.dir1.join('f').read() == 'OHAI\n' + assert file_config_files.dir1.join('g').read() == 'OHIE\n' + assert file_config_files.dir2.join('f').read() == 'OHELLO\n' diff --git a/tests/util_test.py b/tests/util_test.py new file mode 100644 index 0000000..474efb8 --- /dev/null +++ b/tests/util_test.py @@ -0,0 +1,15 @@ +import pytest + +from all_repos.util import zsplit + + [email protected]( + ('bs', 'expected'), + ( + (b'', []), + (b'\0', [b'']), + (b'a\0b\0', [b'a', b'b']), + ), +) +def test_zsplit(bs, expected): + assert zsplit(bs) == expected
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_added_files", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 2 }
0.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-env" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements-dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
-e git+https://github.com/asottile/all-repos.git@0c1622739a951e5904debb622386e0840c751164#egg=all_repos certifi==2025.1.31 cfgv==3.4.0 charset-normalizer==3.4.1 coverage==7.8.0 distlib==0.3.9 exceptiongroup==1.2.2 filelock==3.18.0 flake8==7.2.0 identify==2.6.9 idna==3.10 iniconfig==2.1.0 mccabe==0.7.0 nodeenv==1.9.1 packaging==24.2 platformdirs==4.3.7 pluggy==1.5.0 pre_commit==4.2.0 pycodestyle==2.13.0 pyflakes==3.3.2 pytest==8.3.5 pytest-env==1.1.5 PyYAML==6.0.2 requests==2.32.3 tomli==2.2.1 urllib3==2.3.0 virtualenv==20.29.3
name: all-repos channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - certifi==2025.1.31 - cfgv==3.4.0 - charset-normalizer==3.4.1 - coverage==7.8.0 - distlib==0.3.9 - exceptiongroup==1.2.2 - filelock==3.18.0 - flake8==7.2.0 - identify==2.6.9 - idna==3.10 - iniconfig==2.1.0 - mccabe==0.7.0 - nodeenv==1.9.1 - packaging==24.2 - platformdirs==4.3.7 - pluggy==1.5.0 - pre-commit==4.2.0 - pycodestyle==2.13.0 - pyflakes==3.3.2 - pytest==8.3.5 - pytest-env==1.1.5 - pyyaml==6.0.2 - requests==2.32.3 - tomli==2.2.1 - urllib3==2.3.0 - virtualenv==20.29.3 prefix: /opt/conda/envs/all-repos
[ "tests/sed_test.py::test_main", "tests/sed_test.py::test_main_custom_file_pattern", "tests/util_test.py::test_zsplit[-expected0]", "tests/util_test.py::test_zsplit[\\x00-expected1]", "tests/util_test.py::test_zsplit[a\\x00b\\x00-expected2]" ]
[]
[]
[]
MIT License
1,791
[ "setup.py", "all_repos/find_files.py", "all_repos/sed.py", "all_repos/util.py" ]
[ "setup.py", "all_repos/find_files.py", "all_repos/sed.py", "all_repos/util.py" ]
stummjr__scrapy-fieldstats-7
476b8a2bdb01ab3f77a6dc5af936284f41a20272
2017-10-21 16:38:01
476b8a2bdb01ab3f77a6dc5af936284f41a20272
diff --git a/scrapy_fieldstats/fieldstats.py b/scrapy_fieldstats/fieldstats.py index 703b2dc..83d9860 100644 --- a/scrapy_fieldstats/fieldstats.py +++ b/scrapy_fieldstats/fieldstats.py @@ -1,7 +1,6 @@ # -*- coding:utf-8 -*- import logging import pprint -from collections import defaultdict from scrapy import signals from scrapy.exceptions import NotConfigured @@ -10,12 +9,12 @@ logger = logging.getLogger(__name__) class FieldStatsExtension(object): - """ When enabled, the FieldStats extensions logs the percentage of + """ When enabled, the FieldStats extension logs the percentage of items coverage for a crawl. """ def __init__(self): self.item_count = 0 - self.field_counts = defaultdict(int) + self.field_counts = {} @classmethod def from_crawler(cls, crawler): @@ -28,20 +27,46 @@ class FieldStatsExtension(object): return ext def item_scraped(self, item, spider): + self.compute_item(item) + + def spider_closed(self, spider): + fields_summary = self.build_fields_summary() + logger.info('Field stats:\n{}'.format(pprint.pformat(fields_summary))) + + def compute_item(self, item): self.item_count += 1 + self.count_item_fields(item) + + def count_item_fields(self, item, current_node=None): + if current_node is None: + current_node = self.field_counts + for name, value in item.items(): if not value: continue - self.field_counts[name] += 1 - def spider_closed(self, spider): - field_stats = self.compute_fieldstats() - logger.info('Field stats:\n{}'.format(pprint.pformat(field_stats))) + if isinstance(value, dict): + # recurse into nested items + if name not in current_node: + current_node[name] = {} + self.count_item_fields(value, current_node=current_node[name]) + continue + + if name not in current_node: + current_node[name] = 0 + current_node[name] += 1 + + def build_fields_summary(self, field_counts=None, fields_summary=None): + if field_counts is None: + field_counts = self.field_counts + fields_summary = {} - def compute_fieldstats(self): - field_stats = {} - for name, 
count in self.field_counts.items(): - field_coverage = int(count) * 100 / self.item_count - field_stats[name] = "{}%".format(field_coverage) + for name, value in field_counts.items(): + if isinstance(value, dict): + fields_summary[name] = {} + self.build_fields_summary(field_counts[name], fields_summary[name]) + else: + field_percentage = int(value) * 100 / self.item_count + fields_summary[name] = "{}%".format(field_percentage) - return field_stats + return fields_summary
Support nested items Currently, the extension only counts shallow items in dictionaries. However, it's quite common to have nested items such as: ```json { "title": "Animal Farm", "author": { "name": "George Orwell", "birth_location": "Motihari, India", "birth_date": "1903-06-25" }, "ratings": 4.9 } ``` In this case, the extension doesn't generate coverage stats for the nested `name`, `birth_location` and `birth_date` fields.
stummjr/scrapy-fieldstats
diff --git a/tests/test_scrapy_fieldstats.py b/tests/test_scrapy_fieldstats.py index 2873777..24b749f 100644 --- a/tests/test_scrapy_fieldstats.py +++ b/tests/test_scrapy_fieldstats.py @@ -3,18 +3,20 @@ from scrapy_fieldstats.fieldstats import FieldStatsExtension -def fake_extract_items(fake_items, extension): +def extract_fake_items_and_compute_stats(fake_items): + ext = FieldStatsExtension() for item in fake_items: - extension.item_scraped(item, None) + ext.compute_item(item) + field_stats = ext.build_fields_summary() + return field_stats def test_single_item(): fake_items = [{"field1": "value1"}] - ext = FieldStatsExtension() - fake_extract_items(fake_items, ext) - field_stats = ext.compute_fieldstats() + field_stats = extract_fake_items_and_compute_stats(fake_items) + assert len(field_stats) == 1 - assert field_stats.get('field1') == '100.0%' + assert field_stats['field1'] == '100.0%' def test_single_item_many_fields(): @@ -24,21 +26,19 @@ def test_single_item_many_fields(): "field2": "value2", } ] - ext = FieldStatsExtension() - fake_extract_items(fake_items, ext) - field_stats = ext.compute_fieldstats() + field_stats = extract_fake_items_and_compute_stats(fake_items) + assert len(field_stats) == 2 - assert field_stats.get('field1') == '100.0%' - assert field_stats.get('field2') == '100.0%' + assert field_stats['field1'] == '100.0%' + assert field_stats['field2'] == '100.0%' def test_many_items(): fake_items = [{"field1": "value1"}, {"field1": "value1"}] - ext = FieldStatsExtension() - fake_extract_items(fake_items, ext) - field_stats = ext.compute_fieldstats() + field_stats = extract_fake_items_and_compute_stats(fake_items) + assert len(field_stats) == 1 - assert field_stats.get('field1') == '100.0%' + assert field_stats['field1'] == '100.0%' def test_many_items_many_fields(): @@ -52,12 +52,11 @@ def test_many_items_many_fields(): "field2": "value2", } ] - ext = FieldStatsExtension() - fake_extract_items(fake_items, ext) - field_stats = 
ext.compute_fieldstats() + field_stats = extract_fake_items_and_compute_stats(fake_items) + assert len(field_stats) == 2 - assert field_stats.get('field1') == '100.0%' - assert field_stats.get('field2') == '100.0%' + assert field_stats['field1'] == '100.0%' + assert field_stats['field2'] == '100.0%' def test_many_items_many_fields_missing_field(): @@ -70,12 +69,11 @@ def test_many_items_many_fields_missing_field(): "field2": "value2", } ] - ext = FieldStatsExtension() - fake_extract_items(fake_items, ext) - field_stats = ext.compute_fieldstats() + field_stats = extract_fake_items_and_compute_stats(fake_items) + assert len(field_stats) == 2 - assert field_stats.get('field1') == '100.0%' - assert field_stats.get('field2') == '50.0%' + assert field_stats['field1'] == '100.0%' + assert field_stats['field2'] == '50.0%' def test_many_items_many_fields_empty_field(): @@ -89,9 +87,41 @@ def test_many_items_many_fields_empty_field(): "field2": "value2", } ] - ext = FieldStatsExtension() - fake_extract_items(fake_items, ext) - field_stats = ext.compute_fieldstats() + field_stats = extract_fake_items_and_compute_stats(fake_items) + assert len(field_stats) == 2 - assert field_stats.get('field1') == '100.0%' - assert field_stats.get('field2') == '50.0%' + assert field_stats['field1'] == '100.0%' + assert field_stats['field2'] == '50.0%' + + +def test_nested_items(): + fake_items = [ + { + "field1": "value1", + "field2": { + "field2.1": "value2.1", + "field2.2": "value2.2", + "field2.3": { + "field2.3.1": "value2.3.1", + "field2.3.2": "value2.3.2", + }, + } + }, + { + "field1": "value1", + "field2": { + "field2.1": "value2.1", + "field2.3": { + "field2.3.1": "value2.3.1", + "field2.3.2": "", + }, + "field2.4": "value2.2", + } + } + ] + field_stats = extract_fake_items_and_compute_stats(fake_items) + assert field_stats['field1'] == '100.0%' + assert field_stats['field2']['field2.1'] == '100.0%' + assert field_stats['field2']['field2.2'] == '50.0%' + assert 
field_stats['field2']['field2.2'] == '50.0%' + assert field_stats['field2']['field2.3']['field2.3.2'] == '50.0%'
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 1 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "scrapy", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
appdirs==1.4.4 attrs @ file:///croot/attrs_1734533101012/work Automat @ file:///tmp/build/80754af9/automat_1600298431173/work bcrypt @ file:///croot/bcrypt_1736182451882/work Brotli @ file:///croot/brotli-split_1736182456865/work certifi @ file:///croot/certifi_1738623731865/work/certifi cffi @ file:///croot/cffi_1736182485317/work charset-normalizer @ file:///croot/charset-normalizer_1721748349566/work constantly @ file:///croot/constantly_1703165600746/work cryptography @ file:///croot/cryptography_1740577825284/work cssselect @ file:///croot/cssselect_1707339882883/work defusedxml @ file:///tmp/build/80754af9/defusedxml_1615228127516/work exceptiongroup==1.2.2 filelock @ file:///croot/filelock_1700591183607/work hyperlink @ file:///tmp/build/80754af9/hyperlink_1610130746837/work idna @ file:///croot/idna_1714398848350/work incremental @ file:///croot/incremental_1708639938299/work iniconfig==2.1.0 itemadapter @ file:///tmp/build/80754af9/itemadapter_1626442940632/work itemloaders @ file:///croot/itemloaders_1708639918324/work jmespath @ file:///croot/jmespath_1700144569655/work lxml @ file:///croot/lxml_1737039601731/work packaging @ file:///croot/packaging_1734472117206/work parsel @ file:///croot/parsel_1707503445438/work pluggy==1.5.0 Protego @ file:///tmp/build/80754af9/protego_1598657180827/work pyasn1 @ file:///croot/pyasn1_1729239786406/work pyasn1_modules @ file:///home/conda/feedstock_root/build_artifacts/pyasn1-modules_1733324602540/work pycparser @ file:///tmp/build/80754af9/pycparser_1636541352034/work PyDispatcher==2.0.5 pyOpenSSL @ file:///croot/pyopenssl_1741343803032/work PySocks @ file:///tmp/build/80754af9/pysocks_1605305812635/work pytest==8.3.5 queuelib @ file:///croot/queuelib_1696950067631/work requests @ file:///croot/requests_1730999120400/work requests-file @ file:///Users/ktietz/demo/mc3/conda-bld/requests-file_1629455781986/work Scrapy @ file:///croot/scrapy_1733166797775/work -e 
git+https://github.com/stummjr/scrapy-fieldstats.git@476b8a2bdb01ab3f77a6dc5af936284f41a20272#egg=scrapy_fieldstats service-identity @ file:///Users/ktietz/demo/mc3/conda-bld/service_identity_1629460757137/work six @ file:///tmp/build/80754af9/six_1644875935023/work tldextract @ file:///croot/tldextract_1723064386918/work tomli==2.2.1 Twisted @ file:///croot/twisted_1708702809815/work typing_extensions @ file:///croot/typing_extensions_1734714854207/work urllib3 @ file:///croot/urllib3_1737133630106/work w3lib @ file:///croot/w3lib_1708639924738/work zope.interface @ file:///croot/zope.interface_1731939362051/work
name: scrapy-fieldstats channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - appdirs=1.4.4=pyhd3eb1b0_0 - attrs=24.3.0=py39h06a4308_0 - automat=20.2.0=py_0 - bcrypt=3.2.0=py39h5eee18b_2 - brotli-python=1.0.9=py39h6a678d5_9 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2025.1.31=py39h06a4308_0 - cffi=1.17.1=py39h1fdaa30_1 - charset-normalizer=3.3.2=pyhd3eb1b0_0 - constantly=23.10.4=py39h06a4308_0 - cryptography=44.0.1=py39h7825ff9_0 - cssselect=1.2.0=py39h06a4308_0 - defusedxml=0.7.1=pyhd3eb1b0_0 - filelock=3.13.1=py39h06a4308_0 - hyperlink=21.0.0=pyhd3eb1b0_0 - icu=73.1=h6a678d5_0 - idna=3.7=py39h06a4308_0 - incremental=22.10.0=pyhd3eb1b0_0 - itemadapter=0.3.0=pyhd3eb1b0_0 - itemloaders=1.1.0=py39h06a4308_0 - jmespath=1.0.1=py39h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - libxml2=2.13.5=hfdd30dd_0 - libxslt=1.1.41=h097e994_0 - lxml=5.3.0=py39h57af460_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - parsel=1.8.1=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - protego=0.1.16=py_0 - pyasn1=0.6.1=py39h06a4308_0 - pyasn1-modules=0.4.1=pyhd8ed1ab_1 - pycparser=2.21=pyhd3eb1b0_0 - pydispatcher=2.0.5=py39h06a4308_2 - pyopenssl=25.0.0=py39h06a4308_0 - pysocks=1.7.1=py39h06a4308_0 - python=3.9.21=he870216_1 - queuelib=1.6.2=py39h06a4308_0 - readline=8.2=h5eee18b_0 - requests=2.32.3=py39h06a4308_1 - requests-file=1.5.1=pyhd3eb1b0_0 - scrapy=2.12.0=py39h06a4308_0 - service_identity=18.1.0=pyhd3eb1b0_1 - setuptools=75.8.0=py39h06a4308_0 - six=1.16.0=pyhd3eb1b0_1 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tldextract=5.1.2=py39h06a4308_0 - twisted=23.10.0=py39h06a4308_0 - typing_extensions=4.12.2=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - urllib3=2.3.0=py39h06a4308_0 - w3lib=2.1.2=py39h06a4308_0 
- wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - zope=1.0=py39h06a4308_1 - zope.interface=7.1.1=py39h5eee18b_0 - pip: - exceptiongroup==1.2.2 - iniconfig==2.1.0 - pluggy==1.5.0 - pytest==8.3.5 - tomli==2.2.1 prefix: /opt/conda/envs/scrapy-fieldstats
[ "tests/test_scrapy_fieldstats.py::test_single_item", "tests/test_scrapy_fieldstats.py::test_single_item_many_fields", "tests/test_scrapy_fieldstats.py::test_many_items", "tests/test_scrapy_fieldstats.py::test_many_items_many_fields", "tests/test_scrapy_fieldstats.py::test_many_items_many_fields_missing_field", "tests/test_scrapy_fieldstats.py::test_many_items_many_fields_empty_field", "tests/test_scrapy_fieldstats.py::test_nested_items" ]
[]
[]
[]
MIT License
1,792
[ "scrapy_fieldstats/fieldstats.py" ]
[ "scrapy_fieldstats/fieldstats.py" ]
jaywink__federation-104
bcc779e006bc0af192db08a1ff8ed245c0fbd7c9
2017-10-21 19:06:55
bcc779e006bc0af192db08a1ff8ed245c0fbd7c9
diff --git a/CHANGELOG.md b/CHANGELOG.md index 19755b4..fb6c6c0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,36 @@ * Added base entity `Share` which maps to a `DiasporaReshare` for the Diaspora protocol. ([related issue](https://github.com/jaywink/federation/issues/94)) The `Share` entity supports all the properties that a Diaspora reshare does. Additionally two other properties are supported: `raw_content` and `entity_type`. The former can be used for a "quoted share" case where the sharer adds their own note to the share. The latter can be used to reference the type of object that was shared, to help the receiver, if it is not sharing a `Post` entity. The value must be a base entity class name. + +* Entities have two new properties: `id` and `target_id`. + + Diaspora entity ID's are in the form of the [Diaspora URI scheme](https://diaspora.github.io/diaspora_federation/federation/diaspora_scheme.html), where it is possible to construct an ID from the entity. In the future, ActivityPub object ID's will be found in these properties. + +* New high level fetcher function `federation.fetchers.retrieve_remote_content`. ([related issue](https://github.com/jaywink/federation/issues/103)) + + This function takes the following parameters: + + * `id` - Object ID. For Diaspora, the only supported protocol at the moment, this is in the [Diaspora URI](https://diaspora.github.io/diaspora_federation/federation/diaspora_scheme.html) format. + * `sender_key_fetcher` - Optional function that takes a profile `handle` and returns a public key in `str` format. If this is not given, the public key will be fetched from the remote profile over the network. + + The given ID will be fetched from the remote endpoint, validated to be from the correct author against their public key and then an instance of the entity class will be constructed and returned. + +* New Diaspora protocol helpers in `federation.utils.diaspora`: + + * `retrieve_and_parse_content`. 
See notes regarding the high level fetcher above. + * `fetch_public_key`. Given a `handle` as a parameter, will fetch the remote profile and return the `public_key` from it. + * `parse_diaspora_uri`. Parses a Diaspora URI scheme string, returns either `None` if parsing fails or a `tuple` of `handle`, `entity_type` and `guid`. + +### Changed +* Refactoring for Diaspora `MagicEnvelope` class. + + The class init now also allows passing in parameters to construct and verify MagicEnvelope instances. The order of init parameters has not been changed, but they are now all optional. When creating a class instance, one should always pass in the necessary parameters depnding on whether the class instance will be used for building a payload or verifying an incoming payload. See class docstring for details. + +* Diaspora procotol receive flow now uses the `MagicEnvelope` class to verify payloads. No functional changes regarding verification otherwise. + +* Diaspora protocol receive flow now fetches the sender public key over the network if a `sender_key_fetcher` function is not passed in. Previously an error would be raised. + + Note that fetching over the network for each payload is wasteful. Implementers should instead cache public keys when possible and pass in a function to retrieve them, as before. ### Fixed * Converting base entity `Profile` to `DiasporaProfile` for outbound sending missed two attributes, `image_urls` and `tag_list`. Those are now included so that the values transfer into the built payload. diff --git a/docs/usage.rst b/docs/usage.rst index 5a84ebd..d599c11 100644 --- a/docs/usage.rst +++ b/docs/usage.rst @@ -66,6 +66,7 @@ Fetchers High level utility functions to fetch remote objects. These should be favoured instead of protocol specific utility functions. +.. autofunction:: federation.fetchers.retrieve_remote_content .. 
autofunction:: federation.fetchers.retrieve_remote_profile @@ -105,7 +106,12 @@ Various utils are provided for internal and external usage. Diaspora ........ +.. autofunction:: federation.utils.diaspora.fetch_public_key +.. autofunction:: federation.utils.diaspora.get_fetch_content_endpoint +.. autofunction:: federation.utils.diaspora.get_public_endpoint +.. autofunction:: federation.utils.diaspora.parse_diaspora_uri .. autofunction:: federation.utils.diaspora.parse_profile_from_hcard +.. autofunction:: federation.utils.diaspora.retrieve_and_parse_content .. autofunction:: federation.utils.diaspora.retrieve_and_parse_profile .. autofunction:: federation.utils.diaspora.retrieve_diaspora_hcard .. autofunction:: federation.utils.diaspora.retrieve_diaspora_webfinger diff --git a/federation/entities/base.py b/federation/entities/base.py index b8f8ba5..c0b5bcf 100644 --- a/federation/entities/base.py +++ b/federation/entities/base.py @@ -30,6 +30,14 @@ class BaseEntity: self.__class__.__name__, key )) + @property + def id(self): + """Global network ID. + + Future expansion: Convert later into an attribute which with ActivityPub will have the 'id' directly. + """ + return + def validate(self): """Do validation. @@ -120,6 +128,14 @@ class TargetGUIDMixin(BaseEntity): super().__init__(*args, **kwargs) self._required += ["target_guid"] + @property + def target_id(self): + """Global network target ID. + + Future expansion: convert to attribute when ActivityPub is supported. 
+ """ + return + def validate_target_guid(self): if len(self.target_guid) < 16: raise ValueError("Target GUID must be at least 16 characters") diff --git a/federation/entities/diaspora/entities.py b/federation/entities/diaspora/entities.py index d7392aa..ade6da1 100644 --- a/federation/entities/diaspora/entities.py +++ b/federation/entities/diaspora/entities.py @@ -1,17 +1,58 @@ +import importlib + from lxml import etree from federation.entities.base import ( - Comment, Post, Reaction, Relationship, Profile, Retraction, BaseEntity, Follow, Share) + Comment, Post, Reaction, Relationship, Profile, Retraction, BaseEntity, Follow, Share, Image, +) from federation.entities.diaspora.utils import format_dt, struct_to_xml, get_base_attributes, add_element_to_doc from federation.exceptions import SignatureVerificationError from federation.protocols.diaspora.signatures import verify_relayable_signature, create_relayable_signature from federation.utils.diaspora import retrieve_and_parse_profile +CLASS_TO_TAG_MAPPING = { + Comment: "comment", + Follow: "contact", + Image: "photo", + Post: "status_message", + Profile: "profile", + Reaction: "like", + Relationship: "request", + Retraction: "retraction", + Share: "reshare", +} + class DiasporaEntityMixin(BaseEntity): # Normally outbound document is generated from entity. Store one here if at some point we already have a doc outbound_doc = None + @property + def id(self): + """Diaspora URI scheme format ID. + + Only available for entities that own a handle and a guid. + """ + try: + # noinspection PyUnresolvedReferences + return "diaspora://%s/%s/%s" % (self.handle, self._tag_name, self.guid) + except AttributeError: + return None + + # noinspection PyUnresolvedReferences + @property + def target_id(self): + """Diaspora URI scheme format target ID. + + Only available for entities that own a target_handle, target_guid and entity_type. 
+ """ + try: + cls_module = importlib.import_module("federation.entities.base") + cls = getattr(cls_module, self.entity_type) + return "diaspora://%s/%s/%s" % (self.target_handle, CLASS_TO_TAG_MAPPING[cls], self.target_guid) + except (AttributeError, ImportError, KeyError): + return None + def to_xml(self): """Override in subclasses.""" raise NotImplementedError @@ -66,8 +107,10 @@ class DiasporaRelayableMixin(DiasporaEntityMixin): class DiasporaComment(DiasporaRelayableMixin, Comment): """Diaspora comment.""" + _tag_name = "comment" + def to_xml(self): - element = etree.Element("comment") + element = etree.Element(self._tag_name) struct_to_xml(element, [ {"guid": self.guid}, {"parent_guid": self.target_guid}, @@ -82,9 +125,11 @@ class DiasporaComment(DiasporaRelayableMixin, Comment): class DiasporaPost(DiasporaEntityMixin, Post): """Diaspora post, ie status message.""" + _tag_name = "status_message" + def to_xml(self): """Convert to XML message.""" - element = etree.Element("status_message") + element = etree.Element(self._tag_name) struct_to_xml(element, [ {"raw_message": self.raw_content}, {"guid": self.guid}, @@ -98,11 +143,12 @@ class DiasporaPost(DiasporaEntityMixin, Post): class DiasporaLike(DiasporaRelayableMixin, Reaction): """Diaspora like.""" + _tag_name = "like" reaction = "like" def to_xml(self): """Convert to XML message.""" - element = etree.Element("like") + element = etree.Element(self._tag_name) struct_to_xml(element, [ {"target_type": "Post"}, {"guid": self.guid}, @@ -117,11 +163,12 @@ class DiasporaLike(DiasporaRelayableMixin, Reaction): class DiasporaRequest(DiasporaEntityMixin, Relationship): """Diaspora legacy request.""" + _tag_name = "request" relationship = "sharing" def to_xml(self): """Convert to XML message.""" - element = etree.Element("request") + element = etree.Element(self._tag_name) struct_to_xml(element, [ {"sender_handle": self.handle}, {"recipient_handle": self.target_handle}, @@ -134,10 +181,11 @@ class 
DiasporaContact(DiasporaEntityMixin, Follow): Note we don't implement 'sharing' at the moment so just send it as the same as 'following'. """ + _tag_name = "contact" def to_xml(self): """Convert to XML message.""" - element = etree.Element("contact") + element = etree.Element(self._tag_name) struct_to_xml(element, [ {"author": self.handle}, {"recipient": self.target_handle}, @@ -149,10 +197,11 @@ class DiasporaContact(DiasporaEntityMixin, Follow): class DiasporaProfile(DiasporaEntityMixin, Profile): """Diaspora profile.""" + _tag_name = "profile" def to_xml(self): """Convert to XML message.""" - element = etree.Element("profile") + element = etree.Element(self._tag_name) struct_to_xml(element, [ {"diaspora_handle": self.handle}, {"first_name": self.name}, @@ -181,6 +230,7 @@ class DiasporaProfile(DiasporaEntityMixin, Profile): class DiasporaRetraction(DiasporaEntityMixin, Retraction): """Diaspora Retraction.""" + _tag_name = "retraction" mapped = { "Like": "Reaction", "Photo": "Image", @@ -189,7 +239,7 @@ class DiasporaRetraction(DiasporaEntityMixin, Retraction): def to_xml(self): """Convert to XML message.""" - element = etree.Element("retraction") + element = etree.Element(self._tag_name) struct_to_xml(element, [ {"author": self.handle}, {"target_guid": self.target_guid}, @@ -216,8 +266,10 @@ class DiasporaRetraction(DiasporaEntityMixin, Retraction): class DiasporaReshare(DiasporaEntityMixin, Share): """Diaspora Reshare.""" + _tag_name = "reshare" + def to_xml(self): - element = etree.Element("reshare") + element = etree.Element(self._tag_name) struct_to_xml(element, [ {"author": self.handle}, {"guid": self.guid}, diff --git a/federation/entities/diaspora/mappers.py b/federation/entities/diaspora/mappers.py index 5acd205..e9e2884 100644 --- a/federation/entities/diaspora/mappers.py +++ b/federation/entities/diaspora/mappers.py @@ -3,10 +3,11 @@ from datetime import datetime from lxml import etree -from federation.entities.base import Image, Relationship, Post, 
Reaction, Comment, Profile, Retraction, Follow, Share +from federation.entities.base import Comment, Follow, Image, Post, Profile, Reaction, Relationship, Retraction, Share from federation.entities.diaspora.entities import ( - DiasporaPost, DiasporaComment, DiasporaLike, DiasporaRequest, DiasporaProfile, DiasporaRetraction, - DiasporaRelayableMixin, DiasporaContact, DiasporaReshare) + DiasporaComment, DiasporaContact, DiasporaLike, DiasporaPost, + DiasporaProfile, DiasporaRelayableMixin, DiasporaRequest, DiasporaReshare, DiasporaRetraction, +) from federation.protocols.diaspora.signatures import get_element_child_info from federation.utils.diaspora import retrieve_and_parse_profile diff --git a/federation/fetchers.py b/federation/fetchers.py index 46b9a5d..a32a7bf 100644 --- a/federation/fetchers.py +++ b/federation/fetchers.py @@ -1,7 +1,21 @@ -# -*- coding: utf-8 -*- import importlib +def retrieve_remote_content(id, sender_key_fetcher=None): + """Retrieve remote content and return an Entity object. + + Currently, due to no other protocols supported, always use the Diaspora protocol. + + :param id: ID of the remote entity. + :param sender_key_fetcher: Function to use to fetch sender public key. If not given, network will be used + to fetch the profile and the key. Function must take handle as only parameter and return a public key. + :returns: Entity class instance or ``None`` + """ + protocol_name = "diaspora" + utils = importlib.import_module("federation.utils.%s" % protocol_name) + return utils.retrieve_and_parse_content(id, sender_key_fetcher=sender_key_fetcher) + + def retrieve_remote_profile(handle): """High level retrieve profile method. @@ -10,7 +24,7 @@ def retrieve_remote_profile(handle): Currently, due to no other protocols supported, always use the Diaspora protocol. 
- :arg handle: The profile handle in format [email protected] + :param handle: The profile handle in format [email protected] :returns: ``federation.entities.base.Profile`` or ``None`` """ protocol_name = "diaspora" diff --git a/federation/protocols/diaspora/magic_envelope.py b/federation/protocols/diaspora/magic_envelope.py index 46a9a08..7279fe5 100644 --- a/federation/protocols/diaspora/magic_envelope.py +++ b/federation/protocols/diaspora/magic_envelope.py @@ -1,9 +1,13 @@ from base64 import urlsafe_b64encode, b64encode, urlsafe_b64decode from Crypto.Hash import SHA256 -from Crypto.Signature import PKCS1_v1_5 as PKCSSign +from Crypto.PublicKey import RSA +from Crypto.Signature import PKCS1_v1_5 from lxml import etree +from federation.exceptions import SignatureVerificationError +from federation.utils.diaspora import fetch_public_key +from federation.utils.text import decode_if_bytes NAMESPACE = "http://salmon-protocol.org/ns/magic-env" @@ -11,25 +15,70 @@ NAMESPACE = "http://salmon-protocol.org/ns/magic-env" class MagicEnvelope: """Diaspora protocol magic envelope. - See: http://diaspora.github.io/diaspora_federation/federation/magicsig.html + Can be used to construct and deconstruct MagicEnvelope documents. + + When constructing, the following parameters should be given: + * message + * private_key + * author_handle + + When deconstructing, the following should be given: + * payload + * public_key (optional, will be fetched if not given, using either 'sender_key_fetcher' or remote server) + + Upstream specification: http://diaspora.github.io/diaspora_federation/federation/magicsig.html """ nsmap = { "me": NAMESPACE, } - def __init__(self, message, private_key, author_handle, wrap_payload=False): + def __init__(self, message=None, private_key=None, author_handle=None, wrap_payload=False, payload=None, + public_key=None, sender_key_fetcher=None, verify=False, doc=None): """ - Args: - wrap_payload (bool) - Whether to wrap the message in <XML><post></post></XML>. 
- This is part of the legacy Diaspora protocol which will be removed in the future. (default False) + All parameters are optional. Some are required for signing, some for opening. + + :param message: Message string. Required to create a MagicEnvelope document. + :param private_key: Private key RSA object. + :param author_handle: Author signing the Magic Envelope, owns the private key. + :param wrap_payload: - Boolean, whether to wrap the message in <XML><post></post></XML>. + This is part of the legacy Diaspora protocol which will be removed in the future. (default False) + :param payload: Magic Envelope payload as str or bytes. + :param public_key: Author public key in str format. + :param sender_key_fetcher: Function to use to fetch sender public key, if public key not given. Will fall back + to network fetch of the profile and the key. Function must take handle as only parameter and return + a public key string. + :param verify: Verify after creating object, defaults to False. + :param doc: MagicEnvelope document. 
""" - self.message = message + self._message = message self.private_key = private_key self.author_handle = author_handle self.wrap_payload = wrap_payload - self.doc = None - self.payload = None + self.payload = payload + self.public_key = public_key + self.sender_key_fetcher = sender_key_fetcher + if payload: + self.extract_payload() + elif doc is not None: + self.doc = doc + else: + self.doc = None + if verify: + self.verify() + + def extract_payload(self): + payload = decode_if_bytes(self.payload) + payload = payload.lstrip().encode("utf-8") + self.doc = etree.fromstring(payload) + self.author_handle = self.get_sender(self.doc) + self.message = self.message_from_doc() + + def fetch_public_key(self): + if self.sender_key_fetcher: + self.public_key = self.sender_key_fetcher(self.author_handle) + return + self.public_key = fetch_public_key(self.author_handle) @staticmethod def get_sender(doc): @@ -41,6 +90,19 @@ class MagicEnvelope: key_id = doc.find(".//{%s}sig" % NAMESPACE).get("key_id") return urlsafe_b64decode(key_id).decode("utf-8") + @property + def message(self): + return self._message + + @message.setter + def message(self, value): + self._message = value + + def message_from_doc(self): + message = self.doc.find( + ".//{http://salmon-protocol.org/ns/magic-env}data").text + return urlsafe_b64decode(message.encode("ascii")) + def create_payload(self): """Create the payload doc. @@ -65,7 +127,7 @@ class MagicEnvelope: b64encode(b"base64url").decode("ascii") + "." 
+ \ b64encode(b"RSA-SHA256").decode("ascii") sig_hash = SHA256.new(sig_contents.encode("ascii")) - cipher = PKCSSign.new(self.private_key) + cipher = PKCS1_v1_5.new(self.private_key) sig = urlsafe_b64encode(cipher.sign(sig_hash)) key_id = urlsafe_b64encode(bytes(self.author_handle, encoding="utf-8")) return sig, key_id @@ -84,3 +146,20 @@ class MagicEnvelope: if self.doc is None: self.build() return etree.tostring(self.doc, encoding="unicode") + + def verify(self): + """Verify Magic Envelope document against public key.""" + if not self.public_key: + self.fetch_public_key() + data = self.doc.find(".//{http://salmon-protocol.org/ns/magic-env}data").text + sig = self.doc.find(".//{http://salmon-protocol.org/ns/magic-env}sig").text + sig_contents = '.'.join([ + data, + b64encode(b"application/xml").decode("ascii"), + b64encode(b"base64url").decode("ascii"), + b64encode(b"RSA-SHA256").decode("ascii") + ]) + sig_hash = SHA256.new(sig_contents.encode("ascii")) + cipher = PKCS1_v1_5.new(RSA.importKey(self.public_key)) + if not cipher.verify(sig_hash, urlsafe_b64decode(sig)): + raise SignatureVerificationError("Signature cannot be verified using the given public key") diff --git a/federation/protocols/diaspora/protocol.py b/federation/protocols/diaspora/protocol.py index 4e094bc..100cbdc 100644 --- a/federation/protocols/diaspora/protocol.py +++ b/federation/protocols/diaspora/protocol.py @@ -5,15 +5,15 @@ from urllib.parse import unquote_plus from Crypto.Cipher import AES, PKCS1_v1_5 from Crypto.Hash import SHA256 -from Crypto.PublicKey import RSA from Crypto.Random import get_random_bytes from Crypto.Signature import PKCS1_v1_5 as PKCSSign from lxml import etree -from federation.exceptions import EncryptedMessageError, NoSenderKeyFoundError, SignatureVerificationError +from federation.exceptions import EncryptedMessageError, NoSenderKeyFoundError from federation.protocols.base import BaseProtocol from federation.protocols.diaspora.encrypted import EncryptedPayload from 
federation.protocols.diaspora.magic_envelope import MagicEnvelope +from federation.utils.diaspora import fetch_public_key from federation.utils.text import decode_if_bytes, encode_if_text logger = logging.getLogger("federation") @@ -174,23 +174,13 @@ class Protocol(BaseProtocol): Verify the signed XML elements to have confidence that the claimed author did actually generate this message. """ - sender_key = self.get_contact_key(self.sender_handle) + if self.get_contact_key: + sender_key = self.get_contact_key(self.sender_handle) + else: + sender_key = fetch_public_key(self.sender_handle) if not sender_key: raise NoSenderKeyFoundError("Could not find a sender contact to retrieve key") - body = self.doc.find( - ".//{http://salmon-protocol.org/ns/magic-env}data").text - sig = self.doc.find( - ".//{http://salmon-protocol.org/ns/magic-env}sig").text - sig_contents = '.'.join([ - body, - b64encode(b"application/xml").decode("ascii"), - b64encode(b"base64url").decode("ascii"), - b64encode(b"RSA-SHA256").decode("ascii") - ]) - sig_hash = SHA256.new(sig_contents.encode("ascii")) - cipher = PKCSSign.new(RSA.importKey(sender_key)) - if not cipher.verify(sig_hash, urlsafe_b64decode(sig)): - raise SignatureVerificationError("Signature cannot be verified using the given contact key") + MagicEnvelope(doc=self.doc, public_key=sender_key, verify=True) def parse_header(self, b64data, key): """ diff --git a/federation/utils/diaspora.py b/federation/utils/diaspora.py index f127699..b5efebb 100644 --- a/federation/utils/diaspora.py +++ b/federation/utils/diaspora.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- import logging import xml from urllib.parse import quote @@ -7,11 +6,22 @@ from lxml import html from xrd import XRD from federation.entities.base import Profile +from federation.inbound import handle_receive from federation.utils.network import fetch_document logger = logging.getLogger("federation") +def fetch_public_key(handle): + """Fetch public key over the network. 
+ + :param handle: Remote handle to retrieve public key for. + :return: Public key in str format from parsed profile. + """ + profile = retrieve_and_parse_profile(handle) + return profile.public_key + + def retrieve_diaspora_hcard(handle): """ Retrieve a remote Diaspora hCard document. @@ -98,6 +108,22 @@ def _get_element_attr_or_none(document, selector, attribute): return None +def parse_diaspora_uri(uri): + """Parse Diaspora URI scheme string. + + See: https://diaspora.github.io/diaspora_federation/federation/diaspora_scheme.html + + :return: tuple of (handle, entity_type, guid) or ``None``. + """ + if not uri.startswith("diaspora://"): + return + try: + handle, entity_type, guid = uri.replace("diaspora://", "").rsplit("/", maxsplit=2) + except ValueError: + return + return handle, entity_type, guid + + def parse_profile_from_hcard(hcard, handle): """ Parse all the fields we can from a hCard document to get a Profile. @@ -122,6 +148,38 @@ def parse_profile_from_hcard(hcard, handle): return profile +def retrieve_and_parse_content(id, sender_key_fetcher=None): + """Retrieve remote content and return an Entity class instance. + + This is basically the inverse of receiving an entity. Instead, we fetch it, then call 'handle_receive'. + + :param id: Diaspora URI scheme format ID. + :param sender_key_fetcher: Function to use to fetch sender public key. If not given, network will be used + to fetch the profile and the key. Function must take handle as only parameter and return a public key. 
+ :returns: Entity object instance or ``None`` + """ + handle, entity_type, guid = parse_diaspora_uri(id) + _username, domain = handle.split("@") + url = get_fetch_content_endpoint(domain, entity_type, guid) + document, status_code, error = fetch_document(url) + if status_code == 200: + _sender, _protocol, entities = handle_receive(document, sender_key_fetcher=sender_key_fetcher) + if len(entities) > 1: + logger.warning("retrieve_and_parse_content - more than one entity parsed from remote even though we" + "expected only one! ID %s", id) + if entities: + return entities[0] + return + elif status_code == 404: + logger.warning("retrieve_and_parse_content - remote content %s not found", id) + return + if error: + raise error + raise Exception("retrieve_and_parse_content - unknown problem when fetching document: %s, %s, %s" % ( + document, status_code, error, + )) + + def retrieve_and_parse_profile(handle): """ Retrieve the remote user and return a Profile object. @@ -142,5 +200,14 @@ def retrieve_and_parse_profile(handle): return profile +def get_fetch_content_endpoint(domain, entity_type, guid): + """Get remote fetch content endpoint. + + See: https://diaspora.github.io/diaspora_federation/federation/fetching.html + """ + return "https://%s/fetch/%s/%s" % (domain, entity_type, guid) + + def get_public_endpoint(domain): + """Get remote endpoint for delivering public payloads.""" return "https://%s/receive/public" % domain
Method to fetch remote content. For example posts, reshares and comments. Do a fetch against the correct protocol endpoint and return a validated entity. Related to jaywink/socialhome#206
jaywink/federation
diff --git a/federation/tests/conftest.py b/federation/tests/conftest.py index e2bd947..3bdf54d 100644 --- a/federation/tests/conftest.py +++ b/federation/tests/conftest.py @@ -2,7 +2,8 @@ from unittest.mock import Mock import pytest -from federation.entities.diaspora.entities import DiasporaPost +# noinspection PyUnresolvedReferences +from federation.tests.fixtures.diaspora import * from federation.tests.fixtures.keys import get_dummy_private_key @@ -23,10 +24,10 @@ def disable_network_calls(monkeypatch): @pytest.fixture -def diasporapost(): - return DiasporaPost() +def private_key(): + return get_dummy_private_key() @pytest.fixture -def private_key(): - return get_dummy_private_key() +def public_key(private_key): + return private_key.publickey().exportKey() diff --git a/federation/tests/entities/diaspora/test_entities.py b/federation/tests/entities/diaspora/test_entities.py index dea34c1..0d4e3d1 100644 --- a/federation/tests/entities/diaspora/test_entities.py +++ b/federation/tests/entities/diaspora/test_entities.py @@ -110,6 +110,42 @@ class TestEntitiesConvertToXML: assert etree.tostring(result).decode("utf-8") == converted +class TestEntityAttributes: + def test_comment_ids(self, diasporacomment): + assert diasporacomment.id == "diaspora://handle/comment/guid" + assert not diasporacomment.target_id + + def test_contact_ids(self, diasporacontact): + assert not diasporacontact.id + assert not diasporacontact.target_id + + def test_like_ids(self, diasporalike): + assert diasporalike.id == "diaspora://handle/like/guid" + assert not diasporalike.target_id + + def test_post_ids(self, diasporapost): + assert diasporapost.id == "diaspora://handle/status_message/guid" + assert not diasporapost.target_id + + def test_profile_ids(self, diasporaprofile): + assert diasporaprofile.id == "diaspora://[email protected]/profile/" + assert not diasporaprofile.target_id + + def test_request_ids(self, diasporarequest): + assert not diasporarequest.id + assert not 
diasporarequest.target_id + + def test_reshare_ids(self, diasporareshare): + assert diasporareshare.id == "diaspora://%s/reshare/%s" % (diasporareshare.handle, diasporareshare.guid) + assert diasporareshare.target_id == "diaspora://%s/status_message/%s" % ( + diasporareshare.target_handle, diasporareshare.target_guid + ) + + def test_retraction_ids(self, diasporaretraction): + assert not diasporaretraction.id + assert not diasporaretraction.target_id + + class TestDiasporaProfileFillExtraAttributes: def test_raises_if_no_handle(self): attrs = {"foo": "bar"} diff --git a/federation/tests/fixtures/diaspora.py b/federation/tests/fixtures/diaspora.py new file mode 100644 index 0000000..881a158 --- /dev/null +++ b/federation/tests/fixtures/diaspora.py @@ -0,0 +1,68 @@ +import pytest + +from federation.entities.diaspora.entities import ( + DiasporaPost, DiasporaComment, DiasporaLike, DiasporaRequest, DiasporaProfile, DiasporaRetraction, + DiasporaContact, DiasporaReshare, +) +from federation.tests.factories.entities import ShareFactory +from federation.tests.fixtures.payloads import DIASPORA_PUBLIC_PAYLOAD + +__all__ = ("diasporacomment", "diasporacontact", "diasporalike", "diasporapost", "diasporaprofile", + "diasporareshare", "diasporarequest", "diasporaretraction", "diaspora_public_payload") + + [email protected] +def diaspora_public_payload(): + return DIASPORA_PUBLIC_PAYLOAD + + [email protected] +def diasporacomment(): + return DiasporaComment( + raw_content="raw_content", guid="guid", target_guid="target_guid", handle="handle", + signature="signature" + ) + + [email protected] +def diasporacontact(): + return DiasporaContact(handle="[email protected]", target_handle="[email protected]", following=True) + + [email protected] +def diasporalike(): + return DiasporaLike(guid="guid", target_guid="target_guid", handle="handle", signature="signature") + + [email protected] +def diasporapost(): + return DiasporaPost( + raw_content="raw_content", guid="guid", 
handle="handle", public=True, + provider_display_name="Socialhome" + ) + + [email protected] +def diasporaprofile(): + return DiasporaProfile( + handle="[email protected]", raw_content="foobar", name="Bob Bobertson", public=True, + tag_list=["socialfederation", "federation"], image_urls={ + "large": "urllarge", "medium": "urlmedium", "small": "urlsmall" + } + ) + + [email protected] +def diasporareshare(): + base_entity = ShareFactory() + return DiasporaReshare.from_base(base_entity) + + [email protected] +def diasporarequest(): + return DiasporaRequest(handle="[email protected]", target_handle="[email protected]", relationship="following") + + [email protected] +def diasporaretraction(): + return DiasporaRetraction(handle="[email protected]", target_guid="x" * 16, entity_type="Post") diff --git a/federation/tests/fixtures/keys.py b/federation/tests/fixtures/keys.py index ad699b6..19291f8 100644 --- a/federation/tests/fixtures/keys.py +++ b/federation/tests/fixtures/keys.py @@ -1,6 +1,5 @@ from Crypto.PublicKey import RSA - PRIVATE_KEY = "-----BEGIN RSA PRIVATE KEY-----\n" \ "MIIEogIBAAKCAQEAiY2JBgMV90ULt0btku198l6wGuzn3xCcHs+eBZHL2C+XWRA3\n" \ "BVDThSBj19dKXehfDphQ5u/Omfm76ImajEPHGBiYtZT7AgcO15zvm+JCpbREbdOV\n" \ @@ -29,6 +28,44 @@ PRIVATE_KEY = "-----BEGIN RSA PRIVATE KEY-----\n" \ "w6Y5FnjFw022w+M3exyH6ZtxcmG6buDbp2F/SPD/FnYy5IFCDig=\n" \ "-----END RSA PRIVATE KEY-----" +# Not related to above private key +PUBKEY = "-----BEGIN PUBLIC KEY-----\nMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAuCfU1G5X+3O6vPdSz6QY\nSFbgdbv3KPv" \ + "xHi8tRmlyOLdLt5i1eqsy2WCW1iYNijiCL7OfbrvymBQxe3GA9S64\nVuavwzQ8nO7nzpNMqxY5tBXsBM1lECCHDOvm5dzINXWT9Sg7P1" \ + "8iIxE/2wQEgMUL\nAeVbJtAriXM4zydL7c91agFMJu1aHp0lxzoH8I13xzUetGMutR1tbcfWvoQvPAoU\n89uAz5j/DFMhWrkVEKGeWt1" \ + "YtHMmJqpYqR6961GDlwRuUsOBsLgLLVohzlBsTBSn\n3580o2E6G3DEaX0Az9WB9ylhNeV/L/PP3c5htpEyoPZSy1pgtut6TRYQwC8wns" \ + "qO\nbVIbFBkrKoaRDyVCnpMuKdDNLZqOOfhzas+SWRAby6D8VsXpPi/DpeS9XkX0o/uH\nJ9N49GuYMSUGC8gKtaddD13pUqS/9rpSvLD" \ 
+ "rrDQe5Lhuyusgd28wgEAPCTmM3pEt\nQnlxEeEmFMIn3OBLbEDw5TFE7iED0z7a4dAkqqz8KCGEt12e1Kz7ujuOVMxJxzk6\nNtwt40Sq" \ + "EOPcdsGHAA+hqzJnXUihXfmtmFkropaCxM2f+Ha0bOQdDDui5crcV3sX\njShmcqN6YqFzmoPK0XM9P1qC+lfL2Mz6bHC5p9M8/FtcM46" \ + "hCj1TF/tl8zaZxtHP\nOrMuFJy4j4yAsyVy3ddO69ECAwEAAQ==\n-----END PUBLIC KEY-----\n" + +SIGNATURE = "A/vVRxM3V1ceEH1JrnPOaIZGM3gMjw/fnT9TgUh3poI4q9eH95AIoig+3eTA8XFuGvuo0tivxci4e0NJ1VLVkl/aqp8rvBNrRI1RQk" \ + "n2WVF6zk15Gq6KSia/wyzyiJHGxNGM8oFY4qPfNp6K+8ydUti22J11tVBEvQn+7FPAoloF2Xz1waK48ZZCFs8Rxzj+4jlz1PmuXCnT" \ + "j7v7GYS1Rb6sdFz4nBSuVk5X8tGOSXIRYxPgmtsDRMRrvDeEK+v3OY6VnT8dLTckS0qCwTRUULub1CGwkz/2mReZk/M1W4EbUnugF5" \ + "ptslmFqYDYJZM8PA/g89EKVpkx2gaFbsC4KXocWnxHNiue18rrFQ5hMnDuDRiRybLnQkxXbE/HDuLdnognt2S5wRshPoZmhe95v3qq" \ + "/5nH/GX1D7VmxEEIG9fX+XX+Vh9kzO9bLbwoJZwm50zXxCvrLlye/2JU5Vd2Hbm4aMuAyRAZiLS/EQcBlsts4DaFu4txe60HbXSh6n" \ + "qNofGkusuzZnCd0VObOpXizrI8xNQzZpjJEB5QqE2gbCC2YZNdOS0eBGXw42dAXa/QV3jZXGES7DdQlqPqqT3YjcMFLiRrWQR8cl4h" \ + "JIBRpV5piGyLmMMKYrWu7hQSrdRAEL3K6mNZZU6/yoG879LjtQbVwaFGPeT29B4zBE97FIo=" + +SIGNATURE2 = "Xla/AlirMihx72hehGMgpKILRUA2ZkEhFgVc65sl80iN+F62yQdSikGyUQVL+LaGNUgmzgK0zEahamfaMFep/9HE2FWuXlTCM+ZXx" \ + "OhGWUnjkGW9vi41/Turm7ALzaJoFm1f3Iv4nh1sRD1jySzlZvYwrq4LwmgZ8r0M+Q6xUSIIJfgS8Zjmp43strKo28vKT+DmUKu9Fg" \ + "jZWjW3S8WPPJFO0UqA0b1UQspmNLZOVxsNpa0OCM1pofJvT09n6xG+byV30Bed27Kw+D3fzfYq5xvohyeCyliTq8LHnOykecki3Y2" \ + "Pvl1qsxxBehlwc/WH8yIUiwC2Du6zY61tN3LGgMAoIFl40Roo1z/I7YfOy4ZCukOGqqyiLdjoXxIVQqqsPtKsrVXS+A9OQ+sVESgw" \ + "f8jeEIw/KXLVB/aEyrZJXQR1pBfqkOTCSnAfZVBSjJyxhanS/8iGmnRV5zz3auYMLR9aA8QHjV/VZOj0Bxhuba9VIzJlY9XoUt5Vs" \ + "h3uILJM3uVJzSjlZV+Jw3O+NdQFnZyh7m1+eJUMQJ8i0Sr3sMLsdb9me/I0HueXCa5eBHAoTtAyQgS4uN4NMhvpqrB/lQCx7pqnkt" \ + "xiCO/bUEZONQjWrvJT+EfD+I0UMFtPFiGDzJ0yi0Ah7LxSTGEGPFZHH5RgsJA8lJwGMCUtc9Cpy8A=" + +SIGNATURE3 = "hVdLwsWXe6yVy88m9H1903+Bj/DjSGsYL+ZIpEz+G6u/aVx6QfsvnWHzasjqN8SU+brHfL0c8KrapWcACO+jyCuXlHMZb9zKmJkHR" \ + 
"FSOiprCJ3tqNpv/4MIa9CXu0YDqnLHBSyxS01luKw3EqgpWPQdYcqDpOkjjTOq45dQC0PGHA/DXjP7LBptV9AwW200LIcL5Li8tDU" \ + "a8VSQybspDDfDpXU3+Xl5tJIBVS4ercPczp5B39Cwne4q2gyj/Y5RdIoX5RMqmFhfucw1he38T1oRC9AHTJqj4CBcDt7gc6jPHuzk" \ + "N7u1eUf0IK3+KTDKsCkkoHcGaoxT+NeWcS8Ki1A==" + +XML = "<comment><guid>0dd40d800db1013514416c626dd55703</guid><parent_guid>69ab2b83-aa69-4456-ad0a-dd669" \ + "7f54714</parent_guid><text>Woop Woop</text><diaspora_handle>[email protected]</diaspora_handle></comment>" + +XML2 = "<comment><guid>d728fe501584013514526c626dd55703</guid><parent_guid>d641bd35-8142-414e-a12d-f956cc2c1bb9" \ + "</parent_guid><text>What about the mystical problem with &#x1F44D; (pt2 with more logging)</text>" \ + "<diaspora_handle>[email protected]</diaspora_handle></comment>" + def get_dummy_private_key(): return RSA.importKey(PRIVATE_KEY) diff --git a/federation/tests/protocols/diaspora/test_magic_envelope.py b/federation/tests/protocols/diaspora/test_magic_envelope.py index 952ec2e..8d44ba9 100644 --- a/federation/tests/protocols/diaspora/test_magic_envelope.py +++ b/federation/tests/protocols/diaspora/test_magic_envelope.py @@ -1,21 +1,16 @@ -from lxml import etree +from unittest.mock import patch, Mock -from Crypto import Random -from Crypto.PublicKey import RSA +import pytest +from lxml import etree from lxml.etree import _Element +from federation.exceptions import SignatureVerificationError from federation.protocols.diaspora.magic_envelope import MagicEnvelope -from federation.tests.fixtures.keys import get_dummy_private_key +from federation.tests.fixtures.keys import get_dummy_private_key, PUBKEY from federation.tests.fixtures.payloads import DIASPORA_PUBLIC_PAYLOAD -class TestMagicEnvelope(): - @staticmethod - def generate_rsa_private_key(): - """Generate a new RSA private key.""" - rand = Random.new().read - return RSA.generate(2048, rand) - +class TestMagicEnvelope: def test_build(self): env = MagicEnvelope( message="<status_message><foo>bar</foo></status_message>", @@ 
-40,11 +35,74 @@ class TestMagicEnvelope(): env = MagicEnvelope( message="<status_message><foo>bar</foo></status_message>", private_key="key", - author_handle="[email protected]" + author_handle="[email protected]", ) payload = env.create_payload() assert payload == "PHN0YXR1c19tZXNzYWdlPjxmb28-YmFyPC9mb28-PC9zdGF0dXNfbWVzc2FnZT4=" + def test_extract_payload(self, diaspora_public_payload): + env = MagicEnvelope() + env.payload = diaspora_public_payload + assert not env.doc + assert not env.author_handle + assert not env.message + env.extract_payload() + assert isinstance(env.doc, _Element) + assert env.author_handle == "[email protected]" + assert env.message == b"<status_message><foo>bar</foo></status_message>" + + @patch("federation.protocols.diaspora.magic_envelope.fetch_public_key", autospec=True) + def test_fetch_public_key__calls_sender_key_fetcher(self, mock_fetch): + mock_fetcher = Mock(return_value="public key") + env = MagicEnvelope(author_handle="spam@eggs", sender_key_fetcher=mock_fetcher) + env.fetch_public_key() + mock_fetcher.assert_called_once_with("spam@eggs") + assert not mock_fetch.called + + @patch("federation.protocols.diaspora.magic_envelope.fetch_public_key", autospec=True) + def test_fetch_public_key__calls_fetch_public_key(self, mock_fetch): + env = MagicEnvelope(author_handle="spam@eggs") + env.fetch_public_key() + mock_fetch.assert_called_once_with("spam@eggs") + + def test_message_from_doc(self, diaspora_public_payload): + env = MagicEnvelope(payload=diaspora_public_payload) + assert env.message_from_doc() == env.message + + def test_payload_extracted_on_init(self, diaspora_public_payload): + env = MagicEnvelope(payload=diaspora_public_payload) + assert isinstance(env.doc, _Element) + assert env.author_handle == "[email protected]" + assert env.message == b"<status_message><foo>bar</foo></status_message>" + + def test_verify(self, private_key, public_key): + me = MagicEnvelope( + message="<status_message><foo>bar</foo></status_message>", 
+ private_key=private_key, + author_handle="[email protected]" + ) + me.build() + output = me.render() + + MagicEnvelope(payload=output, public_key=public_key, verify=True) + + with pytest.raises(SignatureVerificationError): + MagicEnvelope(payload=output, public_key=PUBKEY, verify=True) + + def test_verify__calls_fetch_public_key(self, diaspora_public_payload): + me = MagicEnvelope(payload=diaspora_public_payload) + with pytest.raises(TypeError): + with patch.object(me, "fetch_public_key") as mock_fetch: + me.verify() + mock_fetch.assert_called_once_with() + + @patch("federation.protocols.diaspora.magic_envelope.MagicEnvelope.verify") + def test_verify_on_init(self, mock_verify, diaspora_public_payload): + MagicEnvelope(payload=diaspora_public_payload) + assert not mock_verify.called + MagicEnvelope(payload=diaspora_public_payload, verify=True) + assert mock_verify.called + def test_build_signature(self): env = MagicEnvelope( message="<status_message><foo>bar</foo></status_message>", diff --git a/federation/tests/protocols/diaspora/test_protocol.py b/federation/tests/protocols/diaspora/test_protocol.py index 66df93b..199d8b7 100644 --- a/federation/tests/protocols/diaspora/test_protocol.py +++ b/federation/tests/protocols/diaspora/test_protocol.py @@ -5,16 +5,16 @@ from xml.etree.ElementTree import ElementTree from lxml import etree import pytest -from federation.exceptions import EncryptedMessageError, NoSenderKeyFoundError +from federation.exceptions import EncryptedMessageError, NoSenderKeyFoundError, SignatureVerificationError from federation.protocols.diaspora.protocol import Protocol, identify_payload +from federation.tests.fixtures.keys import PUBKEY from federation.tests.fixtures.payloads import ( - ENCRYPTED_LEGACY_DIASPORA_PAYLOAD, UNENCRYPTED_LEGACY_DIASPORA_PAYLOAD, - DIASPORA_PUBLIC_PAYLOAD, + ENCRYPTED_LEGACY_DIASPORA_PAYLOAD, UNENCRYPTED_LEGACY_DIASPORA_PAYLOAD, DIASPORA_PUBLIC_PAYLOAD, DIASPORA_ENCRYPTED_PAYLOAD, ) -class MockUser(): +class 
MockUser: private_key = "foobar" def __init__(self, nokey=False): @@ -30,7 +30,7 @@ def mock_not_found_get_contact_key(contact): return None -class DiasporaTestBase(): +class DiasporaTestBase: def init_protocol(self): return Protocol() @@ -99,11 +99,37 @@ class TestDiasporaProtocol(DiasporaTestBase): with pytest.raises(EncryptedMessageError): protocol.receive(ENCRYPTED_LEGACY_DIASPORA_PAYLOAD, user) - def test_receive_raises_if_sender_key_cannot_be_found(self): + @patch("federation.protocols.diaspora.protocol.fetch_public_key", autospec=True) + def test_receive_raises_if_sender_key_cannot_be_found(self, mock_fetch): protocol = self.init_protocol() user = self.get_mock_user() with pytest.raises(NoSenderKeyFoundError): protocol.receive(UNENCRYPTED_LEGACY_DIASPORA_PAYLOAD, user, mock_not_found_get_contact_key) + assert not mock_fetch.called + + @patch("federation.protocols.diaspora.protocol.fetch_public_key", autospec=True, return_value=None) + def test_receive_calls_fetch_public_key_if_key_fetcher_not_given(self, mock_fetch): + protocol = self.init_protocol() + user = self.get_mock_user() + with pytest.raises(NoSenderKeyFoundError): + protocol.receive(UNENCRYPTED_LEGACY_DIASPORA_PAYLOAD, user) + mock_fetch.assert_called_once_with("[email protected]") + + @patch("federation.protocols.diaspora.protocol.MagicEnvelope", autospec=True) + @patch("federation.protocols.diaspora.protocol.fetch_public_key", autospec=True, return_value="key") + def test_receive_creates_and_verifies_magic_envelope_instance(self, mock_fetch, mock_env): + protocol = self.init_protocol() + user = self.get_mock_user() + protocol.receive(UNENCRYPTED_LEGACY_DIASPORA_PAYLOAD, user) + mock_env.assert_called_once_with(doc=protocol.doc, public_key="key", verify=True) + + @patch("federation.protocols.diaspora.protocol.fetch_public_key", autospec=True) + def test_receive_raises_on_signature_verification_failure(self, mock_fetch): + mock_fetch.return_value = PUBKEY + protocol = self.init_protocol() + user = 
self.get_mock_user() + with pytest.raises(SignatureVerificationError): + protocol.receive(DIASPORA_PUBLIC_PAYLOAD, user) def test_get_message_content(self): protocol = self.init_protocol() diff --git a/federation/tests/protocols/diaspora/test_signatures.py b/federation/tests/protocols/diaspora/test_signatures.py index 062e3c8..a6b1b56 100644 --- a/federation/tests/protocols/diaspora/test_signatures.py +++ b/federation/tests/protocols/diaspora/test_signatures.py @@ -1,44 +1,7 @@ from lxml import etree -from federation.protocols.diaspora.signatures import verify_relayable_signature, create_relayable_signature -from federation.tests.fixtures.keys import get_dummy_private_key - -XML = "<comment><guid>0dd40d800db1013514416c626dd55703</guid><parent_guid>69ab2b83-aa69-4456-ad0a-dd669" \ - "7f54714</parent_guid><text>Woop Woop</text><diaspora_handle>[email protected]</diaspora_handle></comment>" - -XML2 = "<comment><guid>d728fe501584013514526c626dd55703</guid><parent_guid>d641bd35-8142-414e-a12d-f956cc2c1bb9" \ - "</parent_guid><text>What about the mystical problem with &#x1F44D; (pt2 with more logging)</text>" \ - "<diaspora_handle>[email protected]</diaspora_handle></comment>" - -SIGNATURE = "A/vVRxM3V1ceEH1JrnPOaIZGM3gMjw/fnT9TgUh3poI4q9eH95AIoig+3eTA8XFuGvuo0tivxci4e0NJ1VLVkl/aqp8rvBNrRI1RQk" \ - "n2WVF6zk15Gq6KSia/wyzyiJHGxNGM8oFY4qPfNp6K+8ydUti22J11tVBEvQn+7FPAoloF2Xz1waK48ZZCFs8Rxzj+4jlz1PmuXCnT" \ - "j7v7GYS1Rb6sdFz4nBSuVk5X8tGOSXIRYxPgmtsDRMRrvDeEK+v3OY6VnT8dLTckS0qCwTRUULub1CGwkz/2mReZk/M1W4EbUnugF5" \ - "ptslmFqYDYJZM8PA/g89EKVpkx2gaFbsC4KXocWnxHNiue18rrFQ5hMnDuDRiRybLnQkxXbE/HDuLdnognt2S5wRshPoZmhe95v3qq" \ - "/5nH/GX1D7VmxEEIG9fX+XX+Vh9kzO9bLbwoJZwm50zXxCvrLlye/2JU5Vd2Hbm4aMuAyRAZiLS/EQcBlsts4DaFu4txe60HbXSh6n" \ - "qNofGkusuzZnCd0VObOpXizrI8xNQzZpjJEB5QqE2gbCC2YZNdOS0eBGXw42dAXa/QV3jZXGES7DdQlqPqqT3YjcMFLiRrWQR8cl4h" \ - "JIBRpV5piGyLmMMKYrWu7hQSrdRAEL3K6mNZZU6/yoG879LjtQbVwaFGPeT29B4zBE97FIo=" - -SIGNATURE2 = 
"Xla/AlirMihx72hehGMgpKILRUA2ZkEhFgVc65sl80iN+F62yQdSikGyUQVL+LaGNUgmzgK0zEahamfaMFep/9HE2FWuXlTCM+ZXx" \ - "OhGWUnjkGW9vi41/Turm7ALzaJoFm1f3Iv4nh1sRD1jySzlZvYwrq4LwmgZ8r0M+Q6xUSIIJfgS8Zjmp43strKo28vKT+DmUKu9Fg" \ - "jZWjW3S8WPPJFO0UqA0b1UQspmNLZOVxsNpa0OCM1pofJvT09n6xG+byV30Bed27Kw+D3fzfYq5xvohyeCyliTq8LHnOykecki3Y2" \ - "Pvl1qsxxBehlwc/WH8yIUiwC2Du6zY61tN3LGgMAoIFl40Roo1z/I7YfOy4ZCukOGqqyiLdjoXxIVQqqsPtKsrVXS+A9OQ+sVESgw" \ - "f8jeEIw/KXLVB/aEyrZJXQR1pBfqkOTCSnAfZVBSjJyxhanS/8iGmnRV5zz3auYMLR9aA8QHjV/VZOj0Bxhuba9VIzJlY9XoUt5Vs" \ - "h3uILJM3uVJzSjlZV+Jw3O+NdQFnZyh7m1+eJUMQJ8i0Sr3sMLsdb9me/I0HueXCa5eBHAoTtAyQgS4uN4NMhvpqrB/lQCx7pqnkt" \ - "xiCO/bUEZONQjWrvJT+EfD+I0UMFtPFiGDzJ0yi0Ah7LxSTGEGPFZHH5RgsJA8lJwGMCUtc9Cpy8A=" - -SIGNATURE3 = "hVdLwsWXe6yVy88m9H1903+Bj/DjSGsYL+ZIpEz+G6u/aVx6QfsvnWHzasjqN8SU+brHfL0c8KrapWcACO+jyCuXlHMZb9zKmJkHR" \ - "FSOiprCJ3tqNpv/4MIa9CXu0YDqnLHBSyxS01luKw3EqgpWPQdYcqDpOkjjTOq45dQC0PGHA/DXjP7LBptV9AwW200LIcL5Li8tDU" \ - "a8VSQybspDDfDpXU3+Xl5tJIBVS4ercPczp5B39Cwne4q2gyj/Y5RdIoX5RMqmFhfucw1he38T1oRC9AHTJqj4CBcDt7gc6jPHuzk" \ - "N7u1eUf0IK3+KTDKsCkkoHcGaoxT+NeWcS8Ki1A==" - -PUBKEY = "-----BEGIN PUBLIC KEY-----\nMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAuCfU1G5X+3O6vPdSz6QY\nSFbgdbv3KPv" \ - "xHi8tRmlyOLdLt5i1eqsy2WCW1iYNijiCL7OfbrvymBQxe3GA9S64\nVuavwzQ8nO7nzpNMqxY5tBXsBM1lECCHDOvm5dzINXWT9Sg7P1" \ - "8iIxE/2wQEgMUL\nAeVbJtAriXM4zydL7c91agFMJu1aHp0lxzoH8I13xzUetGMutR1tbcfWvoQvPAoU\n89uAz5j/DFMhWrkVEKGeWt1" \ - "YtHMmJqpYqR6961GDlwRuUsOBsLgLLVohzlBsTBSn\n3580o2E6G3DEaX0Az9WB9ylhNeV/L/PP3c5htpEyoPZSy1pgtut6TRYQwC8wns" \ - "qO\nbVIbFBkrKoaRDyVCnpMuKdDNLZqOOfhzas+SWRAby6D8VsXpPi/DpeS9XkX0o/uH\nJ9N49GuYMSUGC8gKtaddD13pUqS/9rpSvLD" \ - "rrDQe5Lhuyusgd28wgEAPCTmM3pEt\nQnlxEeEmFMIn3OBLbEDw5TFE7iED0z7a4dAkqqz8KCGEt12e1Kz7ujuOVMxJxzk6\nNtwt40Sq" \ - "EOPcdsGHAA+hqzJnXUihXfmtmFkropaCxM2f+Ha0bOQdDDui5crcV3sX\njShmcqN6YqFzmoPK0XM9P1qC+lfL2Mz6bHC5p9M8/FtcM46" \ - "hCj1TF/tl8zaZxtHP\nOrMuFJy4j4yAsyVy3ddO69ECAwEAAQ==\n-----END PUBLIC KEY-----\n" +from 
federation.protocols.diaspora.signatures import create_relayable_signature, verify_relayable_signature +from federation.tests.fixtures.keys import PUBKEY, SIGNATURE, SIGNATURE2, SIGNATURE3, XML, XML2, get_dummy_private_key def test_verify_relayable_signature(): diff --git a/federation/tests/test_fetchers.py b/federation/tests/test_fetchers.py index 9a2157a..c886e38 100644 --- a/federation/tests/test_fetchers.py +++ b/federation/tests/test_fetchers.py @@ -1,16 +1,23 @@ -# -*- coding: utf-8 -*- from unittest.mock import patch, Mock -from federation.fetchers import retrieve_remote_profile +from federation.fetchers import retrieve_remote_profile, retrieve_remote_content -class TestRetrieveRemoteProfile(object): +class TestRetrieveRemoteContent: @patch("federation.fetchers.importlib.import_module") - def test_calls_diaspora_retrieve_and_parse_profile(self, mock_import): - class MockRetrieve(Mock): - def retrieve_and_parse_profile(self, handle): - return "called with %s" % handle + def test_calls_diaspora_retrieve_and_parse_content(self, mock_import): + mock_retrieve = Mock() + mock_import.return_value = mock_retrieve + retrieve_remote_content("diaspora://[email protected]/status_message/1234", sender_key_fetcher=sum) + mock_retrieve.retrieve_and_parse_content.assert_called_once_with( + "diaspora://[email protected]/status_message/1234", sender_key_fetcher=sum, + ) + - mock_retrieve = MockRetrieve() +class TestRetrieveRemoteProfile: + @patch("federation.fetchers.importlib.import_module") + def test_calls_diaspora_retrieve_and_parse_profile(self, mock_import): + mock_retrieve = Mock() mock_import.return_value = mock_retrieve - assert retrieve_remote_profile("foo@bar") == "called with foo@bar" + retrieve_remote_profile("foo@bar") + mock_retrieve.retrieve_and_parse_profile.assert_called_once_with("foo@bar") diff --git a/federation/tests/utils/test_diaspora.py b/federation/tests/utils/test_diaspora.py index 30d15ff..4916c14 100644 --- 
a/federation/tests/utils/test_diaspora.py +++ b/federation/tests/utils/test_diaspora.py @@ -1,17 +1,41 @@ -# -*- coding: utf-8 -*- import xml from unittest.mock import patch, Mock from urllib.parse import quote +import pytest from lxml import html from federation.entities.base import Profile -from federation.hostmeta.generators import DiasporaWebFinger, DiasporaHostMeta, DiasporaHCard, generate_hcard -from federation.utils.diaspora import retrieve_diaspora_hcard, retrieve_diaspora_webfinger, retrieve_diaspora_host_meta, \ - _get_element_text_or_none, _get_element_attr_or_none, parse_profile_from_hcard, retrieve_and_parse_profile +from federation.hostmeta.generators import DiasporaWebFinger, DiasporaHostMeta, generate_hcard +from federation.tests.fixtures.payloads import DIASPORA_PUBLIC_PAYLOAD +from federation.utils.diaspora import ( + retrieve_diaspora_hcard, retrieve_diaspora_webfinger, retrieve_diaspora_host_meta, _get_element_text_or_none, + _get_element_attr_or_none, parse_profile_from_hcard, retrieve_and_parse_profile, retrieve_and_parse_content, + get_fetch_content_endpoint, fetch_public_key, parse_diaspora_uri, +) -class TestRetrieveDiasporaHCard(object): +@patch("federation.utils.diaspora.retrieve_and_parse_profile", autospec=True) +def test_fetch_public_key(mock_retrieve): + mock_retrieve.return_value = Mock(public_key="public key") + result = fetch_public_key("spam@eggs") + mock_retrieve.assert_called_once_with("spam@eggs") + assert result == "public key" + + +def test_get_fetch_content_endpoint(): + assert get_fetch_content_endpoint("example.com", "status_message", "1234") == \ + "https://example.com/fetch/status_message/1234" + + +def test_parse_diaspora_uri(): + assert parse_diaspora_uri("diaspora://[email protected]/spam/eggs") == ("[email protected]", "spam", "eggs") + assert parse_diaspora_uri("diaspora://[email protected]/spam/eggs@spam") == ("[email protected]", "spam", "eggs@spam") + assert not parse_diaspora_uri("https://[email 
protected]/spam/eggs") + assert not parse_diaspora_uri("spam and eggs") + + +class TestRetrieveDiasporaHCard: @patch("federation.utils.diaspora.retrieve_diaspora_webfinger", return_value=None) def test_retrieve_webfinger_is_called(self, mock_retrieve): retrieve_diaspora_hcard("bob@localhost") @@ -40,7 +64,7 @@ class TestRetrieveDiasporaHCard(object): assert document == None -class TestRetrieveDiasporaWebfinger(object): +class TestRetrieveDiasporaWebfinger: @patch("federation.utils.diaspora.retrieve_diaspora_host_meta", return_value=None) def test_retrieve_host_meta_is_called(self, mock_retrieve): retrieve_diaspora_webfinger("bob@localhost") @@ -82,7 +106,7 @@ class TestRetrieveDiasporaWebfinger(object): assert document == None -class TestRetrieveDiasporaHostMeta(object): +class TestRetrieveDiasporaHostMeta: @patch("federation.utils.diaspora.XRD.parse_xrd") @patch("federation.utils.diaspora.fetch_document") def test_fetch_document_is_called(self, mock_fetch, mock_xrd): @@ -100,7 +124,44 @@ class TestRetrieveDiasporaHostMeta(object): assert document == None -class TestGetElementTextOrNone(object): +class TestRetrieveAndParseContent: + @patch("federation.utils.diaspora.fetch_document", return_value=(None, 404, None)) + @patch("federation.utils.diaspora.get_fetch_content_endpoint", return_value="https://example.com/fetch/spam/eggs") + def test_calls_fetch_document(self, mock_get, mock_fetch): + retrieve_and_parse_content("diaspora://[email protected]/spam/eggs") + mock_fetch.assert_called_once_with("https://example.com/fetch/spam/eggs") + + @patch("federation.utils.diaspora.fetch_document", return_value=(None, 404, None)) + @patch("federation.utils.diaspora.get_fetch_content_endpoint") + def test_calls_get_fetch_content_endpoint(self, mock_get, mock_fetch): + retrieve_and_parse_content("diaspora://[email protected]/spam/eggs") + mock_get.assert_called_once_with("example.com", "spam", "eggs") + mock_get.reset_mock() + retrieve_and_parse_content("diaspora://[email 
protected]/spam/eggs@spam") + mock_get.assert_called_once_with("example.com", "spam", "eggs@spam") + + @patch("federation.utils.diaspora.fetch_document", return_value=(DIASPORA_PUBLIC_PAYLOAD, 200, None)) + @patch("federation.utils.diaspora.get_fetch_content_endpoint", return_value="https://example.com/fetch/spam/eggs") + @patch("federation.utils.diaspora.handle_receive", return_value=("sender", "protocol", ["entity"])) + def test_calls_handle_receive(self, mock_handle, mock_get, mock_fetch): + entity = retrieve_and_parse_content("diaspora://[email protected]/spam/eggs", sender_key_fetcher=sum) + mock_handle.assert_called_once_with(DIASPORA_PUBLIC_PAYLOAD, sender_key_fetcher=sum) + assert entity == "entity" + + @patch("federation.utils.diaspora.fetch_document", return_value=(None, None, Exception())) + @patch("federation.utils.diaspora.get_fetch_content_endpoint", return_value="https://example.com/fetch/spam/eggs") + def test_raises_on_fetch_error(self, mock_get, mock_fetch): + with pytest.raises(Exception): + retrieve_and_parse_content("diaspora://[email protected]/spam/eggs") + + @patch("federation.utils.diaspora.fetch_document", return_value=(None, 404, None)) + @patch("federation.utils.diaspora.get_fetch_content_endpoint", return_value="https://example.com/fetch/spam/eggs") + def test_returns_on_404(self, mock_get, mock_fetch): + result = retrieve_and_parse_content("diaspora://[email protected]/spam/eggs") + assert not result + + +class TestGetElementTextOrNone: doc = html.fromstring("<foo>bar</foo>") def test_text_returned_on_element(self): @@ -110,7 +171,7 @@ class TestGetElementTextOrNone(object): assert _get_element_text_or_none(self.doc, "bar") == None -class TestGetElementAttrOrNone(object): +class TestGetElementAttrOrNone: doc = html.fromstring("<foo src='baz'>bar</foo>") def test_attr_returned_on_attr(self): @@ -123,7 +184,7 @@ class TestGetElementAttrOrNone(object): assert _get_element_attr_or_none(self.doc, "bar", "href") == None -class 
TestParseProfileFromHCard(object): +class TestParseProfileFromHCard: def test_profile_is_parsed(self): hcard = generate_hcard( "diaspora", @@ -151,7 +212,7 @@ class TestParseProfileFromHCard(object): profile.validate() -class TestRetrieveAndParseProfile(object): +class TestRetrieveAndParseProfile: @patch("federation.utils.diaspora.retrieve_diaspora_hcard", return_value=None) def test_retrieve_diaspora_hcard_is_called(self, mock_retrieve): retrieve_and_parse_profile("foo@bar")
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 3, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 9 }
0.14
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-warnings" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "dev-requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 arrow==1.2.3 attrs==22.2.0 Babel==2.11.0 certifi==2021.5.30 charset-normalizer==2.0.12 codecov==2.1.13 colorama==0.4.5 commonmark==0.9.1 coverage==6.2 cssselect==1.1.0 dirty-validators==0.5.4 docutils==0.18.1 factory-boy==3.2.1 Faker==14.2.1 -e git+https://github.com/jaywink/federation.git@bcc779e006bc0af192db08a1ff8ed245c0fbd7c9#egg=federation freezegun==1.2.2 idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 iniconfig==1.1.1 isodate==0.6.1 Jinja2==3.0.3 jsonschema==3.2.0 livereload==2.6.3 lxml==5.3.1 MarkupSafe==2.0.1 packaging==21.3 pluggy==1.0.0 py==1.11.0 pycrypto==2.6.1 Pygments==2.14.0 pyparsing==3.1.4 pyrsistent==0.18.0 pytest==7.0.1 pytest-cov==4.0.0 pytest-warnings==0.3.1 python-dateutil==2.9.0.post0 python-xrd==0.1 pytz==2025.2 recommonmark==0.7.1 requests==2.27.1 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinx-autobuild==2021.3.14 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 tomli==1.2.3 tornado==6.1 typing_extensions==4.1.1 urllib3==1.26.20 zipp==3.6.0
name: federation channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - arrow==1.2.3 - attrs==22.2.0 - babel==2.11.0 - charset-normalizer==2.0.12 - codecov==2.1.13 - colorama==0.4.5 - commonmark==0.9.1 - coverage==6.2 - cssselect==1.1.0 - dirty-validators==0.5.4 - docutils==0.18.1 - factory-boy==3.2.1 - faker==14.2.1 - freezegun==1.2.2 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - isodate==0.6.1 - jinja2==3.0.3 - jsonschema==3.2.0 - livereload==2.6.3 - lxml==5.3.1 - markupsafe==2.0.1 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pycrypto==2.6.1 - pygments==2.14.0 - pyparsing==3.1.4 - pyrsistent==0.18.0 - pytest==7.0.1 - pytest-cov==4.0.0 - pytest-warnings==0.3.1 - python-dateutil==2.9.0.post0 - python-xrd==0.1 - pytz==2025.2 - recommonmark==0.7.1 - requests==2.27.1 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinx-autobuild==2021.3.14 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - tomli==1.2.3 - tornado==6.1 - typing-extensions==4.1.1 - urllib3==1.26.20 - zipp==3.6.0 prefix: /opt/conda/envs/federation
[ "federation/tests/entities/diaspora/test_entities.py::TestEntitiesConvertToXML::test_post_to_xml", "federation/tests/entities/diaspora/test_entities.py::TestEntitiesConvertToXML::test_comment_to_xml", "federation/tests/entities/diaspora/test_entities.py::TestEntitiesConvertToXML::test_like_to_xml", "federation/tests/entities/diaspora/test_entities.py::TestEntitiesConvertToXML::test_request_to_xml", "federation/tests/entities/diaspora/test_entities.py::TestEntitiesConvertToXML::test_profile_to_xml", "federation/tests/entities/diaspora/test_entities.py::TestEntitiesConvertToXML::test_retraction_to_xml", "federation/tests/entities/diaspora/test_entities.py::TestEntitiesConvertToXML::test_contact_to_xml", "federation/tests/entities/diaspora/test_entities.py::TestEntitiesConvertToXML::test_reshare_to_xml", "federation/tests/entities/diaspora/test_entities.py::TestEntityAttributes::test_comment_ids", "federation/tests/entities/diaspora/test_entities.py::TestEntityAttributes::test_contact_ids", "federation/tests/entities/diaspora/test_entities.py::TestEntityAttributes::test_like_ids", "federation/tests/entities/diaspora/test_entities.py::TestEntityAttributes::test_post_ids", "federation/tests/entities/diaspora/test_entities.py::TestEntityAttributes::test_profile_ids", "federation/tests/entities/diaspora/test_entities.py::TestEntityAttributes::test_request_ids", "federation/tests/entities/diaspora/test_entities.py::TestEntityAttributes::test_reshare_ids", "federation/tests/entities/diaspora/test_entities.py::TestEntityAttributes::test_retraction_ids", "federation/tests/entities/diaspora/test_entities.py::TestDiasporaProfileFillExtraAttributes::test_raises_if_no_handle", "federation/tests/entities/diaspora/test_entities.py::TestDiasporaProfileFillExtraAttributes::test_calls_retrieve_and_parse_profile", "federation/tests/entities/diaspora/test_entities.py::TestDiasporaRetractionEntityConverters::test_entity_type_from_remote", 
"federation/tests/entities/diaspora/test_entities.py::TestDiasporaRetractionEntityConverters::test_entity_type_to_remote", "federation/tests/entities/diaspora/test_entities.py::TestDiasporaRelayableMixin::test_signing_comment_works", "federation/tests/entities/diaspora/test_entities.py::TestDiasporaRelayableMixin::test_signing_like_works", "federation/tests/entities/diaspora/test_entities.py::TestDiasporaRelayableMixin::test_sign_with_parent", "federation/tests/entities/diaspora/test_entities.py::TestDiasporaRelayableEntityValidate::test_raises_if_no_sender_key", "federation/tests/entities/diaspora/test_entities.py::TestDiasporaRelayableEntityValidate::test_calls_verify_signature", "federation/tests/protocols/diaspora/test_magic_envelope.py::TestMagicEnvelope::test_build", "federation/tests/protocols/diaspora/test_magic_envelope.py::TestMagicEnvelope::test_create_payload_wrapped", "federation/tests/protocols/diaspora/test_magic_envelope.py::TestMagicEnvelope::test_create_payload", "federation/tests/protocols/diaspora/test_magic_envelope.py::TestMagicEnvelope::test_extract_payload", "federation/tests/protocols/diaspora/test_magic_envelope.py::TestMagicEnvelope::test_fetch_public_key__calls_sender_key_fetcher", "federation/tests/protocols/diaspora/test_magic_envelope.py::TestMagicEnvelope::test_fetch_public_key__calls_fetch_public_key", "federation/tests/protocols/diaspora/test_magic_envelope.py::TestMagicEnvelope::test_message_from_doc", "federation/tests/protocols/diaspora/test_magic_envelope.py::TestMagicEnvelope::test_payload_extracted_on_init", "federation/tests/protocols/diaspora/test_magic_envelope.py::TestMagicEnvelope::test_verify", "federation/tests/protocols/diaspora/test_magic_envelope.py::TestMagicEnvelope::test_verify__calls_fetch_public_key", "federation/tests/protocols/diaspora/test_magic_envelope.py::TestMagicEnvelope::test_verify_on_init", "federation/tests/protocols/diaspora/test_magic_envelope.py::TestMagicEnvelope::test_build_signature", 
"federation/tests/protocols/diaspora/test_magic_envelope.py::TestMagicEnvelope::test_render", "federation/tests/protocols/diaspora/test_magic_envelope.py::TestMagicEnvelope::test_get_sender", "federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_find_unencrypted_header", "federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_find_encrypted_header", "federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_receive_unencrypted_returns_sender_and_content", "federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_receive_encrypted_returns_sender_and_content", "federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_receive_raises_on_encrypted_message_and_no_user", "federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_receive_raises_on_encrypted_message_and_no_user_key", "federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_receive_raises_if_sender_key_cannot_be_found", "federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_receive_calls_fetch_public_key_if_key_fetcher_not_given", "federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_receive_creates_and_verifies_magic_envelope_instance", "federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_receive_raises_on_signature_verification_failure", "federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_get_message_content", "federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_identify_payload_with_legacy_diaspora_payload", "federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_identify_payload_with_diaspora_public_payload", "federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_identify_payload_with_diaspora_encrypted_payload", 
"federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_identify_payload_with_other_payload", "federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_get_sender_legacy_returns_sender_in_header", "federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_get_sender_legacy_returns_sender_in_content", "federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_get_sender_legacy_returns_none_if_no_sender_found", "federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_build_send", "federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_build_send_uses_outbound_doc", "federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_get_json_payload_magic_envelope", "federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_store_magic_envelope_doc_json_payload", "federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_store_magic_envelope_doc_xml_payload", "federation/tests/protocols/diaspora/test_signatures.py::test_verify_relayable_signature", "federation/tests/protocols/diaspora/test_signatures.py::test_verify_relayable_signature_with_unicode", "federation/tests/protocols/diaspora/test_signatures.py::test_create_relayable_signature", "federation/tests/test_fetchers.py::TestRetrieveRemoteContent::test_calls_diaspora_retrieve_and_parse_content", "federation/tests/test_fetchers.py::TestRetrieveRemoteProfile::test_calls_diaspora_retrieve_and_parse_profile", "federation/tests/utils/test_diaspora.py::test_fetch_public_key", "federation/tests/utils/test_diaspora.py::test_get_fetch_content_endpoint", "federation/tests/utils/test_diaspora.py::test_parse_diaspora_uri", "federation/tests/utils/test_diaspora.py::TestRetrieveDiasporaHCard::test_retrieve_webfinger_is_called", 
"federation/tests/utils/test_diaspora.py::TestRetrieveDiasporaHCard::test_fetch_document_is_called", "federation/tests/utils/test_diaspora.py::TestRetrieveDiasporaHCard::test_returns_none_on_fetch_document_exception", "federation/tests/utils/test_diaspora.py::TestRetrieveDiasporaWebfinger::test_retrieve_host_meta_is_called", "federation/tests/utils/test_diaspora.py::TestRetrieveDiasporaWebfinger::test_retrieve_fetch_document_is_called", "federation/tests/utils/test_diaspora.py::TestRetrieveDiasporaWebfinger::test_returns_none_on_fetch_document_exception", "federation/tests/utils/test_diaspora.py::TestRetrieveDiasporaWebfinger::test_returns_none_on_xrd_parse_exception", "federation/tests/utils/test_diaspora.py::TestRetrieveDiasporaHostMeta::test_fetch_document_is_called", "federation/tests/utils/test_diaspora.py::TestRetrieveDiasporaHostMeta::test_returns_none_on_fetch_document_exception", "federation/tests/utils/test_diaspora.py::TestRetrieveAndParseContent::test_calls_fetch_document", "federation/tests/utils/test_diaspora.py::TestRetrieveAndParseContent::test_calls_get_fetch_content_endpoint", "federation/tests/utils/test_diaspora.py::TestRetrieveAndParseContent::test_calls_handle_receive", "federation/tests/utils/test_diaspora.py::TestRetrieveAndParseContent::test_raises_on_fetch_error", "federation/tests/utils/test_diaspora.py::TestRetrieveAndParseContent::test_returns_on_404", "federation/tests/utils/test_diaspora.py::TestGetElementTextOrNone::test_text_returned_on_element", "federation/tests/utils/test_diaspora.py::TestGetElementTextOrNone::test_none_returned_on_no_element", "federation/tests/utils/test_diaspora.py::TestGetElementAttrOrNone::test_attr_returned_on_attr", "federation/tests/utils/test_diaspora.py::TestGetElementAttrOrNone::test_none_returned_on_attr", "federation/tests/utils/test_diaspora.py::TestGetElementAttrOrNone::test_none_returned_on_no_element", "federation/tests/utils/test_diaspora.py::TestParseProfileFromHCard::test_profile_is_parsed", 
"federation/tests/utils/test_diaspora.py::TestRetrieveAndParseProfile::test_retrieve_diaspora_hcard_is_called", "federation/tests/utils/test_diaspora.py::TestRetrieveAndParseProfile::test_parse_profile_from_hcard_called", "federation/tests/utils/test_diaspora.py::TestRetrieveAndParseProfile::test_profile_that_doesnt_validate_returns_none", "federation/tests/utils/test_diaspora.py::TestRetrieveAndParseProfile::test_profile_validate_is_called" ]
[]
[]
[]
BSD 3-Clause "New" or "Revised" License
1,793
[ "federation/utils/diaspora.py", "federation/protocols/diaspora/protocol.py", "federation/entities/diaspora/mappers.py", "federation/protocols/diaspora/magic_envelope.py", "CHANGELOG.md", "federation/entities/base.py", "federation/fetchers.py", "federation/entities/diaspora/entities.py", "docs/usage.rst" ]
[ "federation/utils/diaspora.py", "federation/protocols/diaspora/protocol.py", "federation/entities/diaspora/mappers.py", "federation/protocols/diaspora/magic_envelope.py", "CHANGELOG.md", "federation/entities/base.py", "federation/fetchers.py", "federation/entities/diaspora/entities.py", "docs/usage.rst" ]
stitchfix__nodebook-8
b6e1ec614fd39acb740b04e99ee7e97d99122420
2017-10-21 23:04:18
46211e90955f3388a22e2a2132bb895814260f9a
diff --git a/nodebook/nodebookcore.py b/nodebook/nodebookcore.py index 98b5cdc..ddae374 100644 --- a/nodebook/nodebookcore.py +++ b/nodebook/nodebookcore.py @@ -46,6 +46,9 @@ class ReferenceFinder(ast.NodeVisitor): self.locals.add(node.name) self.generic_visit(node) + def visit_arg(self, node): + self.locals.add(node.arg) + def visit_AugAssign(self, node): target = node.target while (type(target) is ast.Subscript): diff --git a/setup.py b/setup.py index 11d6a77..adc7229 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ import sys setup( name='nodebook', - version='0.2.0', + version='0.2.1', author='Kevin Zielnicki', author_email='[email protected]', license='Stitch Fix 2017',
Functions don't work in nodebook in py3 Because of changes to the ast in Python 3, functions no longer are parsed correctly. Eg, from @hacktuarial: ``` def add(a, b): return a + b ``` nodebook throws an error: ``` KeyError: "name 'a' is not defined" ```
stitchfix/nodebook
diff --git a/tests/test_nodebookcore.py b/tests/test_nodebookcore.py index cfd9646..470121c 100644 --- a/tests/test_nodebookcore.py +++ b/tests/test_nodebookcore.py @@ -42,6 +42,16 @@ class TestReferenceFinder(object): assert rf.locals == {'pd', 'y'} assert rf.imports == {'pandas'} + def test_function(self, rf): + code_tree = ast.parse( + "def add(x,y):\n" + " return x+y\n" + ) + rf.visit(code_tree) + assert rf.inputs == set() + assert rf.locals == {'add', 'x', 'y'} + assert rf.imports == set() + class TestNodebook(object): @pytest.fixture() diff --git a/tests/test_pickledict.py b/tests/test_pickledict.py index ef35fdd..90b7088 100644 --- a/tests/test_pickledict.py +++ b/tests/test_pickledict.py @@ -33,6 +33,12 @@ class TestPickleDict(object): df = pd.DataFrame({'a': [0, 1, 2], 'b': ['foo', 'bar', 'baz']}) mydict['test_df'] = df assert mydict['test_df'].equals(df) + + def test_func(self, mydict): + def add(a, b): + return a + b + mydict['test_func'] = add + assert mydict['test_func'](3,5) == 8 def test_immutability(self, mydict): l = [1, 2, 3]
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 2 }
0.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
anyio==4.9.0 argon2-cffi==23.1.0 argon2-cffi-bindings==21.2.0 arrow==1.3.0 asttokens==3.0.0 async-lru==2.0.5 attrs==25.3.0 babel==2.17.0 beautifulsoup4==4.13.3 bleach==6.2.0 certifi==2025.1.31 cffi==1.17.1 charset-normalizer==3.4.1 click==8.1.8 comm==0.2.2 debugpy==1.8.13 decorator==5.2.1 defusedxml==0.7.1 dill==0.3.9 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work executing==2.2.0 fastjsonschema==2.21.1 fqdn==1.5.1 h11==0.14.0 httpcore==1.0.7 httpx==0.28.1 idna==3.10 importlib_metadata==8.6.1 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work ipykernel==6.29.5 ipython==8.18.1 ipywidgets==8.1.5 isoduration==20.11.0 jedi==0.19.2 Jinja2==3.1.6 json5==0.10.0 jsonpointer==3.0.0 jsonschema==4.23.0 jsonschema-specifications==2024.10.1 jupyter==1.1.1 jupyter-console==6.6.3 jupyter-events==0.12.0 jupyter-lsp==2.2.5 jupyter_client==8.6.3 jupyter_core==5.7.2 jupyter_server==2.15.0 jupyter_server_terminals==0.5.3 jupyterlab==4.3.6 jupyterlab_pygments==0.3.0 jupyterlab_server==2.27.3 jupyterlab_widgets==3.0.13 MarkupSafe==3.0.2 matplotlib-inline==0.1.7 mistune==3.1.3 msgpack-python==0.5.6 nbclient==0.10.2 nbconvert==7.16.6 nbformat==5.10.4 nest-asyncio==1.6.0 -e git+https://github.com/stitchfix/nodebook.git@b6e1ec614fd39acb740b04e99ee7e97d99122420#egg=nodebook notebook==7.3.3 notebook_shim==0.2.4 numpy==2.0.2 overrides==7.7.0 packaging @ file:///croot/packaging_1734472117206/work pandas==2.2.3 pandocfilters==1.5.1 parso==0.8.4 pexpect==4.9.0 platformdirs==4.3.7 pluggy @ file:///croot/pluggy_1733169602837/work prometheus_client==0.21.1 prompt_toolkit==3.0.50 psutil==7.0.0 ptyprocess==0.7.0 pure_eval==0.2.3 pycparser==2.22 Pygments==2.19.1 pytest @ file:///croot/pytest_1738938843180/work pytest-runner==6.0.1 python-dateutil==2.9.0.post0 python-json-logger==3.3.0 pytz==2025.2 PyYAML==6.0.2 pyzmq==26.3.0 referencing==0.36.2 requests==2.32.3 rfc3339-validator==0.1.4 rfc3986-validator==0.1.1 rpds-py==0.24.0 Send2Trash==1.8.3 six==1.17.0 
sniffio==1.3.1 soupsieve==2.6 stack-data==0.6.3 terminado==0.18.1 tinycss2==1.4.0 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work tornado==6.4.2 traitlets==5.14.3 types-python-dateutil==2.9.0.20241206 typing_extensions==4.13.0 tzdata==2025.2 uri-template==1.3.0 urllib3==2.3.0 wcwidth==0.2.13 webcolors==24.11.1 webencodings==0.5.1 websocket-client==1.8.0 widgetsnbextension==4.0.13 zipp==3.21.0
name: nodebook channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - anyio==4.9.0 - argon2-cffi==23.1.0 - argon2-cffi-bindings==21.2.0 - arrow==1.3.0 - asttokens==3.0.0 - async-lru==2.0.5 - attrs==25.3.0 - babel==2.17.0 - beautifulsoup4==4.13.3 - bleach==6.2.0 - certifi==2025.1.31 - cffi==1.17.1 - charset-normalizer==3.4.1 - click==8.1.8 - comm==0.2.2 - debugpy==1.8.13 - decorator==5.2.1 - defusedxml==0.7.1 - dill==0.3.9 - executing==2.2.0 - fastjsonschema==2.21.1 - fqdn==1.5.1 - h11==0.14.0 - httpcore==1.0.7 - httpx==0.28.1 - idna==3.10 - importlib-metadata==8.6.1 - ipykernel==6.29.5 - ipython==8.18.1 - ipywidgets==8.1.5 - isoduration==20.11.0 - jedi==0.19.2 - jinja2==3.1.6 - json5==0.10.0 - jsonpointer==3.0.0 - jsonschema==4.23.0 - jsonschema-specifications==2024.10.1 - jupyter==1.1.1 - jupyter-client==8.6.3 - jupyter-console==6.6.3 - jupyter-core==5.7.2 - jupyter-events==0.12.0 - jupyter-lsp==2.2.5 - jupyter-server==2.15.0 - jupyter-server-terminals==0.5.3 - jupyterlab==4.3.6 - jupyterlab-pygments==0.3.0 - jupyterlab-server==2.27.3 - jupyterlab-widgets==3.0.13 - markupsafe==3.0.2 - matplotlib-inline==0.1.7 - mistune==3.1.3 - msgpack-python==0.5.6 - nbclient==0.10.2 - 
nbconvert==7.16.6 - nbformat==5.10.4 - nest-asyncio==1.6.0 - notebook==7.3.3 - notebook-shim==0.2.4 - numpy==2.0.2 - overrides==7.7.0 - pandas==2.2.3 - pandocfilters==1.5.1 - parso==0.8.4 - pexpect==4.9.0 - platformdirs==4.3.7 - prometheus-client==0.21.1 - prompt-toolkit==3.0.50 - psutil==7.0.0 - ptyprocess==0.7.0 - pure-eval==0.2.3 - pycparser==2.22 - pygments==2.19.1 - pytest-runner==6.0.1 - python-dateutil==2.9.0.post0 - python-json-logger==3.3.0 - pytz==2025.2 - pyyaml==6.0.2 - pyzmq==26.3.0 - referencing==0.36.2 - requests==2.32.3 - rfc3339-validator==0.1.4 - rfc3986-validator==0.1.1 - rpds-py==0.24.0 - send2trash==1.8.3 - six==1.17.0 - sniffio==1.3.1 - soupsieve==2.6 - stack-data==0.6.3 - terminado==0.18.1 - tinycss2==1.4.0 - tornado==6.4.2 - traitlets==5.14.3 - types-python-dateutil==2.9.0.20241206 - typing-extensions==4.13.0 - tzdata==2025.2 - uri-template==1.3.0 - urllib3==2.3.0 - wcwidth==0.2.13 - webcolors==24.11.1 - webencodings==0.5.1 - websocket-client==1.8.0 - widgetsnbextension==4.0.13 - zipp==3.21.0 prefix: /opt/conda/envs/nodebook
[ "tests/test_nodebookcore.py::TestReferenceFinder::test_function" ]
[]
[ "tests/test_nodebookcore.py::TestReferenceFinder::test_assign", "tests/test_nodebookcore.py::TestReferenceFinder::test_augassign", "tests/test_nodebookcore.py::TestReferenceFinder::test_import", "tests/test_nodebookcore.py::TestReferenceFinder::test_multiline", "tests/test_nodebookcore.py::TestNodebook::test_single_node", "tests/test_nodebookcore.py::TestNodebook::test_node_chain", "tests/test_pickledict.py::TestPickleDict::test_int[mode_memory]", "tests/test_pickledict.py::TestPickleDict::test_int[mode_disk]", "tests/test_pickledict.py::TestPickleDict::test_string[mode_memory]", "tests/test_pickledict.py::TestPickleDict::test_string[mode_disk]", "tests/test_pickledict.py::TestPickleDict::test_bytes[mode_memory]", "tests/test_pickledict.py::TestPickleDict::test_bytes[mode_disk]", "tests/test_pickledict.py::TestPickleDict::test_df[mode_memory]", "tests/test_pickledict.py::TestPickleDict::test_df[mode_disk]", "tests/test_pickledict.py::TestPickleDict::test_func[mode_memory]", "tests/test_pickledict.py::TestPickleDict::test_func[mode_disk]", "tests/test_pickledict.py::TestPickleDict::test_immutability[mode_memory]", "tests/test_pickledict.py::TestPickleDict::test_immutability[mode_disk]" ]
[]
Apache License 2.0
1,794
[ "nodebook/nodebookcore.py", "setup.py" ]
[ "nodebook/nodebookcore.py", "setup.py" ]
fabfuel__ecs-deploy-43
d5377840427f9cb657417136ecb111bcce1269a2
2017-10-23 05:07:50
0da56c64512f0d312cb3c4585325d7afb8fce46d
diff --git a/ecs_deploy/__init__.py b/ecs_deploy/__init__.py index e5909ef..9aab03e 100644 --- a/ecs_deploy/__init__.py +++ b/ecs_deploy/__init__.py @@ -1,1 +1,1 @@ -VERSION = '1.4.1' +VERSION = '1.4.2' diff --git a/ecs_deploy/ecs.py b/ecs_deploy/ecs.py index 18e0e6e..390925b 100644 --- a/ecs_deploy/ecs.py +++ b/ecs_deploy/ecs.py @@ -140,7 +140,7 @@ class EcsService(dict): class EcsTaskDefinition(object): def __init__(self, containerDefinitions, volumes, family, revision, - status, taskDefinitionArn, requiresAttributes, + status, taskDefinitionArn, requiresAttributes=None, taskRoleArn=None, **kwargs): self.containers = containerDefinitions self.volumes = volumes @@ -148,7 +148,7 @@ class EcsTaskDefinition(object): self.revision = revision self.status = status self.arn = taskDefinitionArn - self.requires_attributes = requiresAttributes + self.requires_attributes = requiresAttributes or {} self.role_arn = taskRoleArn or u'' self.additional_properties = kwargs self._diff = []
__init__() takes at least 8 arguments (7 given) Everytime I do a deploy I get the following response: `__init__() takes at least 8 arguments (7 given)` command ran is: `ecs deploy test-cluster test-service` . Though I've tried multiple combinations of different options flags, but it always returns the error above. Any insight would be great, Thanks.
fabfuel/ecs-deploy
diff --git a/tests/test_ecs.py b/tests/test_ecs.py index b8466b8..b0a8eee 100644 --- a/tests/test_ecs.py +++ b/tests/test_ecs.py @@ -61,7 +61,6 @@ PAYLOAD_TASK_DEFINITION_2 = { u'volumes': deepcopy(TASK_DEFINITION_VOLUMES_2), u'containerDefinitions': deepcopy(TASK_DEFINITION_CONTAINERS_2), u'status': u'active', - u'requiresAttributes': {}, u'unknownProperty': u'lorem-ipsum', }
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 2 }
1.4
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-cov", "pytest-mock", "mock", "requests" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work boto3==1.23.10 botocore==1.26.10 certifi==2021.5.30 charset-normalizer==2.0.12 click==8.0.4 coverage==6.2 -e git+https://github.com/fabfuel/ecs-deploy.git@d5377840427f9cb657417136ecb111bcce1269a2#egg=ecs_deploy future==1.0.0 idna==3.10 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work jmespath==0.10.0 mock==5.2.0 more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 pytest-cov==4.0.0 pytest-mock==3.6.1 python-dateutil==2.9.0.post0 requests==2.27.1 s3transfer==0.5.2 six==1.17.0 toml @ file:///tmp/build/80754af9/toml_1616166611790/work tomli==1.2.3 typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work urllib3==1.26.20 zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: ecs-deploy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - boto3==1.23.10 - botocore==1.26.10 - charset-normalizer==2.0.12 - click==8.0.4 - coverage==6.2 - future==1.0.0 - idna==3.10 - jmespath==0.10.0 - mock==5.2.0 - pytest-cov==4.0.0 - pytest-mock==3.6.1 - python-dateutil==2.9.0.post0 - requests==2.27.1 - s3transfer==0.5.2 - six==1.17.0 - tomli==1.2.3 - urllib3==1.26.20 prefix: /opt/conda/envs/ecs-deploy
[ "tests/test_ecs.py::test_get_running_tasks_count_new_revision", "tests/test_ecs.py::test_deploy_action" ]
[]
[ "tests/test_ecs.py::test_service_init", "tests/test_ecs.py::test_service_set_desired_count", "tests/test_ecs.py::test_service_set_task_definition", "tests/test_ecs.py::test_service_name", "tests/test_ecs.py::test_service_deployment_created_at", "tests/test_ecs.py::test_service_deployment_updated_at", "tests/test_ecs.py::test_service_deployment_created_at_without_deployments", "tests/test_ecs.py::test_service_deployment_updated_at_without_deployments", "tests/test_ecs.py::test_service_errors", "tests/test_ecs.py::test_service_older_errors", "tests/test_ecs.py::test_task_family", "tests/test_ecs.py::test_task_containers", "tests/test_ecs.py::test_task_container_names", "tests/test_ecs.py::test_task_volumes", "tests/test_ecs.py::test_task_revision", "tests/test_ecs.py::test_task_no_diff", "tests/test_ecs.py::test_task_image_diff", "tests/test_ecs.py::test_task_set_tag", "tests/test_ecs.py::test_task_set_image", "tests/test_ecs.py::test_task_set_environment", "tests/test_ecs.py::test_task_set_image_for_unknown_container", "tests/test_ecs.py::test_task_set_command", "tests/test_ecs.py::test_task_set_command_for_unknown_container", "tests/test_ecs.py::test_task_get_overrides", "tests/test_ecs.py::test_task_get_overrides_with_command", "tests/test_ecs.py::test_task_get_overrides_with_environment", "tests/test_ecs.py::test_task_get_overrides_with_commandand_environment", "tests/test_ecs.py::test_task_get_overrides_with_commandand_environment_for_multiple_containers", "tests/test_ecs.py::test_task_get_overrides_command", "tests/test_ecs.py::test_task_get_overrides_environment", "tests/test_ecs.py::test_task_definition_diff", "tests/test_ecs.py::test_client_init", "tests/test_ecs.py::test_client_describe_services", "tests/test_ecs.py::test_client_describe_task_definition", "tests/test_ecs.py::test_client_describe_unknown_task_definition", "tests/test_ecs.py::test_client_list_tasks", "tests/test_ecs.py::test_client_describe_tasks", 
"tests/test_ecs.py::test_client_register_task_definition", "tests/test_ecs.py::test_client_deregister_task_definition", "tests/test_ecs.py::test_client_update_service", "tests/test_ecs.py::test_client_run_task", "tests/test_ecs.py::test_ecs_action_init", "tests/test_ecs.py::test_ecs_action_init_with_invalid_cluster", "tests/test_ecs.py::test_ecs_action_init_with_invalid_service", "tests/test_ecs.py::test_ecs_action_init_without_credentials", "tests/test_ecs.py::test_ecs_action_get_service", "tests/test_ecs.py::test_ecs_action_get_current_task_definition", "tests/test_ecs.py::test_update_task_definition", "tests/test_ecs.py::test_deregister_task_definition", "tests/test_ecs.py::test_update_service", "tests/test_ecs.py::test_is_deployed", "tests/test_ecs.py::test_is_not_deployed_with_more_than_one_deployment", "tests/test_ecs.py::test_is_deployed_if_no_tasks_should_be_running", "tests/test_ecs.py::test_is_not_deployed_if_no_tasks_running", "tests/test_ecs.py::test_get_running_tasks_count", "tests/test_ecs.py::test_scale_action", "tests/test_ecs.py::test_run_action", "tests/test_ecs.py::test_run_action_run" ]
[]
BSD-3-Clause
1,795
[ "ecs_deploy/__init__.py", "ecs_deploy/ecs.py" ]
[ "ecs_deploy/__init__.py", "ecs_deploy/ecs.py" ]
claranet__ssha-63
66de5fc0b726aaf076b57c843f7e7d04dbd9641c
2017-10-23 17:37:51
66de5fc0b726aaf076b57c843f7e7d04dbd9641c
diff --git a/README.md b/README.md index ca01c66..5031e43 100644 --- a/README.md +++ b/README.md @@ -130,7 +130,13 @@ discover { name of the config that was selected by the user. */ Environment = "${config.name}" - Service = "bastion" + } + + /* + TagsNotEqual can be used to exclude instances with matching tags. + */ + TagsNotEqual { + Service = "k8s" } } diff --git a/ssha/ec2.py b/ssha/ec2.py index 8c51e56..d60c327 100644 --- a/ssha/ec2.py +++ b/ssha/ec2.py @@ -1,5 +1,7 @@ from __future__ import print_function +import operator + from . import aws, config, errors, ssm @@ -42,19 +44,27 @@ def _instance_sort_key(instance): return result -def _rules_pass(obj, rules): +def _rules_pass(obj, rules, compare=operator.eq): for key, expected_value in rules.items(): - if key not in obj: - return False - if isinstance(expected_value, dict): - nested_rules = expected_value - if not _rules_pass(obj[key], nested_rules): + + if key.endswith('NotEqual'): + nested_compare = operator.ne + key = key[:-len('NotEqual')] + else: + nested_compare = compare + + nested_rules_passed = _rules_pass( + obj=obj.get(key) or {}, + rules=expected_value, + compare=nested_compare, + ) + if not nested_rules_passed: return False - elif obj[key] != expected_value: + elif not compare(obj.get(key), expected_value): return False return True
Exclude instances The `discover` and `bastion` blocks allow filters on tags and such. It could be useful to exclude instances with certain tags too.
claranet/ssha
diff --git a/tests/test_ec2.py b/tests/test_ec2.py new file mode 100644 index 0000000..d44e071 --- /dev/null +++ b/tests/test_ec2.py @@ -0,0 +1,49 @@ +import unittest + +from ssha import ec2 + + +class TestEC2(unittest.TestCase): + + def test_rules_pass(self): + + bastion_instance = { + 'State': { + 'Name': 'Running', + }, + 'Tags': { + 'Service': 'bastion', + }, + } + + web_instance = { + 'State': { + 'Name': 'Running', + }, + 'Tags': { + 'Service': 'web', + }, + } + + is_bastion = { + 'State': { + 'Name': 'Running', + }, + 'Tags': { + 'Service': 'bastion', + } + } + + is_not_bastion = { + 'State': { + 'Name': 'Running', + }, + 'TagsNotEqual': { + 'Service': 'bastion', + } + } + + self.assertTrue(ec2._rules_pass(bastion_instance, is_bastion)) + self.assertTrue(ec2._rules_pass(web_instance, is_not_bastion)) + self.assertFalse(ec2._rules_pass(web_instance, is_bastion)) + self.assertFalse(ec2._rules_pass(bastion_instance, is_not_bastion))
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 2, "test_score": 1 }, "num_modified_files": 2 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
bcrypt==4.3.0 boto3==1.37.23 boto3-session-cache==1.0.2 botocore==1.37.23 cffi==1.17.1 cryptography==44.0.2 exceptiongroup==1.2.2 iniconfig==2.1.0 jmespath==1.0.1 packaging==24.2 paramiko==3.5.1 pluggy==1.5.0 pycparser==2.22 pyhcl==0.4.5 PyNaCl==1.5.0 pytest==8.3.5 python-dateutil==2.9.0.post0 s3transfer==0.11.4 six==1.17.0 -e git+https://github.com/claranet/ssha.git@66de5fc0b726aaf076b57c843f7e7d04dbd9641c#egg=ssha tomli==2.2.1 urllib3==1.26.20
name: ssha channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - bcrypt==4.3.0 - boto3==1.37.23 - boto3-session-cache==1.0.2 - botocore==1.37.23 - cffi==1.17.1 - cryptography==44.0.2 - exceptiongroup==1.2.2 - iniconfig==2.1.0 - jmespath==1.0.1 - packaging==24.2 - paramiko==3.5.1 - pluggy==1.5.0 - pycparser==2.22 - pyhcl==0.4.5 - pynacl==1.5.0 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - s3transfer==0.11.4 - six==1.17.0 - tomli==2.2.1 - urllib3==1.26.20 prefix: /opt/conda/envs/ssha
[ "tests/test_ec2.py::TestEC2::test_rules_pass" ]
[]
[]
[]
MIT License
1,797
[ "ssha/ec2.py", "README.md" ]
[ "ssha/ec2.py", "README.md" ]
springload__draftjs_exporter-77
af50b3cfc656afac5eec6a8217355e120ba3f106
2017-10-23 20:54:21
8a9837bbcd3438e97c79bd6672e3aa9f5aac57fd
su27: Good idea, it's quite efficient and straightforward. One thing, sometimes I need to insert an HTML string into the DOM structure, for example, in order to add a 3rd-party video into my article, I have to get the embedded HTML from the video website, parse it to DOM objects, and append them to the draftjs DOM structure. So, if the `parse_html` method is removed, I still need a way to insert raw HTML into the article. thibaudcolas: @BertrandBordage good question about the static methods. I don't remember my reasoning then. It doesn't feel worth the refactoring because of how seldom-used this part of the exporter's API is, but in this case yep it does seem completely over-engineered. @su27 I think it would be possible to add `parse_html` onto this, all that's needed would be to circumvent the escaping of the HTML string in the `render` method. The big caveat is that contrary to the other BeautifulSoup and lxml engines, the HTML would be treated as a dumb string without any processing at all (checking its validity, escaping the parts that need escaping, etc). Does that sound reasonable / useful? su27: @thibaudcolas That sounds good to me. Although it may cause problems if the user doesn't process the HTML string correctly, but I think the validating responsibility should belong to the user, not the engine. If one decides to use the string-based engine, he/she should make sure the input is legal, including tag names, attribute names, and the "raw HTML".
diff --git a/CHANGELOG.md b/CHANGELOG.md index f842979..ce3b362 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,27 @@ > All notable changes to this project will be documented in this file. This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). +## Unreleased + +### Added + +- Add new string-based dependency-free DOM backing engine, with much better performance, thanks to the expertise of @BertrandBordage (#77). + +### Changed + +- Pre-compile regexes in html5lib engine for performance improvements (#76). + +### How to upgrade + +There is no need to make any changes to keep using the previous engines (html5lib, lxml). To switch to the new string engine, opt-in via the config: + +```diff +exporter = HTML({ ++ # Specify which DOM backing engine to use. ++ 'engine': 'string', +}) +``` + ## [v1.0.0](https://github.com/springload/draftjs_exporter/releases/tag/v1.0.0) > This release is functionally identical to the previous one, `v0.9.0`. diff --git a/README.rst b/README.rst index 2e12755..dfaab02 100644 --- a/README.rst +++ b/README.rst @@ -237,10 +237,23 @@ This fallback component can now control the exporter behavior when normal compon See ``examples.py`` in the repository for more details. -lxml backing engine -~~~~~~~~~~~~~~~~~~~ +Alternative backing engines +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +By default the exporter uses ``html5lib`` via BeautifulSoup to build the DOM tree. There are two alternative backing engines: ``string`` and ``lxml``. + +The ``string`` engine is the fastest, and does not have any dependencies. Its only drawback is that the ``parse_html`` method does not escape/sanitise HTML like that of other engines. + +To use it, add the following to the exporter config: + +.. code:: python + + config = { + # Specify which DOM backing engine to use. + 'engine': 'string', + } -By default the exporter uses ``html5lib`` via BeautifulSoup to build DOM tree. ``lxml`` is also supported. 
lxml is more performant, but it requires ``libxml2`` and ``libxslt`` to be available on your system. +``lxml`` is also supported. It requires ``libxml2`` and ``libxslt`` to be available on your system. .. code:: sh diff --git a/docs/README.md b/docs/README.md index 46706c5..e4bc087 100644 --- a/docs/README.md +++ b/docs/README.md @@ -23,9 +23,7 @@ draftjs_exporter documentation - `style` prop is rendered as-is if it is a string, or can also be a dict in which case its properties are converted into a string using `camel_to_dash`. - Invalid attributes are left for the BeautifulSoup / html5lib parser to handle. - HTML escaping is automatically done by BeautifulSoup / html5lib. - -### Unsupported markup - +- The string engine escapes single quotes. ## R&D notes diff --git a/draftjs_exporter/dom.py b/draftjs_exporter/dom.py index b2d31d2..406667b 100644 --- a/draftjs_exporter/dom.py +++ b/draftjs_exporter/dom.py @@ -26,6 +26,7 @@ class DOM(object): HTML5LIB = 'html5lib' LXML = 'lxml' + STRING = 'string' dom = DOM_HTML5LIB @@ -48,6 +49,9 @@ class DOM(object): elif engine.lower() == cls.LXML: from draftjs_exporter.engines.lxml import DOM_LXML cls.dom = DOM_LXML + elif engine.lower() == cls.STRING: + from draftjs_exporter.engines.string import DOMString + cls.dom = DOMString else: raise ConfigException('Invalid DOM engine.') diff --git a/draftjs_exporter/engines/string.py b/draftjs_exporter/engines/string.py new file mode 100644 index 0000000..4cc4c04 --- /dev/null +++ b/draftjs_exporter/engines/string.py @@ -0,0 +1,107 @@ +from __future__ import absolute_import, unicode_literals + +from draftjs_exporter.engines.base import DOMEngine + +try: + # Python 3.2 and above. + from html import escape +except ImportError: + import cgi + + def escape(s): + # Force quote escaping in Python 2.7. 
+ return cgi.escape(s, quote=True).replace('\'', '&#x27;') + +# http://w3c.github.io/html/single-page.html#void-elements +# https://github.com/html5lib/html5lib-python/blob/0cae52b2073e3f2220db93a7650901f2200f2a13/html5lib/constants.py#L560 +VOID_ELEMENTS = { + 'area', + 'base', + 'br', + 'col', + 'embed', + 'hr', + 'img', + 'input', + 'link', + 'meta', + 'param', + 'source', + 'track', + 'wbr', +} + + +class DOMString(DOMEngine): + """ + String concatenation implementation of the DOM API. + """ + + @staticmethod + def create_tag(type_, attr=None): + return { + 'type': type_, + 'attr': attr, + 'children': [], + } + + @staticmethod + def parse_html(markup): + """ + Allows inserting arbitrary HTML into the exporter output. + Treats the HTML as if it had been escaped and was safe already. + """ + return { + 'type': 'escaped_html', + 'attr': None, + 'children': None, + 'markup': markup, + } + + @staticmethod + def append_child(elt, child): + # This check is necessary because the current wrapper_state implementation + # has an issue where it inserts elements multiple times. + # This must be skipped for text, which can be duplicated. 
+ is_existing_ref = isinstance(child, dict) and child in elt['children'] + if not is_existing_ref: + elt['children'].append(child) + + @staticmethod + def render_attrs(attr): + return ''.join(sorted([' %s="%s"' % (k, escape(v)) for k, v in attr.items()])) + + @staticmethod + def render_children(children): + return ''.join([DOMString.render(c) if isinstance(c, dict) else escape(c) for c in children]) + + @staticmethod + def render(elt): + type_ = elt['type'] + attr = DOMString.render_attrs(elt['attr']) if elt['attr'] else '' + children = DOMString.render_children(elt['children']) if elt['children'] else '' + + if type_ == 'fragment': + return children + + if type_ in VOID_ELEMENTS: + return '<%s%s/>' % (type_, attr) + + if type_ == 'escaped_html': + return elt['markup'] + + return '<%s%s>%s</%s>' % (type_, attr, children, type_) + + @staticmethod + def render_debug(elt): + type_ = elt['type'] + attr = DOMString.render_attrs(elt['attr']) if elt['attr'] else '' + children = DOMString.render_children(elt['children']) if elt['children'] else '' + + if type_ in VOID_ELEMENTS: + return '<%s%s/>' % (type_, attr) + + if type_ == 'escaped_html': + return elt['markup'] + + return '<%s%s>%s</%s>' % (type_, attr, children, type_) diff --git a/example.py b/example.py index 7512d5b..ca74ad1 100644 --- a/example.py +++ b/example.py @@ -194,7 +194,7 @@ config = { Linkify, ], # Specify which DOM backing engine to use. - 'engine': 'html5lib', + 'engine': 'string', } exporter = HTML(config)
Create a new dependency-free DOM backing engine Make a dependency-free implementation of `DOMEngine` based on `xml.etree.ElementTree` or `xml.etree.cElementTree`. This might have a positive performance impact, as well as facilitating the use of the exporter. For reference, here is my tentative implementation of an engine using `ElementTree`. For some reason it outputs wrappers twice, I would suspect a bug in `wrapper_state` that this particular implementation surfaces. ```python class DOM_ETREE(DOMEngine): """ lxml implementation of the DOM API. """ @staticmethod def create_tag(type_, attr=None): if not attr: attr = {} return etree.Element(type_, attrib=attr) @staticmethod def parse_html(markup): pass @staticmethod def append_child(elt, child): if hasattr(child, 'tag'): elt.append(child) else: c = etree.Element('fragment') c.text = child elt.append(c) @staticmethod def render(elt): return re.sub(r'(</?fragment>)', '', etree.tostring(elt, method='html')) @staticmethod def render_debug(elt): return etree.tostring(elt, method='html') ``` Edit: once implemented, this could become the default engine so people can more easily "choose their own adventure" with any other engine, but still have a working default when doing `pip install draftjs_exporter`.
springload/draftjs_exporter
diff --git a/tests/engines/test_engines_differences.py b/tests/engines/test_engines_differences.py index a73e551..3d616a3 100644 --- a/tests/engines/test_engines_differences.py +++ b/tests/engines/test_engines_differences.py @@ -5,6 +5,7 @@ import unittest from draftjs_exporter.engines.html5lib import DOM_HTML5LIB from draftjs_exporter.engines.lxml import DOM_LXML +from draftjs_exporter.engines.string import DOMString class TestDOMEnginesDifferences(unittest.TestCase): @@ -14,6 +15,9 @@ class TestDOMEnginesDifferences(unittest.TestCase): def test_lxml_self_closing_tags(self): self.assertEqual(DOM_LXML.render_debug(DOM_LXML.create_tag('hr')), '<hr>') + def test_string_self_closing_tags(self): + self.assertEqual(DOMString.render_debug(DOMString.create_tag('hr')), '<hr/>') + def test_html5lib_invalid_attributes(self): self.assertEqual(DOM_HTML5LIB.render_debug(DOM_HTML5LIB.create_tag('div', {'*ngFor': 'test'})), '<div *ngFor="test"></div>') @@ -21,6 +25,9 @@ class TestDOMEnginesDifferences(unittest.TestCase): with self.assertRaises(ValueError): DOM_LXML.render_debug(DOM_LXML.create_tag('div', {'*ngFor': 'test'})) + def test_string_invalid_attributes(self): + self.assertEqual(DOMString.render_debug(DOMString.create_tag('div', {'*ngFor': 'test'})), '<div *ngFor="test"></div>') + def test_html5lib_namespaced_attributes(self): bs_elt = DOM_HTML5LIB.create_tag('svg') DOM_HTML5LIB.append_child(bs_elt, DOM_HTML5LIB.create_tag('use', {'xlink:href': 'test'})) @@ -30,3 +37,32 @@ class TestDOMEnginesDifferences(unittest.TestCase): lxml_elt = DOM_LXML.create_tag('svg') DOM_LXML.append_child(lxml_elt, DOM_LXML.create_tag('use', {'xlink:href': 'test'})) self.assertEqual(DOM_LXML.render_debug(lxml_elt), '<svg><use xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="test"></use></svg>') + + def test_string_namespaced_attributes(self): + bs_elt = DOMString.create_tag('svg') + DOMString.append_child(bs_elt, DOMString.create_tag('use', {'xlink:href': 'test'})) + 
self.assertEqual(DOMString.render_debug(bs_elt), '<svg><use xlink:href="test"></use></svg>') + + def test_html5lib_html_escaping(self): + self.assertEqual(DOM_HTML5LIB.render_debug(DOM_HTML5LIB.create_tag('img', { + 'alt': '< " \' < > &', + })), '<img alt="&lt; &quot; \' &lt; &gt; &amp;"/>') + + def test_lxml_html_escaping(self): + self.assertEqual(DOM_LXML.render_debug(DOM_LXML.create_tag('img', { + 'alt': '< " \' < > &', + })), '<img alt="&lt; &quot; \' &lt; &gt; &amp;">') + + def test_string_html_escaping(self): + self.assertEqual(DOMString.render_debug(DOMString.create_tag('img', { + 'alt': '< " \' < > &', + })), '<img alt="&lt; &quot; &#x27; &lt; &gt; &amp;"/>') + + def test_html5lib_html_parsing(self): + self.assertEqual(DOM_HTML5LIB.render_debug(DOM_HTML5LIB.parse_html('<p>Invalid < " > &</p>')), '<p>Invalid &lt; " &gt; &amp;</p>') + + def test_lxml_html_parsing(self): + self.assertEqual(DOM_LXML.render_debug(DOM_LXML.parse_html('<p>Invalid < " > &</p>')), '<p>Invalid &lt; " &gt; &amp;</p>') + + def test_string_html_parsing(self): + self.assertEqual(DOMString.render_debug(DOMString.parse_html('<p>Invalid < " > &</p>')), '<p>Invalid < " > &</p>') diff --git a/tests/engines/test_engines_string.py b/tests/engines/test_engines_string.py new file mode 100644 index 0000000..765ff3f --- /dev/null +++ b/tests/engines/test_engines_string.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import, unicode_literals + +import unittest + +from draftjs_exporter.engines.string import DOMString + + +class TestDOMString(unittest.TestCase): + def test_create_tag(self): + self.assertEqual(DOMString.render_debug(DOMString.create_tag('p', {'class': 'intro'})), '<p class="intro"></p>') + + def test_create_tag_empty(self): + self.assertEqual(DOMString.render_debug(DOMString.create_tag('p')), '<p></p>') + + def test_parse_html(self): + self.assertEqual(DOMString.render(DOMString.parse_html('<p><span>Test text</span></p>')), '<p><span>Test text</span></p>') 
+ + def test_append_child(self): + parent = DOMString.create_tag('p') + DOMString.append_child(parent, DOMString.create_tag('span', {})) + self.assertEqual(DOMString.render_debug(parent), '<p><span></span></p>') + + def test_render_attrs(self): + self.assertEqual(DOMString.render_attrs({ + 'src': 'src.png', + 'alt': 'img\'s alt', + 'class': 'intro', + }), ' alt="img&#x27;s alt" class="intro" src="src.png"') + + + def test_render_children(self): + self.assertEqual(DOMString.render_children([ + 'render children', + DOMString.create_tag('p', {'class': 'intro'}), + 'test test', + ]), 'render children<p class="intro"></p>test test') + + def test_render(self): + self.assertEqual(DOMString.render_debug(DOMString.create_tag('p', {'class': 'intro'})), '<p class="intro"></p>') + + def test_render_debug(self): + self.assertEqual(DOMString.render_debug(DOMString.create_tag('p', {'class': 'intro'})), '<p class="intro"></p>') diff --git a/tests/test_exports.json b/tests/test_exports.json index 5ec9d35..b4a7617 100644 --- a/tests/test_exports.json +++ b/tests/test_exports.json @@ -3,7 +3,8 @@ "label": "Plain text", "output": { "html5lib": "<p>a</p>", - "lxml": "<p>a</p>" + "lxml": "<p>a</p>", + "string": "<p>a</p>" }, "content_state": { "entityMap": {}, @@ -24,7 +25,8 @@ "label": "Single inline style", "output": { "html5lib": "<p>asd<strong>f</strong></p>", - "lxml": "<p>asd<strong>f</strong></p>" + "lxml": "<p>asd<strong>f</strong></p>", + "string": "<p>asd<strong>f</strong></p>" }, "content_state": { "entityMap": {}, @@ -51,7 +53,8 @@ "label": "Nested inline styles", "output": { "html5lib": "<p><strong><em>BoldItalic</em></strong></p>", - "lxml": "<p><strong><em>BoldItalic</em></strong></p>" + "lxml": "<p><strong><em>BoldItalic</em></strong></p>", + "string": "<p><strong><em>BoldItalic</em></strong></p>" }, "content_state": { "entityMap": {}, @@ -83,7 +86,8 @@ "label": "Nested inline styles (inverted)", "output": { "html5lib": "<p><strong><em>ItalicBold</em></strong></p>", - 
"lxml": "<p><strong><em>ItalicBold</em></strong></p>" + "lxml": "<p><strong><em>ItalicBold</em></strong></p>", + "string": "<p><strong><em>ItalicBold</em></strong></p>" }, "content_state": { "entityMap": {}, @@ -115,7 +119,8 @@ "label": "Adjacent inline styles", "output": { "html5lib": "<p><em>Bold</em><strong>Italic</strong></p>", - "lxml": "<p><em>Bold</em><strong>Italic</strong></p>" + "lxml": "<p><em>Bold</em><strong>Italic</strong></p>", + "string": "<p><em>Bold</em><strong>Italic</strong></p>" }, "content_state": { "entityMap": {}, @@ -147,7 +152,8 @@ "label": "Style map defaults", "output": { "html5lib": "<p><strong>0</strong><code>1</code><em>2</em><u>3</u><s>4</s><sup>5</sup><sub>6</sub><mark>7</mark><q>8</q><small>9</small><samp>a</samp><ins>b</ins><del>c</del><kbd>d</kbd>ef</p>", - "lxml": "<p><strong>0</strong><code>1</code><em>2</em><u>3</u><s>4</s><sup>5</sup><sub>6</sub><mark>7</mark><q>8</q><small>9</small><samp>a</samp><ins>b</ins><del>c</del><kbd>d</kbd>ef</p>" + "lxml": "<p><strong>0</strong><code>1</code><em>2</em><u>3</u><s>4</s><sup>5</sup><sub>6</sub><mark>7</mark><q>8</q><small>9</small><samp>a</samp><ins>b</ins><del>c</del><kbd>d</kbd>ef</p>", + "string": "<p><strong>0</strong><code>1</code><em>2</em><u>3</u><s>4</s><sup>5</sup><sub>6</sub><mark>7</mark><q>8</q><small>9</small><samp>a</samp><ins>b</ins><del>c</del><kbd>d</kbd>ef</p>" }, "content_state": { "entityMap": {}, @@ -239,7 +245,8 @@ "label": "Entity", "output": { "html5lib": "<p><a href=\"/\" title=\"hi\"><em>a</em></a></p>", - "lxml": "<p><a href=\"/\" title=\"hi\"><em>a</em></a></p>" + "lxml": "<p><a href=\"/\" title=\"hi\"><em>a</em></a></p>", + "string": "<p><a href=\"/\" title=\"hi\"><em>a</em></a></p>" }, "content_state": { "entityMap": { @@ -282,7 +289,8 @@ "label": "Entity with data-*", "output": { "html5lib": "<p><a data-=\"no\" data-False=\"bad\" data-id=\"42\" data-mutability=\"mutable\" extra=\"foo\" href=\"/\" title=\"hi\"><em>a</em></a></p>", - "lxml": "<p><a 
data-=\"no\" data-False=\"bad\" data-id=\"42\" data-mutability=\"mutable\" extra=\"foo\" href=\"/\" title=\"hi\"><em>a</em></a></p>" + "lxml": "<p><a data-=\"no\" data-False=\"bad\" data-id=\"42\" data-mutability=\"mutable\" extra=\"foo\" href=\"/\" title=\"hi\"><em>a</em></a></p>", + "string": "<p><a data-=\"no\" data-False=\"bad\" data-id=\"42\" data-mutability=\"mutable\" extra=\"foo\" href=\"/\" title=\"hi\"><em>a</em></a></p>" }, "content_state": { "entityMap": { @@ -329,7 +337,8 @@ "label": "Entity with inline style", "output": { "html5lib": "<p><a href=\"/\"><em>a</em></a></p>", - "lxml": "<p><a href=\"/\"><em>a</em></a></p>" + "lxml": "<p><a href=\"/\"><em>a</em></a></p>", + "string": "<p><a href=\"/\"><em>a</em></a></p>" }, "content_state": { "entityMap": { @@ -370,7 +379,8 @@ "label": "Ordered list", "output": { "html5lib": "<p>An ordered list:</p><ol><li>One</li><li>Two</li></ol>", - "lxml": "<p>An ordered list:</p><ol><li>One</li><li>Two</li></ol>" + "lxml": "<p>An ordered list:</p><ol><li>One</li><li>Two</li></ol>", + "string": "<p>An ordered list:</p><ol><li>One</li><li>Two</li></ol>" }, "content_state": { "entityMap": {}, @@ -407,7 +417,8 @@ "label": "All plain HTML elements we need", "output": { "html5lib": "<h2>Title 2</h2><h3>Title 3</h3><h4>Title 4</h4><h5>Title 5</h5><blockquote>Blockquote</blockquote><ul class=\"bullet-list\"><li>List item<ul class=\"bullet-list\"><li>Nested list item</li></ul></li></ul><ol><li>Ordered item <strong>(bold)</strong><ol><li>Nested ordered item <em>(italic)</em></li></ol></li></ol>", - "lxml": "<h2>Title 2</h2><h3>Title 3</h3><h4>Title 4</h4><h5>Title 5</h5><blockquote>Blockquote</blockquote><ul class=\"bullet-list\"><li>List item<ul class=\"bullet-list\"><li>Nested list item</li></ul></li></ul><ol><li>Ordered item <strong>(bold)</strong><ol><li>Nested ordered item <em>(italic)</em></li></ol></li></ol>" + "lxml": "<h2>Title 2</h2><h3>Title 3</h3><h4>Title 4</h4><h5>Title 5</h5><blockquote>Blockquote</blockquote><ul 
class=\"bullet-list\"><li>List item<ul class=\"bullet-list\"><li>Nested list item</li></ul></li></ul><ol><li>Ordered item <strong>(bold)</strong><ol><li>Nested ordered item <em>(italic)</em></li></ol></li></ol>", + "string": "<h2>Title 2</h2><h3>Title 3</h3><h4>Title 4</h4><h5>Title 5</h5><blockquote>Blockquote</blockquote><ul class=\"bullet-list\"><li>List item<ul class=\"bullet-list\"><li>Nested list item</li></ul></li></ul><ol><li>Ordered item <strong>(bold)</strong><ol><li>Nested ordered item <em>(italic)</em></li></ol></li></ol>" }, "content_state": { "entityMap": {}, @@ -429,7 +440,8 @@ "label": "From https://github.com/icelab/draft-js-ast-exporter/blob/651c807bea12d97dad6f4965ab40481c8f2130dd/test/fixtures/content.js", "output": { "html5lib": "<h2>DraftJS AST Exporter</h2><p>In your draft-js, <strong>exporting</strong> your <em>content</em>:</p><ol><li>From draft-js internals</li><li>To an abstract syntax tree</li><li>Extensibility.</li></ol><img src=\"http://placekitten.com/500/300\"/>:)<p>Find the project on <a href=\"https://github.com/icelab/draft-js-ast-exporter\">Github</a>.</p>", - "lxml": "<h2>DraftJS AST Exporter</h2><p>In your draft-js, <strong>exporting</strong> your <em>content</em>:</p><ol><li>From draft-js internals</li><li>To an abstract syntax tree</li><li>Extensibility.</li></ol><img src=\"http://placekitten.com/500/300\">:)<p>Find the project on <a href=\"https://github.com/icelab/draft-js-ast-exporter\">Github</a>.</p>" + "lxml": "<h2>DraftJS AST Exporter</h2><p>In your draft-js, <strong>exporting</strong> your <em>content</em>:</p><ol><li>From draft-js internals</li><li>To an abstract syntax tree</li><li>Extensibility.</li></ol><img src=\"http://placekitten.com/500/300\">:)<p>Find the project on <a href=\"https://github.com/icelab/draft-js-ast-exporter\">Github</a>.</p>", + "string": "<h2>DraftJS AST Exporter</h2><p>In your draft-js, <strong>exporting</strong> your <em>content</em>:</p><ol><li>From draft-js internals</li><li>To an 
abstract syntax tree</li><li>Extensibility.</li></ol><img src=\"http://placekitten.com/500/300\"/>:)<p>Find the project on <a href=\"https://github.com/icelab/draft-js-ast-exporter\">Github</a>.</p>" }, "content_state": { "entityMap": { @@ -539,7 +551,8 @@ "label": "HTML entities escaping", "output": { "html5lib": "<p><a href=\"http://www.example.com/?a=1&amp;b=2\">http://www.example.com/?a=1&amp;b=2</a></p>", - "lxml": "<p><a href=\"http://www.example.com/?a=1&amp;b=2\">http://www.example.com/?a=1&amp;b=2</a></p>" + "lxml": "<p><a href=\"http://www.example.com/?a=1&amp;b=2\">http://www.example.com/?a=1&amp;b=2</a></p>", + "string": "<p><a href=\"http://www.example.com/?a=1&amp;b=2\">http://www.example.com/?a=1&amp;b=2</a></p>" }, "content_state": { "entityMap": { @@ -574,7 +587,8 @@ "label": "Multiple decorators", "output": { "html5lib": "<p>search <a href=\"http://www.google.com#world\">http://www.google.com#world</a> for the <span class=\"hashtag\">#world</span></p>", - "lxml": "<p>search <a href=\"http://www.google.com#world\">http://www.google.com#world</a> for the <span class=\"hashtag\">#world</span></p>" + "lxml": "<p>search <a href=\"http://www.google.com#world\">http://www.google.com#world</a> for the <span class=\"hashtag\">#world</span></p>", + "string": "<p>search <a href=\"http://www.google.com#world\">http://www.google.com#world</a> for the <span class=\"hashtag\">#world</span></p>" }, "content_state": { "entityMap": {}, @@ -595,7 +609,8 @@ "label": "Big content export", "output": { "html5lib": "<h2>draftjs_exporter is an HTML exporter for <a href=\"https://github.com/facebook/draft-js\">Draft.js</a> content</h2><blockquote>Try it out by running this file!</blockquote><h3>Features 📝🍸</h3><p>The exporter aims to provide sensible defaults from basic block types and inline styles to HTML, that can easily be customised when required. 
For more advanced scenarios, an API is provided (mimicking React's <a href=\"https://facebook.github.io/react/docs/top-level-api.html#react.createelement\"><code>createElement</code></a>) to create custom rendering components of arbitrary complexity.</p><hr/><p>Here are some features worth highlighting:</p><ul class=\"bullet-list\"><li>Convert line breaks to <code>&lt;br&gt;</code><br/>elements.</li><li>Automatic conversion of entity data to HTML attributes (int &amp; boolean to string, <a href=\"https://facebook.github.io/react/docs/jsx-in-depth.html\"><code>style object</code> to <code>style string</code></a>).</li><li>Wrapped blocks (<code>&lt;li&gt; </code>elements go inside <code>&lt;ul&gt;</code> or <code>&lt;ol&gt;</code>).<ul class=\"bullet-list\"><li>With arbitrary nesting.<ul class=\"bullet-list\"><li>Common text styles: <strong>Bold</strong>, <em>Italic</em>, <u>Underline</u>, <code>Monospace</code>, <s>Strikethrough.</s> <kbd>cmd + b</kbd></li><li><s>Overlapping </s><strong><s>te</s></strong><strong><em>xt</em></strong><em> styles. </em><strong style=\"text-decoration: underline;\">Custom styles</strong> too!<ul class=\"bullet-list\"><li><span class=\"hashtag\">#hashtag</span> support via <a href=\"https://github.com/springload/draftjs_exporter/pull/17\">#CompositeDecorators</a>.<ul class=\"bullet-list\"><li>Linkify URLs too! 
<a href=\"http://example.com/\">http://example.com/</a></li></ul></li></ul></li><li>Depth can go back and forth, it works fiiine (1)</li></ul></li><li>Depth can go back and forth, it works fiiine (2)<ul class=\"bullet-list\"><li>Depth can go back and forth, it works fiiine (3)</li></ul></li><li>Depth can go back and forth, it works fiiine (4)</li></ul></li><li>Depth can go back and forth, it works fiiine (5)</li></ul><img alt=\"Test image alt text\" height=\"200\" src=\"https://placekitten.com/g/300/200\" width=\"300\"/><h3>For developers \ud83d\ude80</h3><ol><li>Import the library</li><li>Define your configuration</li><li>Go!<ol><li>Optionally, define your custom components.</li></ol></li></ol><pre><code>def Blockquote(props):\n block_data = props['block']['data']\n return DOM.create_element('blockquote', {\n 'cite': block_data.get('cite')\n }, props['children'])\n</code></pre><p>Voilà!</p>", - "lxml": "<h2>draftjs_exporter is an HTML exporter for <a href=\"https://github.com/facebook/draft-js\">Draft.js</a> content</h2><blockquote>Try it out by running this file!</blockquote><h3>Features 📝🍸</h3><p>The exporter aims to provide sensible defaults from basic block types and inline styles to HTML, that can easily be customised when required. 
For more advanced scenarios, an API is provided (mimicking React's <a href=\"https://facebook.github.io/react/docs/top-level-api.html#react.createelement\"><code>createElement</code></a>) to create custom rendering components of arbitrary complexity.</p><hr><p>Here are some features worth highlighting:</p><ul class=\"bullet-list\"><li>Convert line breaks to <code>&lt;br&gt;</code><br>elements.</li><li>Automatic conversion of entity data to HTML attributes (int &amp; boolean to string, <a href=\"https://facebook.github.io/react/docs/jsx-in-depth.html\"><code>style object</code> to <code>style string</code></a>).</li><li>Wrapped blocks (<code>&lt;li&gt; </code>elements go inside <code>&lt;ul&gt;</code> or <code>&lt;ol&gt;</code>).<ul class=\"bullet-list\"><li>With arbitrary nesting.<ul class=\"bullet-list\"><li>Common text styles: <strong>Bold</strong>, <em>Italic</em>, <u>Underline</u>, <code>Monospace</code>, <s>Strikethrough.</s> <kbd>cmd + b</kbd></li><li><s>Overlapping </s><strong><s>te</s></strong><strong><em>xt</em></strong><em> styles. </em><strong style=\"text-decoration: underline;\">Custom styles</strong> too!<ul class=\"bullet-list\"><li><span class=\"hashtag\">#hashtag</span> support via <a href=\"https://github.com/springload/draftjs_exporter/pull/17\">#CompositeDecorators</a>.<ul class=\"bullet-list\"><li>Linkify URLs too! 
<a href=\"http://example.com/\">http://example.com/</a></li></ul></li></ul></li><li>Depth can go back and forth, it works fiiine (1)</li></ul></li><li>Depth can go back and forth, it works fiiine (2)<ul class=\"bullet-list\"><li>Depth can go back and forth, it works fiiine (3)</li></ul></li><li>Depth can go back and forth, it works fiiine (4)</li></ul></li><li>Depth can go back and forth, it works fiiine (5)</li></ul><img alt=\"Test image alt text\" height=\"200\" src=\"https://placekitten.com/g/300/200\" width=\"300\"><h3>For developers \ud83d\ude80</h3><ol><li>Import the library</li><li>Define your configuration</li><li>Go!<ol><li>Optionally, define your custom components.</li></ol></li></ol><pre><code>def Blockquote(props):\n block_data = props['block']['data']\n return DOM.create_element('blockquote', {\n 'cite': block_data.get('cite')\n }, props['children'])\n</code></pre><p>Voilà!</p>" + "lxml": "<h2>draftjs_exporter is an HTML exporter for <a href=\"https://github.com/facebook/draft-js\">Draft.js</a> content</h2><blockquote>Try it out by running this file!</blockquote><h3>Features 📝🍸</h3><p>The exporter aims to provide sensible defaults from basic block types and inline styles to HTML, that can easily be customised when required. 
For more advanced scenarios, an API is provided (mimicking React's <a href=\"https://facebook.github.io/react/docs/top-level-api.html#react.createelement\"><code>createElement</code></a>) to create custom rendering components of arbitrary complexity.</p><hr><p>Here are some features worth highlighting:</p><ul class=\"bullet-list\"><li>Convert line breaks to <code>&lt;br&gt;</code><br>elements.</li><li>Automatic conversion of entity data to HTML attributes (int &amp; boolean to string, <a href=\"https://facebook.github.io/react/docs/jsx-in-depth.html\"><code>style object</code> to <code>style string</code></a>).</li><li>Wrapped blocks (<code>&lt;li&gt; </code>elements go inside <code>&lt;ul&gt;</code> or <code>&lt;ol&gt;</code>).<ul class=\"bullet-list\"><li>With arbitrary nesting.<ul class=\"bullet-list\"><li>Common text styles: <strong>Bold</strong>, <em>Italic</em>, <u>Underline</u>, <code>Monospace</code>, <s>Strikethrough.</s> <kbd>cmd + b</kbd></li><li><s>Overlapping </s><strong><s>te</s></strong><strong><em>xt</em></strong><em> styles. </em><strong style=\"text-decoration: underline;\">Custom styles</strong> too!<ul class=\"bullet-list\"><li><span class=\"hashtag\">#hashtag</span> support via <a href=\"https://github.com/springload/draftjs_exporter/pull/17\">#CompositeDecorators</a>.<ul class=\"bullet-list\"><li>Linkify URLs too! 
<a href=\"http://example.com/\">http://example.com/</a></li></ul></li></ul></li><li>Depth can go back and forth, it works fiiine (1)</li></ul></li><li>Depth can go back and forth, it works fiiine (2)<ul class=\"bullet-list\"><li>Depth can go back and forth, it works fiiine (3)</li></ul></li><li>Depth can go back and forth, it works fiiine (4)</li></ul></li><li>Depth can go back and forth, it works fiiine (5)</li></ul><img alt=\"Test image alt text\" height=\"200\" src=\"https://placekitten.com/g/300/200\" width=\"300\"><h3>For developers \ud83d\ude80</h3><ol><li>Import the library</li><li>Define your configuration</li><li>Go!<ol><li>Optionally, define your custom components.</li></ol></li></ol><pre><code>def Blockquote(props):\n block_data = props['block']['data']\n return DOM.create_element('blockquote', {\n 'cite': block_data.get('cite')\n }, props['children'])\n</code></pre><p>Voilà!</p>", + "string": "<h2>draftjs_exporter is an HTML exporter for <a href=\"https://github.com/facebook/draft-js\">Draft.js</a> content</h2><blockquote>Try it out by running this file!</blockquote><h3>Features 📝🍸</h3><p>The exporter aims to provide sensible defaults from basic block types and inline styles to HTML, that can easily be customised when required. 
For more advanced scenarios, an API is provided (mimicking React&#x27;s <a href=\"https://facebook.github.io/react/docs/top-level-api.html#react.createelement\"><code>createElement</code></a>) to create custom rendering components of arbitrary complexity.</p><hr/><p>Here are some features worth highlighting:</p><ul class=\"bullet-list\"><li>Convert line breaks to <code>&lt;br&gt;</code><br/>elements.</li><li>Automatic conversion of entity data to HTML attributes (int &amp; boolean to string, <a href=\"https://facebook.github.io/react/docs/jsx-in-depth.html\"><code>style object</code> to <code>style string</code></a>).</li><li>Wrapped blocks (<code>&lt;li&gt; </code>elements go inside <code>&lt;ul&gt;</code> or <code>&lt;ol&gt;</code>).<ul class=\"bullet-list\"><li>With arbitrary nesting.<ul class=\"bullet-list\"><li>Common text styles: <strong>Bold</strong>, <em>Italic</em>, <u>Underline</u>, <code>Monospace</code>, <s>Strikethrough.</s> <kbd>cmd + b</kbd></li><li><s>Overlapping </s><strong><s>te</s></strong><strong><em>xt</em></strong><em> styles. </em><strong style=\"text-decoration: underline;\">Custom styles</strong> too!<ul class=\"bullet-list\"><li><span class=\"hashtag\">#hashtag</span> support via <a href=\"https://github.com/springload/draftjs_exporter/pull/17\">#CompositeDecorators</a>.<ul class=\"bullet-list\"><li>Linkify URLs too! 
<a href=\"http://example.com/\">http://example.com/</a></li></ul></li></ul></li><li>Depth can go back and forth, it works fiiine (1)</li></ul></li><li>Depth can go back and forth, it works fiiine (2)<ul class=\"bullet-list\"><li>Depth can go back and forth, it works fiiine (3)</li></ul></li><li>Depth can go back and forth, it works fiiine (4)</li></ul></li><li>Depth can go back and forth, it works fiiine (5)</li></ul><img alt=\"Test image alt text\" height=\"200\" src=\"https://placekitten.com/g/300/200\" width=\"300\"/><h3>For developers \ud83d\ude80</h3><ol><li>Import the library</li><li>Define your configuration</li><li>Go!<ol><li>Optionally, define your custom components.</li></ol></li></ol><pre><code>def Blockquote(props):\n block_data = props[&#x27;block&#x27;][&#x27;data&#x27;]\n return DOM.create_element(&#x27;blockquote&#x27;, {\n &#x27;cite&#x27;: block_data.get(&#x27;cite&#x27;)\n }, props[&#x27;children&#x27;])\n</code></pre><p>Voilà!</p>" }, "content_state": { "entityMap": { diff --git a/tests/test_exports.py b/tests/test_exports.py index b9522be..f9b8650 100644 --- a/tests/test_exports.py +++ b/tests/test_exports.py @@ -18,11 +18,6 @@ from tests.test_entities import HR, Image, Link fixtures_path = os.path.join(os.path.dirname(__file__), 'test_exports.json') fixtures = json.loads(open(fixtures_path, 'r').read()) -engines = [ - 'bs', - 'lxml', -] - exporter = HTML({ 'entity_decorators': { ENTITY_TYPES.LINK: Link, @@ -67,6 +62,8 @@ class TestExportsMeta(type): engine = 'html5lib' elif name == 'TestExportsLXML': engine = 'lxml' + elif name == 'TestExportsSTRING': + engine = 'string' for export in fixtures: test_label = export['label'].lower().replace(' ', '_') @@ -109,5 +106,21 @@ class TestExportsLXML(six.with_metaclass(TestExportsMeta, unittest.TestCase)): self.assertIsInstance(exporter, HTML) +class TestExportsSTRING(six.with_metaclass(TestExportsMeta, unittest.TestCase)): + @classmethod + def setUpClass(cls): + cls.pr = cProfile.Profile() + 
cls.pr.enable() + print('\nstring') + + @classmethod + def tearDownClass(cls): + cls.pr.disable() + Stats(cls.pr).strip_dirs().sort_stats('cumulative').print_stats(0) + + def test_init(self): + self.assertIsInstance(exporter, HTML) + + if __name__ == "__main__": unittest.main()
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 5 }
1.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[testing,docs]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio" ], "pre_install": [ "apt-get update", "apt-get install -y gcc libxml2-dev libxslt1-dev" ], "python": "3.6", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 beautifulsoup4==4.12.3 certifi==2021.5.30 coverage==6.2 distlib==0.3.9 -e git+https://github.com/springload/draftjs_exporter.git@af50b3cfc656afac5eec6a8217355e120ba3f106#egg=draftjs_exporter execnet==1.9.0 filelock==3.4.1 flake8==5.0.4 html5lib==1.0b10 importlib-metadata==4.2.0 importlib-resources==5.4.0 iniconfig==1.1.1 isort==4.2.5 lxml==5.3.1 mccabe==0.7.0 packaging==21.3 platformdirs==2.4.0 pluggy==1.0.0 py==1.11.0 pycodestyle==2.9.1 pyflakes==2.5.0 pyparsing==3.1.4 pytest==7.0.1 pytest-asyncio==0.16.0 pytest-cov==4.0.0 pytest-mock==3.6.1 pytest-xdist==3.0.2 six==1.17.0 soupsieve==2.3.2.post1 toml==0.10.2 tomli==1.2.3 tox==3.28.0 typing_extensions==4.1.1 virtualenv==20.16.2 webencodings==0.5.1 zipp==3.6.0
name: draftjs_exporter channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - beautifulsoup4==4.12.3 - coverage==6.2 - distlib==0.3.9 - execnet==1.9.0 - filelock==3.4.1 - flake8==5.0.4 - html5lib==1.0b10 - importlib-metadata==4.2.0 - importlib-resources==5.4.0 - iniconfig==1.1.1 - isort==4.2.5 - lxml==5.3.1 - mccabe==0.7.0 - packaging==21.3 - platformdirs==2.4.0 - pluggy==1.0.0 - py==1.11.0 - pycodestyle==2.9.1 - pyflakes==2.5.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-asyncio==0.16.0 - pytest-cov==4.0.0 - pytest-mock==3.6.1 - pytest-xdist==3.0.2 - six==1.17.0 - soupsieve==2.3.2.post1 - toml==0.10.2 - tomli==1.2.3 - tox==3.28.0 - typing-extensions==4.1.1 - virtualenv==20.16.2 - webencodings==0.5.1 - zipp==3.6.0 prefix: /opt/conda/envs/draftjs_exporter
[ "tests/engines/test_engines_differences.py::TestDOMEnginesDifferences::test_html5lib_html_escaping", "tests/engines/test_engines_differences.py::TestDOMEnginesDifferences::test_html5lib_html_parsing", "tests/engines/test_engines_differences.py::TestDOMEnginesDifferences::test_html5lib_invalid_attributes", "tests/engines/test_engines_differences.py::TestDOMEnginesDifferences::test_html5lib_namespaced_attributes", "tests/engines/test_engines_differences.py::TestDOMEnginesDifferences::test_html5lib_self_closing_tags", "tests/engines/test_engines_differences.py::TestDOMEnginesDifferences::test_lxml_html_escaping", "tests/engines/test_engines_differences.py::TestDOMEnginesDifferences::test_lxml_html_parsing", "tests/engines/test_engines_differences.py::TestDOMEnginesDifferences::test_lxml_invalid_attributes", "tests/engines/test_engines_differences.py::TestDOMEnginesDifferences::test_lxml_namespaced_attributes", "tests/engines/test_engines_differences.py::TestDOMEnginesDifferences::test_lxml_self_closing_tags", "tests/engines/test_engines_differences.py::TestDOMEnginesDifferences::test_string_html_escaping", "tests/engines/test_engines_differences.py::TestDOMEnginesDifferences::test_string_html_parsing", "tests/engines/test_engines_differences.py::TestDOMEnginesDifferences::test_string_invalid_attributes", "tests/engines/test_engines_differences.py::TestDOMEnginesDifferences::test_string_namespaced_attributes", "tests/engines/test_engines_differences.py::TestDOMEnginesDifferences::test_string_self_closing_tags", "tests/engines/test_engines_string.py::TestDOMString::test_append_child", "tests/engines/test_engines_string.py::TestDOMString::test_create_tag", "tests/engines/test_engines_string.py::TestDOMString::test_create_tag_empty", "tests/engines/test_engines_string.py::TestDOMString::test_parse_html", "tests/engines/test_engines_string.py::TestDOMString::test_render", "tests/engines/test_engines_string.py::TestDOMString::test_render_attrs", 
"tests/engines/test_engines_string.py::TestDOMString::test_render_children", "tests/engines/test_engines_string.py::TestDOMString::test_render_debug", "tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_adjacent_inline_styles", "tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_all_plain_html_elements_we_need", "tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_big_content_export", "tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_entity", "tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_entity_with_data-*", "tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_entity_with_inline_style", "tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_from_https://github.com/icelab/draft-js-ast-exporter/blob/651c807bea12d97dad6f4965ab40481c8f2130dd/test/fixtures/content.js", "tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_html_entities_escaping", "tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_multiple_decorators", "tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_nested_inline_styles", "tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_nested_inline_styles_(inverted)", "tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_ordered_list", "tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_plain_text", "tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_single_inline_style", "tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_style_map_defaults", "tests/test_exports.py::TestExportsHTML5LIB::test_init_html5lib", "tests/test_exports.py::TestExportsLXML::test_export_lxml_adjacent_inline_styles", "tests/test_exports.py::TestExportsLXML::test_export_lxml_all_plain_html_elements_we_need", "tests/test_exports.py::TestExportsLXML::test_export_lxml_entity", "tests/test_exports.py::TestExportsLXML::test_export_lxml_entity_with_inline_style", 
"tests/test_exports.py::TestExportsLXML::test_export_lxml_from_https://github.com/icelab/draft-js-ast-exporter/blob/651c807bea12d97dad6f4965ab40481c8f2130dd/test/fixtures/content.js", "tests/test_exports.py::TestExportsLXML::test_export_lxml_html_entities_escaping", "tests/test_exports.py::TestExportsLXML::test_export_lxml_multiple_decorators", "tests/test_exports.py::TestExportsLXML::test_export_lxml_nested_inline_styles", "tests/test_exports.py::TestExportsLXML::test_export_lxml_nested_inline_styles_(inverted)", "tests/test_exports.py::TestExportsLXML::test_export_lxml_ordered_list", "tests/test_exports.py::TestExportsLXML::test_export_lxml_plain_text", "tests/test_exports.py::TestExportsLXML::test_export_lxml_single_inline_style", "tests/test_exports.py::TestExportsLXML::test_export_lxml_style_map_defaults", "tests/test_exports.py::TestExportsLXML::test_init", "tests/test_exports.py::TestExportsSTRING::test_export_string_adjacent_inline_styles", "tests/test_exports.py::TestExportsSTRING::test_export_string_all_plain_html_elements_we_need", "tests/test_exports.py::TestExportsSTRING::test_export_string_big_content_export", "tests/test_exports.py::TestExportsSTRING::test_export_string_entity", "tests/test_exports.py::TestExportsSTRING::test_export_string_entity_with_data-*", "tests/test_exports.py::TestExportsSTRING::test_export_string_entity_with_inline_style", "tests/test_exports.py::TestExportsSTRING::test_export_string_from_https://github.com/icelab/draft-js-ast-exporter/blob/651c807bea12d97dad6f4965ab40481c8f2130dd/test/fixtures/content.js", "tests/test_exports.py::TestExportsSTRING::test_export_string_html_entities_escaping", "tests/test_exports.py::TestExportsSTRING::test_export_string_multiple_decorators", "tests/test_exports.py::TestExportsSTRING::test_export_string_nested_inline_styles", "tests/test_exports.py::TestExportsSTRING::test_export_string_nested_inline_styles_(inverted)", 
"tests/test_exports.py::TestExportsSTRING::test_export_string_ordered_list", "tests/test_exports.py::TestExportsSTRING::test_export_string_plain_text", "tests/test_exports.py::TestExportsSTRING::test_export_string_single_inline_style", "tests/test_exports.py::TestExportsSTRING::test_export_string_style_map_defaults", "tests/test_exports.py::TestExportsSTRING::test_init" ]
[ "tests/test_exports.py::TestExportsLXML::test_export_lxml_big_content_export", "tests/test_exports.py::TestExportsLXML::test_export_lxml_entity_with_data-*" ]
[]
[]
MIT License
1,798
[ "README.rst", "draftjs_exporter/dom.py", "docs/README.md", "CHANGELOG.md", "example.py", "draftjs_exporter/engines/string.py" ]
[ "README.rst", "draftjs_exporter/dom.py", "docs/README.md", "CHANGELOG.md", "example.py", "draftjs_exporter/engines/string.py" ]
pynamodb__PynamoDB-385
7a3cc8aba43b4cfe7630f5a4a199bddb8f8c7a86
2017-10-24 03:15:02
7a3cc8aba43b4cfe7630f5a4a199bddb8f8c7a86
jpinner-lyft: @garrettheel @jmphilli any idea why we do: ``` cls = getattr(item, '__class__', None) issubclass(cls, classinfo) ``` instead of ``` issubclass(item, classinfo) ``` jpinner-lyft: Some comments while looking at the history from #280: - previous implementations used `issubclass` directly and `type(cls)` - inspect `getmembers` has an identical implementation for 2.7+
diff --git a/pynamodb/attributes.py b/pynamodb/attributes.py index 4f0e286..99cdb33 100644 --- a/pynamodb/attributes.py +++ b/pynamodb/attributes.py @@ -14,6 +14,7 @@ from pynamodb.constants import ( STRING, STRING_SHORT, NUMBER, BINARY, UTC, DATETIME_FORMAT, BINARY_SET, STRING_SET, NUMBER_SET, MAP, MAP_SHORT, LIST, LIST_SHORT, DEFAULT_ENCODING, BOOLEAN, ATTR_TYPE_MAP, NUMBER_SHORT, NULL, SHORT_ATTR_TYPES ) +from pynamodb.compat import getmembers_issubclass from pynamodb.expressions.operand import Path import collections @@ -183,34 +184,24 @@ class AttributeContainerMeta(type): cls._attributes = {} cls._dynamo_to_python_attrs = {} - for item_name in dir(cls): - try: - item_cls = getattr(getattr(cls, item_name), "__class__", None) - except AttributeError: - continue + for name, attribute in getmembers_issubclass(cls, Attribute): + initialized = False + if isinstance(attribute, MapAttribute): + # MapAttribute instances that are class attributes of an AttributeContainer class + # should behave like an Attribute instance and not an AttributeContainer instance. + initialized = attribute._make_attribute() - if item_cls is None: - continue + cls._attributes[name] = attribute + if attribute.attr_name is not None: + cls._dynamo_to_python_attrs[attribute.attr_name] = name + else: + attribute.attr_name = name - if issubclass(item_cls, Attribute): - instance = getattr(cls, item_name) - initialized = False - if isinstance(instance, MapAttribute): - # MapAttribute instances that are class attributes of an AttributeContainer class - # should behave like an Attribute instance and not an AttributeContainer instance. 
- initialized = instance._make_attribute() - - cls._attributes[item_name] = instance - if instance.attr_name is not None: - cls._dynamo_to_python_attrs[instance.attr_name] = item_name - else: - instance.attr_name = item_name - - if initialized and isinstance(instance, MapAttribute): - # To support creating expressions from nested attributes, MapAttribute instances - # store local copies of the attributes in cls._attributes with `attr_path` set. - # Prepend the `attr_path` lists with the dynamo attribute name. - instance._update_attribute_paths(instance.attr_name) + if initialized and isinstance(attribute, MapAttribute): + # To support creating expressions from nested attributes, MapAttribute instances + # store local copies of the attributes in cls._attributes with `attr_path` set. + # Prepend the `attr_path` lists with the dynamo attribute name. + attribute._update_attribute_paths(attribute.attr_name) @add_metaclass(AttributeContainerMeta) diff --git a/pynamodb/compat.py b/pynamodb/compat.py index 2c246b1..5163399 100644 --- a/pynamodb/compat.py +++ b/pynamodb/compat.py @@ -59,3 +59,21 @@ class CompatTestCase(unittest.TestCase): class NullHandler(logging.Handler): def emit(self, record): pass + + +# Replace this function with inspect.getmembers() once we drop Python 2.6 support +# see https://bugs.python.org/issue1785 +# see https://bugs.launchpad.net/zope.interface/+bug/181371 +def getmembers_issubclass(object, classinfo): + results = [] + for key in dir(object): + try: + value = getattr(object, key) + except AttributeError: + continue + + value_cls = getattr(value, '__class__', None) + if value_cls and issubclass(value_cls, classinfo): + results.append((key, value)) + results.sort() + return results diff --git a/pynamodb/indexes.py b/pynamodb/indexes.py index 1c8d960..09b9687 100644 --- a/pynamodb/indexes.py +++ b/pynamodb/indexes.py @@ -7,6 +7,7 @@ from pynamodb.constants import ( ) from pynamodb.attributes import Attribute from pynamodb.types import HASH, 
RANGE +from pynamodb.compat import getmembers_issubclass from pynamodb.connection.util import pythonic from six import with_metaclass @@ -131,10 +132,8 @@ class Index(with_metaclass(IndexMeta)): """ if cls.Meta.attributes is None: cls.Meta.attributes = {} - for item in dir(cls): - item_cls = getattr(getattr(cls, item), "__class__", None) - if item_cls and issubclass(item_cls, (Attribute, )): - cls.Meta.attributes[item] = getattr(cls, item) + for name, attribute in getmembers_issubclass(cls, Attribute): + cls.Meta.attributes[name] = attribute return cls.Meta.attributes diff --git a/pynamodb/models.py b/pynamodb/models.py index cacb817..01c71f5 100644 --- a/pynamodb/models.py +++ b/pynamodb/models.py @@ -15,7 +15,7 @@ from pynamodb.connection.base import MetaTable from pynamodb.connection.table import TableConnection from pynamodb.connection.util import pythonic from pynamodb.types import HASH, RANGE -from pynamodb.compat import NullHandler +from pynamodb.compat import NullHandler, getmembers_issubclass from pynamodb.indexes import Index, GlobalSecondaryIndex from pynamodb.pagination import ResultIterator from pynamodb.settings import get_settings_value @@ -896,13 +896,18 @@ class Model(AttributeContainer): hash_key, attrs = data range_key = attrs.pop('range_key', None) attributes = attrs.pop(pythonic(ATTRIBUTES)) + hash_keyname = cls._get_meta_data().hash_keyname + hash_keytype = cls._get_meta_data().get_attribute_type(hash_keyname) + attributes[hash_keyname] = { + hash_keytype: hash_key + } if range_key is not None: range_keyname = cls._get_meta_data().range_keyname range_keytype = cls._get_meta_data().get_attribute_type(range_keyname) attributes[range_keyname] = { range_keytype: range_key } - item = cls(hash_key) + item = cls() item._deserialize(attributes) return item @@ -1103,34 +1108,29 @@ class Model(AttributeContainer): pythonic(ATTR_DEFINITIONS): [] } cls._index_classes = {} - for item in dir(cls): - item_cls = getattr(getattr(cls, item), "__class__", None) 
- if item_cls is None: - continue - if issubclass(item_cls, (Index, )): - item_cls = getattr(cls, item) - cls._index_classes[item_cls.Meta.index_name] = item_cls - schema = item_cls._get_schema() - idx = { - pythonic(INDEX_NAME): item_cls.Meta.index_name, - pythonic(KEY_SCHEMA): schema.get(pythonic(KEY_SCHEMA)), - pythonic(PROJECTION): { - PROJECTION_TYPE: item_cls.Meta.projection.projection_type, - }, + for name, index in getmembers_issubclass(cls, Index): + cls._index_classes[index.Meta.index_name] = index + schema = index._get_schema() + idx = { + pythonic(INDEX_NAME): index.Meta.index_name, + pythonic(KEY_SCHEMA): schema.get(pythonic(KEY_SCHEMA)), + pythonic(PROJECTION): { + PROJECTION_TYPE: index.Meta.projection.projection_type, + }, + } + if issubclass(index.__class__, GlobalSecondaryIndex): + idx[pythonic(PROVISIONED_THROUGHPUT)] = { + READ_CAPACITY_UNITS: index.Meta.read_capacity_units, + WRITE_CAPACITY_UNITS: index.Meta.write_capacity_units } - if issubclass(item_cls.__class__, GlobalSecondaryIndex): - idx[pythonic(PROVISIONED_THROUGHPUT)] = { - READ_CAPACITY_UNITS: item_cls.Meta.read_capacity_units, - WRITE_CAPACITY_UNITS: item_cls.Meta.write_capacity_units - } - cls._indexes[pythonic(ATTR_DEFINITIONS)].extend(schema.get(pythonic(ATTR_DEFINITIONS))) - if item_cls.Meta.projection.non_key_attributes: - idx[pythonic(PROJECTION)][NON_KEY_ATTRIBUTES] = item_cls.Meta.projection.non_key_attributes - if issubclass(item_cls.__class__, GlobalSecondaryIndex): - cls._indexes[pythonic(GLOBAL_SECONDARY_INDEXES)].append(idx) - else: - cls._indexes[pythonic(LOCAL_SECONDARY_INDEXES)].append(idx) + cls._indexes[pythonic(ATTR_DEFINITIONS)].extend(schema.get(pythonic(ATTR_DEFINITIONS))) + if index.Meta.projection.non_key_attributes: + idx[pythonic(PROJECTION)][NON_KEY_ATTRIBUTES] = index.Meta.projection.non_key_attributes + if issubclass(index.__class__, GlobalSecondaryIndex): + cls._indexes[pythonic(GLOBAL_SECONDARY_INDEXES)].append(idx) + else: + 
cls._indexes[pythonic(LOCAL_SECONDARY_INDEXES)].append(idx) return cls._indexes def _get_json(self): @@ -1257,13 +1257,13 @@ class Model(AttributeContainer): :param attrs: A dictionary of attributes to update this item with. """ - for name, attr in attrs.items(): - attr_instance = self._get_attributes().get(name, None) - if attr_instance: - attr_type = ATTR_TYPE_MAP[attr_instance.attr_type] - value = attr.get(attr_type, None) + for name, attr in self._get_attributes().items(): + value = attrs.get(attr.attr_name, None) + if value is not None: + value = value.get(ATTR_TYPE_MAP[attr.attr_type], None) if value is not None: - setattr(self, name, attr_instance.deserialize(value)) + value = attr.deserialize(value) + setattr(self, name, value) def _serialize(self, attr_map=False, null_check=True): """
Another crash caused by zope.interface Similar to #71 This one appear when using an index, the crash is in Model._get_indexes() ``` python Exception: AttributeError: __provides__ Exception class: AttributeError Exception message: __provides__ Backtrace: filename: "/xxx/local/lib/python2.7/site-packages/paste/deploy/config.py" line: 291 function: "__call__" text/code: "return self.app(environ, start_response)" filename: "/xxx/local/lib/python2.7/site-packages/wsgicors.py" line: 197 function: "__call__" text/code: "return self.application(environ, custom_start_response)" filename: "/xxx/local/lib/python2.7/site-packages/pyramid/router.py" line: 242 function: "__call__" text/code: "response = self.invoke_subrequest(request, use_tweens=True)" filename: "/xxx/local/lib/python2.7/site-packages/pyramid/router.py" line: 217 function: "invoke_subrequest" text/code: "response = handle_request(request)" filename: "/xxx/local/lib/python2.7/site-packages/pyramid_metrics/tween.py" line: 24 function: "performance_tween" text/code: "response = handler(request)" filename: "/xxx/local/lib/python2.7/site-packages/pyramid/tweens.py" line: 21 function: "excview_tween" text/code: "response = handler(request)" filename: "/xxx/local/lib/python2.7/site-packages/pyramid_exclog/__init__.py" line: 115 function: "exclog_tween" text/code: "_handle_error(request, getLogger, get_message)" filename: "/xxx/local/lib/python2.7/site-packages/pyramid_exclog/__init__.py" line: 111 function: "exclog_tween" text/code: "return handler(request)" filename: "/xxx/local/lib/python2.7/site-packages/pyramid/router.py" line: 163 function: "handle_request" text/code: "response = view_callable(context, request)" filename: "/xxx/local/lib/python2.7/site-packages/pyramid/config/views.py" line: 596 function: "__call__" text/code: "return view(context, request)" filename: "/xxx/local/lib/python2.7/site-packages/pyramid/config/views.py" line: 329 function: "attr_view" text/code: "return view(context, request)" 
filename: "/xxx/local/lib/python2.7/site-packages/pyramid/config/views.py" line: 305 function: "predicate_wrapper" text/code: "return view(context, request)" filename: "/xxx/local/lib/python2.7/site-packages/pyramid/config/views.py" line: 245 function: "_secured_view" text/code: "return view(context, request)" filename: "/xxx/local/lib/python2.7/site-packages/pyramid/config/views.py" line: 355 function: "rendered_view" text/code: "result = view(context, request)" filename: "/xxx/local/lib/python2.7/site-packages/pyramid/config/views.py" line: 491 function: "_class_view" text/code: "response = getattr(inst, attr)()" filename: "/xxx/local/lib/python2.7/site-packages/royal/views.py" line: 44 function: "index" text/code: "result = func(self.context.index_schema(query_params))" filename: "/xxx/local/lib/python2.7/site-packages/xxxx/resources.py" line: 286 function: "index" text/code: "devices = Device.get_devices_by_account(self.project_id, account_id)" filename: "/xxx/local/lib/python2.7/site-packages/xxxx/device.py" line: 67 function: "get_devices_by_account" text/code: "cls._get_devices_by_account(project_id, account_id)]" filename: "/xxx/local/lib/python2.7/site-packages/pynamodb/models.py" line: 521 function: "query" text/code: "cls._get_indexes()" filename: "/xxx/local/lib/python2.7/site-packages/pynamodb/models.py" line: 922 function: "_get_indexes" text/code: "item_cls = getattr(getattr(cls, item), "__class__", None)" ```
pynamodb/PynamoDB
diff --git a/pynamodb/tests/data.py b/pynamodb/tests/data.py index 3a76e22..973ac23 100644 --- a/pynamodb/tests/data.py +++ b/pynamodb/tests/data.py @@ -466,7 +466,7 @@ COMPLEX_MODEL_ITEM_DATA = { 'N': '31' }, 'is_dude': { - 'N': '1' + 'BOOL': True } } } @@ -826,6 +826,33 @@ SERIALIZED_TABLE_DATA = [ ] ] +COMPLEX_MODEL_SERIALIZED_TABLE_DATA = [ + [ + "123", + { + "attributes": { + 'weird_person': { + 'M': { + 'firstName': { + 'S': 'Justin' + }, + 'lname': { + 'S': 'Phillips' + }, + 'age': { + 'N': '31' + }, + 'is_dude': { + 'N': '1' + } + } + + } + } + } + ] +] + OFFICE_EMPLOYEE_MODEL_TABLE_DATA = { "Table": { "AttributeDefinitions": [ diff --git a/pynamodb/tests/test_model.py b/pynamodb/tests/test_model.py index 9870dbd..0e90d20 100644 --- a/pynamodb/tests/test_model.py +++ b/pynamodb/tests/test_model.py @@ -36,7 +36,7 @@ from pynamodb.tests.data import ( BATCH_GET_ITEMS, SIMPLE_BATCH_GET_ITEMS, COMPLEX_TABLE_DATA, COMPLEX_ITEM_DATA, INDEX_TABLE_DATA, LOCAL_INDEX_TABLE_DATA, DOG_TABLE_DATA, CUSTOM_ATTR_NAME_INDEX_TABLE_DATA, CUSTOM_ATTR_NAME_ITEM_DATA, - BINARY_ATTR_DATA, SERIALIZED_TABLE_DATA, OFFICE_EMPLOYEE_MODEL_TABLE_DATA, + BINARY_ATTR_DATA, SERIALIZED_TABLE_DATA, OFFICE_EMPLOYEE_MODEL_TABLE_DATA, COMPLEX_MODEL_SERIALIZED_TABLE_DATA, GET_OFFICE_EMPLOYEE_ITEM_DATA, GET_OFFICE_EMPLOYEE_ITEM_DATA_WITH_NULL, GROCERY_LIST_MODEL_TABLE_DATA, GET_GROCERY_LIST_ITEM_DATA, GET_OFFICE_ITEM_DATA, OFFICE_MODEL_TABLE_DATA, COMPLEX_MODEL_TABLE_DATA, COMPLEX_MODEL_ITEM_DATA, @@ -702,10 +702,12 @@ class ModelTestCase(TestCase): with patch(PATCH_METHOD) as req: req.return_value = GET_MODEL_ITEM_DATA + item.picture = b'to-be-removed' item.refresh() self.assertEqual( item.custom_user_name, GET_MODEL_ITEM_DATA.get(ITEM).get('user_name').get(STRING_SHORT)) + self.assertIsNone(item.picture) def test_complex_key(self): """ @@ -3732,6 +3734,20 @@ class ModelTestCase(TestCase): } self.assert_dict_lists_equal(req.call_args[0][1]['RequestItems']['UserModel'], args['UserModel']) + def 
test_loads_complex_model(self): + with patch(PATCH_METHOD) as req: + req.return_value = {} + ComplexModel.loads(json.dumps(COMPLEX_MODEL_SERIALIZED_TABLE_DATA)) + + args = { + 'ComplexModel': [ + { + 'PutRequest': COMPLEX_MODEL_ITEM_DATA + } + ] + } + self.assert_dict_lists_equal(req.call_args[0][1]['RequestItems']['ComplexModel'], args['ComplexModel']) + def _get_office_employee(self): justin = Person( fname='Justin', @@ -4193,8 +4209,8 @@ class ModelTestCase(TestCase): 'mapy': {'M': {'baz': {'S': 'bongo'}}} } } - instance = ExplicitRawMapModel(map_attr=map_native) - instance._deserialize(map_serialized) + instance = ExplicitRawMapModel() + instance._deserialize({'map_attr': map_serialized}) actual = instance.map_attr for k, v in six.iteritems(map_native): self.assertEqual(v, actual[k])
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 4 }
1.5
{ "env_vars": null, "env_yml_path": [], "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-mock" ], "pre_install": [], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 botocore==1.2.0 certifi==2021.5.30 coverage==6.2 docutils==0.18.1 importlib-metadata==4.8.3 iniconfig==1.1.1 jmespath==0.7.1 packaging==21.3 pluggy==1.0.0 py==1.11.0 -e git+https://github.com/pynamodb/PynamoDB.git@7a3cc8aba43b4cfe7630f5a4a199bddb8f8c7a86#egg=pynamodb pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 pytest-mock==3.6.1 python-dateutil==2.9.0.post0 six==1.9.0 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: PynamoDB channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - botocore==1.2.0 - coverage==6.2 - docutils==0.18.1 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - jmespath==0.7.1 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - pytest-mock==3.6.1 - python-dateutil==2.9.0.post0 - six==1.9.0 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/PynamoDB
[ "pynamodb/tests/test_model.py::ModelTestCase::test_loads_complex_model", "pynamodb/tests/test_model.py::ModelTestCase::test_refresh" ]
[]
[ "pynamodb/tests/test_model.py::ModelTestCase::test_batch_get", "pynamodb/tests/test_model.py::ModelTestCase::test_batch_write", "pynamodb/tests/test_model.py::ModelTestCase::test_batch_write_with_unprocessed", "pynamodb/tests/test_model.py::ModelTestCase::test_car_model_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_car_model_with_null_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_complex_key", "pynamodb/tests/test_model.py::ModelTestCase::test_complex_model_is_complex", "pynamodb/tests/test_model.py::ModelTestCase::test_complex_model_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_conditional_operator_map_attribute", "pynamodb/tests/test_model.py::ModelTestCase::test_count", "pynamodb/tests/test_model.py::ModelTestCase::test_count_no_hash_key", "pynamodb/tests/test_model.py::ModelTestCase::test_create_model", "pynamodb/tests/test_model.py::ModelTestCase::test_delete", "pynamodb/tests/test_model.py::ModelTestCase::test_delete_doesnt_do_validation_on_null_attributes", "pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_map_four_layers_deep_works", "pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_new_style_bool_false_works", "pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_new_style_bool_true_works", "pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_old_style_bool_false_works", "pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_old_style_bool_true_works", "pynamodb/tests/test_model.py::ModelTestCase::test_dumps", "pynamodb/tests/test_model.py::ModelTestCase::test_explicit_raw_map_serialize_pass", "pynamodb/tests/test_model.py::ModelTestCase::test_filter_count", "pynamodb/tests/test_model.py::ModelTestCase::test_get", "pynamodb/tests/test_model.py::ModelTestCase::test_global_index", "pynamodb/tests/test_model.py::ModelTestCase::test_index_count", "pynamodb/tests/test_model.py::ModelTestCase::test_index_multipage_count", 
"pynamodb/tests/test_model.py::ModelTestCase::test_index_queries", "pynamodb/tests/test_model.py::ModelTestCase::test_invalid_car_model_with_null_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_invalid_map_model_raises", "pynamodb/tests/test_model.py::ModelTestCase::test_list_of_map_works_like_list_of_map", "pynamodb/tests/test_model.py::ModelTestCase::test_list_works_like_list", "pynamodb/tests/test_model.py::ModelTestCase::test_loads", "pynamodb/tests/test_model.py::ModelTestCase::test_local_index", "pynamodb/tests/test_model.py::ModelTestCase::test_model_attrs", "pynamodb/tests/test_model.py::ModelTestCase::test_model_subclass_attributes_inherited_on_create", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_invalid_data_does_not_validate", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_list", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_list_of_map", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_list_of_map_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_list_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_maps", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_maps_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_maps_with_nulls_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_maps_with_pythonic_attributes", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_nulls_validates", "pynamodb/tests/test_model.py::ModelTestCase::test_model_works_like_model", "pynamodb/tests/test_model.py::ModelTestCase::test_multiple_indices_share_non_key_attribute", "pynamodb/tests/test_model.py::ModelTestCase::test_new_style_boolean_serializes_as_bool", "pynamodb/tests/test_model.py::ModelTestCase::test_old_style_boolean_serializes_as_bool", "pynamodb/tests/test_model.py::ModelTestCase::test_old_style_model_exception", 
"pynamodb/tests/test_model.py::ModelTestCase::test_overidden_defaults", "pynamodb/tests/test_model.py::ModelTestCase::test_overidden_session", "pynamodb/tests/test_model.py::ModelTestCase::test_overridden_attr_name", "pynamodb/tests/test_model.py::ModelTestCase::test_projections", "pynamodb/tests/test_model.py::ModelTestCase::test_query", "pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_greater_than_available_items_and_page_size", "pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_greater_than_available_items_multiple_page", "pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_greater_than_available_items_single_page", "pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_identical_to_available_items_single_page", "pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_less_than_available_and_page_size", "pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_less_than_available_items_multiple_page", "pynamodb/tests/test_model.py::ModelTestCase::test_rate_limited_scan", "pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_as_sub_map", "pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_as_sub_map_deserialize", "pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_as_sub_map_from_raw_data_works", "pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_as_sub_map_serialize_pass", "pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_deserializes", "pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_from_raw_data_works", "pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_serialize_fun_one", "pynamodb/tests/test_model.py::ModelTestCase::test_result_set_init", "pynamodb/tests/test_model.py::ModelTestCase::test_result_set_iter", "pynamodb/tests/test_model.py::ModelTestCase::test_save", "pynamodb/tests/test_model.py::ModelTestCase::test_scan", "pynamodb/tests/test_model.py::ModelTestCase::test_scan_limit", 
"pynamodb/tests/test_model.py::ModelTestCase::test_scan_limit_with_page_size", "pynamodb/tests/test_model.py::ModelTestCase::test_update", "pynamodb/tests/test_model.py::ModelTestCase::test_update_item", "pynamodb/tests/test_model.py::ModelInitTestCase::test_raw_map_attribute_with_dict_init", "pynamodb/tests/test_model.py::ModelInitTestCase::test_raw_map_attribute_with_initialized_instance_init", "pynamodb/tests/test_model.py::ModelInitTestCase::test_subclassed_map_attribute_with_dict_init", "pynamodb/tests/test_model.py::ModelInitTestCase::test_subclassed_map_attribute_with_initialized_instance_init", "pynamodb/tests/test_model.py::ModelInitTestCase::test_subclassed_map_attribute_with_map_attribute_member_with_initialized_instance_init", "pynamodb/tests/test_model.py::ModelInitTestCase::test_subclassed_map_attribute_with_map_attributes_member_with_dict_init" ]
[]
MIT License
1,799
[ "pynamodb/indexes.py", "pynamodb/compat.py", "pynamodb/attributes.py", "pynamodb/models.py" ]
[ "pynamodb/indexes.py", "pynamodb/compat.py", "pynamodb/attributes.py", "pynamodb/models.py" ]
matheuscas__pycpfcnpj-16
775aa198de297538d77fcaf2df7ef4d0e94efbd2
2017-10-24 03:25:23
2d0737ac0973d9029e5c4ac79548f4c1d8ce1ffa
diff --git a/README.md b/README.md index 2bb5252..9bbf26f 100644 --- a/README.md +++ b/README.md @@ -58,6 +58,19 @@ Expected output: >>> 49384063495 >>> 20788274885880 ``` + +And you also can generate CPF or CǸPJ with punctuation marks. :) + +```python +from pycpfcnpj import gen +gen.cpf_with_punctuation() +gen.cnpj_with_punctuation() + +Expected output: +>>> 048.891.866-97 +>>> 63.212.638/0361-35 +``` + Have fun! In portuguese: @@ -108,6 +121,19 @@ Expected output: >>> 20788274885880 ``` +E você também pode gerar CPF ou CNPJ com pontuação :) + +```python +from pycpfcnpj import gen +gen.cpf_with_punctuation() +gen.cnpj_with_punctuation() + +Expected output: +>>> 048.891.866-97 +>>> 63.212.638/0361-35 +``` + + Divirta-se! Changelog @@ -118,3 +144,6 @@ Changelog 1.2 - Use `sys` rather than `six` to check python's version and keeps this project 100% free of dependencies. + +1.3 +- Generate CPF and CNPJ numbers with punctuation marks. \ No newline at end of file diff --git a/pycpfcnpj/gen.py b/pycpfcnpj/gen.py index 4f86ffa..2b47d44 100644 --- a/pycpfcnpj/gen.py +++ b/pycpfcnpj/gen.py @@ -16,3 +16,13 @@ def cnpj(): while not cnpj_module.validate(cnpj_ramdom): cnpj_ramdom = ''.join(random.choice(string.digits) for i in range(14)) return cnpj_ramdom + + +def cpf_with_punctuation(): + cpf_ramdom = cpf() + return '{}.{}.{}-{}'.format(cpf_ramdom[:3], cpf_ramdom[3:6], cpf_ramdom[6:9], cpf_ramdom[9:]) + + +def cnpj_with_punctuation(): + cnpj_ramdom = cnpj() + return '{}.{}.{}/{}-{}'.format(cnpj_ramdom[:2], cnpj_ramdom[2:5], cnpj_ramdom[5:8], cnpj_ramdom[8:12], cnpj_ramdom[12:]) \ No newline at end of file
Generates numbers with punctuation marks After the release 1.1, that handles input numbers with punctuation marks, it would be also nice generate numbers with correct punctuation marks to each type of number, CPF and CNPJ.
matheuscas/pycpfcnpj
diff --git a/tests/gen_test.py b/tests/gen_test.py new file mode 100644 index 0000000..a2933a9 --- /dev/null +++ b/tests/gen_test.py @@ -0,0 +1,27 @@ +import unittest +from pycpfcnpj import gen, cpf, cnpj + +class GenerateCPFTest(unittest.TestCase): + """docstring for GenerateCPFTest""" + def setUp(self): + self.masked_valid_cpf = gen.cpf_with_punctuation() + + def test_validate_masked_cnpj_true(self): + self.assertTrue(cpf.validate(self.masked_valid_cpf)) + + def test_valif_cpf_without_mask_true(self): + cpf_result =(self.masked_valid_cpf.replace(".","")).replace("-","") + self.assertTrue(cpf.validate(cpf_result)) + + +class GenerateCNPJTest(unittest.TestCase): + """docstring for GenerateCNPJTest""" + def setUp(self): + self.masked_valid_cnpj = gen.cnpj_with_punctuation() + + def test_validate_masked_cnpj_true(self): + self.assertTrue(cnpj.validate(self.masked_valid_cnpj)) + + def test_valid_cnpj_without_mask_true(self): + cnpj_result =(self.masked_valid_cnpj.replace(".","")).replace("-","") + self.assertTrue(cnpj.validate(cnpj_result)) \ No newline at end of file
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 2 }
1.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "nose", "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work nose==1.3.7 packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work -e git+https://github.com/matheuscas/pycpfcnpj.git@775aa198de297538d77fcaf2df7ef4d0e94efbd2#egg=pycpfcnpj pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: pycpfcnpj channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - nose==1.3.7 prefix: /opt/conda/envs/pycpfcnpj
[ "tests/gen_test.py::GenerateCPFTest::test_validate_masked_cnpj_true", "tests/gen_test.py::GenerateCPFTest::test_valif_cpf_without_mask_true", "tests/gen_test.py::GenerateCNPJTest::test_valid_cnpj_without_mask_true", "tests/gen_test.py::GenerateCNPJTest::test_validate_masked_cnpj_true" ]
[]
[]
[]
MIT License
1,800
[ "README.md", "pycpfcnpj/gen.py" ]
[ "README.md", "pycpfcnpj/gen.py" ]
pynamodb__PynamoDB-386
7a3cc8aba43b4cfe7630f5a4a199bddb8f8c7a86
2017-10-24 05:57:27
7a3cc8aba43b4cfe7630f5a4a199bddb8f8c7a86
diff --git a/pynamodb/models.py b/pynamodb/models.py index cacb817..19ca4f9 100644 --- a/pynamodb/models.py +++ b/pynamodb/models.py @@ -894,6 +894,9 @@ class Model(AttributeContainer): Reconstructs a model object from JSON. """ hash_key, attrs = data + hash_keyname = cls._get_meta_data().hash_keyname + hash_key_attr = cls._get_attributes().get(cls._dynamo_to_python_attr(hash_keyname)) + hash_key = hash_key_attr.deserialize(hash_key) range_key = attrs.pop('range_key', None) attributes = attrs.pop(pythonic(ATTRIBUTES)) if range_key is not None:
Deserialize hash key while loading from json While trying to load a table from a json dump, https://github.com/jlafon/PynamoDB/blob/devel/pynamodb/models.py#L764 does not deserialize the hash key. Hence, we run into an exception if the hash key is anything other than a UnicodeAttribute. For example, the following will lead to an exception. ``` from pynamodb.models import Model from pynamodb.attributes import ( UnicodeAttribute, NumberAttribute ) class Thread(Model): class Meta: table_name = 'Thread' forum_name = NumberAttribute(hash_key=True) subject = UnicodeAttribute(range_key=True) views = NumberAttribute(default=0) # After the table has been populated Thread.dump("thread_backup.json") Thread.load("thread_backup.json") ```
pynamodb/PynamoDB
diff --git a/pynamodb/tests/data.py b/pynamodb/tests/data.py index 3a76e22..5f624cd 100644 --- a/pynamodb/tests/data.py +++ b/pynamodb/tests/data.py @@ -466,7 +466,7 @@ COMPLEX_MODEL_ITEM_DATA = { 'N': '31' }, 'is_dude': { - 'N': '1' + 'BOOL': True } } } @@ -826,6 +826,33 @@ SERIALIZED_TABLE_DATA = [ ] ] +COMPLEX_MODEL_SERIALIZED_TABLE_DATA = [ + [ + "123", + { + "attributes": { + 'person': { # TODO(jpinner) fix deserialize lookup to use attr_name + 'M': { + 'firstName': { + 'S': 'Justin' + }, + 'lname': { + 'S': 'Phillips' + }, + 'age': { + 'N': '31' + }, + 'is_dude': { + 'N': '1' + } + } + + } + } + } + ] +] + OFFICE_EMPLOYEE_MODEL_TABLE_DATA = { "Table": { "AttributeDefinitions": [ diff --git a/pynamodb/tests/test_model.py b/pynamodb/tests/test_model.py index 9870dbd..0f15b96 100644 --- a/pynamodb/tests/test_model.py +++ b/pynamodb/tests/test_model.py @@ -36,7 +36,7 @@ from pynamodb.tests.data import ( BATCH_GET_ITEMS, SIMPLE_BATCH_GET_ITEMS, COMPLEX_TABLE_DATA, COMPLEX_ITEM_DATA, INDEX_TABLE_DATA, LOCAL_INDEX_TABLE_DATA, DOG_TABLE_DATA, CUSTOM_ATTR_NAME_INDEX_TABLE_DATA, CUSTOM_ATTR_NAME_ITEM_DATA, - BINARY_ATTR_DATA, SERIALIZED_TABLE_DATA, OFFICE_EMPLOYEE_MODEL_TABLE_DATA, + BINARY_ATTR_DATA, SERIALIZED_TABLE_DATA, OFFICE_EMPLOYEE_MODEL_TABLE_DATA, COMPLEX_MODEL_SERIALIZED_TABLE_DATA, GET_OFFICE_EMPLOYEE_ITEM_DATA, GET_OFFICE_EMPLOYEE_ITEM_DATA_WITH_NULL, GROCERY_LIST_MODEL_TABLE_DATA, GET_GROCERY_LIST_ITEM_DATA, GET_OFFICE_ITEM_DATA, OFFICE_MODEL_TABLE_DATA, COMPLEX_MODEL_TABLE_DATA, COMPLEX_MODEL_ITEM_DATA, @@ -3732,6 +3732,20 @@ class ModelTestCase(TestCase): } self.assert_dict_lists_equal(req.call_args[0][1]['RequestItems']['UserModel'], args['UserModel']) + def test_loads_complex_model(self): + with patch(PATCH_METHOD) as req: + req.return_value = {} + ComplexModel.loads(json.dumps(COMPLEX_MODEL_SERIALIZED_TABLE_DATA)) + + args = { + 'ComplexModel': [ + { + 'PutRequest': COMPLEX_MODEL_ITEM_DATA + } + ] + } + 
self.assert_dict_lists_equal(req.call_args[0][1]['RequestItems']['ComplexModel'], args['ComplexModel']) + def _get_office_employee(self): justin = Person( fname='Justin',
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 3 }, "num_modified_files": 1 }
1.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest pytest-cov pytest-mock", "pytest" ], "pre_install": [], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 botocore==1.2.0 certifi==2021.5.30 coverage==6.2 docutils==0.18.1 importlib-metadata==4.8.3 iniconfig==1.1.1 jmespath==0.7.1 packaging==21.3 pluggy==1.0.0 py==1.11.0 -e git+https://github.com/pynamodb/PynamoDB.git@7a3cc8aba43b4cfe7630f5a4a199bddb8f8c7a86#egg=pynamodb pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 pytest-mock==3.6.1 python-dateutil==2.9.0.post0 six==1.9.0 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: PynamoDB channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - botocore==1.2.0 - coverage==6.2 - docutils==0.18.1 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - jmespath==0.7.1 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - pytest-mock==3.6.1 - python-dateutil==2.9.0.post0 - six==1.9.0 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/PynamoDB
[ "pynamodb/tests/test_model.py::ModelTestCase::test_loads_complex_model" ]
[]
[ "pynamodb/tests/test_model.py::ModelTestCase::test_batch_get", "pynamodb/tests/test_model.py::ModelTestCase::test_batch_write", "pynamodb/tests/test_model.py::ModelTestCase::test_batch_write_with_unprocessed", "pynamodb/tests/test_model.py::ModelTestCase::test_car_model_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_car_model_with_null_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_complex_key", "pynamodb/tests/test_model.py::ModelTestCase::test_complex_model_is_complex", "pynamodb/tests/test_model.py::ModelTestCase::test_complex_model_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_conditional_operator_map_attribute", "pynamodb/tests/test_model.py::ModelTestCase::test_count", "pynamodb/tests/test_model.py::ModelTestCase::test_count_no_hash_key", "pynamodb/tests/test_model.py::ModelTestCase::test_create_model", "pynamodb/tests/test_model.py::ModelTestCase::test_delete", "pynamodb/tests/test_model.py::ModelTestCase::test_delete_doesnt_do_validation_on_null_attributes", "pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_map_four_layers_deep_works", "pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_new_style_bool_false_works", "pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_new_style_bool_true_works", "pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_old_style_bool_false_works", "pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_old_style_bool_true_works", "pynamodb/tests/test_model.py::ModelTestCase::test_dumps", "pynamodb/tests/test_model.py::ModelTestCase::test_explicit_raw_map_serialize_pass", "pynamodb/tests/test_model.py::ModelTestCase::test_filter_count", "pynamodb/tests/test_model.py::ModelTestCase::test_get", "pynamodb/tests/test_model.py::ModelTestCase::test_global_index", "pynamodb/tests/test_model.py::ModelTestCase::test_index_count", "pynamodb/tests/test_model.py::ModelTestCase::test_index_multipage_count", 
"pynamodb/tests/test_model.py::ModelTestCase::test_index_queries", "pynamodb/tests/test_model.py::ModelTestCase::test_invalid_car_model_with_null_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_invalid_map_model_raises", "pynamodb/tests/test_model.py::ModelTestCase::test_list_of_map_works_like_list_of_map", "pynamodb/tests/test_model.py::ModelTestCase::test_list_works_like_list", "pynamodb/tests/test_model.py::ModelTestCase::test_loads", "pynamodb/tests/test_model.py::ModelTestCase::test_local_index", "pynamodb/tests/test_model.py::ModelTestCase::test_model_attrs", "pynamodb/tests/test_model.py::ModelTestCase::test_model_subclass_attributes_inherited_on_create", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_invalid_data_does_not_validate", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_list", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_list_of_map", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_list_of_map_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_list_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_maps", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_maps_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_maps_with_nulls_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_maps_with_pythonic_attributes", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_nulls_validates", "pynamodb/tests/test_model.py::ModelTestCase::test_model_works_like_model", "pynamodb/tests/test_model.py::ModelTestCase::test_multiple_indices_share_non_key_attribute", "pynamodb/tests/test_model.py::ModelTestCase::test_new_style_boolean_serializes_as_bool", "pynamodb/tests/test_model.py::ModelTestCase::test_old_style_boolean_serializes_as_bool", "pynamodb/tests/test_model.py::ModelTestCase::test_old_style_model_exception", 
"pynamodb/tests/test_model.py::ModelTestCase::test_overidden_defaults", "pynamodb/tests/test_model.py::ModelTestCase::test_overidden_session", "pynamodb/tests/test_model.py::ModelTestCase::test_overridden_attr_name", "pynamodb/tests/test_model.py::ModelTestCase::test_projections", "pynamodb/tests/test_model.py::ModelTestCase::test_query", "pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_greater_than_available_items_and_page_size", "pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_greater_than_available_items_multiple_page", "pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_greater_than_available_items_single_page", "pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_identical_to_available_items_single_page", "pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_less_than_available_and_page_size", "pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_less_than_available_items_multiple_page", "pynamodb/tests/test_model.py::ModelTestCase::test_rate_limited_scan", "pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_as_sub_map", "pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_as_sub_map_deserialize", "pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_as_sub_map_from_raw_data_works", "pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_as_sub_map_serialize_pass", "pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_deserializes", "pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_from_raw_data_works", "pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_serialize_fun_one", "pynamodb/tests/test_model.py::ModelTestCase::test_refresh", "pynamodb/tests/test_model.py::ModelTestCase::test_result_set_init", "pynamodb/tests/test_model.py::ModelTestCase::test_result_set_iter", "pynamodb/tests/test_model.py::ModelTestCase::test_save", "pynamodb/tests/test_model.py::ModelTestCase::test_scan", "pynamodb/tests/test_model.py::ModelTestCase::test_scan_limit", 
"pynamodb/tests/test_model.py::ModelTestCase::test_scan_limit_with_page_size", "pynamodb/tests/test_model.py::ModelTestCase::test_update", "pynamodb/tests/test_model.py::ModelTestCase::test_update_item", "pynamodb/tests/test_model.py::ModelInitTestCase::test_raw_map_attribute_with_dict_init", "pynamodb/tests/test_model.py::ModelInitTestCase::test_raw_map_attribute_with_initialized_instance_init", "pynamodb/tests/test_model.py::ModelInitTestCase::test_subclassed_map_attribute_with_dict_init", "pynamodb/tests/test_model.py::ModelInitTestCase::test_subclassed_map_attribute_with_initialized_instance_init", "pynamodb/tests/test_model.py::ModelInitTestCase::test_subclassed_map_attribute_with_map_attribute_member_with_initialized_instance_init", "pynamodb/tests/test_model.py::ModelInitTestCase::test_subclassed_map_attribute_with_map_attributes_member_with_dict_init" ]
[]
MIT License
1,801
[ "pynamodb/models.py" ]
[ "pynamodb/models.py" ]
viraptor__phply-37
4a21aa038611ab6ebd0bf8ea5d3ee97e624339d3
2017-10-24 11:18:45
4a21aa038611ab6ebd0bf8ea5d3ee97e624339d3
diff --git a/phply/phpparse.py b/phply/phpparse.py index 69d012e..12dc62b 100644 --- a/phply/phpparse.py +++ b/phply/phpparse.py @@ -1569,8 +1569,17 @@ def p_encaps_list_string(p): if p[1] == '': p[0] = process_php_string_escapes(p[2]) else: - p[0] = ast.BinaryOp('.', p[1], process_php_string_escapes(p[2]), - lineno=p.lineno(2)) + if isinstance(p[1], string_type): + # if it's only a string so far, just append the contents + p[0] = p[1] + process_php_string_escapes(p[2]) + elif isinstance(p[1], ast.BinaryOp) and isinstance(p[1].right, string_type): + # if the last right leaf is a string, extend previous binop + p[0] = ast.BinaryOp('.', p[1].left, p[1].right + process_php_string_escapes(p[2]), + lineno=p[1].lineno) + else: + # worst case - insert a binaryop + p[0] = ast.BinaryOp('.', p[1], process_php_string_escapes(p[2]), + lineno=p.lineno(2)) def p_encaps_var(p): 'encaps_var : VARIABLE'
Issue with parsing HERE_DOC syntax I've got a PHP file that has a big heredoc block in it. When I attempt to parse it, I get a recursion limit error: ``` Traceback (most recent call last): File "php2json.py", line 31, in <module> output, indent=2) File "/Library/Python/2.7/site-packages/simplejson/__init__.py", line 276, in dump for chunk in iterable: File "/Library/Python/2.7/site-packages/simplejson/encoder.py", line 665, in _iterencode for chunk in _iterencode_list(o, _current_indent_level): File "/Library/Python/2.7/site-packages/simplejson/encoder.py", line 515, in _iterencode_list for chunk in chunks: File "/Library/Python/2.7/site-packages/simplejson/encoder.py", line 515, in _iterencode_list for chunk in chunks: File "/Library/Python/2.7/site-packages/simplejson/encoder.py", line 634, in _iterencode_dict for chunk in chunks: File "/Library/Python/2.7/site-packages/simplejson/encoder.py", line 515, in _iterencode_list for chunk in chunks: File "/Library/Python/2.7/site-packages/simplejson/encoder.py", line 515, in _iterencode_list for chunk in chunks: File "/Library/Python/2.7/site-packages/simplejson/encoder.py", line 634, in _iterencode_dict for chunk in chunks: File "/Library/Python/2.7/site-packages/simplejson/encoder.py", line 515, in _iterencode_list for chunk in chunks: File "/Library/Python/2.7/site-packages/simplejson/encoder.py", line 515, in _iterencode_list for chunk in chunks: File "/Library/Python/2.7/site-packages/simplejson/encoder.py", line 634, in _iterencode_dict for chunk in chunks: ``` And it goes on like that for hundreds or thousands of lines. I'm not sure what structure the data was parsed into, but it appears to be a loop of some sort.
viraptor/phply
diff --git a/tests/test_parser.py b/tests/test_parser.py index 0ecb9a8..62bf2cc 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -235,18 +235,12 @@ EOT; BinaryOp('.', BinaryOp('.', BinaryOp('.', - BinaryOp('.', - BinaryOp('.', - BinaryOp('.', - 'This', - ' is a "'), - Variable('$heredoc')), - '" with some '), - ObjectProperty(Variable('$embedded'), - 'variables')), - '.\n'), - 'This'), - ' is not the EOT; this is:')]), + 'This is a "', + Variable('$heredoc')), + '" with some '), + ObjectProperty(Variable('$embedded'), + 'variables')), + '.\nThis is not the EOT; this is:')]), ] eq_ast(input, expected) if sys.version_info[0] < 3:
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 1 }
1.1
{ "env_vars": null, "env_yml_path": [], "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "", "pip_packages": [ "nose", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 importlib-metadata==4.8.3 iniconfig==1.1.1 nose==1.3.7 packaging==21.3 -e git+https://github.com/viraptor/phply.git@4a21aa038611ab6ebd0bf8ea5d3ee97e624339d3#egg=phply pluggy==1.0.0 ply==3.11 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: phply channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - nose==1.3.7 - packaging==21.3 - pluggy==1.0.0 - ply==3.11 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/phply
[ "tests/test_parser.py::test_heredoc" ]
[]
[ "tests/test_parser.py::test_inline_html", "tests/test_parser.py::test_echo", "tests/test_parser.py::test_open_tag_with_echo", "tests/test_parser.py::test_exit", "tests/test_parser.py::test_isset", "tests/test_parser.py::test_namespace_names", "tests/test_parser.py::test_unary_ops", "tests/test_parser.py::test_assignment_ops", "tests/test_parser.py::test_object_properties", "tests/test_parser.py::test_string_unescape", "tests/test_parser.py::test_string_offset_lookups", "tests/test_parser.py::test_string_curly_dollar_expressions", "tests/test_parser.py::test_function_calls", "tests/test_parser.py::test_method_calls", "tests/test_parser.py::test_if", "tests/test_parser.py::test_foreach", "tests/test_parser.py::test_foreach_with_lists", "tests/test_parser.py::test_global_variables", "tests/test_parser.py::test_variable_variables", "tests/test_parser.py::test_classes", "tests/test_parser.py::test_new", "tests/test_parser.py::test_exceptions", "tests/test_parser.py::test_catch_finally", "tests/test_parser.py::test_just_finally", "tests/test_parser.py::test_declare", "tests/test_parser.py::test_instanceof", "tests/test_parser.py::test_static_members", "tests/test_parser.py::test_casts", "tests/test_parser.py::test_namespaces", "tests/test_parser.py::test_use_declarations", "tests/test_parser.py::test_constant_declarations", "tests/test_parser.py::test_closures", "tests/test_parser.py::test_magic_constants", "tests/test_parser.py::test_type_hinting", "tests/test_parser.py::test_static_scalar_class_constants", "tests/test_parser.py::test_backtick_shell_exec", "tests/test_parser.py::test_open_close_tags_ignore", "tests/test_parser.py::test_ternary", "tests/test_parser.py::test_array_dereferencing", "tests/test_parser.py::test_array_literal", "tests/test_parser.py::test_array_in_default_arg", "tests/test_parser.py::test_const_heredoc", "tests/test_parser.py::test_object_property_on_expr", "tests/test_parser.py::test_binary_string", 
"tests/test_parser.py::test_class_trait_use", "tests/test_parser.py::test_trait", "tests/test_parser.py::test_trait_renames", "tests/test_parser.py::test_class_name_as_string", "tests/test_parser.py::test_static_expressions", "tests/test_parser.py::test_const_arrays", "tests/test_parser.py::test_numbers", "tests/test_parser.py::test_result_multiple_offsets", "tests/test_parser.py::test_yield", "tests/test_parser.py::test_static_property_dynamic_access", "tests/test_parser.py::test_static_property_dynamic_call", "tests/test_parser.py::test_nowdoc", "tests/test_parser.py::test_exit_loc" ]
[]
BSD
1,802
[ "phply/phpparse.py" ]
[ "phply/phpparse.py" ]
EMCECS__python-ecsclient-48
671ad67baabb986cc33409ea5b41411b87013fcf
2017-10-24 12:28:09
40a22c4eb202e60d1c8fe6a7f925f4783b0d6a82
coveralls: [![Coverage Status](https://coveralls.io/builds/13861017/badge)](https://coveralls.io/builds/13861017) Coverage increased (+0.07%) to 57.514% when pulling **d6471385ce6b8908ac9dac4950bfd0831f64a106 on ecs-3_1-support** into **671ad67baabb986cc33409ea5b41411b87013fcf on master**.
diff --git a/VERSION b/VERSION index e25d8d9..0664a8f 100644 --- a/VERSION +++ b/VERSION @@ -1,1 +1,1 @@ -1.1.5 +1.1.6 diff --git a/ecsclient/common/provisioning/data_store.py b/ecsclient/common/provisioning/data_store.py index 69f0402..6968d2b 100644 --- a/ecsclient/common/provisioning/data_store.py +++ b/ecsclient/common/provisioning/data_store.py @@ -200,7 +200,7 @@ class DataStore(object): :param name: User provided name (not verified or unique) :param description: User provided description (not verified or unique) - :param node_id: IP address for the commodity node + :param node_id: ID of the commodity node :param storage_pool_id: Desired storage pool ID for creating data store :returns a task object """ diff --git a/ecsclient/common/provisioning/node.py b/ecsclient/common/provisioning/node.py index 2af37ec..6ab144a 100644 --- a/ecsclient/common/provisioning/node.py +++ b/ecsclient/common/provisioning/node.py @@ -19,9 +19,9 @@ class Node(object): """ self.conn = connection - def get_nodes(self): + def list(self): """ - Gets the data nodes that are currently configured in the cluster. + Gets a list of the data nodes that are currently configured in the cluster. Required role(s): diff --git a/ecsclient/schemas.py b/ecsclient/schemas.py index 4a9c003..8a763c3 100644 --- a/ecsclient/schemas.py +++ b/ecsclient/schemas.py @@ -796,3 +796,38 @@ VDC_KEYSTORE = { "chain" ] } + +NODE = { + "type": "object", + "properties": { + "rackId": {"type": "string"}, + "version": {"type": "string"}, + "nodeid": {"type": "string"}, + "isLocal": {"type": "boolean"}, + "nodename": {"type": "string"}, + "ip": {"type": "string"}, + }, + "required": [ + "rackId", + "version", + "nodeid", + "isLocal", + "nodename", + "ip" + ] +} + +NODE_LIST = { + "type": "object", + "properties": { + "node": { + "type": "array", + "items": NODE, + "minItems": 1, + "uniqueItems": True + } + }, + "required": [ + "node" + ] +}
Add support for the Nodes endpoint Implement the Node endpoint for versions 2.x, 3.0, and 3.1.
EMCECS/python-ecsclient
diff --git a/tests/functional/test_node.py b/tests/functional/test_node.py new file mode 100644 index 0000000..95a6492 --- /dev/null +++ b/tests/functional/test_node.py @@ -0,0 +1,9 @@ +from ecsclient import schemas +from tests import functional + + +class TestNode(functional.BaseTestCase): + + def test_node_list(self): + response = self.client.node.list() + self.assertValidSchema(response, schemas.NODE_LIST) diff --git a/tests/unit/test_node.py b/tests/unit/test_node.py index 5c01719..541e22f 100644 --- a/tests/unit/test_node.py +++ b/tests/unit/test_node.py @@ -2,7 +2,6 @@ import testtools from requests_mock.contrib import fixture from mock import MagicMock from mock import mock -from mock import patch from six.moves import http_client from ecsclient.client import Client from ecsclient.common.exceptions import ECSClientException @@ -46,14 +45,14 @@ class TestNode(testtools.TestCase): self.requests_mock = self.useFixture(fixture.Fixture()) @mock.patch('ecsclient.common.token_request.TokenRequest.get_token') - def test_get_nodes_throw_exception(self, mock_get_token): + def test_list_nodes_throw_exception(self, mock_get_token): self.requests_mock.register_uri('GET', 'https://127.0.0.1:4443/vdc/nodes', status_code=http_client.INTERNAL_SERVER_ERROR, text='Server Error') mock_get_token.return_value = 'FAKE-TOKEN-123' with super(testtools.TestCase, self).assertRaises(ECSClientException) as error: - self.client.node.get_nodes() + self.client.node.list() exception = error.exception self.assertEqual(self.requests_mock.last_request.method, 'GET') @@ -63,13 +62,13 @@ class TestNode(testtools.TestCase): self.assertEqual(exception.http_status, http_client.INTERNAL_SERVER_ERROR) @mock.patch('ecsclient.common.token_request.TokenRequest.get_token') - def test_get_nodes(self, mock_get_token): + def test_list_nodes(self, mock_get_token): mock_get_token.return_value = 'FAKE-TOKEN-123' self.requests_mock.register_uri('GET', 'https://127.0.0.1:4443/vdc/nodes', 
status_code=http_client.OK, json=self.returned_json) - response = self.client.node.get_nodes() + response = self.client.node.list() self.assertEqual(self.requests_mock.last_request.method, 'GET') self.assertEqual(self.requests_mock.last_request.url, 'https://127.0.0.1:4443/vdc/nodes')
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 4 }
1.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "flake8==3.2.1", "mock==2.0.0", "nose==1.3.7", "coverage==4.3.4", "jsonschema==2.6.0", "tox==2.6.0", "testtools==2.2.0", "requests-mock[fixture]==1.3.0", "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 charset-normalizer==2.0.12 coverage==4.3.4 distlib==0.3.9 extras==1.0.0 filelock==3.4.1 fixtures==4.0.1 flake8==3.2.1 idna==3.10 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 jsonschema==2.6.0 linecache2==1.0.0 mccabe==0.5.3 mock==2.0.0 nose==1.3.7 packaging==21.3 pbr==6.1.1 platformdirs==2.4.0 pluggy==0.13.1 py==1.11.0 pycodestyle==2.2.0 pyflakes==1.3.0 pyparsing==3.1.4 pytest==7.0.1 -e git+https://github.com/EMCECS/python-ecsclient.git@671ad67baabb986cc33409ea5b41411b87013fcf#egg=python_ecsclient python-mimeparse==1.6.0 requests==2.27.1 requests-mock==1.3.0 six==1.17.0 testtools==2.2.0 tomli==1.2.3 tox==2.6.0 traceback2==1.4.0 typing_extensions==4.1.1 unittest2==1.1.0 urllib3==1.26.20 virtualenv==20.17.1 zipp==3.6.0
name: python-ecsclient channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - argparse==1.4.0 - attrs==22.2.0 - charset-normalizer==2.0.12 - coverage==4.3.4 - distlib==0.3.9 - extras==1.0.0 - filelock==3.4.1 - fixtures==4.0.1 - flake8==3.2.1 - idna==3.10 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - jsonschema==2.6.0 - linecache2==1.0.0 - mccabe==0.5.3 - mock==2.0.0 - nose==1.3.7 - packaging==21.3 - pbr==6.1.1 - platformdirs==2.4.0 - pluggy==0.13.1 - py==1.11.0 - pycodestyle==2.2.0 - pyflakes==1.3.0 - pyparsing==3.1.4 - pytest==7.0.1 - python-mimeparse==1.6.0 - requests==2.27.1 - requests-mock==1.3.0 - six==1.17.0 - testtools==2.2.0 - tomli==1.2.3 - tox==2.6.0 - traceback2==1.4.0 - typing-extensions==4.1.1 - unittest2==1.1.0 - urllib3==1.26.20 - virtualenv==20.17.1 - zipp==3.6.0 prefix: /opt/conda/envs/python-ecsclient
[ "tests/unit/test_node.py::TestNode::test_list_nodes", "tests/unit/test_node.py::TestNode::test_list_nodes_throw_exception" ]
[]
[]
[]
Apache License 2.0
1,803
[ "VERSION", "ecsclient/common/provisioning/data_store.py", "ecsclient/schemas.py", "ecsclient/common/provisioning/node.py" ]
[ "VERSION", "ecsclient/common/provisioning/data_store.py", "ecsclient/schemas.py", "ecsclient/common/provisioning/node.py" ]
pynamodb__PynamoDB-388
f04c682242ef0b55feaa45af5048362942c898a2
2017-10-24 18:44:11
f04c682242ef0b55feaa45af5048362942c898a2
diff --git a/pynamodb/models.py b/pynamodb/models.py index a44d9e0..5e77a78 100644 --- a/pynamodb/models.py +++ b/pynamodb/models.py @@ -894,18 +894,20 @@ class Model(AttributeContainer): Reconstructs a model object from JSON. """ hash_key, attrs = data - hash_keyname = cls._get_meta_data().hash_keyname - hash_key_attr = cls._get_attributes().get(cls._dynamo_to_python_attr(hash_keyname)) - hash_key = hash_key_attr.deserialize(hash_key) range_key = attrs.pop('range_key', None) attributes = attrs.pop(pythonic(ATTRIBUTES)) + hash_keyname = cls._get_meta_data().hash_keyname + hash_keytype = cls._get_meta_data().get_attribute_type(hash_keyname) + attributes[hash_keyname] = { + hash_keytype: hash_key + } if range_key is not None: range_keyname = cls._get_meta_data().range_keyname range_keytype = cls._get_meta_data().get_attribute_type(range_keyname) attributes[range_keyname] = { range_keytype: range_key } - item = cls(hash_key) + item = cls() item._deserialize(attributes) return item @@ -1260,14 +1262,13 @@ class Model(AttributeContainer): :param attrs: A dictionary of attributes to update this item with. """ - for name, attr in attrs.items(): - attr_name = self._dynamo_to_python_attr(name) - attr_instance = self._get_attributes().get(attr_name, None) - if attr_instance: - attr_type = ATTR_TYPE_MAP[attr_instance.attr_type] - value = attr.get(attr_type, None) + for name, attr in self._get_attributes().items(): + value = attrs.get(attr.attr_name, None) + if value is not None: + value = value.get(ATTR_TYPE_MAP[attr.attr_type], None) if value is not None: - setattr(self, attr_name, attr_instance.deserialize(value)) + value = attr.deserialize(value) + setattr(self, name, value) def _serialize(self, attr_map=False, null_check=True): """
Model.refresh() doesn't respect attribute deletion Let's consider this situation. I have a model, like this: ``` class User(Model): name = UnicodeAttribute(hash_key=True) subscriptions = UnicodeSetAttribute() ``` Now, let's there is a user with just one subscription: ``` user = User(name='john', subscriptions=set(['news'])) user.save() ``` Then, in some other place of the program, we unsubscribe user from `news`: ``` u = User.get('john') u.subscriptions.discard('news') u.save() ``` And finally, in the initial object, we request a refresh: ``` user.refresh() ``` **What happens:** `user` object has stale value of `subscriptions` field **Expected behaviour:** Disappeared value should be noticed, and `user.subscriptions` should be set to `None`. **How to fix:** The problem seems to be in `Model._deserialize` method. It walks over received data object and then for each field found in the model it updates that field. What would probably be better is to walk over model fields and update them from incoming data, or set them to `None` if not found.
pynamodb/PynamoDB
diff --git a/pynamodb/tests/test_model.py b/pynamodb/tests/test_model.py index 0f15b96..0e90d20 100644 --- a/pynamodb/tests/test_model.py +++ b/pynamodb/tests/test_model.py @@ -702,10 +702,12 @@ class ModelTestCase(TestCase): with patch(PATCH_METHOD) as req: req.return_value = GET_MODEL_ITEM_DATA + item.picture = b'to-be-removed' item.refresh() self.assertEqual( item.custom_user_name, GET_MODEL_ITEM_DATA.get(ITEM).get('user_name').get(STRING_SHORT)) + self.assertIsNone(item.picture) def test_complex_key(self): """ @@ -4207,8 +4209,8 @@ class ModelTestCase(TestCase): 'mapy': {'M': {'baz': {'S': 'bongo'}}} } } - instance = ExplicitRawMapModel(map_attr=map_native) - instance._deserialize(map_serialized) + instance = ExplicitRawMapModel() + instance._deserialize({'map_attr': map_serialized}) actual = instance.map_attr for k, v in six.iteritems(map_native): self.assertEqual(v, actual[k])
{ "commit_name": "merge_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
2.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 botocore==1.2.0 certifi==2021.5.30 docutils==0.18.1 importlib-metadata==4.8.3 iniconfig==1.1.1 jmespath==0.7.1 packaging==21.3 pluggy==1.0.0 py==1.11.0 -e git+https://github.com/pynamodb/PynamoDB.git@f04c682242ef0b55feaa45af5048362942c898a2#egg=pynamodb pyparsing==3.1.4 pytest==7.0.1 python-dateutil==2.9.0.post0 six==1.9.0 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: PynamoDB channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - botocore==1.2.0 - docutils==0.18.1 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - jmespath==0.7.1 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - python-dateutil==2.9.0.post0 - six==1.9.0 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/PynamoDB
[ "pynamodb/tests/test_model.py::ModelTestCase::test_refresh" ]
[]
[ "pynamodb/tests/test_model.py::ModelTestCase::test_batch_get", "pynamodb/tests/test_model.py::ModelTestCase::test_batch_write", "pynamodb/tests/test_model.py::ModelTestCase::test_batch_write_with_unprocessed", "pynamodb/tests/test_model.py::ModelTestCase::test_car_model_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_car_model_with_null_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_complex_key", "pynamodb/tests/test_model.py::ModelTestCase::test_complex_model_is_complex", "pynamodb/tests/test_model.py::ModelTestCase::test_complex_model_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_conditional_operator_map_attribute", "pynamodb/tests/test_model.py::ModelTestCase::test_count", "pynamodb/tests/test_model.py::ModelTestCase::test_count_no_hash_key", "pynamodb/tests/test_model.py::ModelTestCase::test_create_model", "pynamodb/tests/test_model.py::ModelTestCase::test_delete", "pynamodb/tests/test_model.py::ModelTestCase::test_delete_doesnt_do_validation_on_null_attributes", "pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_map_four_layers_deep_works", "pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_new_style_bool_false_works", "pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_new_style_bool_true_works", "pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_old_style_bool_false_works", "pynamodb/tests/test_model.py::ModelTestCase::test_deserializing_old_style_bool_true_works", "pynamodb/tests/test_model.py::ModelTestCase::test_dumps", "pynamodb/tests/test_model.py::ModelTestCase::test_explicit_raw_map_serialize_pass", "pynamodb/tests/test_model.py::ModelTestCase::test_filter_count", "pynamodb/tests/test_model.py::ModelTestCase::test_get", "pynamodb/tests/test_model.py::ModelTestCase::test_global_index", "pynamodb/tests/test_model.py::ModelTestCase::test_index_count", "pynamodb/tests/test_model.py::ModelTestCase::test_index_multipage_count", 
"pynamodb/tests/test_model.py::ModelTestCase::test_index_queries", "pynamodb/tests/test_model.py::ModelTestCase::test_invalid_car_model_with_null_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_invalid_map_model_raises", "pynamodb/tests/test_model.py::ModelTestCase::test_list_of_map_works_like_list_of_map", "pynamodb/tests/test_model.py::ModelTestCase::test_list_works_like_list", "pynamodb/tests/test_model.py::ModelTestCase::test_loads", "pynamodb/tests/test_model.py::ModelTestCase::test_loads_complex_model", "pynamodb/tests/test_model.py::ModelTestCase::test_local_index", "pynamodb/tests/test_model.py::ModelTestCase::test_model_attrs", "pynamodb/tests/test_model.py::ModelTestCase::test_model_subclass_attributes_inherited_on_create", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_invalid_data_does_not_validate", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_list", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_list_of_map", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_list_of_map_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_list_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_maps", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_maps_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_maps_with_nulls_retrieve_from_db", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_maps_with_pythonic_attributes", "pynamodb/tests/test_model.py::ModelTestCase::test_model_with_nulls_validates", "pynamodb/tests/test_model.py::ModelTestCase::test_model_works_like_model", "pynamodb/tests/test_model.py::ModelTestCase::test_multiple_indices_share_non_key_attribute", "pynamodb/tests/test_model.py::ModelTestCase::test_new_style_boolean_serializes_as_bool", "pynamodb/tests/test_model.py::ModelTestCase::test_old_style_boolean_serializes_as_bool", 
"pynamodb/tests/test_model.py::ModelTestCase::test_old_style_model_exception", "pynamodb/tests/test_model.py::ModelTestCase::test_overidden_defaults", "pynamodb/tests/test_model.py::ModelTestCase::test_overidden_session", "pynamodb/tests/test_model.py::ModelTestCase::test_overridden_attr_name", "pynamodb/tests/test_model.py::ModelTestCase::test_projections", "pynamodb/tests/test_model.py::ModelTestCase::test_query", "pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_greater_than_available_items_and_page_size", "pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_greater_than_available_items_multiple_page", "pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_greater_than_available_items_single_page", "pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_identical_to_available_items_single_page", "pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_less_than_available_and_page_size", "pynamodb/tests/test_model.py::ModelTestCase::test_query_limit_less_than_available_items_multiple_page", "pynamodb/tests/test_model.py::ModelTestCase::test_rate_limited_scan", "pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_as_sub_map", "pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_as_sub_map_deserialize", "pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_as_sub_map_from_raw_data_works", "pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_as_sub_map_serialize_pass", "pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_deserializes", "pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_from_raw_data_works", "pynamodb/tests/test_model.py::ModelTestCase::test_raw_map_serialize_fun_one", "pynamodb/tests/test_model.py::ModelTestCase::test_result_set_init", "pynamodb/tests/test_model.py::ModelTestCase::test_result_set_iter", "pynamodb/tests/test_model.py::ModelTestCase::test_save", "pynamodb/tests/test_model.py::ModelTestCase::test_scan", 
"pynamodb/tests/test_model.py::ModelTestCase::test_scan_limit", "pynamodb/tests/test_model.py::ModelTestCase::test_scan_limit_with_page_size", "pynamodb/tests/test_model.py::ModelTestCase::test_update", "pynamodb/tests/test_model.py::ModelTestCase::test_update_item", "pynamodb/tests/test_model.py::ModelInitTestCase::test_raw_map_attribute_with_dict_init", "pynamodb/tests/test_model.py::ModelInitTestCase::test_raw_map_attribute_with_initialized_instance_init", "pynamodb/tests/test_model.py::ModelInitTestCase::test_subclassed_map_attribute_with_dict_init", "pynamodb/tests/test_model.py::ModelInitTestCase::test_subclassed_map_attribute_with_initialized_instance_init", "pynamodb/tests/test_model.py::ModelInitTestCase::test_subclassed_map_attribute_with_map_attribute_member_with_initialized_instance_init", "pynamodb/tests/test_model.py::ModelInitTestCase::test_subclassed_map_attribute_with_map_attributes_member_with_dict_init" ]
[]
MIT License
1,804
[ "pynamodb/models.py" ]
[ "pynamodb/models.py" ]
jnothman__searchgrid-11
0c21b167008831b2a87259d31a6faabe711e33c4
2017-10-25 03:52:37
0c21b167008831b2a87259d31a6faabe711e33c4
diff --git a/CHANGELOG.rst b/CHANGELOG.rst new file mode 100644 index 0000000..72316db --- /dev/null +++ b/CHANGELOG.rst @@ -0,0 +1,4 @@ +## v0.2 + +- Fixed a bug where the grid of the default estimator in a Pipeline step was + attributed to alternatives for that step. :issue:`10`. diff --git a/README.rst b/README.rst index e57dd0d..19c83ea 100644 --- a/README.rst +++ b/README.rst @@ -152,7 +152,7 @@ Searching over multiple grids. ... reduce=[kbest, pca]) >>> gs = make_grid_search(pipe) -.. |py-versions| image:: https://img.shields.io/pypi/pyversions/Django.svg +.. |py-versions| image:: https://img.shields.io/pypi/pyversions/searchgrid.svg :alt: Python versions supported .. |version| image:: https://badge.fury.io/py/searchgrid.svg diff --git a/doc/conf.py b/doc/conf.py index c87eea2..16b8340 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -41,6 +41,7 @@ extensions = [ 'numpydoc', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode', + 'sphinx_issues', ] # Add any paths that contain templates here, relative to this directory. @@ -65,8 +66,8 @@ copyright = u'2017, Joel Nothman' # # The short X.Y version. -version = '0.1' -release = '0.1a1' +version = '0.1.1' +release = '0.1.1' # version = searchgrid.__version__ # The full version, including alpha/beta/rc tags. diff --git a/doc/index.rst b/doc/index.rst index 5ea5fde..197487b 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -5,3 +5,8 @@ API Reference .. automodule:: searchgrid :members: + +Changelog +......... + +.. 
include:: ../README.rst diff --git a/doc/requirements.txt b/doc/requirements.txt index a58ea28..100a1f5 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -2,3 +2,4 @@ numpy scipy scikit-learn numpydoc +sphinx-issues diff --git a/searchgrid.py b/searchgrid.py index bbe4862..753a2a7 100644 --- a/searchgrid.py +++ b/searchgrid.py @@ -48,8 +48,16 @@ def _build_param_grid(estimator): # handle estimator parameters having their own grids for param_name, value in estimator.get_params().items(): if '__' not in param_name and hasattr(value, 'get_params'): - grid = _update_grid(grid, _build_param_grid(value), - param_name + '__') + out = [] + value_grid = _build_param_grid(value) + for sub_grid in grid: + if param_name in sub_grid: + sub_grid = [sub_grid] + else: + sub_grid = _update_grid([sub_grid], value_grid, + param_name + '__') + out.extend(sub_grid) + grid = out # handle grid values having their own grids out = [] diff --git a/setup.py b/setup.py index 7cdb90a..f2d609c 100644 --- a/setup.py +++ b/setup.py @@ -14,7 +14,7 @@ def setup_package(): try: # See setup.cfg setup(name='searchgrid', - version='0.1a1', + version='0.1.1', py_modules=['searchgrid'], setup_requires=['pytest-runner'], tests_require=['pytest>=2.7', 'pytest-cov~=2.4'],
default Pipeline step's grid gets assigned to alternative steps The final assertion here fails: ```python lr = set_grid(LogisticRegression(), C=[1, 2, 3]) svc = SVC() grid = build_param_grid(set_grid(Pipeline([('root', lr)]), root=[lr, svc])) assert len(grid) == 2 assert lr in grid[0]['root'] assert svc not in grid[0]['root'] assert 'root__C' in grid[0] assert svc in grid[1]['root'] assert lr not in grid[1]['root'] assert 'root__C' not in grid[1] ```
jnothman/searchgrid
diff --git a/test_searchgrid.py b/test_searchgrid.py index 242b268..4f31d04 100644 --- a/test_searchgrid.py +++ b/test_searchgrid.py @@ -48,6 +48,22 @@ def test_build_param_grid_set_estimator(): assert build_param_grid(estimator) == param_grid +def test_regression(): + lr = set_grid(LogisticRegression(), C=[1, 2, 3]) + svc = SVC() + grid = build_param_grid(set_grid(Pipeline([('root', lr)]), root=[lr, svc])) + + assert len(grid) == 2 + + assert lr in grid[0]['root'] + assert svc not in grid[0]['root'] + assert 'root__C' in grid[0] + + assert svc in grid[1]['root'] + assert lr not in grid[1]['root'] + assert 'root__C' not in grid[1] + + def test_make_grid_search(): X, y = load_iris(return_X_y=True) lr = LogisticRegression()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 6 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "python-coveralls", "numpydoc" ], "pre_install": null, "python": "3.6", "reqs_path": [ "test_requirements.txt", "doc/requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 attrs==22.2.0 Babel==2.11.0 certifi==2021.5.30 charset-normalizer==2.0.12 coverage==6.2 docutils==0.17.1 flake8==5.0.4 idna==3.10 imagesize==1.4.1 importlib-metadata==4.2.0 iniconfig==1.1.1 Jinja2==3.0.3 joblib==1.1.1 MarkupSafe==2.0.1 mccabe==0.7.0 numpy==1.19.5 numpydoc==1.1.0 packaging==21.3 pluggy==1.0.0 py==1.11.0 pycodestyle==2.9.1 pyflakes==2.5.0 Pygments==2.14.0 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 python-coveralls==2.9.3 pytz==2025.2 PyYAML==6.0.1 requests==2.27.1 scikit-learn==0.24.2 scipy==1.5.4 -e git+https://github.com/jnothman/searchgrid.git@0c21b167008831b2a87259d31a6faabe711e33c4#egg=searchgrid six==1.17.0 snowballstemmer==2.2.0 Sphinx==4.3.2 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 threadpoolctl==3.1.0 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 zipp==3.6.0
name: searchgrid channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - attrs==22.2.0 - babel==2.11.0 - charset-normalizer==2.0.12 - coverage==6.2 - docutils==0.17.1 - flake8==5.0.4 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.2.0 - iniconfig==1.1.1 - jinja2==3.0.3 - joblib==1.1.1 - markupsafe==2.0.1 - mccabe==0.7.0 - numpy==1.19.5 - numpydoc==1.1.0 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pycodestyle==2.9.1 - pyflakes==2.5.0 - pygments==2.14.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - python-coveralls==2.9.3 - pytz==2025.2 - pyyaml==6.0.1 - requests==2.27.1 - scikit-learn==0.24.2 - scipy==1.5.4 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==4.3.2 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - threadpoolctl==3.1.0 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - zipp==3.6.0 prefix: /opt/conda/envs/searchgrid
[ "test_searchgrid.py::test_regression" ]
[]
[ "test_searchgrid.py::test_build_param_grid[estimator0-param_grid0]", "test_searchgrid.py::test_build_param_grid[estimator1-param_grid1]", "test_searchgrid.py::test_build_param_grid[estimator2-param_grid2]", "test_searchgrid.py::test_build_param_grid_set_estimator", "test_searchgrid.py::test_make_grid_search" ]
[]
BSD 3-Clause "New" or "Revised" License
1,805
[ "README.rst", "doc/requirements.txt", "setup.py", "searchgrid.py", "doc/conf.py", "CHANGELOG.rst", "doc/index.rst" ]
[ "README.rst", "doc/requirements.txt", "setup.py", "searchgrid.py", "doc/conf.py", "CHANGELOG.rst", "doc/index.rst" ]
jnothman__searchgrid-12
1f1a79f272dceb1afb44ec3228cdab0c52ea9278
2017-10-25 04:31:53
1f1a79f272dceb1afb44ec3228cdab0c52ea9278
diff --git a/README.rst b/README.rst index 980eaa0..c917f94 100644 --- a/README.rst +++ b/README.rst @@ -32,6 +32,12 @@ It provides two main functions: ``GridSearchCV`` object using the parameter space the estimator is annotated with. +Other utilities for constructing search spaces include: + +- `searchgrid.build_param_grid` +- `searchgrid.make_pipeline` +- `searchgrid.make_union` + Quick Start ........... @@ -151,6 +157,16 @@ Searching over multiple grids. ... reduce=[kbest, pca]) >>> gs = make_grid_search(pipe) + And since you no longer care about step names, use + `searchgrid.make_pipeline` to express alternative steps even more simply:: + + >>> from searchgrid import make_pipeline + >>> kbest = set_grid(SelectKBest(), k=[5, 10, 20]) + >>> pca = set_grid(PCA(), n_components=[5, 10, 20]) + >>> lr = set_grid(LogisticRegression(), C=[.1, 1, 10]) + >>> pipe = make_pipeline([kbest, pca], lr) + >>> gs = make_grid_search(pipe) + .. |py-versions| image:: https://img.shields.io/pypi/pyversions/searchgrid.svg :alt: Python versions supported diff --git a/searchgrid.py b/searchgrid.py index 753a2a7..f37bc7d 100644 --- a/searchgrid.py +++ b/searchgrid.py @@ -1,8 +1,10 @@ from collections import Mapping as _Mapping +from collections import defaultdict as _defaultdict import itertools as _itertools from sklearn.model_selection import GridSearchCV as _GridSearchCV from sklearn.pipeline import Pipeline as _Pipeline +from sklearn.pipeline import FeatureUnion as _FeatureUnion def set_grid(estimator, **grid): @@ -132,3 +134,126 @@ def make_grid_search(estimator, **kwargs): """ estimator = _check_estimator(estimator) return _GridSearchCV(estimator, build_param_grid(estimator), **kwargs) + + +def _name_steps(steps, default='alt'): + """Generate names for estimators.""" + steps = [estimators if isinstance(estimators, list) else [estimators] + for estimators in steps] + + names = [] + for estimators in steps: + estimators = estimators[:] + if len(estimators) > 1: + while None in 
estimators: + estimators.remove(None) + step_names = {type(estimator).__name__.lower() + for estimator in estimators} + if len(step_names) > 1: + names.append(default) + else: + names.append(step_names.pop()) + + namecount = _defaultdict(int) + for name in names: + namecount[name] += 1 + + for k, v in list(namecount.items()): + if v == 1: + del namecount[k] + + for i in reversed(range(len(names))): + name = names[i] + if name in namecount: + names[i] += "-%d" % namecount[name] + namecount[name] -= 1 + + named_steps = list(zip(names, [step[0] for step in steps])) + grid = {k: v for k, v in zip(names, steps) if len(v) > 1} + return named_steps, grid + + +def make_pipeline(*steps, **kwargs): + """Construct a Pipeline with alternative estimators to search over + + Parameters + ---------- + steps + Each step is specified as one of: + + * an estimator instance + * None (meaning no transformation) + * a list of the above, indicating that a grid search should alternate + over the estimators (or None) in the list + kwargs + Keyword arguments to the constructor of + :class:`sklearn.pipeline.Pipeline`. + + Examples + -------- + >>> from sklearn.feature_extraction.text import CountVectorizer + >>> from sklearn.feature_extraction.text import TfidfTransformer + >>> from sklearn.feature_selection import SelectKBest + >>> from sklearn.decomposition import PCA + >>> from sklearn.linear_model import LogisticRegression + >>> from sklearn.ensemble import RandomForestClassifier + >>> from sklearn.model_selection import ParameterGrid + >>> from searchgrid import make_pipeline, build_param_grid + >>> pipe = make_pipeline(CountVectorizer(), + ... [TfidfTransformer(), None], + ... [PCA(n_components=5), SelectKBest(k=5)], + ... [set_grid(LogisticRegression(), + ... C=[.1, 1., 10.]), + ... 
RandomForestClassifier()]) + >>> pipe.steps # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS + [('countvectorizer', CountVectorizer(...)), + ('tfidftransformer', TfidfTransformer(...)), + ('alt-1', PCA(...)), + ('alt-2', LogisticRegression(...))] + >>> n_combinations = len(ParameterGrid(build_param_grid(pipe))) + >>> n_combinations + ... # 2 * 2 * (3 + 1) + 16 + + Notes + ----- + Each step is named according to the set of estimator types in its list: + + * if a step has only one type of estimator (disregarding None), it takes + that estimator's class name (lowercased) + * if a step has estimators of mixed type, the step is named 'alt' + * if there are multiple steps of the same name using the above rules, + a suffix '-1', '-2', etc. is added. + """ + steps, grid = _name_steps(steps) + return set_grid(_Pipeline(steps, **kwargs), **grid) + + +def make_union(*transformers, **kwargs): + """Construct a FeatureUnion with alternative estimators to search over + + Parameters + ---------- + steps + Each step is specified as one of: + + * an estimator instance + * None (meaning no features) + * a list of the above, indicating that a grid search should alternate + over the estimators (or None) in the list + kwargs + Keyword arguments to the constructor of + :class:`sklearn.pipeline.FeatureUnion`. + + Notes + ----- + Each step is named according to the set of estimator types in its list: + + * if a step has only one type of estimator (disregarding None), it takes + that estimator's class name (lowercased) + * if a step has estimators of mixed type, the step is named 'alt' + * if there are multiple steps of the same name using the above rules, + a suffix '-1', '-2', etc. is added. + """ + steps, grid = _name_steps(transformers) + return set_grid(_FeatureUnion(steps, **kwargs), **grid)
Shorthands for gridded pipelines and feature_unions Could have: ```python searchgrid.make_union([OptionalTransformer1(), None], [OptionalTransformer2(), OptionalTransformer3()], Transformer4()) ``` translate to ```python set_grid(FeatureUnion([('optionaltransformer1', None), ('auto1', None), ('transformer4', Transformer4())]), optionaltransformer1=[OptionalTransformer1(), None], auto1=[OptionalTransformer2(), OptionalTransformer3()]) ```
jnothman/searchgrid
diff --git a/test_searchgrid.py b/test_searchgrid.py index 4f31d04..57a4c96 100644 --- a/test_searchgrid.py +++ b/test_searchgrid.py @@ -1,10 +1,12 @@ import pytest -from sklearn.pipeline import Pipeline, make_pipeline +from sklearn.pipeline import Pipeline, FeatureUnion +from sklearn.pipeline import make_pipeline as skl_make_pipeline from sklearn.svm import SVC from sklearn.linear_model import LogisticRegression, SGDClassifier -from sklearn.feature_selection import SelectKBest +from sklearn.feature_selection import SelectKBest, SelectPercentile from sklearn.datasets import load_iris from searchgrid import set_grid, build_param_grid, make_grid_search +from searchgrid import make_pipeline, make_union @pytest.mark.parametrize(('estimator', 'param_grid'), [ @@ -12,23 +14,12 @@ from searchgrid import set_grid, build_param_grid, make_grid_search {'C': [1, 2]}), (set_grid(SVC(), C=[1, 2], gamma=[1, 2]), {'C': [1, 2], 'gamma': [1, 2]}), - (make_pipeline(set_grid(SVC(), C=[1, 2], gamma=[1, 2])), + (skl_make_pipeline(set_grid(SVC(), C=[1, 2], gamma=[1, 2])), {'svc__C': [1, 2], 'svc__gamma': [1, 2]}), ]) def test_build_param_grid(estimator, param_grid): assert build_param_grid(estimator) == param_grid -# pytest.mark.xfail( -# (set_grid(SVC(), [{'kernel': ['linear']}, -# {'kernel': 'rbf', 'gamma': [1, 2]}]), -# [{'kernel': ['linear']}, {'kernel': 'rbf', 'gamma': [1, 2]}])), -# pytest.mark.xfail( -# (make_pipeline(set_grid(SVC(), [{'kernel': ['linear']}, -# {'kernel': ['rbf'], -# 'gamma': [1, 2]}])), -# [{'svc__kernel': ['linear']}, -# {'svc__kernel': 'rbf', 'svc__gamma': [1, 2]}])), - def test_build_param_grid_set_estimator(): clf1 = SVC() @@ -80,3 +71,42 @@ def test_make_grid_search(): assert svc_mask.sum() == 2 assert gs3.cv_results_['param_root__degree'][svc_mask].tolist() == [2, 3] assert gs3.cv_results_['param_root'][~svc_mask].tolist() == [lr] + + +def test_make_pipeline(): + t1 = SelectKBest() + t2 = SelectKBest() + t3 = SelectKBest() + t4 = SelectKBest() + t5 = 
SelectPercentile() + t6 = SelectKBest() + t7 = SelectKBest() + t8 = SelectKBest() + t9 = SelectPercentile() + in_steps = [[t1, None], + [t2, t3], + [t4, t5], # mixed + t6, + [None, t7], + [t8, None, t9], # mixed + None] + pipe = make_pipeline(*in_steps, memory='/path/to/nowhere') + union = make_union(*in_steps) + + for est, est_steps in [(pipe, pipe.steps), + (union, union.transformer_list)]: + names, steps = zip(*est_steps) + assert names == ('selectkbest-1', 'selectkbest-2', 'alt-1', + 'selectkbest-3', 'selectkbest-4', 'alt-2', 'nonetype') + assert steps == (t1, t2, t4, t6, None, t8, None) + + assert len(est._param_grid) == 5 + assert est._param_grid[names[0]] == [t1, None] + assert est._param_grid[names[1]] == [t2, t3] + assert est._param_grid[names[2]] == [t4, t5] + assert est._param_grid[names[4]] == [None, t7] + assert est._param_grid[names[5]] == [t8, None, t9] + + assert type(pipe) is Pipeline + assert type(union) is FeatureUnion + assert pipe.memory == '/path/to/nowhere'
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 2 }
0.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "python-coveralls" ], "pre_install": [], "python": "3.6", "reqs_path": [ "doc/requirements.txt", "test_requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 attrs==22.2.0 Babel==2.11.0 certifi==2021.5.30 charset-normalizer==2.0.12 coverage==6.2 docutils==0.17.1 flake8==5.0.4 idna==3.10 imagesize==1.4.1 importlib-metadata==4.2.0 iniconfig==1.1.1 Jinja2==3.0.3 joblib==1.1.1 MarkupSafe==2.0.1 mccabe==0.7.0 numpy==1.19.5 numpydoc==1.1.0 packaging==21.3 pluggy==1.0.0 py==1.11.0 pycodestyle==2.9.1 pyflakes==2.5.0 Pygments==2.14.0 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 python-coveralls==2.9.3 pytz==2025.2 PyYAML==6.0.1 requests==2.27.1 scikit-learn==0.24.2 scipy==1.5.4 -e git+https://github.com/jnothman/searchgrid.git@1f1a79f272dceb1afb44ec3228cdab0c52ea9278#egg=searchgrid six==1.17.0 snowballstemmer==2.2.0 Sphinx==4.3.2 sphinx-issues==3.0.1 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 threadpoolctl==3.1.0 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 zipp==3.6.0
name: searchgrid channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - attrs==22.2.0 - babel==2.11.0 - charset-normalizer==2.0.12 - coverage==6.2 - docutils==0.17.1 - flake8==5.0.4 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.2.0 - iniconfig==1.1.1 - jinja2==3.0.3 - joblib==1.1.1 - markupsafe==2.0.1 - mccabe==0.7.0 - numpy==1.19.5 - numpydoc==1.1.0 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pycodestyle==2.9.1 - pyflakes==2.5.0 - pygments==2.14.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - python-coveralls==2.9.3 - pytz==2025.2 - pyyaml==6.0.1 - requests==2.27.1 - scikit-learn==0.24.2 - scipy==1.5.4 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==4.3.2 - sphinx-issues==3.0.1 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - threadpoolctl==3.1.0 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - zipp==3.6.0 prefix: /opt/conda/envs/searchgrid
[ "test_searchgrid.py::test_build_param_grid[estimator0-param_grid0]", "test_searchgrid.py::test_build_param_grid[estimator1-param_grid1]", "test_searchgrid.py::test_build_param_grid[estimator2-param_grid2]", "test_searchgrid.py::test_build_param_grid_set_estimator", "test_searchgrid.py::test_regression", "test_searchgrid.py::test_make_grid_search" ]
[ "test_searchgrid.py::test_make_pipeline" ]
[]
[]
BSD 3-Clause "New" or "Revised" License
1,806
[ "README.rst", "searchgrid.py" ]
[ "README.rst", "searchgrid.py" ]
elastic__elasticsearch-dsl-py-759
269fef7fa12333f7622c3694df75a1b296d87ae2
2017-10-25 12:36:19
e8906dcd17eb2021bd191325817ff7541d838ea1
diff --git a/elasticsearch_dsl/analysis.py b/elasticsearch_dsl/analysis.py index 8424283..c2abd94 100644 --- a/elasticsearch_dsl/analysis.py +++ b/elasticsearch_dsl/analysis.py @@ -19,9 +19,9 @@ class AnalysisBase(object): class CustomAnalysis(object): name = 'custom' - def __init__(self, name, builtin_type='custom', **kwargs): + def __init__(self, filter_name, builtin_type='custom', **kwargs): self._builtin_type = builtin_type - self._name = name + self._name = filter_name super(CustomAnalysis, self).__init__(**kwargs) def to_dict(self):
Can't create custom stemming filter The stemming filter, requires the use a property called `name`, however the the library is using this property. (https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-stemmer-tokenfilter.html) This code ```python light_stemmer_ = token_filter(name="minimal_english",type= "stemmer") ``` will produce this: ```json "filter" : { "minimal_english" : { "type" : "stemmer" } } ``` while I would like this ```json "filter" : { "light_stemmer_" : { "type" : "stemmer", "name" : "minimal_english" } } ``` I suggest either changing the name of the variable the user is using, or allowing a variable like `_name` to become `name` when serializing. as a workaround I am changed this line https://github.com/elastic/elasticsearch-dsl-py/blob/29d28a012a5a3a930e66cee56178208f21cb5fdf/elasticsearch_dsl/analysis.py#L33 to only pop if the type is not stemming. like this ```python if self._builtin_type is 'stemmer' and 'name' in d[self.name] : d['name'] = d[self.name]['name'] d = d.pop(self.name) ``` and them in my code I do ```python light_stemmer_ = token_filter("light_stemmer_", "stemmer") light_stemmer_.name = "minimal_english" ``` but I know it is a hacky solution
elastic/elasticsearch-dsl-py
diff --git a/test_elasticsearch_dsl/test_analysis.py b/test_elasticsearch_dsl/test_analysis.py index 014c43d..6dc3c09 100644 --- a/test_elasticsearch_dsl/test_analysis.py +++ b/test_elasticsearch_dsl/test_analysis.py @@ -79,3 +79,11 @@ def test_custom_analyzer_can_collect_custom_items(): } } == a.get_analysis_definition() +def test_stemmer_analyzer_can_pass_name(): + t = analysis.token_filter('my_english_filter', name="minimal_english", type="stemmer") + assert t.to_dict() == 'my_english_filter' + assert { + "type" : "stemmer", + "name" : "minimal_english" + } == t.get_definition() +
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 1 }
5.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e '.[develop]'", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 attrs==22.2.0 Babel==2.11.0 certifi==2021.5.30 charset-normalizer==2.0.12 coverage==6.2 docutils==0.18.1 elasticsearch==5.5.3 -e git+https://github.com/elastic/elasticsearch-dsl-py.git@269fef7fa12333f7622c3694df75a1b296d87ae2#egg=elasticsearch_dsl idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 iniconfig==1.1.1 Jinja2==3.0.3 MarkupSafe==2.0.1 mock==5.2.0 packaging==21.3 pluggy==1.0.0 py==1.11.0 Pygments==2.14.0 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 requests==2.27.1 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinx-rtd-theme==2.0.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 zipp==3.6.0
name: elasticsearch-dsl-py channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - attrs==22.2.0 - babel==2.11.0 - charset-normalizer==2.0.12 - coverage==6.2 - docutils==0.18.1 - elasticsearch==5.5.3 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - jinja2==3.0.3 - markupsafe==2.0.1 - mock==5.2.0 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pygments==2.14.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - requests==2.27.1 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinx-rtd-theme==2.0.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jquery==4.1 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - zipp==3.6.0 prefix: /opt/conda/envs/elasticsearch-dsl-py
[ "test_elasticsearch_dsl/test_analysis.py::test_stemmer_analyzer_can_pass_name" ]
[]
[ "test_elasticsearch_dsl/test_analysis.py::test_analyzer_serializes_as_name", "test_elasticsearch_dsl/test_analysis.py::test_analyzer_has_definition", "test_elasticsearch_dsl/test_analysis.py::test_normalizer_serializes_as_name", "test_elasticsearch_dsl/test_analysis.py::test_normalizer_has_definition", "test_elasticsearch_dsl/test_analysis.py::test_tokenizer", "test_elasticsearch_dsl/test_analysis.py::test_custom_analyzer_can_collect_custom_items" ]
[]
Apache License 2.0
1,807
[ "elasticsearch_dsl/analysis.py" ]
[ "elasticsearch_dsl/analysis.py" ]
rollbar__pyrollbar-213
c9d34b1d1544415a17d5a79e90179a763e739bfc
2017-10-25 21:10:12
a87ba1887c362cdc911eaa1b1c53311edfbed2c3
rokob: It turns out that copy/deepcopy go through the pickle machinery via __reduce__ and __reduce_ex__, so things that are not copyable throw TypeErrors from pickling.
diff --git a/rollbar/lib/__init__.py b/rollbar/lib/__init__.py index 1afaee1..9a3f664 100644 --- a/rollbar/lib/__init__.py +++ b/rollbar/lib/__init__.py @@ -173,7 +173,10 @@ def dict_merge(a, b): if k in result and isinstance(result[k], dict): result[k] = dict_merge(result[k], v) else: - result[k] = copy.deepcopy(v) + try: + result[k] = copy.deepcopy(v) + except: + result[k] = '<Uncopyable obj:(%s)>' % (v,) return result
Exception while reporting exc_info to Rollbar. TypeError("can't pickle select.epoll objects",) I'm using `rollbar==0.13.16` with `tornado==4.5.2` and getting the following exception: ``` [E 171025 05:47:48 __init__:411] Exception while reporting message to Rollbar. TypeError("can't pickle select.epoll objects",) Traceback (most recent call last): File "/usr/local/lib/python3.6/site-packages/tornado/web.py", line 1511, in _execute result = yield result File "/usr/local/lib/python3.6/site-packages/tornado/gen.py", line 1055, in run value = future.result() File "/usr/local/lib/python3.6/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "<string>", line 4, in raise_exc_info File "/usr/local/lib/python3.6/site-packages/tornado/gen.py", line 1063, in run yielded = self.gen.throw(*exc_info) File "/project-ps-hsm/src/handlers/balance.py", line 21, in get yield self.check_blacklist() File "/usr/local/lib/python3.6/site-packages/tornado/gen.py", line 1055, in run value = future.result() File "/usr/local/lib/python3.6/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "<string>", line 4, in raise_exc_info File "/usr/local/lib/python3.6/site-packages/tornado/gen.py", line 1063, in run yielded = self.gen.throw(*exc_info) File "/project-ps-hsm/src/handlers/_base.py", line 81, in check_blacklist reason = yield is_blacklisted(self.can, self.current_user) File "/usr/local/lib/python3.6/site-packages/tornado/gen.py", line 1055, in run value = future.result() File "/usr/local/lib/python3.6/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "<string>", line 4, in raise_exc_info File "/usr/local/lib/python3.6/site-packages/tornado/gen.py", line 1063, in run yielded = self.gen.throw(*exc_info) File "/project-ps-hsm/src/utils/afp_processing.py", line 42, in is_blacklisted 'SECRET': config.INTERNAL_API_SECRET['customer'], File 
"/usr/local/lib/python3.6/site-packages/tornado/gen.py", line 1055, in run value = future.result() File "/usr/local/lib/python3.6/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "<string>", line 4, in raise_exc_info tornado.httpclient.HTTPError: HTTP 500: Internal Server Error During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/usr/local/lib/python3.6/site-packages/rollbar/__init__.py", line 409, in report_message return _report_message(message, level, request, extra_data, payload_data) File "/usr/local/lib/python3.6/site-packages/rollbar/__init__.py", line 712, in _report_message data = dict_merge(data, payload_data) File "/usr/local/lib/python3.6/site-packages/rollbar/lib/__init__.py", line 174, in dict_merge result[k] = dict_merge(result[k], v) File "/usr/local/lib/python3.6/site-packages/rollbar/lib/__init__.py", line 176, in dict_merge result[k] = copy.deepcopy(v) File "/usr/local/lib/python3.6/copy.py", line 150, in deepcopy y = copier(x, memo) File "/usr/local/lib/python3.6/copy.py", line 215, in _deepcopy_list append(deepcopy(a, memo)) File "/usr/local/lib/python3.6/copy.py", line 150, in deepcopy y = copier(x, memo) File "/usr/local/lib/python3.6/copy.py", line 240, in _deepcopy_dict y[deepcopy(key, memo)] = deepcopy(value, memo) File "/usr/local/lib/python3.6/copy.py", line 150, in deepcopy y = copier(x, memo) File "/usr/local/lib/python3.6/copy.py", line 220, in _deepcopy_tuple y = [deepcopy(a, memo) for a in x] File "/usr/local/lib/python3.6/copy.py", line 220, in <listcomp> y = [deepcopy(a, memo) for a in x] File "/usr/local/lib/python3.6/copy.py", line 180, in deepcopy y = _reconstruct(x, memo, *rv) File "/usr/local/lib/python3.6/copy.py", line 280, in _reconstruct state = deepcopy(state, memo) File "/usr/local/lib/python3.6/copy.py", line 150, in deepcopy y = copier(x, memo) File "/usr/local/lib/python3.6/copy.py", line 240, in _deepcopy_dict 
y[deepcopy(key, memo)] = deepcopy(value, memo) File "/usr/local/lib/python3.6/copy.py", line 180, in deepcopy y = _reconstruct(x, memo, *rv) File "/usr/local/lib/python3.6/copy.py", line 280, in _reconstruct state = deepcopy(state, memo) File "/usr/local/lib/python3.6/copy.py", line 150, in deepcopy y = copier(x, memo) File "/usr/local/lib/python3.6/copy.py", line 240, in _deepcopy_dict y[deepcopy(key, memo)] = deepcopy(value, memo) File "/usr/local/lib/python3.6/copy.py", line 180, in deepcopy y = _reconstruct(x, memo, *rv) File "/usr/local/lib/python3.6/copy.py", line 280, in _reconstruct state = deepcopy(state, memo) File "/usr/local/lib/python3.6/copy.py", line 150, in deepcopy y = copier(x, memo) File "/usr/local/lib/python3.6/copy.py", line 240, in _deepcopy_dict y[deepcopy(key, memo)] = deepcopy(value, memo) File "/usr/local/lib/python3.6/copy.py", line 180, in deepcopy y = _reconstruct(x, memo, *rv) File "/usr/local/lib/python3.6/copy.py", line 280, in _reconstruct state = deepcopy(state, memo) File "/usr/local/lib/python3.6/copy.py", line 150, in deepcopy y = copier(x, memo) File "/usr/local/lib/python3.6/copy.py", line 240, in _deepcopy_dict y[deepcopy(key, memo)] = deepcopy(value, memo) File "/usr/local/lib/python3.6/copy.py", line 169, in deepcopy rv = reductor(4) TypeError: can't pickle select.epoll objects ``` It results in certain errors don't show up in Rollbar which is pretty bad 😞 It seems like not every `exc_info` is pickable and this situation should be handled properly. I suggest still reporting the error message, even without the exception info.
rollbar/pyrollbar
diff --git a/rollbar/test/test_lib.py b/rollbar/test/test_lib.py new file mode 100644 index 0000000..201a2ed --- /dev/null +++ b/rollbar/test/test_lib.py @@ -0,0 +1,59 @@ +from rollbar.lib import dict_merge + +from rollbar.test import BaseTest + +class RollbarLibTest(BaseTest): + def test_dict_merge_not_dict(self): + a = {'a': {'b': 42}} + b = 99 + result = dict_merge(a, b) + + self.assertEqual(99, result) + + def test_dict_merge_dicts_independent(self): + a = {'a': {'b': 42}} + b = {'x': {'y': 99}} + result = dict_merge(a, b) + + self.assertIn('a', result) + self.assertIn('b', result['a']) + self.assertEqual(42, result['a']['b']) + self.assertIn('x', result) + self.assertIn('y', result['x']) + self.assertEqual(99, result['x']['y']) + + def test_dict_merge_dicts(self): + a = {'a': {'b': 42}} + b = {'a': {'c': 99}} + result = dict_merge(a, b) + + self.assertIn('a', result) + self.assertIn('b', result['a']) + self.assertIn('c', result['a']) + self.assertEqual(42, result['a']['b']) + self.assertEqual(99, result['a']['c']) + + def test_dict_merge_dicts_second_wins(self): + a = {'a': {'b': 42}} + b = {'a': {'b': 99}} + result = dict_merge(a, b) + + self.assertIn('a', result) + self.assertIn('b', result['a']) + self.assertEqual(99, result['a']['b']) + + def test_dict_merge_dicts_select_poll(self): + import select + poll = getattr(select, 'poll', None) + if poll is None: + return + p = poll() + a = {'a': {'b': 42}} + b = {'a': {'y': p}} + result = dict_merge(a, b) + + self.assertIn('a', result) + self.assertIn('b', result['a']) + self.assertEqual(42, result['a']['b']) + self.assertIn('y', result['a']) + self.assertRegex(result['a']['y'], r'Uncopyable obj')
{ "commit_name": "merge_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 1 }, "num_modified_files": 1 }
0.13
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "", "pip_packages": [ "mock", "webob", "blinker", "unittest2", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.4", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 blinker==1.5 certifi==2021.5.30 charset-normalizer==2.0.12 idna==3.10 importlib-metadata==4.8.3 iniconfig==1.1.1 linecache2==1.0.0 mock==5.2.0 packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 requests==2.27.1 -e git+https://github.com/rollbar/pyrollbar.git@c9d34b1d1544415a17d5a79e90179a763e739bfc#egg=rollbar six==1.17.0 tomli==1.2.3 traceback2==1.4.0 typing_extensions==4.1.1 unittest2==1.1.0 urllib3==1.26.20 WebOb==1.8.9 zipp==3.6.0
name: pyrollbar channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - argparse==1.4.0 - attrs==22.2.0 - blinker==1.5 - charset-normalizer==2.0.12 - idna==3.10 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - linecache2==1.0.0 - mock==5.2.0 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - requests==2.27.1 - six==1.17.0 - tomli==1.2.3 - traceback2==1.4.0 - typing-extensions==4.1.1 - unittest2==1.1.0 - urllib3==1.26.20 - webob==1.8.9 - zipp==3.6.0 prefix: /opt/conda/envs/pyrollbar
[ "rollbar/test/test_lib.py::RollbarLibTest::test_dict_merge_dicts_select_poll" ]
[]
[ "rollbar/test/test_lib.py::RollbarLibTest::test_dict_merge_dicts", "rollbar/test/test_lib.py::RollbarLibTest::test_dict_merge_dicts_independent", "rollbar/test/test_lib.py::RollbarLibTest::test_dict_merge_dicts_second_wins", "rollbar/test/test_lib.py::RollbarLibTest::test_dict_merge_not_dict" ]
[]
MIT License
1,808
[ "rollbar/lib/__init__.py" ]
[ "rollbar/lib/__init__.py" ]
ahawker__ulid-59
efdac942d7f969c802903f574965ca860882a891
2017-10-26 02:56:21
64db8e687fcb5faaf68c92dc5d8adef2b4b1bddd
diff --git a/ulid/base32.py b/ulid/base32.py index 83f8a8a..f7377b6 100644 --- a/ulid/base32.py +++ b/ulid/base32.py @@ -31,11 +31,11 @@ DECODING = array.array( 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, - 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF, + 0x0F, 0x10, 0x11, 0x01, 0x12, 0x13, 0x01, 0x14, 0x15, 0x00, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, - 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, - 0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, + 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x01, 0x12, 0x13, 0x01, 0x14, + 0x15, 0x00, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
Non-Crockford's Base32 letters converted differently in Java or Python implementations Hi Andrew, first of all, thanks for the amazing library, we've been using a lot! I have a doubt regarding how we fix the conversion of ULIDs which are not following Crockford's Base32 standard. We are using Lua to generate some guids (https://github.com/Tieske/ulid.lua) and for some reason, we get from time to time letters outside the Crockford's Base32. While trying to fix this on our side (we're not sure how this is happening to be honest), we realised that Java and Python implementations silently corrects this issue in different ways: ### Java ```java ULID.Value ulidValueFromString = ULID.parseULID("01BX73KC0TNH409RTFD1JXKmO0") --> "01BX73KC0TNH409RTFD1JXKM00" ``` `mO` is silently converted into `M0` ### Python ```python In [1]: import ulid In [2]: u = ulid.from_str('01BX73KC0TNH409RTFD1JXKmO0') In [3]: u Out[3]: <ULID('01BX73KC0TNH409RTFD1JXKQZ0')> In [4]: u.str Out[4]: '01BX73KC0TNH409RTFD1JXKQZ0' ``` `mO` is silently converted into `QZ` Shouldn't the python library behave as the Java one as per the [Crockford's Base32](http://crockford.com/wrmg/base32.html) spec, converting `L` and `I` to `1` and `O` to `0` and only upper casing lower case letters instead of changing them? Thanks a lot in advance! Eddie
ahawker/ulid
diff --git a/tests/test_base32.py b/tests/test_base32.py index ab2df67..cac8214 100644 --- a/tests/test_base32.py +++ b/tests/test_base32.py @@ -9,6 +9,14 @@ import pytest from ulid import base32 [email protected](scope='session') +def decoding_alphabet(): + """ + Fixture that yields the entire alphabet that is valid for base32 decoding. + """ + return base32.ENCODING + 'lLiIoO' + + def test_encode_handles_ulid_and_returns_26_char_string(valid_bytes_128): """ Assert that :func:`~ulid.base32.encode` encodes a valid 128 bit bytes object into a :class:`~str` @@ -235,3 +243,12 @@ def test_decode_randomness_raises_on_non_ascii_str(invalid_str_encoding): """ with pytest.raises(ValueError): base32.decode_randomness(invalid_str_encoding) + + +def test_decode_table_has_value_for_entire_decoding_alphabet(decoding_alphabet): + """ + Assert that :attr:`~ulid.base32.DECODING` stores a valid value mapping for all characters that + can be base32 decoded. + """ + for char in decoding_alphabet: + assert base32.DECODING[ord(char)] != 0xFF, 'Character "{}" decoded improperly'.format(char) diff --git a/tests/test_bugs.py b/tests/test_bugs.py new file mode 100644 index 0000000..6ab8fcb --- /dev/null +++ b/tests/test_bugs.py @@ -0,0 +1,21 @@ +""" + test_bugs + ~~~~~~~~~ + + Tests for validating reported bugs have been fixed. +""" +from ulid import api + + +def test_github_issue_58(): + """ + Assert that :func:`~ulid.api.from_str` can properly decode strings that + contain Base32 "translate" characters. + + Base32 "translate" characters are: "iI, lL, oO". + + Issue: https://github.com/ahawker/ulid/issues/58 + """ + value = '01BX73KC0TNH409RTFD1JXKmO0' + instance = api.from_str(value) + assert instance.str == '01BX73KC0TNH409RTFD1JXKM00'
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
0.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio" ], "pre_install": null, "python": "3.6", "reqs_path": [ "requirements/base.txt", "requirements/dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
astroid==2.3.0 attrs==22.2.0 bandit==1.4.0 bumpversion==0.5.3 certifi==2021.5.30 charset-normalizer==2.0.12 click==8.0.4 coverage==6.2 dparse==0.6.3 execnet==1.9.0 gitdb==4.0.9 GitPython==3.1.18 idna==3.10 importlib-metadata==4.8.3 iniconfig==1.1.1 isort==5.10.1 lazy-object-proxy==1.7.1 mccabe==0.7.0 mypy==0.540 packaging==21.3 pbr==6.1.1 pluggy==1.0.0 py==1.11.0 pylint==1.7.4 pyparsing==3.1.4 pytest==7.0.1 pytest-asyncio==0.16.0 pytest-cov==4.0.0 pytest-mock==3.6.1 pytest-xdist==3.0.2 PyYAML==6.0.1 requests==2.27.1 safety==1.6.1 six==1.17.0 smmap==5.0.0 stevedore==3.5.2 tomli==1.2.3 typed-ast==1.1.2 typing_extensions==4.1.1 -e git+https://github.com/ahawker/ulid.git@efdac942d7f969c802903f574965ca860882a891#egg=ulid_py urllib3==1.26.20 wrapt==1.16.0 zipp==3.6.0
name: ulid channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - astroid==2.3.0 - attrs==22.2.0 - bandit==1.4.0 - bumpversion==0.5.3 - charset-normalizer==2.0.12 - click==8.0.4 - coverage==6.2 - dparse==0.6.3 - execnet==1.9.0 - gitdb==4.0.9 - gitpython==3.1.18 - idna==3.10 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - isort==5.10.1 - lazy-object-proxy==1.7.1 - mccabe==0.7.0 - mypy==0.540 - packaging==21.3 - pbr==6.1.1 - pluggy==1.0.0 - py==1.11.0 - pylint==1.7.4 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-asyncio==0.16.0 - pytest-cov==4.0.0 - pytest-mock==3.6.1 - pytest-xdist==3.0.2 - pyyaml==6.0.1 - requests==2.27.1 - safety==1.6.1 - six==1.17.0 - smmap==5.0.0 - stevedore==3.5.2 - tomli==1.2.3 - typed-ast==1.1.2 - typing-extensions==4.1.1 - urllib3==1.26.20 - wrapt==1.16.0 - zipp==3.6.0 prefix: /opt/conda/envs/ulid
[ "tests/test_base32.py::test_decode_table_has_value_for_entire_decoding_alphabet", "tests/test_bugs.py::test_github_issue_58" ]
[]
[ "tests/test_base32.py::test_encode_handles_ulid_and_returns_26_char_string", "tests/test_base32.py::test_encode_handles_timestamp_and_returns_10_char_string", "tests/test_base32.py::test_encode_handles_randomness_and_returns_16_char_string", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[0]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[1]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[2]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[3]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[4]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[5]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[6]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[7]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[8]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[9]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[10]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[11]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[12]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[13]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[14]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[15]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[16]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[17]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[18]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[19]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[20]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[21]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[22]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[23]", 
"tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[24]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[25]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[26]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[27]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[28]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[29]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[30]", "tests/test_base32.py::test_encode_raises_on_bytes_length_mismatch[31]", "tests/test_base32.py::test_encode_ulid_returns_26_char_string", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[0]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[1]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[2]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[3]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[4]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[5]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[6]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[7]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[8]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[9]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[10]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[11]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[12]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[13]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[14]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[15]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[16]", 
"tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[17]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[18]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[19]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[20]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[21]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[22]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[23]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[24]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[25]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[26]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[27]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[28]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[29]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[30]", "tests/test_base32.py::test_encode_ulid_raises_on_bytes_length_mismatch[31]", "tests/test_base32.py::test_encode_timestamp_returns_10_char_string", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[0]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[1]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[2]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[3]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[4]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[5]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[6]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[7]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[8]", 
"tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[9]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[10]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[11]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[12]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[13]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[14]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[15]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[16]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[17]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[18]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[19]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[20]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[21]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[22]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[23]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[24]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[25]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[26]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[27]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[28]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[29]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[30]", "tests/test_base32.py::test_encode_timestamp_raises_on_bytes_length_mismatch[31]", "tests/test_base32.py::test_encode_randomness_returns_16_char_string", 
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[0]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[1]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[2]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[3]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[4]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[5]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[6]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[7]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[8]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[9]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[10]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[11]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[12]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[13]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[14]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[15]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[16]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[17]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[18]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[19]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[20]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[21]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[22]", 
"tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[23]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[24]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[25]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[26]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[27]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[28]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[29]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[30]", "tests/test_base32.py::test_encode_randomness_raises_on_bytes_length_mismatch[31]", "tests/test_base32.py::test_decode_handles_ulid_and_returns_16_bytes", "tests/test_base32.py::test_decode_handles_timestamp_and_returns_6_bytes", "tests/test_base32.py::test_decode_handles_randomness_and_returns_10_bytes", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[0]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[1]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[2]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[3]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[4]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[5]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[6]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[7]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[8]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[9]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[10]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[11]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[12]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[13]", 
"tests/test_base32.py::test_decode_raises_on_str_length_mismatch[14]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[15]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[16]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[17]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[18]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[19]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[20]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[21]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[22]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[23]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[24]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[25]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[26]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[27]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[28]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[29]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[30]", "tests/test_base32.py::test_decode_raises_on_str_length_mismatch[31]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[0]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[1]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[2]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[3]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[4]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[5]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[6]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[7]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[8]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[9]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[10]", 
"tests/test_base32.py::test_decode_raises_on_non_ascii_str[11]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[12]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[13]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[14]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[15]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[16]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[17]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[18]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[19]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[20]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[21]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[22]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[23]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[24]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[25]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[26]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[27]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[28]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[29]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[30]", "tests/test_base32.py::test_decode_raises_on_non_ascii_str[31]", "tests/test_base32.py::test_decode_ulid_returns_16_bytes", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[0]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[1]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[2]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[3]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[4]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[5]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[6]", 
"tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[7]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[8]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[9]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[10]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[11]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[12]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[13]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[14]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[15]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[16]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[17]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[18]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[19]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[20]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[21]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[22]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[23]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[24]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[25]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[26]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[27]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[28]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[29]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[30]", "tests/test_base32.py::test_decode_ulid_raises_on_str_length_mismatch[31]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[0]", 
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[1]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[2]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[3]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[4]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[5]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[6]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[7]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[8]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[9]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[10]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[11]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[12]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[13]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[14]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[15]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[16]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[17]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[18]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[19]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[20]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[21]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[22]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[23]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[24]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[25]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[26]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[27]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[28]", 
"tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[29]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[30]", "tests/test_base32.py::test_decode_ulid_raises_on_non_ascii_str[31]", "tests/test_base32.py::test_decode_timestamp_returns_6_bytes", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[0]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[1]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[2]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[3]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[4]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[5]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[6]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[7]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[8]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[9]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[10]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[11]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[12]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[13]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[14]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[15]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[16]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[17]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[18]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[19]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[20]", 
"tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[21]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[22]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[23]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[24]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[25]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[26]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[27]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[28]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[29]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[30]", "tests/test_base32.py::test_decode_timestamp_raises_on_str_length_mismatch[31]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[0]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[1]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[2]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[3]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[4]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[5]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[6]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[7]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[8]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[9]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[10]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[11]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[12]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[13]", 
"tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[14]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[15]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[16]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[17]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[18]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[19]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[20]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[21]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[22]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[23]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[24]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[25]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[26]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[27]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[28]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[29]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[30]", "tests/test_base32.py::test_decode_timestamp_raises_on_non_ascii_str[31]", "tests/test_base32.py::test_decode_randomness_returns_10_bytes", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[0]", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[1]", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[2]", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[3]", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[4]", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[5]", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[6]", 
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[7]", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[8]", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[9]", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[10]", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[11]", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[12]", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[13]", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[14]", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[15]", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[16]", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[17]", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[18]", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[19]", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[20]", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[21]", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[22]", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[23]", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[24]", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[25]", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[26]", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[27]", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[28]", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[29]", "tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[30]", 
"tests/test_base32.py::test_decode_randomness_raises_on_str_length_mismatch[31]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[0]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[1]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[2]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[3]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[4]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[5]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[6]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[7]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[8]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[9]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[10]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[11]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[12]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[13]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[14]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[15]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[16]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[17]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[18]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[19]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[20]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[21]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[22]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[23]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[24]", 
"tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[25]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[26]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[27]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[28]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[29]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[30]", "tests/test_base32.py::test_decode_randomness_raises_on_non_ascii_str[31]" ]
[]
Apache License 2.0
1,810
[ "ulid/base32.py" ]
[ "ulid/base32.py" ]
jjhelmus__pyfive-38
3bb186febb5f9afa8994208f172457b49500ab92
2017-10-26 22:07:37
d21662e6c95ed08a0b909f9aef4ee3f2428c0036
diff --git a/pyfive/high_level.py b/pyfive/high_level.py index 121b980..7bcaded 100644 --- a/pyfive/high_level.py +++ b/pyfive/high_level.py @@ -321,6 +321,13 @@ class Dataset(object): """ dtype attribute. """ return self._dataobjects.dtype + @property + def value(self): + """ alias for dataset[()]. """ + DeprecationWarning( + "dataset.value has been deprecated. Use dataset[()] instead.") + return self[()] + @property def size(self): """ size attribute. """
Access values with path One thing that is not yet possible and that would help a lot. For example, with version 0.2.0: Using Python 2.7.12 (default, Jul 1 2016, 15:12:24) [GCC 5.4.0 20160609] on linux2 > > > import h5py > > > import pyfive > > > > > > f5 = h5py.File('tests/latest.hdf5') > > > f5["group1/subgroup1/dataset3"].value > > > array([ 0., 1., 2., 3.], dtype=float32) > > > ffive = pyfive.File('tests/latest.hdf5') > > > ffive["group1/subgroup1/dataset3"].value > > > Traceback (most recent call last): > > > File "<stdin>", line 1, in <module> > > > File "pyfive/high_level.py", line 48, in **getitem** > > > raise KeyError('%s not found in group' % (y)) > > > KeyError: 'group1/subgroup1/dataset3 not found in group'
jjhelmus/pyfive
diff --git a/tests/test_high_level.py b/tests/test_high_level.py index c92eca1..7741cbd 100644 --- a/tests/test_high_level.py +++ b/tests/test_high_level.py @@ -92,6 +92,9 @@ def test_dataset_class(): assert_array_equal(dset1[:], np.arange(4)) assert_array_equal(dset2[:], np.arange(4)) + assert_array_equal(dset1.value, np.arange(4)) + assert_array_equal(dset2.value, np.arange(4)) + assert dset1.len() == 4 assert dset2.len() == 4
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 1 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "numpy", "pip_packages": [ "numpy", "nose", "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 importlib-metadata==4.8.3 iniconfig==1.1.1 nose==1.3.7 numpy @ file:///tmp/build/80754af9/numpy_and_numpy_base_1603483703303/work packaging==21.3 pluggy==1.0.0 py==1.11.0 -e git+https://github.com/jjhelmus/pyfive.git@3bb186febb5f9afa8994208f172457b49500ab92#egg=pyfive pyparsing==3.1.4 pytest==7.0.1 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: pyfive channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - blas=1.0=openblas - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgfortran-ng=11.2.0=h00389a5_1 - libgfortran5=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libopenblas=0.3.21=h043d6bf_0 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - numpy=1.19.2=py36h6163131_0 - numpy-base=1.19.2=py36h75fe3a5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - nose==1.3.7 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/pyfive
[ "tests/test_high_level.py::test_dataset_class" ]
[]
[ "tests/test_high_level.py::test_file_class", "tests/test_high_level.py::test_group_class", "tests/test_high_level.py::test_get_objects_by_path", "tests/test_high_level.py::test_astype", "tests/test_high_level.py::test_read_direct" ]
[]
BSD 3-Clause "New" or "Revised" License
1,811
[ "pyfive/high_level.py" ]
[ "pyfive/high_level.py" ]