instance_id
stringlengths 10
57
| patch
stringlengths 261
37.7k
| repo
stringlengths 7
53
| base_commit
stringlengths 40
40
| hints_text
stringclasses 301
values | test_patch
stringlengths 212
2.22M
| problem_statement
stringlengths 23
37.7k
| version
stringclasses 1
value | environment_setup_commit
stringlengths 40
40
| FAIL_TO_PASS
listlengths 1
4.94k
| PASS_TO_PASS
listlengths 0
7.82k
| meta
dict | created_at
stringlengths 25
25
| license
stringclasses 8
values | __index_level_0__
int64 0
6.41k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
maximkulkin__lollipop-53
|
diff --git a/lollipop/types.py b/lollipop/types.py
index 5f3ac70..3ec8a50 100644
--- a/lollipop/types.py
+++ b/lollipop/types.py
@@ -1,6 +1,6 @@
from lollipop.errors import ValidationError, ValidationErrorBuilder, \
ErrorMessagesMixin, merge_errors
-from lollipop.utils import is_list, is_dict, make_context_aware, \
+from lollipop.utils import is_sequence, is_mapping, make_context_aware, \
constant, identity, OpenStruct
from lollipop.compat import string_types, int_types, iteritems, OrderedDict
import datetime
@@ -377,7 +377,7 @@ class List(Type):
self._fail('required')
# TODO: Make more intelligent check for collections
- if not is_list(data):
+ if not is_sequence(data):
self._fail('invalid')
errors_builder = ValidationErrorBuilder()
@@ -395,7 +395,7 @@ class List(Type):
if value is MISSING or value is None:
self._fail('required')
- if not is_list(value):
+ if not is_sequence(value):
self._fail('invalid')
errors_builder = ValidationErrorBuilder()
@@ -422,6 +422,7 @@ class Tuple(Type):
Example: ::
Tuple([String(), Integer(), Boolean()]).load(['foo', 123, False])
+ # => ('foo', 123, False)
:param list item_types: List of item types.
:param kwargs: Same keyword arguments as for :class:`Type`.
@@ -439,11 +440,13 @@ class Tuple(Type):
if data is MISSING or data is None:
self._fail('required')
- if not is_list(data):
+ if not is_sequence(data):
self._fail('invalid')
if len(data) != len(self.item_types):
- self._fail('invalid_length', expected_length=len(self.item_types))
+ self._fail('invalid_length',
+ expected_length=len(self.item_types),
+ actual_length=len(data))
errors_builder = ValidationErrorBuilder()
result = []
@@ -454,13 +457,13 @@ class Tuple(Type):
errors_builder.add_errors({idx: ve.messages})
errors_builder.raise_errors()
- return super(Tuple, self).load(result, *args, **kwargs)
+ return tuple(super(Tuple, self).load(result, *args, **kwargs))
def dump(self, value, *args, **kwargs):
if value is MISSING or value is None:
self._fail('required')
- if not is_list(value):
+ if not is_sequence(value):
self._fail('invalid')
if len(value) != len(self.item_types):
@@ -560,7 +563,7 @@ class OneOf(Type):
if data is MISSING or data is None:
self._fail('required')
- if is_dict(self.types) and self.load_hint:
+ if is_mapping(self.types) and self.load_hint:
type_id = self.load_hint(data)
if type_id not in self.types:
self._fail('unknown_type_id', type_id=type_id)
@@ -569,7 +572,8 @@ class OneOf(Type):
result = item_type.load(data, *args, **kwargs)
return super(OneOf, self).load(result, *args, **kwargs)
else:
- for item_type in (self.types.values() if is_dict(self.types) else self.types):
+ for item_type in (self.types.values()
+ if is_mapping(self.types) else self.types):
try:
result = item_type.load(data, *args, **kwargs)
return super(OneOf, self).load(result, *args, **kwargs)
@@ -582,7 +586,7 @@ class OneOf(Type):
if data is MISSING or data is None:
self._fail('required')
- if is_dict(self.types) and self.dump_hint:
+ if is_mapping(self.types) and self.dump_hint:
type_id = self.dump_hint(data)
if type_id not in self.types:
self._fail('unknown_type_id', type_id=type_id)
@@ -591,7 +595,8 @@ class OneOf(Type):
result = item_type.dump(data, *args, **kwargs)
return super(OneOf, self).dump(result, *args, **kwargs)
else:
- for item_type in (self.types.values() if is_dict(self.types) else self.types):
+ for item_type in (self.types.values()
+ if is_mapping(self.types) else self.types):
try:
result = item_type.dump(data, *args, **kwargs)
return super(OneOf, self).dump(result, *args, **kwargs)
@@ -668,7 +673,7 @@ class Dict(Type):
if data is MISSING or data is None:
self._fail('required')
- if not is_dict(data):
+ if not is_mapping(data):
self._fail('invalid')
errors_builder = ValidationErrorBuilder()
@@ -695,7 +700,7 @@ class Dict(Type):
if value is MISSING or value is None:
self._fail('required')
- if not is_dict(value):
+ if not is_mapping(value):
self._fail('invalid')
errors_builder = ValidationErrorBuilder()
@@ -1091,10 +1096,10 @@ class Object(Type):
if isinstance(bases_or_fields, Type):
bases = [bases_or_fields]
- if is_list(bases_or_fields) and \
+ if is_sequence(bases_or_fields) and \
all([isinstance(base, Type) for base in bases_or_fields]):
bases = bases_or_fields
- elif is_list(bases_or_fields) or is_dict(bases_or_fields):
+ elif is_sequence(bases_or_fields) or is_mapping(bases_or_fields):
if fields is None:
bases = []
fields = bases_or_fields
@@ -1108,9 +1113,9 @@ class Object(Type):
self._allow_extra_fields = allow_extra_fields
self._immutable = immutable
self._ordered = ordered
- if only is not None and not is_list(only):
+ if only is not None and not is_sequence(only):
only = [only]
- if exclude is not None and not is_list(exclude):
+ if exclude is not None and not is_sequence(exclude):
exclude = [exclude]
self._only = only
self._exclude = exclude
@@ -1155,7 +1160,8 @@ class Object(Type):
if fields is not None:
all_fields += [
(name, self._normalize_field(field))
- for name, field in (iteritems(fields) if is_dict(fields) else fields)
+ for name, field in (iteritems(fields)
+ if is_mapping(fields) else fields)
]
return OrderedDict(all_fields)
@@ -1164,7 +1170,7 @@ class Object(Type):
if data is MISSING or data is None:
self._fail('required')
- if not is_dict(data):
+ if not is_mapping(data):
self._fail('invalid')
errors_builder = ValidationErrorBuilder()
@@ -1213,7 +1219,7 @@ class Object(Type):
if data is None:
self._fail('required')
- if not is_dict(data):
+ if not is_mapping(data):
self._fail('invalid')
errors_builder = ValidationErrorBuilder()
@@ -1528,7 +1534,7 @@ def validated_type(base_type, name=None, validate=None):
"""
if validate is None:
validate = []
- if not is_list(validate):
+ if not is_sequence(validate):
validate = [validate]
class ValidatedSubtype(base_type):
diff --git a/lollipop/utils.py b/lollipop/utils.py
index 596706c..fa0bb4b 100644
--- a/lollipop/utils.py
+++ b/lollipop/utils.py
@@ -1,6 +1,7 @@
import inspect
import re
from lollipop.compat import DictMixin, iterkeys
+import collections
def identity(value):
@@ -14,14 +15,18 @@ def constant(value):
return func
-def is_list(value):
+def is_sequence(value):
"""Returns True if value supports list interface; False - otherwise"""
- return isinstance(value, list)
+ return isinstance(value, collections.Sequence)
-
-def is_dict(value):
+def is_mapping(value):
"""Returns True if value supports dict interface; False - otherwise"""
- return isinstance(value, dict)
+ return isinstance(value, collections.Mapping)
+
+
+# Backward compatibility
+is_list = is_sequence
+is_dict = is_mapping
def make_context_aware(func, numargs):
diff --git a/lollipop/validators.py b/lollipop/validators.py
index 8652d96..c29b1da 100644
--- a/lollipop/validators.py
+++ b/lollipop/validators.py
@@ -1,7 +1,7 @@
from lollipop.errors import ValidationError, ValidationErrorBuilder, \
ErrorMessagesMixin
from lollipop.compat import string_types, iteritems
-from lollipop.utils import make_context_aware, is_list, identity
+from lollipop.utils import make_context_aware, is_sequence, identity
import re
@@ -291,7 +291,7 @@ class Unique(Validator):
self._error_messages['unique'] = error
def __call__(self, value, context=None):
- if not is_list(value):
+ if not is_sequence(value):
self._fail('invalid')
seen = set()
@@ -318,12 +318,12 @@ class Each(Validator):
def __init__(self, validators, **kwargs):
super(Validator, self).__init__(**kwargs)
- if not is_list(validators):
+ if not is_sequence(validators):
validators = [validators]
self.validators = validators
def __call__(self, value, context=None):
- if not is_list(value):
+ if not is_sequence(value):
self._fail('invalid', data=value)
error_builder = ValidationErrorBuilder()
|
maximkulkin/lollipop
|
f7c50ac54610b7965d41f28d0ee5ee5e24dea0ee
|
diff --git a/tests/test_types.py b/tests/test_types.py
index 2ab07e1..e652fb6 100644
--- a/tests/test_types.py
+++ b/tests/test_types.py
@@ -509,7 +509,7 @@ class TestList(NameDescriptionTestsMixin, RequiredTestsMixin, ValidationTestsMix
def test_loading_non_list_value_raises_ValidationError(self):
with pytest.raises(ValidationError) as exc_info:
- List(String()).load('1, 2, 3')
+ List(String()).load(123)
assert exc_info.value.messages == List.default_error_messages['invalid']
def test_loading_list_value_with_items_of_incorrect_type_raises_ValidationError(self):
@@ -542,9 +542,13 @@ class TestList(NameDescriptionTestsMixin, RequiredTestsMixin, ValidationTestsMix
def test_dumping_list_value(self):
assert List(String()).dump(['foo', 'bar', 'baz']) == ['foo', 'bar', 'baz']
+ def test_dumping_sequence_value(self):
+ assert List(String()).dump(('foo', 'bar', 'baz')) == ['foo', 'bar', 'baz']
+ assert List(String()).dump('foobar') == ['f', 'o', 'o', 'b', 'a', 'r']
+
def test_dumping_non_list_value_raises_ValidationError(self):
with pytest.raises(ValidationError) as exc_info:
- List(String()).dump('1, 2, 3')
+ List(String()).dump(123)
assert exc_info.value.messages == List.default_error_messages['invalid']
def test_dumping_list_value_with_items_of_incorrect_type_raises_ValidationError(self):
@@ -563,15 +567,15 @@ class TestList(NameDescriptionTestsMixin, RequiredTestsMixin, ValidationTestsMix
class TestTuple(NameDescriptionTestsMixin, RequiredTestsMixin, ValidationTestsMixin):
tested_type = partial(Tuple, [Integer(), Integer()])
valid_data = [123, 456]
- valid_value = [123, 456]
+ valid_value = (123, 456)
def test_loading_tuple_with_values_of_same_type(self):
assert Tuple([Integer(), Integer()]).load([123, 456]) == \
- [123, 456]
+ (123, 456)
def test_loading_tuple_with_values_of_different_type(self):
assert Tuple([String(), Integer(), Boolean()]).load(['foo', 123, False]) == \
- ['foo', 123, False]
+ ('foo', 123, False)
def test_loading_non_tuple_value_raises_ValidationError(self):
with pytest.raises(ValidationError) as exc_info:
@@ -596,23 +600,35 @@ class TestTuple(NameDescriptionTestsMixin, RequiredTestsMixin, ValidationTestsMi
assert inner_type.load_context == context
def test_dump_tuple(self):
- assert Tuple([Integer(), Integer()]).dump([123, 456]) == [123, 456]
+ assert Tuple([String(), Integer()]).dump(('hello', 123)) == ['hello', 123]
+
+ def test_dump_sequence(self):
+ assert Tuple([String(), Integer()]).dump(['hello', 123]) == ['hello', 123]
def test_dumping_non_tuple_raises_ValidationError(self):
with pytest.raises(ValidationError) as exc_info:
- Tuple(String()).dump('foo')
+ Tuple([String()]).dump(123)
assert exc_info.value.messages == Tuple.default_error_messages['invalid']
+ def test_dumping_sequence_of_incorrect_length_raises_ValidationError(self):
+ with pytest.raises(ValidationError) as exc_info:
+ Tuple([String(), Integer()]).dump(['hello', 123, 456])
+ assert exc_info.value.messages == \
+ Tuple.default_error_messages['invalid_length'].format(
+ expected_length=2,
+ actual_length=3,
+ )
+
def test_dumping_tuple_with_items_of_incorrect_type_raises_ValidationError(self):
with pytest.raises(ValidationError) as exc_info:
- Tuple([String(), String()]).dump([123, 456])
+ Tuple([String(), String()]).dump(('hello', 456))
message = String.default_error_messages['invalid']
- assert exc_info.value.messages == {0: message, 1: message}
+ assert exc_info.value.messages == {1: message}
def test_dumping_tuple_passes_context_to_inner_type_dump(self):
inner_type = SpyType()
context = object()
- Tuple([inner_type, inner_type]).dump(['foo','foo'], context)
+ Tuple([inner_type, inner_type]).dump(('foo','foo'), context)
assert inner_type.dump_context == context
diff --git a/tests/test_validators.py b/tests/test_validators.py
index fa84610..5d6421a 100644
--- a/tests/test_validators.py
+++ b/tests/test_validators.py
@@ -318,7 +318,7 @@ class TestRegexp:
class TestUnique:
def test_raising_ValidationError_if_value_is_not_collection(self):
with raises(ValidationError) as exc_info:
- Unique()('foo')
+ Unique()(123)
assert exc_info.value.messages == Unique.default_error_messages['invalid']
def test_matching_empty_collection(self):
@@ -371,7 +371,7 @@ is_small = Predicate(lambda x: x <= 5, 'Value should be small')
class TestEach:
def test_raising_ValidationError_if_value_is_not_collection(self):
with raises(ValidationError) as exc_info:
- Each(lambda x: x)('foo')
+ Each(lambda x: x)(123)
assert exc_info.value.messages == Each.default_error_messages['invalid']
def test_matching_empty_collections(self):
|
Misleading Tuple type
Is it on purpose that the Tuple type dumps into / loads from a list instead of a tuple? If yes, I think the naming is a bit misleading…
```
Tuple([String(), Integer(), Boolean()]).load(('foo', 123, False))
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "site-packages\lollipop\types.py", line 443, in load self._fail('invalid')
File "site-packages\lollipop\errors.py", line 63, in _fail raise ValidationError(msg)
lollipop.errors.ValidationError: Invalid data: 'Value should be list'
```
|
0.0
|
f7c50ac54610b7965d41f28d0ee5ee5e24dea0ee
|
[
"tests/test_types.py::TestList::test_dumping_sequence_value",
"tests/test_types.py::TestTuple::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestTuple::test_loading_tuple_with_values_of_same_type",
"tests/test_types.py::TestTuple::test_loading_tuple_with_values_of_different_type",
"tests/test_types.py::TestTuple::test_dump_tuple",
"tests/test_types.py::TestTuple::test_dumping_tuple_with_items_of_incorrect_type_raises_ValidationError",
"tests/test_types.py::TestTuple::test_dumping_tuple_passes_context_to_inner_type_dump"
] |
[
"tests/test_types.py::TestString::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestString::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestString::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestString::test_loading_passes_context_to_validator",
"tests/test_types.py::TestString::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestString::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestString::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestString::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestString::test_loading_None_raises_required_error",
"tests/test_types.py::TestString::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestString::test_dumping_None_raises_required_error",
"tests/test_types.py::TestString::test_name",
"tests/test_types.py::TestString::test_description",
"tests/test_types.py::TestString::test_loading_string_value",
"tests/test_types.py::TestString::test_loading_non_string_value_raises_ValidationError",
"tests/test_types.py::TestString::test_dumping_string_value",
"tests/test_types.py::TestString::test_dumping_non_string_value_raises_ValidationError",
"tests/test_types.py::TestNumber::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestNumber::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestNumber::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestNumber::test_loading_passes_context_to_validator",
"tests/test_types.py::TestNumber::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestNumber::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestNumber::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestNumber::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestNumber::test_loading_None_raises_required_error",
"tests/test_types.py::TestNumber::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestNumber::test_dumping_None_raises_required_error",
"tests/test_types.py::TestNumber::test_name",
"tests/test_types.py::TestNumber::test_description",
"tests/test_types.py::TestNumber::test_loading_float_value",
"tests/test_types.py::TestNumber::test_loading_non_numeric_value_raises_ValidationError",
"tests/test_types.py::TestNumber::test_dumping_float_value",
"tests/test_types.py::TestNumber::test_dumping_non_numeric_value_raises_ValidationError",
"tests/test_types.py::TestInteger::test_loading_integer_value",
"tests/test_types.py::TestInteger::test_loading_long_value",
"tests/test_types.py::TestInteger::test_loading_non_numeric_value_raises_ValidationError",
"tests/test_types.py::TestInteger::test_dumping_integer_value",
"tests/test_types.py::TestInteger::test_dumping_long_value",
"tests/test_types.py::TestInteger::test_dumping_non_numeric_value_raises_ValidationError",
"tests/test_types.py::TestFloat::test_loading_float_value",
"tests/test_types.py::TestFloat::test_loading_non_numeric_value_raises_ValidationError",
"tests/test_types.py::TestFloat::test_dumping_float_value",
"tests/test_types.py::TestFloat::test_dumping_non_numeric_value_raises_ValidationError",
"tests/test_types.py::TestBoolean::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestBoolean::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestBoolean::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestBoolean::test_loading_passes_context_to_validator",
"tests/test_types.py::TestBoolean::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestBoolean::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestBoolean::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestBoolean::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestBoolean::test_loading_None_raises_required_error",
"tests/test_types.py::TestBoolean::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestBoolean::test_dumping_None_raises_required_error",
"tests/test_types.py::TestBoolean::test_name",
"tests/test_types.py::TestBoolean::test_description",
"tests/test_types.py::TestBoolean::test_loading_boolean_value",
"tests/test_types.py::TestBoolean::test_loading_non_boolean_value_raises_ValidationError",
"tests/test_types.py::TestBoolean::test_dumping_boolean_value",
"tests/test_types.py::TestBoolean::test_dumping_non_boolean_value_raises_ValidationError",
"tests/test_types.py::TestDateTime::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestDateTime::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestDateTime::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestDateTime::test_loading_passes_context_to_validator",
"tests/test_types.py::TestDateTime::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestDateTime::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestDateTime::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestDateTime::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestDateTime::test_loading_None_raises_required_error",
"tests/test_types.py::TestDateTime::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestDateTime::test_dumping_None_raises_required_error",
"tests/test_types.py::TestDateTime::test_name",
"tests/test_types.py::TestDateTime::test_description",
"tests/test_types.py::TestDateTime::test_loading_string_date",
"tests/test_types.py::TestDateTime::test_loading_using_predefined_format",
"tests/test_types.py::TestDateTime::test_loading_using_custom_format",
"tests/test_types.py::TestDateTime::test_loading_raises_ValidationError_if_value_is_not_string",
"tests/test_types.py::TestDateTime::test_customizing_error_message_if_value_is_not_string",
"tests/test_types.py::TestDateTime::test_loading_raises_ValidationError_if_value_string_does_not_match_date_format",
"tests/test_types.py::TestDateTime::test_customizing_error_message_if_value_string_does_not_match_date_format",
"tests/test_types.py::TestDateTime::test_loading_passes_deserialized_date_to_validator",
"tests/test_types.py::TestDateTime::test_dumping_date",
"tests/test_types.py::TestDateTime::test_dumping_using_predefined_format",
"tests/test_types.py::TestDateTime::test_dumping_using_custom_format",
"tests/test_types.py::TestDateTime::test_dumping_raises_ValidationError_if_value_is_not_string",
"tests/test_types.py::TestDate::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestDate::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestDate::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestDate::test_loading_passes_context_to_validator",
"tests/test_types.py::TestDate::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestDate::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestDate::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestDate::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestDate::test_loading_None_raises_required_error",
"tests/test_types.py::TestDate::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestDate::test_dumping_None_raises_required_error",
"tests/test_types.py::TestDate::test_name",
"tests/test_types.py::TestDate::test_description",
"tests/test_types.py::TestDate::test_loading_string_date",
"tests/test_types.py::TestDate::test_loading_using_predefined_format",
"tests/test_types.py::TestDate::test_loading_using_custom_format",
"tests/test_types.py::TestDate::test_loading_raises_ValidationError_if_value_is_not_string",
"tests/test_types.py::TestDate::test_customizing_error_message_if_value_is_not_string",
"tests/test_types.py::TestDate::test_loading_raises_ValidationError_if_value_string_does_not_match_date_format",
"tests/test_types.py::TestDate::test_customizing_error_message_if_value_string_does_not_match_date_format",
"tests/test_types.py::TestDate::test_loading_passes_deserialized_date_to_validator",
"tests/test_types.py::TestDate::test_dumping_date",
"tests/test_types.py::TestDate::test_dumping_using_predefined_format",
"tests/test_types.py::TestDate::test_dumping_using_custom_format",
"tests/test_types.py::TestDate::test_dumping_raises_ValidationError_if_value_is_not_string",
"tests/test_types.py::TestTime::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestTime::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestTime::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestTime::test_loading_passes_context_to_validator",
"tests/test_types.py::TestTime::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestTime::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestTime::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestTime::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestTime::test_loading_None_raises_required_error",
"tests/test_types.py::TestTime::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestTime::test_dumping_None_raises_required_error",
"tests/test_types.py::TestTime::test_name",
"tests/test_types.py::TestTime::test_description",
"tests/test_types.py::TestTime::test_loading_string_date",
"tests/test_types.py::TestTime::test_loading_using_custom_format",
"tests/test_types.py::TestTime::test_loading_raises_ValidationError_if_value_is_not_string",
"tests/test_types.py::TestTime::test_customizing_error_message_if_value_is_not_string",
"tests/test_types.py::TestTime::test_loading_raises_ValidationError_if_value_string_does_not_match_date_format",
"tests/test_types.py::TestTime::test_customizing_error_message_if_value_string_does_not_match_date_format",
"tests/test_types.py::TestTime::test_loading_passes_deserialized_date_to_validator",
"tests/test_types.py::TestTime::test_dumping_date",
"tests/test_types.py::TestTime::test_dumping_using_custom_format",
"tests/test_types.py::TestTime::test_dumping_raises_ValidationError_if_value_is_not_string",
"tests/test_types.py::TestList::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestList::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestList::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestList::test_loading_passes_context_to_validator",
"tests/test_types.py::TestList::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestList::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestList::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestList::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestList::test_loading_None_raises_required_error",
"tests/test_types.py::TestList::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestList::test_dumping_None_raises_required_error",
"tests/test_types.py::TestList::test_name",
"tests/test_types.py::TestList::test_description",
"tests/test_types.py::TestList::test_loading_list_value",
"tests/test_types.py::TestList::test_loading_non_list_value_raises_ValidationError",
"tests/test_types.py::TestList::test_loading_list_value_with_items_of_incorrect_type_raises_ValidationError",
"tests/test_types.py::TestList::test_loading_list_value_with_items_that_have_validation_errors_raises_ValidationError",
"tests/test_types.py::TestList::test_loading_does_not_validate_whole_list_if_items_have_errors",
"tests/test_types.py::TestList::test_loading_passes_context_to_inner_type_load",
"tests/test_types.py::TestList::test_dumping_list_value",
"tests/test_types.py::TestList::test_dumping_non_list_value_raises_ValidationError",
"tests/test_types.py::TestList::test_dumping_list_value_with_items_of_incorrect_type_raises_ValidationError",
"tests/test_types.py::TestList::test_dumping_passes_context_to_inner_type_dump",
"tests/test_types.py::TestTuple::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestTuple::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestTuple::test_loading_passes_context_to_validator",
"tests/test_types.py::TestTuple::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestTuple::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestTuple::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestTuple::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestTuple::test_loading_None_raises_required_error",
"tests/test_types.py::TestTuple::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestTuple::test_dumping_None_raises_required_error",
"tests/test_types.py::TestTuple::test_name",
"tests/test_types.py::TestTuple::test_description",
"tests/test_types.py::TestTuple::test_loading_non_tuple_value_raises_ValidationError",
"tests/test_types.py::TestTuple::test_loading_tuple_with_items_of_incorrect_type_raises_ValidationError",
"tests/test_types.py::TestTuple::test_loading_tuple_with_items_that_have_validation_errors_raises_ValidationErrors",
"tests/test_types.py::TestTuple::test_loading_passes_context_to_inner_type_load",
"tests/test_types.py::TestTuple::test_dump_sequence",
"tests/test_types.py::TestTuple::test_dumping_non_tuple_raises_ValidationError",
"tests/test_types.py::TestTuple::test_dumping_sequence_of_incorrect_length_raises_ValidationError",
"tests/test_types.py::TestDict::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestDict::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestDict::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestDict::test_loading_passes_context_to_validator",
"tests/test_types.py::TestDict::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestDict::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestDict::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestDict::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestDict::test_loading_None_raises_required_error",
"tests/test_types.py::TestDict::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestDict::test_dumping_None_raises_required_error",
"tests/test_types.py::TestDict::test_name",
"tests/test_types.py::TestDict::test_description",
"tests/test_types.py::TestDict::test_loading_dict_with_custom_key_type",
"tests/test_types.py::TestDict::test_loading_accepts_any_key_if_key_type_is_not_specified",
"tests/test_types.py::TestDict::test_loading_dict_with_values_of_the_same_type",
"tests/test_types.py::TestDict::test_loading_dict_with_values_of_different_types",
"tests/test_types.py::TestDict::test_loading_accepts_any_value_if_value_types_are_not_specified",
"tests/test_types.py::TestDict::test_loading_non_dict_value_raises_ValidationError",
"tests/test_types.py::TestDict::test_loading_dict_with_incorrect_key_value_raises_ValidationError",
"tests/test_types.py::TestDict::test_loading_dict_with_items_of_incorrect_type_raises_ValidationError",
"tests/test_types.py::TestDict::test_loading_dict_with_items_that_have_validation_errors_raises_ValidationError",
"tests/test_types.py::TestDict::test_loading_does_not_validate_whole_list_if_items_have_errors",
"tests/test_types.py::TestDict::test_loading_dict_with_incorrect_key_value_and_incorrect_value_raises_ValidationError_with_both_errors",
"tests/test_types.py::TestDict::test_loading_passes_context_to_inner_type_load",
"tests/test_types.py::TestDict::test_dumping_dict_with_custom_key_type",
"tests/test_types.py::TestDict::test_dumping_accepts_any_key_if_key_type_is_not_specified",
"tests/test_types.py::TestDict::test_dumping_dict_with_values_of_the_same_type",
"tests/test_types.py::TestDict::test_dumping_dict_with_values_of_different_types",
"tests/test_types.py::TestDict::test_dumping_accepts_any_value_if_value_types_are_not_specified",
"tests/test_types.py::TestDict::test_dumping_non_dict_value_raises_ValidationError",
"tests/test_types.py::TestDict::test_dumping_dict_with_incorrect_key_value_raises_ValidationError",
"tests/test_types.py::TestDict::test_dumping_dict_with_items_of_incorrect_type_raises_ValidationError",
"tests/test_types.py::TestDict::test_dumping_dict_with_incorrect_key_value_and_incorrect_value_raises_ValidationError_with_both_errors",
"tests/test_types.py::TestDict::test_dumping_passes_context_to_inner_type_dump",
"tests/test_types.py::TestOneOf::test_loading_values_of_one_of_listed_types",
"tests/test_types.py::TestOneOf::test_loading_raises_ValidationError_if_value_is_of_unlisted_type",
"tests/test_types.py::TestOneOf::test_loading_raises_ValidationError_if_deserialized_value_has_errors",
"tests/test_types.py::TestOneOf::test_loading_raises_ValidationError_if_type_hint_is_unknown",
"tests/test_types.py::TestOneOf::test_loading_with_type_hinting",
"tests/test_types.py::TestOneOf::test_loading_with_type_hinting_raises_ValidationError_if_deserialized_value_has_errors",
"tests/test_types.py::TestOneOf::test_dumping_values_of_one_of_listed_types",
"tests/test_types.py::TestOneOf::test_dumping_raises_ValidationError_if_value_is_of_unlisted_type",
"tests/test_types.py::TestOneOf::test_dumping_raises_ValidationError_if_type_hint_is_unknown",
"tests/test_types.py::TestOneOf::test_dumping_raises_ValidationError_if_serialized_value_has_errors",
"tests/test_types.py::TestOneOf::test_dumping_with_type_hinting",
"tests/test_types.py::TestOneOf::test_dumping_with_type_hinting_raises_ValidationError_if_deserialized_value_has_errors",
"tests/test_types.py::TestAttributeField::test_getting_value_returns_value_of_given_object_attribute",
"tests/test_types.py::TestAttributeField::test_getting_value_returns_value_of_configured_object_attribute",
"tests/test_types.py::TestAttributeField::test_getting_value_returns_value_of_field_name_transformed_with_given_name_transformation",
"tests/test_types.py::TestAttributeField::test_setting_value_sets_given_value_to_given_object_attribute",
"tests/test_types.py::TestAttributeField::test_setting_value_sets_given_value_to_configured_object_attribute",
"tests/test_types.py::TestAttributeField::test_setting_value_sets_given_value_to_field_name_transformed_with_given_name_transformation",
"tests/test_types.py::TestAttributeField::test_loading_value_with_field_type",
"tests/test_types.py::TestAttributeField::test_loading_given_attribute_regardless_of_attribute_override",
"tests/test_types.py::TestAttributeField::test_loading_missing_value_if_attribute_does_not_exist",
"tests/test_types.py::TestAttributeField::test_loading_passes_context_to_field_type_load",
"tests/test_types.py::TestAttributeField::test_dumping_given_attribute_from_object",
"tests/test_types.py::TestAttributeField::test_dumping_object_attribute_with_field_type",
"tests/test_types.py::TestAttributeField::test_dumping_a_different_attribute_from_object",
"tests/test_types.py::TestAttributeField::test_dumping_passes_context_to_field_type_dump",
"tests/test_types.py::TestMethodField::test_get_value_returns_result_of_calling_configured_method_on_object",
"tests/test_types.py::TestMethodField::test_get_value_returns_result_of_calling_method_calculated_by_given_function_on_object",
"tests/test_types.py::TestMethodField::test_get_value_returns_MISSING_if_get_method_is_not_specified",
"tests/test_types.py::TestMethodField::test_get_value_raises_ValueError_if_method_does_not_exist",
"tests/test_types.py::TestMethodField::test_get_value_raises_ValueError_if_property_is_not_callable",
"tests/test_types.py::TestMethodField::test_get_value_passes_context_to_method",
"tests/test_types.py::TestMethodField::test_set_value_calls_configure_method_on_object",
"tests/test_types.py::TestMethodField::test_set_value_calls_method_calculated_by_given_function_on_object",
"tests/test_types.py::TestMethodField::test_set_value_does_not_do_anything_if_set_method_is_not_specified",
"tests/test_types.py::TestMethodField::test_set_value_raises_ValueError_if_method_does_not_exist",
"tests/test_types.py::TestMethodField::test_set_value_raises_ValueError_if_property_is_not_callable",
"tests/test_types.py::TestMethodField::test_set_value_passes_context_to_method",
"tests/test_types.py::TestMethodField::test_loading_value_with_field_type",
"tests/test_types.py::TestMethodField::test_loading_value_returns_loaded_value",
"tests/test_types.py::TestMethodField::test_loading_value_passes_context_to_field_types_load",
"tests/test_types.py::TestMethodField::test_loading_value_into_existing_object_calls_field_types_load_into",
"tests/test_types.py::TestMethodField::test_loading_value_into_existing_object_calls_field_types_load_if_load_into_is_not_available",
"tests/test_types.py::TestMethodField::test_loading_value_into_existing_object_calls_field_types_load_if_old_value_is_None",
"tests/test_types.py::TestMethodField::test_loading_value_into_existing_object_calls_field_types_load_if_old_value_is_MISSING",
"tests/test_types.py::TestMethodField::test_loading_value_into_existing_object_passes_context_to_field_types_load_into",
"tests/test_types.py::TestMethodField::test_dumping_result_of_given_objects_method",
"tests/test_types.py::TestMethodField::test_dumping_result_of_objects_method_with_field_type",
"tests/test_types.py::TestMethodField::test_dumping_result_of_a_different_objects_method",
"tests/test_types.py::TestMethodField::test_dumping_raises_ValueError_if_given_method_does_not_exist",
"tests/test_types.py::TestMethodField::test_dumping_raises_ValueError_if_given_method_is_not_callable",
"tests/test_types.py::TestMethodField::test_dumping_passes_context_to_field_type_dump",
"tests/test_types.py::TestFunctionField::test_get_value_returns_result_of_calling_configured_function_with_object",
"tests/test_types.py::TestFunctionField::test_get_value_returns_MISSING_if_get_func_is_not_specified",
"tests/test_types.py::TestFunctionField::test_get_value_raises_ValueError_if_property_is_not_callable",
"tests/test_types.py::TestFunctionField::test_get_value_passes_context_to_func",
"tests/test_types.py::TestFunctionField::test_set_value_calls_configure_method_on_object",
"tests/test_types.py::TestFunctionField::test_set_value_does_not_do_anything_if_set_func_is_not_specified",
"tests/test_types.py::TestFunctionField::test_set_value_raises_ValueError_if_property_is_not_callable",
"tests/test_types.py::TestFunctionField::test_set_value_passes_context_to_func",
"tests/test_types.py::TestFunctionField::test_loading_value_with_field_type",
"tests/test_types.py::TestFunctionField::test_loading_value_returns_loaded_value",
"tests/test_types.py::TestFunctionField::test_loading_value_passes_context_to_field_types_load",
"tests/test_types.py::TestFunctionField::test_loading_value_into_existing_object_calls_field_types_load_into",
"tests/test_types.py::TestFunctionField::test_loading_value_into_existing_object_calls_field_types_load_if_load_into_is_not_available",
"tests/test_types.py::TestFunctionField::test_loading_value_into_existing_object_calls_field_types_load_if_old_value_is_None",
"tests/test_types.py::TestFunctionField::test_loading_value_into_existing_object_calls_field_types_load_if_old_value_is_MISSING",
"tests/test_types.py::TestFunctionField::test_loading_value_into_existing_object_passes_context_to_field_types_load_into",
"tests/test_types.py::TestFunctionField::test_dumping_result_of_given_function",
"tests/test_types.py::TestFunctionField::test_dumping_result_of_objects_method_with_field_type",
"tests/test_types.py::TestFunctionField::test_dumping_raises_ValueError_if_given_get_func_is_not_callable",
"tests/test_types.py::TestFunctionField::test_dumping_passes_context_to_field_type_dump",
"tests/test_types.py::TestConstant::test_name",
"tests/test_types.py::TestConstant::test_description",
"tests/test_types.py::TestConstant::test_loading_always_returns_missing",
"tests/test_types.py::TestConstant::test_loading_raises_ValidationError_if_loaded_value_is_not_a_constant_value_specified",
"tests/test_types.py::TestConstant::test_loading_value_with_inner_type_before_checking_value_correctness",
"tests/test_types.py::TestConstant::test_customizing_error_message_when_value_is_incorrect",
"tests/test_types.py::TestConstant::test_dumping_always_returns_given_value",
"tests/test_types.py::TestConstant::test_dumping_given_constant_with_field_type",
"tests/test_types.py::TestObject::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestObject::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestObject::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestObject::test_loading_passes_context_to_validator",
"tests/test_types.py::TestObject::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestObject::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestObject::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestObject::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestObject::test_loading_None_raises_required_error",
"tests/test_types.py::TestObject::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestObject::test_dumping_None_raises_required_error",
"tests/test_types.py::TestObject::test_name",
"tests/test_types.py::TestObject::test_description",
"tests/test_types.py::TestObject::test_default_field_type_is_unset_by_default",
"tests/test_types.py::TestObject::test_inheriting_default_field_type_from_first_base_class_that_has_it_set",
"tests/test_types.py::TestObject::test_constructor_is_unset_by_default",
"tests/test_types.py::TestObject::test_inheriting_constructor_from_first_base_class_that_has_it_set",
"tests/test_types.py::TestObject::test_allow_extra_fields_is_unset_by_default",
"tests/test_types.py::TestObject::test_inheriting_allow_extra_fields_from_first_base_class_that_has_it_set",
"tests/test_types.py::TestObject::test_immutable_is_unset_by_default",
"tests/test_types.py::TestObject::test_inheriting_immutable_from_first_base_class_that_has_it_set",
"tests/test_types.py::TestObject::test_ordered_is_unset_by_default",
"tests/test_types.py::TestObject::test_iheriting_ordered_from_first_base_class_that_has_it_set",
"tests/test_types.py::TestObject::test_loading_dict_value",
"tests/test_types.py::TestObject::test_loading_non_dict_values_raises_ValidationError",
"tests/test_types.py::TestObject::test_loading_bypasses_values_for_which_field_type_returns_missing_value",
"tests/test_types.py::TestObject::test_loading_dict_with_field_errors_raises_ValidationError_with_all_field_errors_merged",
"tests/test_types.py::TestObject::test_loading_dict_with_field_errors_does_not_run_whole_object_validators",
"tests/test_types.py::TestObject::test_loading_calls_field_load_passing_field_name_and_whole_data",
"tests/test_types.py::TestObject::test_loading_passes_context_to_inner_type_load",
"tests/test_types.py::TestObject::test_constructing_objects_with_default_constructor_on_load",
"tests/test_types.py::TestObject::test_constructing_custom_objects_on_load",
"tests/test_types.py::TestObject::test_load_ignores_extra_fields_by_default",
"tests/test_types.py::TestObject::test_load_raises_ValidationError_if_reporting_extra_fields",
"tests/test_types.py::TestObject::test_loading_inherited_fields",
"tests/test_types.py::TestObject::test_loading_multiple_inherited_fields",
"tests/test_types.py::TestObject::test_loading_raises_ValidationError_if_inherited_fields_have_errors",
"tests/test_types.py::TestObject::test_loading_only_specified_fields",
"tests/test_types.py::TestObject::test_loading_only_specified_fields_does_not_affect_own_fields",
"tests/test_types.py::TestObject::test_loading_all_but_specified_base_class_fields",
"tests/test_types.py::TestObject::test_loading_all_but_specified_fields_does_not_affect_own_fields",
"tests/test_types.py::TestObject::test_loading_values_into_existing_object",
"tests/test_types.py::TestObject::test_loading_values_into_existing_object_returns_that_object",
"tests/test_types.py::TestObject::test_loading_values_into_existing_object_passes_all_object_attributes_to_validators",
"tests/test_types.py::TestObject::test_loading_values_into_immutable_object_creates_a_copy",
"tests/test_types.py::TestObject::test_loading_values_into_immutable_object_does_not_modify_original_object",
"tests/test_types.py::TestObject::test_loading_values_into_nested_object_of_immutable_object_creates_copy_of_it_regardless_of_nested_objects_immutable_flag",
"tests/test_types.py::TestObject::test_loading_values_into_nested_object_of_immutable_object_does_not_modify_original_objects",
"tests/test_types.py::TestObject::test_loading_values_into_nested_objects_with_inplace_False_does_not_modify_original_objects",
"tests/test_types.py::TestObject::test_loading_values_into_existing_objects_ignores_missing_fields",
"tests/test_types.py::TestObject::test_loading_MISSING_into_existing_object_does_not_do_anything",
"tests/test_types.py::TestObject::test_loading_None_into_existing_objects_raises_ValidationError",
"tests/test_types.py::TestObject::test_loading_None_into_field_of_existing_object_passes_None_to_field",
"tests/test_types.py::TestObject::test_loading_values_into_existing_objects_raises_ValidationError_if_data_contains_errors",
"tests/test_types.py::TestObject::test_loading_values_into_existing_objects_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestObject::test_loading_values_into_existing_objects_annotates_field_errors_with_field_names",
"tests/test_types.py::TestObject::test_loading_values_into_existing_nested_objects",
"tests/test_types.py::TestObject::test_loading_values_into_existing_object_when_nested_object_does_not_exist",
"tests/test_types.py::TestObject::test_validating_data_for_existing_objects_returns_None_if_data_is_valid",
"tests/test_types.py::TestObject::test_validating_data_for_existing_objects_returns_errors_if_data_contains_errors",
"tests/test_types.py::TestObject::test_validating_data_for_existing_objects_returns_errors_if_validator_fails",
"tests/test_types.py::TestObject::test_validating_data_for_existing_objects_does_not_modify_original_objects",
"tests/test_types.py::TestObject::test_dumping_object_attributes",
"tests/test_types.py::TestObject::test_dumping_calls_field_dump_passing_field_name_and_whole_object",
"tests/test_types.py::TestObject::test_dumping_passes_context_to_inner_type_dump",
"tests/test_types.py::TestObject::test_dumping_inherited_fields",
"tests/test_types.py::TestObject::test_dumping_multiple_inherited_fields",
"tests/test_types.py::TestObject::test_dumping_only_specified_fields_of_base_classes",
"tests/test_types.py::TestObject::test_dumping_only_specified_fields_does_not_affect_own_fields",
"tests/test_types.py::TestObject::test_dumping_all_but_specified_base_class_fields",
"tests/test_types.py::TestObject::test_dumping_all_but_specified_fields_does_not_affect_own_fields",
"tests/test_types.py::TestObject::test_shortcut_for_specifying_constant_fields",
"tests/test_types.py::TestObject::test_dumping_fields_in_declared_order_if_ordered_is_True",
"tests/test_types.py::TestOptional::test_loading_value_calls_load_of_inner_type",
"tests/test_types.py::TestOptional::test_loading_missing_value_returns_None",
"tests/test_types.py::TestOptional::test_loading_None_returns_None",
"tests/test_types.py::TestOptional::test_loading_missing_value_does_not_call_inner_type_load",
"tests/test_types.py::TestOptional::test_loading_None_does_not_call_inner_type_load",
"tests/test_types.py::TestOptional::test_loading_passes_context_to_inner_type_load",
"tests/test_types.py::TestOptional::test_overriding_missing_value_on_load",
"tests/test_types.py::TestOptional::test_overriding_None_value_on_load",
"tests/test_types.py::TestOptional::test_using_function_to_override_value_on_load",
"tests/test_types.py::TestOptional::test_loading_passes_context_to_override_function",
"tests/test_types.py::TestOptional::test_dumping_value_calls_dump_of_inner_type",
"tests/test_types.py::TestOptional::test_dumping_missing_value_returns_None",
"tests/test_types.py::TestOptional::test_dumping_None_returns_None",
"tests/test_types.py::TestOptional::test_dumping_missing_value_does_not_call_inner_type_dump",
"tests/test_types.py::TestOptional::test_dumping_None_does_not_call_inner_type_dump",
"tests/test_types.py::TestOptional::test_dumping_passes_context_to_inner_type_dump",
"tests/test_types.py::TestOptional::test_overriding_missing_value_on_dump",
"tests/test_types.py::TestOptional::test_overriding_None_value_on_dump",
"tests/test_types.py::TestOptional::test_using_function_to_override_value_on_dump",
"tests/test_types.py::TestOptional::test_dumping_passes_context_to_override_function",
"tests/test_types.py::TestLoadOnly::test_name",
"tests/test_types.py::TestLoadOnly::test_description",
"tests/test_types.py::TestLoadOnly::test_loading_returns_inner_type_load_result",
"tests/test_types.py::TestLoadOnly::test_loading_passes_context_to_inner_type_load",
"tests/test_types.py::TestLoadOnly::test_dumping_always_returns_missing",
"tests/test_types.py::TestLoadOnly::test_dumping_does_not_call_inner_type_dump",
"tests/test_types.py::TestDumpOnly::test_name",
"tests/test_types.py::TestDumpOnly::test_description",
"tests/test_types.py::TestDumpOnly::test_loading_always_returns_missing",
"tests/test_types.py::TestDumpOnly::test_loading_does_not_call_inner_type_dump",
"tests/test_types.py::TestDumpOnly::test_dumping_returns_inner_type_dump_result",
"tests/test_types.py::TestDumpOnly::test_dumping_passes_context_to_inner_type_dump",
"tests/test_types.py::TestTransform::test_name",
"tests/test_types.py::TestTransform::test_description",
"tests/test_types.py::TestTransform::test_loading_calls_pre_load_with_original_value",
"tests/test_types.py::TestTransform::test_loading_calls_inner_type_load_with_result_of_pre_load",
"tests/test_types.py::TestTransform::test_loading_calls_post_load_with_result_of_inner_type_load",
"tests/test_types.py::TestTransform::test_transform_passes_context_to_inner_type_load",
"tests/test_types.py::TestTransform::test_transform_passes_context_to_pre_load",
"tests/test_types.py::TestTransform::test_transform_passes_context_to_post_load",
"tests/test_types.py::TestTransform::test_dumping_calls_pre_dump_with_original_value",
"tests/test_types.py::TestTransform::test_dumping_calls_inner_type_dump_with_result_of_pre_dump",
"tests/test_types.py::TestTransform::test_dumping_calls_post_dump_with_result_of_inner_type_dump",
"tests/test_types.py::TestTransform::test_transform_passes_context_to_inner_type_dump",
"tests/test_types.py::TestTransform::test_transform_passes_context_to_pre_dump",
"tests/test_types.py::TestTransform::test_transform_passes_context_to_post_dump",
"tests/test_types.py::TestValidatedType::test_returns_subclass_of_given_type",
"tests/test_types.py::TestValidatedType::test_returns_type_that_has_single_given_validator",
"tests/test_types.py::TestValidatedType::test_accepts_context_unaware_validators",
"tests/test_types.py::TestValidatedType::test_returns_type_that_has_multiple_given_validators",
"tests/test_types.py::TestValidatedType::test_specifying_more_validators_on_type_instantiation",
"tests/test_types.py::TestValidatedType::test_new_type_accepts_same_constructor_arguments_as_base_type",
"tests/test_validators.py::TestPredicate::test_matching_values",
"tests/test_validators.py::TestPredicate::test_raising_ValidationError_if_predicate_returns_False",
"tests/test_validators.py::TestPredicate::test_customizing_validation_error",
"tests/test_validators.py::TestPredicate::test_passing_context_to_predicate",
"tests/test_validators.py::TestRange::test_matching_min_value",
"tests/test_validators.py::TestRange::test_raising_ValidationError_when_matching_min_value_and_given_value_is_less",
"tests/test_validators.py::TestRange::test_customzing_min_error_message",
"tests/test_validators.py::TestRange::test_matching_max_value",
"tests/test_validators.py::TestRange::test_raising_ValidationError_when_matching_max_value_and_given_value_is_greater",
"tests/test_validators.py::TestRange::test_customzing_max_error_message",
"tests/test_validators.py::TestRange::test_matching_range",
"tests/test_validators.py::TestRange::test_raising_ValidationError_when_matching_range_and_given_value_is_less",
"tests/test_validators.py::TestRange::test_raising_ValidationError_when_matching_range_and_given_value_is_greater",
"tests/test_validators.py::TestRange::test_customzing_range_error_message",
"tests/test_validators.py::TestRange::test_customizing_all_error_messages_at_once",
"tests/test_validators.py::TestLength::test_matching_exact_value",
"tests/test_validators.py::TestLength::test_raising_ValidationError_when_matching_exact_value_and_given_value_does_not_match",
"tests/test_validators.py::TestLength::test_customizing_exact_error_message",
"tests/test_validators.py::TestLength::test_matching_min_value",
"tests/test_validators.py::TestLength::test_raising_ValidationError_when_matching_min_value_and_given_value_is_less",
"tests/test_validators.py::TestLength::test_customzing_min_error_message",
"tests/test_validators.py::TestLength::test_matching_max_value",
"tests/test_validators.py::TestLength::test_raising_ValidationError_when_matching_max_value_and_given_value_is_greater",
"tests/test_validators.py::TestLength::test_customzing_max_error_message",
"tests/test_validators.py::TestLength::test_matching_range",
"tests/test_validators.py::TestLength::test_raising_ValidationError_when_matching_range_and_given_value_is_less",
"tests/test_validators.py::TestLength::test_raising_ValidationError_when_matching_range_and_given_value_is_greater",
"tests/test_validators.py::TestLength::test_customzing_range_error_message",
"tests/test_validators.py::TestLength::test_customizing_all_error_messages_at_once",
"tests/test_validators.py::TestNoneOf::test_matching_values_other_than_given_values",
"tests/test_validators.py::TestNoneOf::test_raising_ValidationError_when_value_is_one_of_forbidden_values",
"tests/test_validators.py::TestNoneOf::test_customizing_error_message",
"tests/test_validators.py::TestAnyOf::test_matching_given_values",
"tests/test_validators.py::TestAnyOf::test_raising_ValidationError_when_value_is_other_than_given_values",
"tests/test_validators.py::TestAnyOf::test_customizing_error_message",
"tests/test_validators.py::TestRegexp::test_matching_by_string_regexp",
"tests/test_validators.py::TestRegexp::test_matching_by_string_regexp_with_flags",
"tests/test_validators.py::TestRegexp::test_matching_by_regexp",
"tests/test_validators.py::TestRegexp::test_matching_by_regexp_ignores_flags",
"tests/test_validators.py::TestRegexp::test_raising_ValidationError_if_given_string_does_not_match_string_regexp",
"tests/test_validators.py::TestRegexp::test_raising_ValidationError_if_given_string_does_not_match_regexp",
"tests/test_validators.py::TestRegexp::test_customizing_error_message",
"tests/test_validators.py::TestUnique::test_raising_ValidationError_if_value_is_not_collection",
"tests/test_validators.py::TestUnique::test_matching_empty_collection",
"tests/test_validators.py::TestUnique::test_matching_collection_of_unique_values",
"tests/test_validators.py::TestUnique::test_matching_collection_of_values_with_unique_custom_keys",
"tests/test_validators.py::TestUnique::test_raising_ValidationError_if_item_appears_more_than_once",
"tests/test_validators.py::TestUnique::test_raising_ValidationError_if_custom_key_appears_more_than_once",
"tests/test_validators.py::TestUnique::test_customizing_error_message",
"tests/test_validators.py::TestEach::test_raising_ValidationError_if_value_is_not_collection",
"tests/test_validators.py::TestEach::test_matching_empty_collections",
"tests/test_validators.py::TestEach::test_matching_collections_each_elemenet_of_which_matches_given_validators",
"tests/test_validators.py::TestEach::test_raising_ValidationError_if_single_validator_fails",
"tests/test_validators.py::TestEach::test_raising_ValidationError_if_any_item_fails_any_validator"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-06-09 05:22:05+00:00
|
mit
| 3,835 |
|
maximkulkin__lollipop-55
|
diff --git a/lollipop/types.py b/lollipop/types.py
index 3ec8a50..acb7f3b 100644
--- a/lollipop/types.py
+++ b/lollipop/types.py
@@ -679,16 +679,22 @@ class Dict(Type):
errors_builder = ValidationErrorBuilder()
result = {}
for k, v in iteritems(data):
- value_type = self.value_types.get(k)
- if value_type is None:
- continue
try:
k = self.key_type.load(k, *args, **kwargs)
except ValidationError as ve:
errors_builder.add_error(k, ve.messages)
+ if k is MISSING:
+ continue
+
+ value_type = self.value_types.get(k)
+ if value_type is None:
+ continue
+
try:
- result[k] = value_type.load(v, *args, **kwargs)
+ value = value_type.load(v, *args, **kwargs)
+ if value is not MISSING:
+ result[k] = value
except ValidationError as ve:
errors_builder.add_error(k, ve.messages)
@@ -715,8 +721,13 @@ class Dict(Type):
except ValidationError as ve:
errors_builder.add_error(k, ve.messages)
+ if k is MISSING:
+ continue
+
try:
- result[k] = value_type.dump(v, *args, **kwargs)
+ value = value_type.dump(v, *args, **kwargs)
+ if value is not MISSING:
+ result[k] = value
except ValidationError as ve:
errors_builder.add_error(k, ve.messages)
|
maximkulkin/lollipop
|
360bbc8f9c2b6203ab5af8a3cd051f852ba8dae3
|
diff --git a/tests/test_types.py b/tests/test_types.py
index e652fb6..6489dbf 100644
--- a/tests/test_types.py
+++ b/tests/test_types.py
@@ -641,6 +641,30 @@ class TestDict(NameDescriptionTestsMixin, RequiredTestsMixin, ValidationTestsMix
assert Dict(Any(), key_type=Integer())\
.load({'123': 'foo', '456': 'bar'}) == {123: 'foo', 456: 'bar'}
+ def test_loading_dict_with_custom_key_type_and_values_of_different_types(self):
+ assert Dict({1: Integer(), 2: String()}, key_type=Integer())\
+ .load({'1': '123', '2': 'bar'}) == {1: 123, 2: 'bar'}
+
+ def test_loading_skips_key_value_if_custom_key_type_loads_to_missing(self):
+ class CustomKeyType(String):
+ def load(self, data, *args, **kwargs):
+ if data == 'foo':
+ return MISSING
+ return super(CustomKeyType, self).load(data, *args, **kwargs)
+
+ assert Dict(String(), key_type=CustomKeyType())\
+ .load({'foo': 'hello', 'bar': 'goodbye'}) == {'bar': 'goodbye'}
+
+ def test_loading_skips_key_value_if_value_type_loads_to_missing(self):
+ class CustomValueType(String):
+ def load(self, data, *args, **kwargs):
+ if data == 'foo':
+ return MISSING
+ return super(CustomValueType, self).load(data, *args, **kwargs)
+
+ assert Dict(CustomValueType())\
+ .load({'key1': 'foo', 'key2': 'bar'}) == {'key2': 'bar'}
+
def test_loading_accepts_any_key_if_key_type_is_not_specified(self):
assert Dict(Any())\
.load({'123': 'foo', 456: 'bar'}) == {'123': 'foo', 456: 'bar'}
@@ -719,7 +743,27 @@ class TestDict(NameDescriptionTestsMixin, RequiredTestsMixin, ValidationTestsMix
def test_dumping_dict_with_values_of_different_types(self):
value = {'foo': 1, 'bar': 'hello', 'baz': True}
assert Dict({'foo': Integer(), 'bar': String(), 'baz': Boolean()})\
- .load(value) == value
+ .dump(value) == value
+
+ def test_dumping_skips_key_value_if_custom_key_type_loads_to_missing(self):
+ class CustomKeyType(String):
+ def dump(self, data, *args, **kwargs):
+ if data == 'foo':
+ return MISSING
+ return super(CustomKeyType, self).load(data, *args, **kwargs)
+
+ assert Dict(String(), key_type=CustomKeyType())\
+ .dump({'foo': 'hello', 'bar': 'goodbye'}) == {'bar': 'goodbye'}
+
+ def test_dumping_skips_key_value_if_value_type_loads_to_missing(self):
+ class CustomValueType(String):
+ def dump(self, data, *args, **kwargs):
+ if data == 'foo':
+ return MISSING
+ return super(CustomValueType, self).load(data, *args, **kwargs)
+
+ assert Dict(CustomValueType())\
+ .dump({'key1': 'foo', 'key2': 'bar'}) == {'key2': 'bar'}
def test_dumping_accepts_any_value_if_value_types_are_not_specified(self):
assert Dict()\
|
Asymmetry in load/dump for Dict with key_type and dict as values_type
I think there is a problem with the Dict type when using both key_type verification and a values_type dictionary:
In Dict.dump, the key is used in its original / non-dumped form to lookup the value type. However, in Dict.load, the dumped key is used to lookup the value type.
This works fine when using native types as key type such as String and Integer since they map to the same loaded/dumped value. But it's causing a problem when using a more complex key type (e.g. in my case an Enum that dumps to a string).
I believe, in Dict.load, key_type.load should be called **before** the lookup of the value type, so that the lookup is again performed with the original / non-dumped value.
|
0.0
|
360bbc8f9c2b6203ab5af8a3cd051f852ba8dae3
|
[
"tests/test_types.py::TestDict::test_loading_dict_with_custom_key_type_and_values_of_different_types",
"tests/test_types.py::TestDict::test_loading_skips_key_value_if_custom_key_type_loads_to_missing",
"tests/test_types.py::TestDict::test_loading_skips_key_value_if_value_type_loads_to_missing",
"tests/test_types.py::TestDict::test_dumping_skips_key_value_if_custom_key_type_loads_to_missing",
"tests/test_types.py::TestDict::test_dumping_skips_key_value_if_value_type_loads_to_missing"
] |
[
"tests/test_types.py::TestString::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestString::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestString::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestString::test_loading_passes_context_to_validator",
"tests/test_types.py::TestString::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestString::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestString::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestString::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestString::test_loading_None_raises_required_error",
"tests/test_types.py::TestString::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestString::test_dumping_None_raises_required_error",
"tests/test_types.py::TestString::test_name",
"tests/test_types.py::TestString::test_description",
"tests/test_types.py::TestString::test_loading_string_value",
"tests/test_types.py::TestString::test_loading_non_string_value_raises_ValidationError",
"tests/test_types.py::TestString::test_dumping_string_value",
"tests/test_types.py::TestString::test_dumping_non_string_value_raises_ValidationError",
"tests/test_types.py::TestNumber::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestNumber::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestNumber::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestNumber::test_loading_passes_context_to_validator",
"tests/test_types.py::TestNumber::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestNumber::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestNumber::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestNumber::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestNumber::test_loading_None_raises_required_error",
"tests/test_types.py::TestNumber::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestNumber::test_dumping_None_raises_required_error",
"tests/test_types.py::TestNumber::test_name",
"tests/test_types.py::TestNumber::test_description",
"tests/test_types.py::TestNumber::test_loading_float_value",
"tests/test_types.py::TestNumber::test_loading_non_numeric_value_raises_ValidationError",
"tests/test_types.py::TestNumber::test_dumping_float_value",
"tests/test_types.py::TestNumber::test_dumping_non_numeric_value_raises_ValidationError",
"tests/test_types.py::TestInteger::test_loading_integer_value",
"tests/test_types.py::TestInteger::test_loading_long_value",
"tests/test_types.py::TestInteger::test_loading_non_numeric_value_raises_ValidationError",
"tests/test_types.py::TestInteger::test_dumping_integer_value",
"tests/test_types.py::TestInteger::test_dumping_long_value",
"tests/test_types.py::TestInteger::test_dumping_non_numeric_value_raises_ValidationError",
"tests/test_types.py::TestFloat::test_loading_float_value",
"tests/test_types.py::TestFloat::test_loading_non_numeric_value_raises_ValidationError",
"tests/test_types.py::TestFloat::test_dumping_float_value",
"tests/test_types.py::TestFloat::test_dumping_non_numeric_value_raises_ValidationError",
"tests/test_types.py::TestBoolean::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestBoolean::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestBoolean::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestBoolean::test_loading_passes_context_to_validator",
"tests/test_types.py::TestBoolean::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestBoolean::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestBoolean::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestBoolean::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestBoolean::test_loading_None_raises_required_error",
"tests/test_types.py::TestBoolean::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestBoolean::test_dumping_None_raises_required_error",
"tests/test_types.py::TestBoolean::test_name",
"tests/test_types.py::TestBoolean::test_description",
"tests/test_types.py::TestBoolean::test_loading_boolean_value",
"tests/test_types.py::TestBoolean::test_loading_non_boolean_value_raises_ValidationError",
"tests/test_types.py::TestBoolean::test_dumping_boolean_value",
"tests/test_types.py::TestBoolean::test_dumping_non_boolean_value_raises_ValidationError",
"tests/test_types.py::TestDateTime::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestDateTime::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestDateTime::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestDateTime::test_loading_passes_context_to_validator",
"tests/test_types.py::TestDateTime::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestDateTime::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestDateTime::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestDateTime::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestDateTime::test_loading_None_raises_required_error",
"tests/test_types.py::TestDateTime::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestDateTime::test_dumping_None_raises_required_error",
"tests/test_types.py::TestDateTime::test_name",
"tests/test_types.py::TestDateTime::test_description",
"tests/test_types.py::TestDateTime::test_loading_string_date",
"tests/test_types.py::TestDateTime::test_loading_using_predefined_format",
"tests/test_types.py::TestDateTime::test_loading_using_custom_format",
"tests/test_types.py::TestDateTime::test_loading_raises_ValidationError_if_value_is_not_string",
"tests/test_types.py::TestDateTime::test_customizing_error_message_if_value_is_not_string",
"tests/test_types.py::TestDateTime::test_loading_raises_ValidationError_if_value_string_does_not_match_date_format",
"tests/test_types.py::TestDateTime::test_customizing_error_message_if_value_string_does_not_match_date_format",
"tests/test_types.py::TestDateTime::test_loading_passes_deserialized_date_to_validator",
"tests/test_types.py::TestDateTime::test_dumping_date",
"tests/test_types.py::TestDateTime::test_dumping_using_predefined_format",
"tests/test_types.py::TestDateTime::test_dumping_using_custom_format",
"tests/test_types.py::TestDateTime::test_dumping_raises_ValidationError_if_value_is_not_string",
"tests/test_types.py::TestDate::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestDate::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestDate::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestDate::test_loading_passes_context_to_validator",
"tests/test_types.py::TestDate::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestDate::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestDate::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestDate::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestDate::test_loading_None_raises_required_error",
"tests/test_types.py::TestDate::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestDate::test_dumping_None_raises_required_error",
"tests/test_types.py::TestDate::test_name",
"tests/test_types.py::TestDate::test_description",
"tests/test_types.py::TestDate::test_loading_string_date",
"tests/test_types.py::TestDate::test_loading_using_predefined_format",
"tests/test_types.py::TestDate::test_loading_using_custom_format",
"tests/test_types.py::TestDate::test_loading_raises_ValidationError_if_value_is_not_string",
"tests/test_types.py::TestDate::test_customizing_error_message_if_value_is_not_string",
"tests/test_types.py::TestDate::test_loading_raises_ValidationError_if_value_string_does_not_match_date_format",
"tests/test_types.py::TestDate::test_customizing_error_message_if_value_string_does_not_match_date_format",
"tests/test_types.py::TestDate::test_loading_passes_deserialized_date_to_validator",
"tests/test_types.py::TestDate::test_dumping_date",
"tests/test_types.py::TestDate::test_dumping_using_predefined_format",
"tests/test_types.py::TestDate::test_dumping_using_custom_format",
"tests/test_types.py::TestDate::test_dumping_raises_ValidationError_if_value_is_not_string",
"tests/test_types.py::TestTime::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestTime::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestTime::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestTime::test_loading_passes_context_to_validator",
"tests/test_types.py::TestTime::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestTime::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestTime::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestTime::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestTime::test_loading_None_raises_required_error",
"tests/test_types.py::TestTime::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestTime::test_dumping_None_raises_required_error",
"tests/test_types.py::TestTime::test_name",
"tests/test_types.py::TestTime::test_description",
"tests/test_types.py::TestTime::test_loading_string_date",
"tests/test_types.py::TestTime::test_loading_using_custom_format",
"tests/test_types.py::TestTime::test_loading_raises_ValidationError_if_value_is_not_string",
"tests/test_types.py::TestTime::test_customizing_error_message_if_value_is_not_string",
"tests/test_types.py::TestTime::test_loading_raises_ValidationError_if_value_string_does_not_match_date_format",
"tests/test_types.py::TestTime::test_customizing_error_message_if_value_string_does_not_match_date_format",
"tests/test_types.py::TestTime::test_loading_passes_deserialized_date_to_validator",
"tests/test_types.py::TestTime::test_dumping_date",
"tests/test_types.py::TestTime::test_dumping_using_custom_format",
"tests/test_types.py::TestTime::test_dumping_raises_ValidationError_if_value_is_not_string",
"tests/test_types.py::TestList::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestList::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestList::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestList::test_loading_passes_context_to_validator",
"tests/test_types.py::TestList::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestList::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestList::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestList::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestList::test_loading_None_raises_required_error",
"tests/test_types.py::TestList::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestList::test_dumping_None_raises_required_error",
"tests/test_types.py::TestList::test_name",
"tests/test_types.py::TestList::test_description",
"tests/test_types.py::TestList::test_loading_list_value",
"tests/test_types.py::TestList::test_loading_non_list_value_raises_ValidationError",
"tests/test_types.py::TestList::test_loading_list_value_with_items_of_incorrect_type_raises_ValidationError",
"tests/test_types.py::TestList::test_loading_list_value_with_items_that_have_validation_errors_raises_ValidationError",
"tests/test_types.py::TestList::test_loading_does_not_validate_whole_list_if_items_have_errors",
"tests/test_types.py::TestList::test_loading_passes_context_to_inner_type_load",
"tests/test_types.py::TestList::test_dumping_list_value",
"tests/test_types.py::TestList::test_dumping_sequence_value",
"tests/test_types.py::TestList::test_dumping_non_list_value_raises_ValidationError",
"tests/test_types.py::TestList::test_dumping_list_value_with_items_of_incorrect_type_raises_ValidationError",
"tests/test_types.py::TestList::test_dumping_passes_context_to_inner_type_dump",
"tests/test_types.py::TestTuple::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestTuple::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestTuple::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestTuple::test_loading_passes_context_to_validator",
"tests/test_types.py::TestTuple::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestTuple::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestTuple::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestTuple::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestTuple::test_loading_None_raises_required_error",
"tests/test_types.py::TestTuple::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestTuple::test_dumping_None_raises_required_error",
"tests/test_types.py::TestTuple::test_name",
"tests/test_types.py::TestTuple::test_description",
"tests/test_types.py::TestTuple::test_loading_tuple_with_values_of_same_type",
"tests/test_types.py::TestTuple::test_loading_tuple_with_values_of_different_type",
"tests/test_types.py::TestTuple::test_loading_non_tuple_value_raises_ValidationError",
"tests/test_types.py::TestTuple::test_loading_tuple_with_items_of_incorrect_type_raises_ValidationError",
"tests/test_types.py::TestTuple::test_loading_tuple_with_items_that_have_validation_errors_raises_ValidationErrors",
"tests/test_types.py::TestTuple::test_loading_passes_context_to_inner_type_load",
"tests/test_types.py::TestTuple::test_dump_tuple",
"tests/test_types.py::TestTuple::test_dump_sequence",
"tests/test_types.py::TestTuple::test_dumping_non_tuple_raises_ValidationError",
"tests/test_types.py::TestTuple::test_dumping_sequence_of_incorrect_length_raises_ValidationError",
"tests/test_types.py::TestTuple::test_dumping_tuple_with_items_of_incorrect_type_raises_ValidationError",
"tests/test_types.py::TestTuple::test_dumping_tuple_passes_context_to_inner_type_dump",
"tests/test_types.py::TestDict::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestDict::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestDict::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestDict::test_loading_passes_context_to_validator",
"tests/test_types.py::TestDict::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestDict::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestDict::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestDict::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestDict::test_loading_None_raises_required_error",
"tests/test_types.py::TestDict::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestDict::test_dumping_None_raises_required_error",
"tests/test_types.py::TestDict::test_name",
"tests/test_types.py::TestDict::test_description",
"tests/test_types.py::TestDict::test_loading_dict_with_custom_key_type",
"tests/test_types.py::TestDict::test_loading_accepts_any_key_if_key_type_is_not_specified",
"tests/test_types.py::TestDict::test_loading_dict_with_values_of_the_same_type",
"tests/test_types.py::TestDict::test_loading_dict_with_values_of_different_types",
"tests/test_types.py::TestDict::test_loading_accepts_any_value_if_value_types_are_not_specified",
"tests/test_types.py::TestDict::test_loading_non_dict_value_raises_ValidationError",
"tests/test_types.py::TestDict::test_loading_dict_with_incorrect_key_value_raises_ValidationError",
"tests/test_types.py::TestDict::test_loading_dict_with_items_of_incorrect_type_raises_ValidationError",
"tests/test_types.py::TestDict::test_loading_dict_with_items_that_have_validation_errors_raises_ValidationError",
"tests/test_types.py::TestDict::test_loading_does_not_validate_whole_list_if_items_have_errors",
"tests/test_types.py::TestDict::test_loading_dict_with_incorrect_key_value_and_incorrect_value_raises_ValidationError_with_both_errors",
"tests/test_types.py::TestDict::test_loading_passes_context_to_inner_type_load",
"tests/test_types.py::TestDict::test_dumping_dict_with_custom_key_type",
"tests/test_types.py::TestDict::test_dumping_accepts_any_key_if_key_type_is_not_specified",
"tests/test_types.py::TestDict::test_dumping_dict_with_values_of_the_same_type",
"tests/test_types.py::TestDict::test_dumping_dict_with_values_of_different_types",
"tests/test_types.py::TestDict::test_dumping_accepts_any_value_if_value_types_are_not_specified",
"tests/test_types.py::TestDict::test_dumping_non_dict_value_raises_ValidationError",
"tests/test_types.py::TestDict::test_dumping_dict_with_incorrect_key_value_raises_ValidationError",
"tests/test_types.py::TestDict::test_dumping_dict_with_items_of_incorrect_type_raises_ValidationError",
"tests/test_types.py::TestDict::test_dumping_dict_with_incorrect_key_value_and_incorrect_value_raises_ValidationError_with_both_errors",
"tests/test_types.py::TestDict::test_dumping_passes_context_to_inner_type_dump",
"tests/test_types.py::TestOneOf::test_loading_values_of_one_of_listed_types",
"tests/test_types.py::TestOneOf::test_loading_raises_ValidationError_if_value_is_of_unlisted_type",
"tests/test_types.py::TestOneOf::test_loading_raises_ValidationError_if_deserialized_value_has_errors",
"tests/test_types.py::TestOneOf::test_loading_raises_ValidationError_if_type_hint_is_unknown",
"tests/test_types.py::TestOneOf::test_loading_with_type_hinting",
"tests/test_types.py::TestOneOf::test_loading_with_type_hinting_raises_ValidationError_if_deserialized_value_has_errors",
"tests/test_types.py::TestOneOf::test_dumping_values_of_one_of_listed_types",
"tests/test_types.py::TestOneOf::test_dumping_raises_ValidationError_if_value_is_of_unlisted_type",
"tests/test_types.py::TestOneOf::test_dumping_raises_ValidationError_if_type_hint_is_unknown",
"tests/test_types.py::TestOneOf::test_dumping_raises_ValidationError_if_serialized_value_has_errors",
"tests/test_types.py::TestOneOf::test_dumping_with_type_hinting",
"tests/test_types.py::TestOneOf::test_dumping_with_type_hinting_raises_ValidationError_if_deserialized_value_has_errors",
"tests/test_types.py::TestAttributeField::test_getting_value_returns_value_of_given_object_attribute",
"tests/test_types.py::TestAttributeField::test_getting_value_returns_value_of_configured_object_attribute",
"tests/test_types.py::TestAttributeField::test_getting_value_returns_value_of_field_name_transformed_with_given_name_transformation",
"tests/test_types.py::TestAttributeField::test_setting_value_sets_given_value_to_given_object_attribute",
"tests/test_types.py::TestAttributeField::test_setting_value_sets_given_value_to_configured_object_attribute",
"tests/test_types.py::TestAttributeField::test_setting_value_sets_given_value_to_field_name_transformed_with_given_name_transformation",
"tests/test_types.py::TestAttributeField::test_loading_value_with_field_type",
"tests/test_types.py::TestAttributeField::test_loading_given_attribute_regardless_of_attribute_override",
"tests/test_types.py::TestAttributeField::test_loading_missing_value_if_attribute_does_not_exist",
"tests/test_types.py::TestAttributeField::test_loading_passes_context_to_field_type_load",
"tests/test_types.py::TestAttributeField::test_dumping_given_attribute_from_object",
"tests/test_types.py::TestAttributeField::test_dumping_object_attribute_with_field_type",
"tests/test_types.py::TestAttributeField::test_dumping_a_different_attribute_from_object",
"tests/test_types.py::TestAttributeField::test_dumping_passes_context_to_field_type_dump",
"tests/test_types.py::TestMethodField::test_get_value_returns_result_of_calling_configured_method_on_object",
"tests/test_types.py::TestMethodField::test_get_value_returns_result_of_calling_method_calculated_by_given_function_on_object",
"tests/test_types.py::TestMethodField::test_get_value_returns_MISSING_if_get_method_is_not_specified",
"tests/test_types.py::TestMethodField::test_get_value_raises_ValueError_if_method_does_not_exist",
"tests/test_types.py::TestMethodField::test_get_value_raises_ValueError_if_property_is_not_callable",
"tests/test_types.py::TestMethodField::test_get_value_passes_context_to_method",
"tests/test_types.py::TestMethodField::test_set_value_calls_configure_method_on_object",
"tests/test_types.py::TestMethodField::test_set_value_calls_method_calculated_by_given_function_on_object",
"tests/test_types.py::TestMethodField::test_set_value_does_not_do_anything_if_set_method_is_not_specified",
"tests/test_types.py::TestMethodField::test_set_value_raises_ValueError_if_method_does_not_exist",
"tests/test_types.py::TestMethodField::test_set_value_raises_ValueError_if_property_is_not_callable",
"tests/test_types.py::TestMethodField::test_set_value_passes_context_to_method",
"tests/test_types.py::TestMethodField::test_loading_value_with_field_type",
"tests/test_types.py::TestMethodField::test_loading_value_returns_loaded_value",
"tests/test_types.py::TestMethodField::test_loading_value_passes_context_to_field_types_load",
"tests/test_types.py::TestMethodField::test_loading_value_into_existing_object_calls_field_types_load_into",
"tests/test_types.py::TestMethodField::test_loading_value_into_existing_object_calls_field_types_load_if_load_into_is_not_available",
"tests/test_types.py::TestMethodField::test_loading_value_into_existing_object_calls_field_types_load_if_old_value_is_None",
"tests/test_types.py::TestMethodField::test_loading_value_into_existing_object_calls_field_types_load_if_old_value_is_MISSING",
"tests/test_types.py::TestMethodField::test_loading_value_into_existing_object_passes_context_to_field_types_load_into",
"tests/test_types.py::TestMethodField::test_dumping_result_of_given_objects_method",
"tests/test_types.py::TestMethodField::test_dumping_result_of_objects_method_with_field_type",
"tests/test_types.py::TestMethodField::test_dumping_result_of_a_different_objects_method",
"tests/test_types.py::TestMethodField::test_dumping_raises_ValueError_if_given_method_does_not_exist",
"tests/test_types.py::TestMethodField::test_dumping_raises_ValueError_if_given_method_is_not_callable",
"tests/test_types.py::TestMethodField::test_dumping_passes_context_to_field_type_dump",
"tests/test_types.py::TestFunctionField::test_get_value_returns_result_of_calling_configured_function_with_object",
"tests/test_types.py::TestFunctionField::test_get_value_returns_MISSING_if_get_func_is_not_specified",
"tests/test_types.py::TestFunctionField::test_get_value_raises_ValueError_if_property_is_not_callable",
"tests/test_types.py::TestFunctionField::test_get_value_passes_context_to_func",
"tests/test_types.py::TestFunctionField::test_set_value_calls_configure_method_on_object",
"tests/test_types.py::TestFunctionField::test_set_value_does_not_do_anything_if_set_func_is_not_specified",
"tests/test_types.py::TestFunctionField::test_set_value_raises_ValueError_if_property_is_not_callable",
"tests/test_types.py::TestFunctionField::test_set_value_passes_context_to_func",
"tests/test_types.py::TestFunctionField::test_loading_value_with_field_type",
"tests/test_types.py::TestFunctionField::test_loading_value_returns_loaded_value",
"tests/test_types.py::TestFunctionField::test_loading_value_passes_context_to_field_types_load",
"tests/test_types.py::TestFunctionField::test_loading_value_into_existing_object_calls_field_types_load_into",
"tests/test_types.py::TestFunctionField::test_loading_value_into_existing_object_calls_field_types_load_if_load_into_is_not_available",
"tests/test_types.py::TestFunctionField::test_loading_value_into_existing_object_calls_field_types_load_if_old_value_is_None",
"tests/test_types.py::TestFunctionField::test_loading_value_into_existing_object_calls_field_types_load_if_old_value_is_MISSING",
"tests/test_types.py::TestFunctionField::test_loading_value_into_existing_object_passes_context_to_field_types_load_into",
"tests/test_types.py::TestFunctionField::test_dumping_result_of_given_function",
"tests/test_types.py::TestFunctionField::test_dumping_result_of_objects_method_with_field_type",
"tests/test_types.py::TestFunctionField::test_dumping_raises_ValueError_if_given_get_func_is_not_callable",
"tests/test_types.py::TestFunctionField::test_dumping_passes_context_to_field_type_dump",
"tests/test_types.py::TestConstant::test_name",
"tests/test_types.py::TestConstant::test_description",
"tests/test_types.py::TestConstant::test_loading_always_returns_missing",
"tests/test_types.py::TestConstant::test_loading_raises_ValidationError_if_loaded_value_is_not_a_constant_value_specified",
"tests/test_types.py::TestConstant::test_loading_value_with_inner_type_before_checking_value_correctness",
"tests/test_types.py::TestConstant::test_customizing_error_message_when_value_is_incorrect",
"tests/test_types.py::TestConstant::test_dumping_always_returns_given_value",
"tests/test_types.py::TestConstant::test_dumping_given_constant_with_field_type",
"tests/test_types.py::TestObject::test_loading_does_not_raise_ValidationError_if_validators_succeed",
"tests/test_types.py::TestObject::test_loading_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestObject::test_loading_raises_ValidationError_with_combined_messages_if_multiple_validators_fail",
"tests/test_types.py::TestObject::test_loading_passes_context_to_validator",
"tests/test_types.py::TestObject::test_validation_returns_None_if_validators_succeed",
"tests/test_types.py::TestObject::test_validation_returns_errors_if_validator_fails",
"tests/test_types.py::TestObject::test_validation_returns_combined_errors_if_multiple_validators_fail",
"tests/test_types.py::TestObject::test_loading_missing_value_raises_required_error",
"tests/test_types.py::TestObject::test_loading_None_raises_required_error",
"tests/test_types.py::TestObject::test_dumping_missing_value_raises_required_error",
"tests/test_types.py::TestObject::test_dumping_None_raises_required_error",
"tests/test_types.py::TestObject::test_name",
"tests/test_types.py::TestObject::test_description",
"tests/test_types.py::TestObject::test_default_field_type_is_unset_by_default",
"tests/test_types.py::TestObject::test_inheriting_default_field_type_from_first_base_class_that_has_it_set",
"tests/test_types.py::TestObject::test_constructor_is_unset_by_default",
"tests/test_types.py::TestObject::test_inheriting_constructor_from_first_base_class_that_has_it_set",
"tests/test_types.py::TestObject::test_allow_extra_fields_is_unset_by_default",
"tests/test_types.py::TestObject::test_inheriting_allow_extra_fields_from_first_base_class_that_has_it_set",
"tests/test_types.py::TestObject::test_immutable_is_unset_by_default",
"tests/test_types.py::TestObject::test_inheriting_immutable_from_first_base_class_that_has_it_set",
"tests/test_types.py::TestObject::test_ordered_is_unset_by_default",
"tests/test_types.py::TestObject::test_iheriting_ordered_from_first_base_class_that_has_it_set",
"tests/test_types.py::TestObject::test_loading_dict_value",
"tests/test_types.py::TestObject::test_loading_non_dict_values_raises_ValidationError",
"tests/test_types.py::TestObject::test_loading_bypasses_values_for_which_field_type_returns_missing_value",
"tests/test_types.py::TestObject::test_loading_dict_with_field_errors_raises_ValidationError_with_all_field_errors_merged",
"tests/test_types.py::TestObject::test_loading_dict_with_field_errors_does_not_run_whole_object_validators",
"tests/test_types.py::TestObject::test_loading_calls_field_load_passing_field_name_and_whole_data",
"tests/test_types.py::TestObject::test_loading_passes_context_to_inner_type_load",
"tests/test_types.py::TestObject::test_constructing_objects_with_default_constructor_on_load",
"tests/test_types.py::TestObject::test_constructing_custom_objects_on_load",
"tests/test_types.py::TestObject::test_load_ignores_extra_fields_by_default",
"tests/test_types.py::TestObject::test_load_raises_ValidationError_if_reporting_extra_fields",
"tests/test_types.py::TestObject::test_loading_inherited_fields",
"tests/test_types.py::TestObject::test_loading_multiple_inherited_fields",
"tests/test_types.py::TestObject::test_loading_raises_ValidationError_if_inherited_fields_have_errors",
"tests/test_types.py::TestObject::test_loading_only_specified_fields",
"tests/test_types.py::TestObject::test_loading_only_specified_fields_does_not_affect_own_fields",
"tests/test_types.py::TestObject::test_loading_all_but_specified_base_class_fields",
"tests/test_types.py::TestObject::test_loading_all_but_specified_fields_does_not_affect_own_fields",
"tests/test_types.py::TestObject::test_loading_values_into_existing_object",
"tests/test_types.py::TestObject::test_loading_values_into_existing_object_returns_that_object",
"tests/test_types.py::TestObject::test_loading_values_into_existing_object_passes_all_object_attributes_to_validators",
"tests/test_types.py::TestObject::test_loading_values_into_immutable_object_creates_a_copy",
"tests/test_types.py::TestObject::test_loading_values_into_immutable_object_does_not_modify_original_object",
"tests/test_types.py::TestObject::test_loading_values_into_nested_object_of_immutable_object_creates_copy_of_it_regardless_of_nested_objects_immutable_flag",
"tests/test_types.py::TestObject::test_loading_values_into_nested_object_of_immutable_object_does_not_modify_original_objects",
"tests/test_types.py::TestObject::test_loading_values_into_nested_objects_with_inplace_False_does_not_modify_original_objects",
"tests/test_types.py::TestObject::test_loading_values_into_existing_objects_ignores_missing_fields",
"tests/test_types.py::TestObject::test_loading_MISSING_into_existing_object_does_not_do_anything",
"tests/test_types.py::TestObject::test_loading_None_into_existing_objects_raises_ValidationError",
"tests/test_types.py::TestObject::test_loading_None_into_field_of_existing_object_passes_None_to_field",
"tests/test_types.py::TestObject::test_loading_values_into_existing_objects_raises_ValidationError_if_data_contains_errors",
"tests/test_types.py::TestObject::test_loading_values_into_existing_objects_raises_ValidationError_if_validator_fails",
"tests/test_types.py::TestObject::test_loading_values_into_existing_objects_annotates_field_errors_with_field_names",
"tests/test_types.py::TestObject::test_loading_values_into_existing_nested_objects",
"tests/test_types.py::TestObject::test_loading_values_into_existing_object_when_nested_object_does_not_exist",
"tests/test_types.py::TestObject::test_validating_data_for_existing_objects_returns_None_if_data_is_valid",
"tests/test_types.py::TestObject::test_validating_data_for_existing_objects_returns_errors_if_data_contains_errors",
"tests/test_types.py::TestObject::test_validating_data_for_existing_objects_returns_errors_if_validator_fails",
"tests/test_types.py::TestObject::test_validating_data_for_existing_objects_does_not_modify_original_objects",
"tests/test_types.py::TestObject::test_dumping_object_attributes",
"tests/test_types.py::TestObject::test_dumping_calls_field_dump_passing_field_name_and_whole_object",
"tests/test_types.py::TestObject::test_dumping_passes_context_to_inner_type_dump",
"tests/test_types.py::TestObject::test_dumping_inherited_fields",
"tests/test_types.py::TestObject::test_dumping_multiple_inherited_fields",
"tests/test_types.py::TestObject::test_dumping_only_specified_fields_of_base_classes",
"tests/test_types.py::TestObject::test_dumping_only_specified_fields_does_not_affect_own_fields",
"tests/test_types.py::TestObject::test_dumping_all_but_specified_base_class_fields",
"tests/test_types.py::TestObject::test_dumping_all_but_specified_fields_does_not_affect_own_fields",
"tests/test_types.py::TestObject::test_shortcut_for_specifying_constant_fields",
"tests/test_types.py::TestObject::test_dumping_fields_in_declared_order_if_ordered_is_True",
"tests/test_types.py::TestOptional::test_loading_value_calls_load_of_inner_type",
"tests/test_types.py::TestOptional::test_loading_missing_value_returns_None",
"tests/test_types.py::TestOptional::test_loading_None_returns_None",
"tests/test_types.py::TestOptional::test_loading_missing_value_does_not_call_inner_type_load",
"tests/test_types.py::TestOptional::test_loading_None_does_not_call_inner_type_load",
"tests/test_types.py::TestOptional::test_loading_passes_context_to_inner_type_load",
"tests/test_types.py::TestOptional::test_overriding_missing_value_on_load",
"tests/test_types.py::TestOptional::test_overriding_None_value_on_load",
"tests/test_types.py::TestOptional::test_using_function_to_override_value_on_load",
"tests/test_types.py::TestOptional::test_loading_passes_context_to_override_function",
"tests/test_types.py::TestOptional::test_dumping_value_calls_dump_of_inner_type",
"tests/test_types.py::TestOptional::test_dumping_missing_value_returns_None",
"tests/test_types.py::TestOptional::test_dumping_None_returns_None",
"tests/test_types.py::TestOptional::test_dumping_missing_value_does_not_call_inner_type_dump",
"tests/test_types.py::TestOptional::test_dumping_None_does_not_call_inner_type_dump",
"tests/test_types.py::TestOptional::test_dumping_passes_context_to_inner_type_dump",
"tests/test_types.py::TestOptional::test_overriding_missing_value_on_dump",
"tests/test_types.py::TestOptional::test_overriding_None_value_on_dump",
"tests/test_types.py::TestOptional::test_using_function_to_override_value_on_dump",
"tests/test_types.py::TestOptional::test_dumping_passes_context_to_override_function",
"tests/test_types.py::TestLoadOnly::test_name",
"tests/test_types.py::TestLoadOnly::test_description",
"tests/test_types.py::TestLoadOnly::test_loading_returns_inner_type_load_result",
"tests/test_types.py::TestLoadOnly::test_loading_passes_context_to_inner_type_load",
"tests/test_types.py::TestLoadOnly::test_dumping_always_returns_missing",
"tests/test_types.py::TestLoadOnly::test_dumping_does_not_call_inner_type_dump",
"tests/test_types.py::TestDumpOnly::test_name",
"tests/test_types.py::TestDumpOnly::test_description",
"tests/test_types.py::TestDumpOnly::test_loading_always_returns_missing",
"tests/test_types.py::TestDumpOnly::test_loading_does_not_call_inner_type_dump",
"tests/test_types.py::TestDumpOnly::test_dumping_returns_inner_type_dump_result",
"tests/test_types.py::TestDumpOnly::test_dumping_passes_context_to_inner_type_dump",
"tests/test_types.py::TestTransform::test_name",
"tests/test_types.py::TestTransform::test_description",
"tests/test_types.py::TestTransform::test_loading_calls_pre_load_with_original_value",
"tests/test_types.py::TestTransform::test_loading_calls_inner_type_load_with_result_of_pre_load",
"tests/test_types.py::TestTransform::test_loading_calls_post_load_with_result_of_inner_type_load",
"tests/test_types.py::TestTransform::test_transform_passes_context_to_inner_type_load",
"tests/test_types.py::TestTransform::test_transform_passes_context_to_pre_load",
"tests/test_types.py::TestTransform::test_transform_passes_context_to_post_load",
"tests/test_types.py::TestTransform::test_dumping_calls_pre_dump_with_original_value",
"tests/test_types.py::TestTransform::test_dumping_calls_inner_type_dump_with_result_of_pre_dump",
"tests/test_types.py::TestTransform::test_dumping_calls_post_dump_with_result_of_inner_type_dump",
"tests/test_types.py::TestTransform::test_transform_passes_context_to_inner_type_dump",
"tests/test_types.py::TestTransform::test_transform_passes_context_to_pre_dump",
"tests/test_types.py::TestTransform::test_transform_passes_context_to_post_dump",
"tests/test_types.py::TestValidatedType::test_returns_subclass_of_given_type",
"tests/test_types.py::TestValidatedType::test_returns_type_that_has_single_given_validator",
"tests/test_types.py::TestValidatedType::test_accepts_context_unaware_validators",
"tests/test_types.py::TestValidatedType::test_returns_type_that_has_multiple_given_validators",
"tests/test_types.py::TestValidatedType::test_specifying_more_validators_on_type_instantiation",
"tests/test_types.py::TestValidatedType::test_new_type_accepts_same_constructor_arguments_as_base_type"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2017-06-12 19:52:49+00:00
|
mit
| 3,836 |
|
mbillow__python-redpocket-4
|
diff --git a/redpocket/api.py b/redpocket/api.py
index a69964e..f20da88 100644
--- a/redpocket/api.py
+++ b/redpocket/api.py
@@ -204,12 +204,15 @@ class RedPocket:
self,
method: str = "get",
url: str = "",
+ params: dict = None,
data: dict = None,
_is_retry: bool = False,
) -> requests.Response:
self._logger.debug("API Request: [%s] URL: %s", method.upper(), url)
- request = self._session.request(method=method, url=url, data=data)
+ request = self._session.request(
+ method=method, url=url, params=params, data=data
+ )
if request.status_code != requests.codes.ok:
raise RedPocketAPIError("API Returned Non-200 Response!")
@@ -241,8 +244,13 @@ class RedPocket:
]
def get_line_details(self, account_hash: str) -> RedPocketLineDetails:
+ params = {
+ "id": account_hash,
+ "type": "api",
+ }
details = self.request(
- url=f"https://www.redpocket.com/account/get-details?id={account_hash}"
+ url="https://www.redpocket.com/account/get-details",
+ params=params,
)
details_json = details.json()
return RedPocketLineDetails.from_api(
|
mbillow/python-redpocket
|
0ae02c28feb4a88f0f07032ccc3f691f6fe58507
|
diff --git a/tests/test_api.py b/tests/test_api.py
index caf6de1..3cce41d 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -134,7 +134,7 @@ def test_get_line_details(
)
responses.add(
responses.GET,
- "https://www.redpocket.com/account/get-details?id=MTIzNDU2",
+ "https://www.redpocket.com/account/get-details?id=MTIzNDU2&type=api",
status=200,
json={"return_code": 1, "return_data": mock_line_details},
)
@@ -157,7 +157,7 @@ def test_get_all_line_details(
)
responses.add(
responses.GET,
- "https://www.redpocket.com/account/get-details?id=MTIzNDU2",
+ "https://www.redpocket.com/account/get-details?id=MTIzNDU2&type=api",
status=200,
json={"return_code": 1, "return_data": mock_line_details},
)
|
Retrieve quicker updating data
I found that when retrieving data from the `/get-details` endpoint, it appeared to update quicker when `type=api` was included. Without it, the data retrieved appeared to be cached. There was an increase in response time as a side effect, but I took this as a good sign.
Call: ```https://www.redpocket.com/account/get-details?id=AAA1BBB2&type=api```
Instead of: ```https://www.redpocket.com/account/get-details?id=AAA1BBB2```
|
0.0
|
0ae02c28feb4a88f0f07032ccc3f691f6fe58507
|
[
"tests/test_api.py::test_get_all_line_details",
"tests/test_api.py::test_get_line_details"
] |
[
"tests/test_api.py::test_request_retry_login_success",
"tests/test_api.py::test_line_details_from_api",
"tests/test_api.py::test_request_unknown_return_code",
"tests/test_api.py::test_login_good_credentials",
"tests/test_api.py::test_get_line",
"tests/test_api.py::test_request_non_200",
"tests/test_api.py::test_login_missing_csrf",
"tests/test_api.py::test_line_without_callback",
"tests/test_api.py::test_login_bad_credentials",
"tests/test_api.py::test_line_from_api",
"tests/test_api.py::test_today",
"tests/test_api.py::test_phone_from_api",
"tests/test_api.py::test_request_retry_login_failure"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-05-13 20:14:58+00:00
|
mit
| 3,837 |
|
mbiokyle29__place-8
|
diff --git a/place/lib/utils.py b/place/lib/utils.py
index f7e3733..cfb3da5 100644
--- a/place/lib/utils.py
+++ b/place/lib/utils.py
@@ -1,9 +1,25 @@
# file: utils.py
# author: mbiokyle29
+import logging
import os.path as path
+logger = logging.getLogger(__name__)
+
+
def is_dir(input_path):
""" Return a boolean if the path is not a dir """
abs_path = path.abspath(input_path)
+ logger.debug("Checking if path: %s is a directory", abs_path)
return path.isdir(abs_path)
+
+
+def configure_logger(logger, level):
+ logger.setLevel(level)
+ sh = logging.StreamHandler()
+ sh.setLevel(level)
+ formatter = logging.Formatter("[%(name)s][%(levelname)s]: %(message)s")
+ sh.setFormatter(formatter)
+ logger.addHandler(sh)
+
+ return logger
diff --git a/place/main.py b/place/main.py
index 1e99b58..4c93112 100644
--- a/place/main.py
+++ b/place/main.py
@@ -1,21 +1,31 @@
# file: main.py
# author: mbiokyle29
+import logging
import os
import sys
-from click import argument, command, Path
+from click import argument, command, option, Path
-from place.lib.utils import is_dir
+from place.lib.utils import is_dir, configure_logger
+
+
+root_logger = logging.getLogger("")
@command()
@argument("sources", nargs=-1, type=Path(exists=True, resolve_path=True))
@argument("target", nargs=1, type=Path())
-def cli(sources, target):
+@option("-v", "--verbose", default=False, is_flag=True, help="Enable verbose logging.")
+@option("-d", "--debug", default=False, is_flag=True, help="Enable debug logging.")
+def cli(sources, target, verbose, debug):
""" mv file(s) in SOURCES to TARGET while updating config files """
+ log_level = logging.INFO if verbose else logging.WARN
+ log_level = logging.DEBUG if debug else log_level
+ configure_logger(root_logger, log_level)
# rename - 1 source, target must not be a dir
if len(sources) == 1 and not is_dir(target):
+ root_logger.debug("Renaming source file %s to target %s", sources[0], target)
os.rename(sources[0], target)
# move - n sources, target must be a dir
@@ -25,8 +35,10 @@ def cli(sources, target):
os.path.abspath(target),
os.path.basename(source)
)
+ root_logger.debug("Moving source file %s to target location %s", source, target)
os.rename(source, new_path)
# badness
else:
+ root_logger.warning("No placeable files detected in input. Quitting...")
sys.exit(-1)
|
mbiokyle29/place
|
994186e72f63004c7882e0a9f2434bd0a361bf23
|
diff --git a/tests/lib/test_utils.py b/tests/lib/test_utils.py
index 22039bf..0e16a93 100644
--- a/tests/lib/test_utils.py
+++ b/tests/lib/test_utils.py
@@ -1,11 +1,13 @@
# file: test_utils.py
# author: mbiokyle29
+import logging
import os
import unittest
from shutil import rmtree
from tempfile import mkdtemp, NamedTemporaryFile
+from unittest.mock import MagicMock
-from place.lib.utils import is_dir
+from place.lib.utils import is_dir, configure_logger
class TestIsDir(unittest.TestCase):
@@ -33,3 +35,16 @@ class TestIsDir(unittest.TestCase):
with NamedTemporaryFile(dir=self.test_dir) as temp_file:
file_path = os.path.join(self.test_dir, temp_file.name)
self.assertFalse(is_dir(file_path))
+
+
+class TestConfigureLogger(unittest.TestCase):
+ """ Tests for the configure_logger utility function """
+
+ def test_configure_logger(self):
+ """ Test that configure_logger configured the logger as expected """
+ mock_logger = MagicMock()
+
+ configure_logger(mock_logger, logging.INFO)
+
+ mock_logger.setLevel.assert_called_once_with(logging.INFO)
+ mock_logger.addHandler.assert_called_once()
diff --git a/tests/test_main.py b/tests/test_main.py
index f078fbb..223b7d7 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -20,6 +20,26 @@ class TestMainCli(unittest.TestCase):
result = self.runner.invoke(cli, ["foo"])
self.assertEqual(result.exit_code, -1)
+ def test_cli_verbose(self):
+ """ Test that cli sets the log level to info when given --verbose/-v """
+ for verbose_flag in ["--verbose", "-v"]:
+ with self.runner.isolated_filesystem() as fs:
+ source_file = NamedTemporaryFile(dir=fs, suffix=".txt", delete=False)
+ target_name = os.path.join(fs, "foo.txt")
+ result = self.runner.invoke(cli, [verbose_flag, source_file.name, target_name])
+
+ self.assertEqual(result.exit_code, 0)
+
+ def test_cli_debug(self):
+ """ Test that cli sets the log level to debug when given --debug/-d """
+ with self.runner.isolated_filesystem() as fs:
+ source_file = NamedTemporaryFile(dir=fs, suffix=".txt", delete=False)
+ target_name = os.path.join(fs, "foo.txt")
+ result = self.runner.invoke(cli, ["--debug", source_file.name, target_name])
+
+ self.assertEqual(result.exit_code, 0)
+ self.assertIn("DEBUG", result.output)
+
def test_cli_rename_file(self):
""" Test that cli renames a file when given 1 source and 1 target (not dir) """
with self.runner.isolated_filesystem() as fs:
|
build out logging
Place currently does not have any logging setup. A root logger should be configured in cli:main and provisions should be made for other modules to create logger instances (just make sure this works). By default only `error` should be logged. A command line argument should be added `-v`/`--verbose` which should enabled `warn` and `info` level logging, and a `--debug` argument for `debug` level logging. The levels should be configured on the root logger such that all other loggers inherit.
AC
- root logger configured in cli:main with default level `error`, and stream handler and a sane log line config
- `-v/--verbose` and `--debug` command line options are added
- `-v/--verbose` command line option sets log level to info (including warning)
- `--debug` command line option sets log level to debug
- Other modules (example `lib/utils.py`) can register loggers which inherit the log level and format from the root logger. Prove this works with tests, you can use the cli.Runner result like here: https://github.com/mbiokyle29/place/blob/master/tests/test_main.py#L53. It has an `.output` attr with the output of the call
|
0.0
|
994186e72f63004c7882e0a9f2434bd0a361bf23
|
[
"tests/lib/test_utils.py::TestIsDir::test_is_dir",
"tests/lib/test_utils.py::TestIsDir::test_is_dir_dne",
"tests/lib/test_utils.py::TestIsDir::test_is_dir_is_file",
"tests/lib/test_utils.py::TestConfigureLogger::test_configure_logger",
"tests/test_main.py::TestMainCli::test_cli_debug",
"tests/test_main.py::TestMainCli::test_cli_move_to_dir_multiple_files",
"tests/test_main.py::TestMainCli::test_cli_move_to_dir_single_file",
"tests/test_main.py::TestMainCli::test_cli_no_target",
"tests/test_main.py::TestMainCli::test_cli_rename_dir",
"tests/test_main.py::TestMainCli::test_cli_rename_file",
"tests/test_main.py::TestMainCli::test_cli_verbose"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-03-18 19:07:23+00:00
|
mit
| 3,838 |
|
mc706__changelog-cli-34
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a4a3db0..eec5d1f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,6 +16,7 @@ This project adheres to [Semantic Versioning](http://semver.org/) and [Keep a Ch
* Existing CHANGELOGs will start using these headers after the new run of `changelog release`
### Fixed
+* Fix changelog release --<release type> --yes
* Format release lines in the same format that keepachangelog.com does
* Fix Description for pypi
diff --git a/src/changelog/commands.py b/src/changelog/commands.py
index d038747..7b29803 100644
--- a/src/changelog/commands.py
+++ b/src/changelog/commands.py
@@ -60,7 +60,7 @@ def release(release_type: str, auto_confirm: bool) -> None:
try:
new_version = CL.get_new_release_version(release_type)
if auto_confirm:
- CL.cut_release()
+ CL.cut_release(release_type)
else:
if click.confirm(f"Planning on releasing version {new_version}. Proceed?"):
CL.cut_release(release_type)
|
mc706/changelog-cli
|
c7b6d92cafd7b261f22ecdccbe36434ed8e268a6
|
diff --git a/tests/integration/test_cli.py b/tests/integration/test_cli.py
index 9c3e472..9de770d 100644
--- a/tests/integration/test_cli.py
+++ b/tests/integration/test_cli.py
@@ -128,6 +128,15 @@ class CliIntegrationTestCase(unittest.TestCase):
suggest = self.runner.invoke(cli, ['current'])
self.assertEqual(suggest.output.strip(), '0.1.0')
+ def test_cli_release_y_specify_type(self):
+ with self.runner.isolated_filesystem():
+ self.runner.invoke(cli, ['init'])
+ self.runner.invoke(cli, ['added', 'Adding a new feature'])
+ result = self.runner.invoke(cli, ['release', '--major', '--yes'])
+ self.assertTrue(result)
+ suggest = self.runner.invoke(cli, ['current'])
+ self.assertEqual(suggest.output.strip(), '1.0.0')
+
def test_cli_release_missing(self):
with self.runner.isolated_filesystem():
result = self.runner.invoke(cli, ['release'])
|
The --yes flag on release results in a release type flag being ignored
It seems that using the `--yes` flag with release causes the recommended release type to be used rather than one specified by the command. See examples below.
It looks like a simple fix - will hopefully raise a PR as soon as I can get my head around how the tests work!
```
$ changelog suggest
0.7.1
$ changelog release --patch
Planning on releasing version 0.7.1. Proceed? [y/N]: N
$ changelog release --minor
Planning on releasing version 0.8.0. Proceed? [y/N]: N
$ changelog release --major
Planning on releasing version 1.0.0. Proceed? [y/N]: N
$ changelog release --major --yes
$ changelog current
0.7.1
```
|
0.0
|
c7b6d92cafd7b261f22ecdccbe36434ed8e268a6
|
[
"tests/integration/test_cli.py::CliIntegrationTestCase::test_cli_release_y_specify_type"
] |
[
"tests/integration/test_cli.py::CliIntegrationTestCase::test_cli_added",
"tests/integration/test_cli.py::CliIntegrationTestCase::test_cli_added_missing",
"tests/integration/test_cli.py::CliIntegrationTestCase::test_cli_changed",
"tests/integration/test_cli.py::CliIntegrationTestCase::test_cli_changed_missing",
"tests/integration/test_cli.py::CliIntegrationTestCase::test_cli_current",
"tests/integration/test_cli.py::CliIntegrationTestCase::test_cli_current_missing",
"tests/integration/test_cli.py::CliIntegrationTestCase::test_cli_fixed",
"tests/integration/test_cli.py::CliIntegrationTestCase::test_cli_fixed_missing",
"tests/integration/test_cli.py::CliIntegrationTestCase::test_cli_init",
"tests/integration/test_cli.py::CliIntegrationTestCase::test_cli_release",
"tests/integration/test_cli.py::CliIntegrationTestCase::test_cli_release_missing",
"tests/integration/test_cli.py::CliIntegrationTestCase::test_cli_release_y",
"tests/integration/test_cli.py::CliIntegrationTestCase::test_cli_removed",
"tests/integration/test_cli.py::CliIntegrationTestCase::test_cli_removed_missing",
"tests/integration/test_cli.py::CliIntegrationTestCase::test_cli_suggest",
"tests/integration/test_cli.py::CliIntegrationTestCase::test_cli_suggest_missing",
"tests/integration/test_cli.py::CliIntegrationTestCase::test_cli_suggest_type_fixed",
"tests/integration/test_cli.py::CliIntegrationTestCase::test_cli_suggest_type_removed",
"tests/integration/test_cli.py::CliIntegrationTestCase::test_cli_version_flag",
"tests/integration/test_cli.py::CliIntegrationTestCase::test_cli_view"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-04-30 04:39:56+00:00
|
mit
| 3,839 |
|
mciepluc__cocotb-coverage-70
|
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
new file mode 100644
index 0000000..3faa610
--- /dev/null
+++ b/.github/workflows/main.yml
@@ -0,0 +1,60 @@
+name: Regression Tests
+
+on:
+ push:
+ branches:
+ - master
+ pull_request:
+ branches:
+ - master
+
+jobs:
+
+ lint-flake8:
+ runs-on: ubuntu-latest
+ name: flake8
+ strategy:
+ fail-fast: false
+ matrix:
+ python-version: [3.9]
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python ${{matrix.python-version}}
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{matrix.python-version}}
+
+ - name: flake8
+ continue-on-error: true
+ run: |
+ pip install flake8
+ flake8
+
+ tests:
+
+ name: Python ${{matrix.python-version}}
+ runs-on: ubuntu-20.04
+
+ strategy:
+ fail-fast: false
+ matrix:
+ python-version: ['3.7', '3.8', '3.9', '3.10']
+
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python ${{matrix.python-version}}
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{matrix.python-version}}
+
+ - name: Install Python testing dependencies
+ run: |
+ pip install tox tox-gh-actions
+
+ - name: Install Icarus Verilog
+ run: |
+ sudo apt install -y --no-install-recommends iverilog
+
+ - name: Test
+ run: |
+ tox
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index be1c526..0000000
--- a/.travis.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-sudo: required
-language: python
-dist: xenial
-cache:
- directories:
- - iverilog
-
-python:
- - "3.7"
-
-before_install:
- - sudo apt-get install gperf
- - if [[ ! -e "./iverilog/README.txt" ]]; then rm -rf iverilog; git clone https://github.com/steveicarus/iverilog.git --depth=1 --branch v10_2; fi
- - cd iverilog && autoconf && ./configure && make -j2 && sudo make install && cd ..
- - pip install tox
-
-script:
- - tox -e py3
diff --git a/README.md b/README.md
index 0a43173..48f55b2 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
Functional Coverage and Constrained Randomization Extensions for Cocotb
[](http://cocotb-coverage.readthedocs.org/en/latest/)
-[](https://travis-ci.org/mciepluc/cocotb-coverage)
+[](https://github.com/mciepluc/cocotb-coverage/actions/workflows/main.yml)
[](https://pypi.org/project/cocotb-coverage/)
This package allows you to use constrained randomization and functional coverage techniques known from CRV (constrained random verification) and MDV (metric-driven verification) methodologies, available in SystemVerilog or _e_. Such extensions enable the implementation of an advanced verification environment for complex projects.
diff --git a/cocotb_coverage/coverage.py b/cocotb_coverage/coverage.py
index 5c903cd..c9bdfe1 100644
--- a/cocotb_coverage/coverage.py
+++ b/cocotb_coverage/coverage.py
@@ -378,7 +378,7 @@ class CoverItem(object):
"""
coverage = {}
for child in self._children:
- coverage.append(child.detailed_coverage)
+ coverage[child._name] = child.detailed_coverage
return coverage
@property
diff --git a/tox.ini b/tox.ini
index 46c9235..fd515c3 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = py3
+envlist = py37, py38, py39, py310
[testenv]
passenv =
@@ -10,8 +10,17 @@ whitelist_externals =
deps =
pytest
+ cocotb-bus
+ numpy
commands =
make -k -C tests
make -C examples/fifo/tests
make -C examples/pkt_switch/tests
+
+[gh-actions]
+python =
+ 3.7: py37
+ 3.8: py38
+ 3.9: py39
+ 3.10: py310
\ No newline at end of file
|
mciepluc/cocotb-coverage
|
9cdc3f0497398c4dc946f31f4a1cd5d6a8e66e9b
|
diff --git a/tests/test_coverage/coverage_test.py b/tests/test_coverage/coverage_test.py
index c884c9c..80b88ce 100644
--- a/tests/test_coverage/coverage_test.py
+++ b/tests/test_coverage/coverage_test.py
@@ -385,6 +385,33 @@ def test_bins_labels():
assert coverage.coverage_db["top.t10.cross"].coverage == 4
+
+# accessing 'CoverItem.detailed_coverage' field
+def test_coveritem_detailed_coverage():
+ print("Running test_coveritem_detailed_coverage")
+
+ for i in range(10):
+ x = random.randint(0, 10)
+
+ @coverage.CoverPoint("top.t11.c1", vname="i", bins=list(range(10)))
+ @coverage.CoverPoint("top.t11.c2", vname="x", bins=list(range(10)))
+ def sample(i, x):
+ pass
+
+ sample(i, x)
+
+ detailed_coverage = coverage.coverage_db['top'].detailed_coverage
+ assert isinstance(detailed_coverage, dict)
+ assert isinstance(detailed_coverage['top.t11'], dict)
+ assert isinstance(detailed_coverage['top.t11'], dict)
+
+ detailed_coverage = coverage.coverage_db['top.t11'].detailed_coverage
+ assert isinstance(detailed_coverage, dict)
+ assert len(detailed_coverage) == 2
+ assert detailed_coverage['top.t11.c1'] is not None
+ assert detailed_coverage['top.t11.c2'] is not None
+
+
def test_tutorial_coverpoint_transition():
addr_prev = collections.deque(4*[0], 4) # we would need up to 4 values in this example
|
Access to `coverage_db['top'].detailed_coverage` field is failed.
`CoverItem` class contains `detailed_coverage` property and it can be accessed accordingly to spec. But trying to address it (for instance: `coverage_db['top'].detailed_coverage`) leads to fail. The cause of exception is using `append()` method for a dict in `CoverItem` code:
```
coverage = {}
for child in self._children:
coverage.append(child.detailed_coverage)
```
|
0.0
|
9cdc3f0497398c4dc946f31f4a1cd5d6a8e66e9b
|
[
"tests/test_coverage/coverage_test.py::test_coveritem_detailed_coverage"
] |
[
"tests/test_coverage/coverage_test.py::test_simple_coverpoint",
"tests/test_coverage/coverage_test.py::test_coverpoint_in_class",
"tests/test_coverage/coverage_test.py::test_injective_coverpoint",
"tests/test_coverage/coverage_test.py::test_covercross",
"tests/test_coverage/coverage_test.py::test_at_least_and_weight",
"tests/test_coverage/coverage_test.py::test_callbacks",
"tests/test_coverage/coverage_test.py::test_xml_export",
"tests/test_coverage/coverage_test.py::test_covercheck",
"tests/test_coverage/coverage_test.py::test_print_coverage",
"tests/test_coverage/coverage_test.py::test_bins_labels",
"tests/test_coverage/coverage_test.py::test_tutorial_coverpoint_transition"
] |
{
"failed_lite_validators": [
"has_added_files",
"has_removed_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-01-04 17:10:31+00:00
|
bsd-2-clause
| 3,840 |
|
mcmtroffaes__sphinxcontrib-bibtex-260
|
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 4cc4127..e35f5c0 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -4,6 +4,12 @@
* Allow specific warnings to be suppressed (see issue #255, contributed by
stevenrhall).
+* Remove space between footnote and author for textual footnote citations in
+ the default foot referencing style.
+
+* Document how to use a backslash escaped space to suppress space before
+ footnotes (see issue #256, reported by hagenw).
+
2.3.0 (1 June 2021)
-------------------
diff --git a/README.rst b/README.rst
index c25e5e7..cf32819 100644
--- a/README.rst
+++ b/README.rst
@@ -108,17 +108,21 @@ represented by footnotes as follows:
.. code-block:: rest
See :footcite:t:`1987:nelson` for an introduction to non-standard analysis.
- Non-standard analysis is fun :footcite:p:`1987:nelson`.
+ Non-standard analysis is fun\ :footcite:p:`1987:nelson`.
.. footbibliography::
which will get rendered as:
-See Nelson [#Nel87b]_ for an introduction to non-standard analysis.
-Non-standard analysis is fun [#Nel87b]_.
+See Nelson\ [#Nel87b]_ for an introduction to non-standard analysis.
+Non-standard analysis is fun\ [#Nel87b]_.
.. [#Nel87b] Edward Nelson. *Radically Elementary Probability Theory*. Princeton University Press, 1987.
+Note the use of the
+`backslash escaped space <https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html#inline-markup>`_
+to suppress the space that would otherwise precede the footnote.
+
Typically, you have a single ``footbibliography`` directive
at the bottom of each document that has footnote citations.
Advanced use cases with multiple ``footbibliography`` directives
diff --git a/doc/usage.rst b/doc/usage.rst
index 031a130..f17d157 100644
--- a/doc/usage.rst
+++ b/doc/usage.rst
@@ -278,19 +278,23 @@ Roles and Directives
.. code-block:: rest
- We will make use of non-standard analysis :footcite:p:`1987:nelson`.
+ We will make use of non-standard analysis\ :footcite:p:`1987:nelson`.
which would be equivalent to the following LaTeX code:
.. code-block:: latex
- We will make use of non-standard analysis \footcite{1987:nelson}.
+ We will make use of non-standard analysis\footcite{1987:nelson}.
+
+ Note the use of the
+ `backslash escaped space <https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html#inline-markup>`_
+ to suppress the space that would otherwise precede the footnote.
As with all citation roles, multiple keys can be specified:
.. code-block:: rest
- I love analysis :footcite:p:`1987:nelson,2001:schechter`!
+ I love analysis\ :footcite:p:`1987:nelson,2001:schechter`!
.. rst:role:: footcite:t
diff --git a/src/sphinxcontrib/bibtex/style/referencing/foot.py b/src/sphinxcontrib/bibtex/style/referencing/foot.py
index 69e95b9..8431d12 100644
--- a/src/sphinxcontrib/bibtex/style/referencing/foot.py
+++ b/src/sphinxcontrib/bibtex/style/referencing/foot.py
@@ -22,7 +22,7 @@ class FootReferenceStyle(GroupReferenceStyle):
person: PersonStyle = PersonStyle()
#: Separator between text and reference for textual citations.
- text_reference_sep: Union["BaseText", str] = ' '
+ text_reference_sep: Union["BaseText", str] = ''
def __post_init__(self):
self.styles.extend([
|
mcmtroffaes/sphinxcontrib-bibtex
|
724897e30317df36f65e5568c988f260bf041e21
|
diff --git a/test/test_footcite.py b/test/test_footcite.py
index 1cff584..533a56d 100644
--- a/test/test_footcite.py
+++ b/test/test_footcite.py
@@ -15,22 +15,22 @@ def test_footcite_roles(app, warning) -> None:
tests = [
("p", " [1] "),
("ps", " [1] "),
- ("t", " de Du *et al.* [1] "),
- ("ts", " de Du, Em, and Fa [1] "),
- ("ct", " De Du *et al.* [1] "),
- ("cts", " De Du, Em, and Fa [1] "),
+ ("t", " de Du *et al.*[1] "),
+ ("ts", " de Du, Em, and Fa[1] "),
+ ("ct", " De Du *et al.*[1] "),
+ ("cts", " De Du, Em, and Fa[1] "),
("p", " [2][3] "),
("ps", " [2][3] "),
- ("t", " al Ap [2], Be and Ci [3] "),
- ("ts", " al Ap [2], Be and Ci [3] "),
- ("ct", " Al Ap [2], Be and Ci [3] "),
- ("cts", " Al Ap [2], Be and Ci [3] "),
+ ("t", " al Ap[2], Be and Ci[3] "),
+ ("ts", " al Ap[2], Be and Ci[3] "),
+ ("ct", " Al Ap[2], Be and Ci[3] "),
+ ("cts", " Al Ap[2], Be and Ci[3] "),
("p", " [4][5][6] "),
("ps", " [4][5][6] "),
- ("t", " Ge [4], Hu [5], Ix [6] "),
- ("ts", " Ge [4], Hu [5], Ix [6] "),
- ("ct", " Ge [4], Hu [5], Ix [6] "),
- ("cts", " Ge [4], Hu [5], Ix [6] "),
+ ("t", " Ge[4], Hu[5], Ix[6] "),
+ ("ts", " Ge[4], Hu[5], Ix[6] "),
+ ("ct", " Ge[4], Hu[5], Ix[6] "),
+ ("cts", " Ge[4], Hu[5], Ix[6] "),
]
for role, text in tests:
escaped_text = re.escape(text)
@@ -76,22 +76,22 @@ def test_footcite_style_custom(app, warning) -> None:
tests = [
("p", " [1] "),
("ps", " [1] "),
- ("t", " de Du et al [1] "),
- ("ts", " de Du & Em & Fa [1] "),
- ("ct", " De Du et al [1] "),
- ("cts", " De Du & Em & Fa [1] "),
+ ("t", " de Du et al[1] "),
+ ("ts", " de Du & Em & Fa[1] "),
+ ("ct", " De Du et al[1] "),
+ ("cts", " De Du & Em & Fa[1] "),
("p", " [2][3] "),
("ps", " [2][3] "),
- ("t", " al Ap [2]; Be & Ci [3] "),
- ("ts", " al Ap [2]; Be & Ci [3] "),
- ("ct", " Al Ap [2]; Be & Ci [3] "),
- ("cts", " Al Ap [2]; Be & Ci [3] "),
+ ("t", " al Ap[2]; Be & Ci[3] "),
+ ("ts", " al Ap[2]; Be & Ci[3] "),
+ ("ct", " Al Ap[2]; Be & Ci[3] "),
+ ("cts", " Al Ap[2]; Be & Ci[3] "),
("p", " [4][5][6] "),
("ps", " [4][5][6] "),
- ("t", " Ge [4]; Hu [5]; Ix [6] "),
- ("ts", " Ge [4]; Hu [5]; Ix [6] "),
- ("ct", " Ge [4]; Hu [5]; Ix [6] "),
- ("cts", " Ge [4]; Hu [5]; Ix [6] "),
+ ("t", " Ge[4]; Hu[5]; Ix[6] "),
+ ("ts", " Ge[4]; Hu[5]; Ix[6] "),
+ ("ct", " Ge[4]; Hu[5]; Ix[6] "),
+ ("cts", " Ge[4]; Hu[5]; Ix[6] "),
]
for role, text in tests:
escaped_text = re.escape(text)
|
Add a footnote citation at the end of a sentence
It is not uncommon to write a footnote in English like this:
> ... curve.¹
As I cannot write the role directly after the `.` in RST I can only write:
```rst
... curve. :footcite:`Name2021`
```
which produces

It should more look like this

Is there a trick to make this work?
|
0.0
|
724897e30317df36f65e5568c988f260bf041e21
|
[
"test/test_footcite.py::test_footcite_roles",
"test/test_footcite.py::test_footcite_style_custom"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-07-28 14:04:49+00:00
|
bsd-2-clause
| 3,841 |
|
mdickinson__pcgrandom-35
|
diff --git a/.travis.yml b/.travis.yml
index 5eb0243..11f1ed1 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -9,7 +9,7 @@ install:
- pip install -r requirements.txt
- pip install -e .
script:
- - coverage run -m unittest discover -v
+ - coverage run --branch -m unittest discover -v
after_success:
- codecov
notifications:
diff --git a/pcgrandom/pcg_common.py b/pcgrandom/pcg_common.py
index def0378..228a3a6 100644
--- a/pcgrandom/pcg_common.py
+++ b/pcgrandom/pcg_common.py
@@ -19,14 +19,86 @@ from __future__ import division
import bisect as _bisect
import collections as _collections
+import hashlib as _hashlib
import operator as _operator
import os as _os
from builtins import int as _int, range as _range
+from past.builtins import unicode as _unicode
from pcgrandom.distributions import Distributions
+def seed_from_system_entropy(bits):
+ """
+ Create a new integer seed from whatever entropy we can find.
+
+ Parameters
+ ----------
+ bits : nonnegative integer
+ Number of bits we need.
+
+ Returns
+ -------
+ seed : integer
+ Integer seed in the range 0 <= seed < 2**bits.
+ """
+ numbytes, excess = -(-bits // 8), -bits % 8
+ seed = _int.from_bytes(_os.urandom(numbytes), byteorder="big")
+ return seed >> excess
+
+
+def seed_from_object(obj, bits):
+ """
+ Create a new integer seed from the given Python object, in
+ a reproducible manner.
+
+ Parameters
+ ----------
+ obj : Object
+ The object to use to create the seed.
+ bits : nonnegative integer.
+ Number of bits needed for the seed. This function can produce
+ a maximum of 512 bits from a Unicode or string object.
+
+ Returns
+ -------
+ seed : integer
+ Integer seed in the range 0 <= seed < 2**bits.
+ """
+ # From an integer-like.
+ try:
+ obj_as_integer = _operator.index(obj)
+ except TypeError:
+ pass
+ else:
+ seed_mask = ~(~0 << bits)
+ seed = obj_as_integer & seed_mask
+ return seed
+
+ # For a Unicode or byte string.
+ if isinstance(obj, _unicode):
+ obj = obj.encode('utf8')
+
+ if isinstance(obj, bytes):
+ obj_hash = _hashlib.sha512(obj).digest()
+ numbytes, excess = -(-bits // 8), -bits % 8
+
+ if numbytes > len(obj_hash):
+ raise ValueError(
+ "Cannot provide more than {} bits of seed.".format(
+ 8 * len(obj_hash)))
+
+ seed = _int.from_bytes(obj_hash[:numbytes], byteorder="big") >> excess
+ return seed
+
+ raise TypeError(
+ "Unable to create seed from object of type {}. "
+ "Please use an integer, bytestring or Unicode string.".format(
+ type(obj))
+ )
+
+
class PCGCommon(Distributions):
"""
Common base class for the PCG random generators.
@@ -61,12 +133,11 @@ class PCGCommon(Distributions):
"""Initialize internal state from hashable object.
"""
if seed is None:
- nbytes = self._state_bits // 8
- seed = _int.from_bytes(_os.urandom(nbytes), byteorder="little")
+ integer_seed = seed_from_system_entropy(self._state_bits)
else:
- seed = _operator.index(seed)
+ integer_seed = seed_from_object(seed, self._state_bits)
- self._set_state_from_seed(seed)
+ self._set_state_from_seed(integer_seed)
self.gauss_next = None
def getstate(self):
|
mdickinson/pcgrandom
|
100c80b6d7d1ad43d4ed6422b5afe6dc9227e064
|
diff --git a/pcgrandom/test/test_pcg_common.py b/pcgrandom/test/test_pcg_common.py
index c3f76ad..f28ffd1 100644
--- a/pcgrandom/test/test_pcg_common.py
+++ b/pcgrandom/test/test_pcg_common.py
@@ -21,6 +21,10 @@ import collections
import itertools
import math
import pickle
+import unittest
+
+from pcgrandom.pcg_common import seed_from_system_entropy, seed_from_object
+
# 99% values of the chi-squared statistic used in the goodness-of-fit tests
# below, indexed by degrees of freedom. Values calculated using
@@ -34,6 +38,25 @@ chisq_99percentile = {
}
+class TestSeedingFunctions(unittest.TestCase):
+ def test_seed_from_system_entropy_different(self):
+ seeds = [seed_from_system_entropy(bits=64) for _ in range(10)]
+ for seed in seeds:
+ self.assertEqual(seed % 2**64, seed)
+ self.assertEqual(len(seeds), len(set(seeds)))
+
+ def test_seed_from_object_large_bits(self):
+ with self.assertRaises(ValueError):
+ seed_from_object("some string or other", 513)
+ seed = seed_from_object("some string or other", 512)
+ self.assertGreater(seed.bit_length(), 500)
+ self.assertLessEqual(seed.bit_length(), 512)
+
+ def test_seed_from_object_bad_object_type(self):
+ with self.assertRaises(TypeError):
+ seed_from_object(3.4, 32)
+
+
class TestPCGCommon(object):
"""
Mixin class providing tests common to all generators in the
@@ -58,6 +81,18 @@ class TestPCGCommon(object):
self.assertNotEqual(gen1.getstate(), gen2.getstate())
self.assertEqual(gen1.getrandbits(64), gen3.getrandbits(64))
+ def test_seed_from_integer(self):
+ gen1 = self.gen_class(seed=17289)
+ gen2 = self.gen_class(seed=17289 + 2**self.gen_class._state_bits)
+ gen3 = self.gen_class(seed=17289 - 2**self.gen_class._state_bits)
+ self.assertEqual(gen1.getstate(), gen2.getstate())
+ self.assertEqual(gen1.getstate(), gen3.getstate())
+
+ def test_seed_from_bytes_and_unicode(self):
+ gen1 = self.gen_class(seed=b"your mother was a hamster")
+ gen2 = self.gen_class(seed=u"your mother was a hamster")
+ self.assertEqual(gen1.getstate(), gen2.getstate())
+
def test_sequence_default(self):
gen = self.gen_class(seed=12345)
self.assertEqual(gen._increment, gen._default_increment)
|
Revisit seeding
- While we definitely don't want to use Python hashes for seeding, it would still be nice to allow bytestrings and Unicode strings.
- It's possible that not all platforms will have `os.urandom`; we may need some kind of fallback.
|
0.0
|
100c80b6d7d1ad43d4ed6422b5afe6dc9227e064
|
[
"pcgrandom/test/test_pcg_common.py::TestSeedingFunctions::test_seed_from_object_bad_object_type",
"pcgrandom/test/test_pcg_common.py::TestSeedingFunctions::test_seed_from_object_large_bits",
"pcgrandom/test/test_pcg_common.py::TestSeedingFunctions::test_seed_from_system_entropy_different"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-07-29 17:27:25+00:00
|
apache-2.0
| 3,842 |
|
mdolab__baseclasses-38
|
diff --git a/baseclasses/utils.py b/baseclasses/utils.py
index a83a1e9..fa012ae 100644
--- a/baseclasses/utils.py
+++ b/baseclasses/utils.py
@@ -5,32 +5,83 @@ class CaseInsensitiveDict(dict):
create an instance where keys are not strings.
All common Python dictionary operations are supported, and additional operations
can be added easily.
+ In order to preserve capitalization on key initialization, the implementation relies on storing
+ a dictionary of mappings between the lowercase representation and the initial capitalization,
+ which is stored in self.map.
+ By looking up in these mappings, we can check any new keys against existing keys and compare them
+ in a case-insensitive fashion.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
- # convert keys to lower case
- for k in list(self.keys()):
- v = super().pop(k)
- self.__setitem__(k, v)
+ self._updateMap()
+
+ def _updateMap(self):
+ """
+ This function updates self.map based on self.keys().
+ """
+ self.map = {k.lower(): k for k in self.keys()}
+
+ def _getKey(self, key):
+ """
+ This function checks if the input key already exists.
+ Note that this check is case insensitive
+
+ Parameters
+ ----------
+ key : str
+ the key to check
+
+ Returns
+ -------
+ str, None
+ Returns the original key if it exists. Otherwise returns None.
+ """
+ if key.lower() in self.map:
+ return self.map[key.lower()]
+ else:
+ return None
def __setitem__(self, key, value):
- super().__setitem__(key.lower(), value)
+ existingKey = self._getKey(key)
+ if existingKey:
+ key = existingKey
+ else:
+ self.map[key.lower()] = key
+ super().__setitem__(key, value)
def __getitem__(self, key):
- return super().__getitem__(key.lower())
+ existingKey = self._getKey(key)
+ if existingKey:
+ key = existingKey
+ return super().__getitem__(key)
def __contains__(self, key):
- return super().__contains__(key.lower())
+ return key.lower() in self.map.keys()
def __delitem__(self, key):
- super().__delitem__(key.lower())
+ existingKey = self._getKey(key)
+ if existingKey:
+ key = existingKey
+ self.map.pop(key.lower())
+ super().__delitem__(key)
def pop(self, key, *args, **kwargs):
- super().pop(key.lower(), *args, **kwargs)
+ existingKey = self._getKey(key)
+ if existingKey:
+ key = existingKey
+ self.map.pop(key.lower())
+ super().pop(key, *args, **kwargs)
def get(self, key, *args, **kwargs):
- return super().get(key.lower(), *args, **kwargs)
+ existingKey = self._getKey(key)
+ if existingKey:
+ key = existingKey
+ return super().get(key, *args, **kwargs)
+
+ def update(self, d, *args, **kwargs):
+ super().update(d, *args, **kwargs)
+ self._updateMap()
class CaseInsensitiveSet(set):
@@ -40,20 +91,78 @@ class CaseInsensitiveSet(set):
create an instance where elements are not strings.
All common Python set operations are supported, and additional operations
can be added easily.
+ In order to preserve capitalization on key initialization, the implementation relies on storing
+ a dictionary of mappings between the lowercase representation and the initial capitalization,
+ which is stored in self.map.
+ By looking up in these mappings, we can check any new keys against existing keys and compare them
+ in a case-insensitive fashion.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
- # convert entries to lowe case
- for k in self:
- super().remove(k)
- self.add(k)
+ self._updateMap()
+
+ def _updateMap(self):
+ self.map = {k.lower(): k for k in list(self)}
+
+ def _getItem(self, item):
+ """
+ This function checks if the input item already exists.
+ Note that this check is case insensitive
+
+ Parameters
+ ----------
+ item : str
+ the item to check
+
+ Returns
+ -------
+ str, None
+ Returns the original item if it exists. Otherwise returns None.
+ """
+ if item.lower() in self.map:
+ return self.map[item.lower()]
+ else:
+ return None
def add(self, item):
- super().add(item.lower())
+ existingItem = self._getItem(item)
+ if existingItem:
+ item = existingItem
+ else:
+ self.map[item.lower()] = item
+ super().add(item)
+
+ def pop(self, item, *args, **kwargs):
+ existingItem = self._getItem(item)
+ if existingItem:
+ item = existingItem
+ self.map.pop(item.lower())
+ super().pop(item, *args, **kwargs)
+
+ def update(self, d, *args, **kwargs):
+ super().update(d, *args, **kwargs)
+ self._updateMap()
+
+ def issubset(self, other):
+ lowerSelf = set([s.lower() for s in self])
+ lowerOther = set([s.lower() for s in other])
+ return lowerSelf.issubset(lowerOther)
+
+ def remove(self, item):
+ existingItem = self._getItem(item)
+ if existingItem:
+ item = existingItem
+ self.map.pop(item.lower())
+ super().remove(item)
def __contains__(self, item):
- return super().__contains__(item.lower())
+ return item.lower() in self.map.keys()
+
+ def __eq__(self, other):
+ a = set([s.lower() for s in list(self)])
+ b = set([o.lower() for o in list(other)])
+ return a.__eq__(b)
class Error(Exception):
|
mdolab/baseclasses
|
6c82a993d67bf773c43757c16f288d035c362b80
|
diff --git a/tests/test_BaseSolver.py b/tests/test_BaseSolver.py
index af66b0c..10fe5e2 100644
--- a/tests/test_BaseSolver.py
+++ b/tests/test_BaseSolver.py
@@ -95,10 +95,10 @@ class TestOptions(unittest.TestCase):
# test Errors
with self.assertRaises(Error) as context:
solver.getOption("invalidOption") # test invalid option in getOption
- self.assertTrue("intoption" in context.exception.message) # check that intoption is offered as a suggestion
+ self.assertTrue("intOption" in context.exception.message) # check that intoption is offered as a suggestion
with self.assertRaises(Error) as context:
solver.setOption("invalidOption", 1) # test invalid option in setOption
- self.assertTrue("intoption" in context.exception.message) # check that intoption is offered as a suggestion
+ self.assertTrue("intOption" in context.exception.message) # check that intoption is offered as a suggestion
with self.assertRaises(Error):
solver.setOption("intOption", 4) # test value not in list
with self.assertRaises(Error):
diff --git a/tests/test_CaseInsensitve.py b/tests/test_CaseInsensitve.py
index 5fd85e1..ca2e5cc 100644
--- a/tests/test_CaseInsensitve.py
+++ b/tests/test_CaseInsensitve.py
@@ -13,16 +13,19 @@ class TestCaseInsensitiveClasses(unittest.TestCase):
# test __setitem__
d["OPTION1"] = value2
self.assertEqual(d["option1"], value2)
+ self.assertEqual("OPtion1", d.map["option1"])
# test __contains__
self.assertIn("Option1", d)
- d["option2"] = value1
+ d["optioN2"] = value1
self.assertEqual(len(d), 2)
self.assertIn("OPTION2", d)
+ # test original capitalization is preserved on new key
+ self.assertEqual("optioN2", d.map["option2"])
# test pop()
d.pop("Option2")
self.assertEqual(len(d), 1)
self.assertEqual(d.get("opTION1"), value2)
- self.assertEqual(list(d), ["option1"])
+ self.assertEqual(list(d), ["OPtion1"])
d2 = CaseInsensitiveDict({"OPTION3": value3})
# test update()
d.update(d2)
@@ -33,8 +36,12 @@ class TestCaseInsensitiveClasses(unittest.TestCase):
# test __contains__ and add()
s = CaseInsensitiveSet({"Option1"})
self.assertIn("OPTION1", s)
+ # test original capitalization is preserved on initialization
+ self.assertEqual("Option1", s.map["option1"])
s.add("OPTION2")
self.assertIn("option2", s)
+ # test original capitalization is preserved on new item
+ self.assertEqual("OPTION2", s.map["option2"])
# test update()
s2 = CaseInsensitiveSet({"OPTION2", "opTION3"})
s.update(s2)
|
Update `caseInsensitveDict` to preserve initial capitalization
# Description
Currently, keys are cast to lower case on initialization and subsequent addition. We can instead keep those as they are, and just change the string comparison so that they are case insensitive. We should do the same thing for sets also.
|
0.0
|
6c82a993d67bf773c43757c16f288d035c362b80
|
[
"tests/test_BaseSolver.py::TestOptions::test_options",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveClasses::test_dict",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveClasses::test_set"
] |
[
"tests/test_BaseSolver.py::TestOptions::test_caseSensitive",
"tests/test_BaseSolver.py::TestOptions::test_checkDefaultOptions",
"tests/test_BaseSolver.py::TestComm::test_comm_without_mpi",
"tests/test_BaseSolver.py::TestInforms::test_informs"
] |
{
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-01-23 01:48:37+00:00
|
apache-2.0
| 3,843 |
|
mdolab__baseclasses-54
|
diff --git a/baseclasses/utils.py b/baseclasses/utils.py
index 61a8044..598b6fb 100644
--- a/baseclasses/utils.py
+++ b/baseclasses/utils.py
@@ -1,5 +1,6 @@
from collections.abc import MutableMapping, MutableSet
from typing import Any, Dict, Optional
+from pprint import pformat
class CaseInsensitiveDict(MutableMapping):
@@ -93,7 +94,7 @@ class CaseInsensitiveDict(MutableMapping):
return selfLower.__eq__(otherLower)
def __repr__(self):
- return str(self.data)
+ return pformat(self.data)
class CaseInsensitiveSet(MutableSet):
@@ -202,7 +203,7 @@ class CaseInsensitiveSet(MutableSet):
return lowerSelf.issubset(lowerOther)
def __repr__(self):
- return str(self.data)
+ return pformat(self.data)
class Error(Exception):
|
mdolab/baseclasses
|
df6d61b78d13d1ad792c6fedab0a82fa100a3dc0
|
diff --git a/tests/test_CaseInsensitve.py b/tests/test_CaseInsensitve.py
index 5b2b0d0..38931c0 100644
--- a/tests/test_CaseInsensitve.py
+++ b/tests/test_CaseInsensitve.py
@@ -1,5 +1,6 @@
import unittest
import pickle
+from pprint import pformat
from baseclasses.utils import CaseInsensitiveDict, CaseInsensitiveSet
from parameterized import parameterized
from baseclasses.decorators import require_mpi
@@ -128,6 +129,14 @@ class TestCaseInsensitiveDict(unittest.TestCase):
def test_repr(self):
self.assertEqual(self.d2.__str__(), self.d2.data.__str__())
+ def test_repr_pprint(self):
+ long_dict = {"b-longstring": 2, "a-longstring": 1, "c-longstring": 3, "e-longstring": 5, "d-longstring": 4}
+ string_format = pformat(CaseInsensitiveDict(long_dict))
+ string_expected = (
+ "{'a-longstring': 1,\n 'b-longstring': 2,\n 'c-longstring': 3,\n 'd-longstring': 4,\n 'e-longstring': 5}"
+ )
+ self.assertEqual(string_format, string_expected)
+
class TestCaseInsensitiveSet(unittest.TestCase):
def setUp(self):
@@ -225,6 +234,14 @@ class TestCaseInsensitiveSet(unittest.TestCase):
def test_repr(self):
self.assertEqual(self.s2.__str__(), self.s2.data.__str__())
+ def test_repr_pprint(self):
+ long_set = {"a-longstring", "b-longstring", "c-longstring", "d-longstring", "e-longstring", "f-longstring"}
+ string_format = pformat(CaseInsensitiveSet(long_set))
+ string_expected = (
+ "{'a-longstring',\n 'b-longstring',\n 'c-longstring',\n 'd-longstring',\n 'e-longstring',\n 'f-longstring'}"
+ )
+ self.assertEqual(string_format, string_expected)
+
class TestParallel(unittest.TestCase):
N_PROCS = 2
|
CaseInsensitive containers do not work with pprint
## Description
The `CaseInsensitive` containers are not properly formatted when printed with `pprint`. This bug was introduced with the ABC implementation in #51. I realized this because the ADflow options dictionary is now printed in one massive line instead of having line breaks. The same bug applies to both `CaseInsensitiveDict` and `CaseInsensitiveSet`.
### Steps to reproduce issue
Here is a MWE for `CaseInsensitiveDict`:
```
from baseclasses.utils import CaseInsensitiveDict
from pprint import pprint
sensitive = {"b-longstring": 2, "a-longstring": 1, "c-longstring": 3, "e-longstring": 5, "d-longstring": 4}
insensitive = CaseInsensitiveDict(sensitive)
pprint(sensitive)
pprint(insensitive)
```
### Current behavior
```
>>> pprint(sensitive)
{'a-longstring': 1,
'b-longstring': 2,
'c-longstring': 3,
'd-longstring': 4,
'e-longstring': 5}
>>> pprint(insensitive)
{'b-longstring': 2, 'a-longstring': 1, 'c-longstring': 3, 'e-longstring': 5, 'd-longstring': 4}
```
### Expected behavior
```
>>> pprint(sensitive)
{'a-longstring': 1,
'b-longstring': 2,
'c-longstring': 3,
'd-longstring': 4,
'e-longstring': 5}
>>> pprint(insensitive)
{'a-longstring': 1,
'b-longstring': 2,
'c-longstring': 3,
'd-longstring': 4,
'e-longstring': 5}
```
|
0.0
|
df6d61b78d13d1ad792c6fedab0a82fa100a3dc0
|
[
"tests/test_CaseInsensitve.py::TestCaseInsensitiveDict::test_repr_pprint",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveSet::test_repr_pprint"
] |
[
"tests/test_CaseInsensitve.py::TestCaseInsensitiveDict::test_contains",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveDict::test_del",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveDict::test_empty_init",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveDict::test_equal",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveDict::test_get",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveDict::test_invalid_init",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveDict::test_items",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveDict::test_iter",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveDict::test_keys",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveDict::test_len",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveDict::test_pickle",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveDict::test_pop",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveDict::test_repr",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveDict::test_set",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveDict::test_update",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveDict::test_update_dict_with_regular_dict",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveDict::test_update_regular_dict_with_dict",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveDict::test_values",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveSet::test_add_contains",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveSet::test_empty_init",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveSet::test_equal",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveSet::test_invalid_init",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveSet::test_iter",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveSet::test_len",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveSet::test_pickle",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveSet::test_remove",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveSet::test_repr",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveSet::test_subsets",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveSet::test_union",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveSet::test_update",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveSet::test_update_regular_set_with_set",
"tests/test_CaseInsensitve.py::TestCaseInsensitiveSet::test_update_with_regular_set"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2021-05-06 16:59:43+00:00
|
apache-2.0
| 3,844 |
|
mdowds__python-mock-firestore-13
|
diff --git a/mockfirestore/__init__.py b/mockfirestore/__init__.py
index 5636753..a7f18de 100644
--- a/mockfirestore/__init__.py
+++ b/mockfirestore/__init__.py
@@ -12,3 +12,4 @@ from mockfirestore.document import DocumentSnapshot, DocumentReference
from mockfirestore.collection import CollectionReference
from mockfirestore.query import Query
from mockfirestore._helpers import Timestamp
+from mockfirestore.transaction import Transaction
diff --git a/mockfirestore/client.py b/mockfirestore/client.py
index c66533a..50bbbb4 100644
--- a/mockfirestore/client.py
+++ b/mockfirestore/client.py
@@ -1,16 +1,7 @@
+from typing import Iterable
from mockfirestore.collection import CollectionReference
-
-
-
-
-
-
-
-
-
-
-
-
+from mockfirestore.document import DocumentReference, DocumentSnapshot
+from mockfirestore.transaction import Transaction
class MockFirestore:
@@ -26,5 +17,13 @@ class MockFirestore:
def reset(self):
self._data = {}
+ def get_all(self, references: Iterable[DocumentReference],
+ field_paths=None,
+ transaction=None) -> Iterable[DocumentSnapshot]:
+ for doc_ref in set(references):
+ yield doc_ref.get()
+
+ def transaction(self, **kwargs) -> Transaction:
+ return Transaction(self, **kwargs)
diff --git a/mockfirestore/document.py b/mockfirestore/document.py
index ccd3ba7..657143c 100644
--- a/mockfirestore/document.py
+++ b/mockfirestore/document.py
@@ -27,6 +27,15 @@ class DocumentSnapshot:
timestamp = Timestamp.from_now()
return timestamp
+ @property
+ def update_time(self) -> Timestamp:
+ return self.create_time
+
+ @property
+ def read_time(self) -> Timestamp:
+ timestamp = Timestamp.from_now()
+ return timestamp
+
def get(self, field_path: str) -> Any:
if not self.exists:
return None
diff --git a/mockfirestore/query.py b/mockfirestore/query.py
index 2e94f07..7f32e25 100644
--- a/mockfirestore/query.py
+++ b/mockfirestore/query.py
@@ -24,7 +24,7 @@ class Query:
for field_filter in field_filters:
self._add_field_filter(*field_filter)
- def stream(self) -> Iterator[DocumentSnapshot]:
+ def stream(self, transaction=None) -> Iterator[DocumentSnapshot]:
doc_snapshots = self.parent.stream()
for field, compare, value in self._field_filters:
diff --git a/mockfirestore/transaction.py b/mockfirestore/transaction.py
new file mode 100644
index 0000000..7f06d2d
--- /dev/null
+++ b/mockfirestore/transaction.py
@@ -0,0 +1,119 @@
+from functools import partial
+import random
+from typing import Iterable, Callable
+from mockfirestore._helpers import generate_random_string, Timestamp
+from mockfirestore.document import DocumentReference, DocumentSnapshot
+from mockfirestore.query import Query
+
+MAX_ATTEMPTS = 5
+_MISSING_ID_TEMPLATE = "The transaction has no transaction ID, so it cannot be {}."
+_CANT_BEGIN = "The transaction has already begun. Current transaction ID: {!r}."
+_CANT_ROLLBACK = _MISSING_ID_TEMPLATE.format("rolled back")
+_CANT_COMMIT = _MISSING_ID_TEMPLATE.format("committed")
+
+
+class WriteResult:
+ def __init__(self):
+ self.update_time = Timestamp.from_now()
+
+
+class Transaction:
+ """
+ This mostly follows the model from
+ https://googleapis.dev/python/firestore/latest/transaction.html
+ """
+ def __init__(self, client,
+ max_attempts=MAX_ATTEMPTS, read_only=False):
+ self._client = client
+ self._max_attempts = max_attempts
+ self._read_only = read_only
+ self._id = None
+ self._write_ops = []
+ self.write_results = None
+
+ @property
+ def in_progress(self):
+ return self._id is not None
+
+ @property
+ def id(self):
+ return self._id
+
+ def _begin(self, retry_id=None):
+ # generate a random ID to set the transaction as in_progress
+ self._id = generate_random_string()
+
+ def _clean_up(self):
+ self._write_ops.clear()
+ self._id = None
+
+ def _rollback(self):
+ if not self.in_progress:
+ raise ValueError(_CANT_ROLLBACK)
+
+ self._clean_up()
+
+ def _commit(self) -> Iterable[WriteResult]:
+ if not self.in_progress:
+ raise ValueError(_CANT_COMMIT)
+
+ results = []
+ for write_op in self._write_ops:
+ write_op()
+ results.append(WriteResult())
+ self.write_results = results
+ self._clean_up()
+ return results
+
+ def get_all(self,
+ references: Iterable[DocumentReference]) -> Iterable[DocumentSnapshot]:
+ return self._client.get_all(references)
+
+ def get(self, ref_or_query) -> Iterable[DocumentSnapshot]:
+ if isinstance(ref_or_query, DocumentReference):
+ return self._client.get_all([ref_or_query])
+ elif isinstance(ref_or_query, Query):
+ return ref_or_query.stream()
+ else:
+ raise ValueError(
+ 'Value for argument "ref_or_query" must be a DocumentReference or a Query.'
+ )
+
+ # methods from
+ # https://googleapis.dev/python/firestore/latest/batch.html#google.cloud.firestore_v1.batch.WriteBatch
+
+ def _add_write_op(self, write_op: Callable):
+ if self._read_only:
+ raise ValueError(
+ "Cannot perform write operation in read-only transaction."
+ )
+ self._write_ops.append(write_op)
+
+ def create(self, reference: DocumentReference, document_data):
+ # this is a no-op, because if we have a DocumentReference
+ # it's already in the MockFirestore
+ ...
+
+ def set(self, reference: DocumentReference, document_data: dict,
+ merge=False):
+ write_op = partial(reference.set, document_data, merge=merge)
+ self._add_write_op(write_op)
+
+ def update(self, reference: DocumentReference,
+ field_updates: dict, option=None):
+ write_op = partial(reference.update, field_updates)
+ self._add_write_op(write_op)
+
+ def delete(self, reference: DocumentReference, option=None):
+ write_op = reference.delete
+ self._add_write_op(write_op)
+
+ def commit(self):
+ return self._commit()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ if exc_type is None:
+ self.commit()
|
mdowds/python-mock-firestore
|
f79c9b0034a39678be3ea1222c648d153f0d8206
|
diff --git a/tests/test_document_snapshot.py b/tests/test_document_snapshot.py
index 7d60e4b..896f45b 100644
--- a/tests/test_document_snapshot.py
+++ b/tests/test_document_snapshot.py
@@ -46,6 +46,38 @@ class TestDocumentSnapshot(TestCase):
doc_snapshot = doc_ref.get()
self.assertIs(doc_ref, doc_snapshot.reference)
+ def test_documentSnapshot_id(self):
+ fs = MockFirestore()
+ fs._data = {'foo': {
+ 'first': {'id': 1}
+ }}
+ doc = fs.collection('foo').document('first').get()
+ self.assertIsInstance(doc.id, str)
+
+ def test_documentSnapshot_create_time(self):
+ fs = MockFirestore()
+ fs._data = {'foo': {
+ 'first': {'id': 1}
+ }}
+ doc = fs.collection('foo').document('first').get()
+ self.assertIsNotNone(doc.create_time)
+
+ def test_documentSnapshot_update_time(self):
+ fs = MockFirestore()
+ fs._data = {'foo': {
+ 'first': {'id': 1}
+ }}
+ doc = fs.collection('foo').document('first').get()
+ self.assertIsNotNone(doc.update_time)
+
+ def test_documentSnapshot_read_time(self):
+ fs = MockFirestore()
+ fs._data = {'foo': {
+ 'first': {'id': 1}
+ }}
+ doc = fs.collection('foo').document('first').get()
+ self.assertIsNotNone(doc.read_time)
+
def test_documentSnapshot_get_by_existing_field_path(self):
fs = MockFirestore()
fs._data = {'foo': {
diff --git a/tests/test_mock_client.py b/tests/test_mock_client.py
new file mode 100644
index 0000000..227c5c7
--- /dev/null
+++ b/tests/test_mock_client.py
@@ -0,0 +1,19 @@
+from unittest import TestCase
+
+from mockfirestore import MockFirestore
+
+
+class TestMockFirestore(TestCase):
+ def test_client_get_all(self):
+ fs = MockFirestore()
+ fs._data = {'foo': {
+ 'first': {'id': 1},
+ 'second': {'id': 2}
+ }}
+ doc = fs.collection('foo').document('first')
+ results = list(fs.get_all([doc]))
+ returned_doc_snapshot = results[0].to_dict()
+ expected_doc_snapshot = doc.get().to_dict()
+ self.assertEqual(returned_doc_snapshot, expected_doc_snapshot)
+
+
diff --git a/tests/test_transaction.py b/tests/test_transaction.py
new file mode 100644
index 0000000..72031fa
--- /dev/null
+++ b/tests/test_transaction.py
@@ -0,0 +1,73 @@
+from unittest import TestCase
+from mockfirestore import MockFirestore, Transaction
+
+
+class TestTransaction(TestCase):
+ def setUp(self) -> None:
+ self.fs = MockFirestore()
+ self.fs._data = {'foo': {
+ 'first': {'id': 1},
+ 'second': {'id': 2}
+ }}
+
+ def test_transaction_getAll(self):
+ with Transaction(self.fs) as transaction:
+ transaction._begin()
+ docs = [self.fs.collection('foo').document(doc_name)
+ for doc_name in self.fs._data['foo']]
+ results = list(transaction.get_all(docs))
+ returned_docs_snapshots = [result.to_dict() for result in results]
+ expected_doc_snapshots = [doc.get().to_dict() for doc in docs]
+ for expected_snapshot in expected_doc_snapshots:
+ self.assertIn(expected_snapshot, returned_docs_snapshots)
+
+ def test_transaction_getDocument(self):
+ with Transaction(self.fs) as transaction:
+ transaction._begin()
+ doc = self.fs.collection('foo').document('first')
+ returned_doc = next(transaction.get(doc))
+ self.assertEqual(doc.get().to_dict(), returned_doc.to_dict())
+
+ def test_transaction_getQuery(self):
+ with Transaction(self.fs) as transaction:
+ transaction._begin()
+ query = self.fs.collection('foo').order_by('id')
+ returned_docs = [doc.to_dict() for doc in transaction.get(query)]
+ query = self.fs.collection('foo').order_by('id')
+ expected_docs = [doc.to_dict() for doc in query.stream()]
+ self.assertEqual(returned_docs, expected_docs)
+
+ def test_transaction_set_setsContentOfDocument(self):
+ doc_content = {'id': '3'}
+ doc_ref = self.fs.collection('foo').document('third')
+ with Transaction(self.fs) as transaction:
+ transaction._begin()
+ transaction.set(doc_ref, doc_content)
+ self.assertEqual(doc_ref.get().to_dict(), doc_content)
+
+ def test_transaction_set_mergeNewValue(self):
+ doc = self.fs.collection('foo').document('first')
+ with Transaction(self.fs) as transaction:
+ transaction._begin()
+ transaction.set(doc, {'updated': True}, merge=True)
+ updated_doc = {'id': 1, 'updated': True}
+ self.assertEqual(doc.get().to_dict(), updated_doc)
+
+ def test_transaction_update_changeExistingValue(self):
+ doc = self.fs.collection('foo').document('first')
+ with Transaction(self.fs) as transaction:
+ transaction._begin()
+ transaction.update(doc, {'updated': False})
+ updated_doc = {'id': 1, 'updated': False}
+ self.assertEqual(doc.get().to_dict(), updated_doc)
+
+ def test_transaction_delete_documentDoesNotExistAfterDelete(self):
+ doc = self.fs.collection('foo').document('first')
+ with Transaction(self.fs) as transaction:
+ transaction._begin()
+ transaction.delete(doc)
+ doc = self.fs.collection('foo').document('first').get()
+ self.assertEqual(False, doc.exists)
+
+
+
|
DocumentSnapshot.id missing
The Google Firestore client has an id property available on DocumentSnapshot. id seems to be set regardless of whether the document exists or not.
`id = client.collection('test').document('zzz').get().id`
id should be `'zzz'` in this example, but with mock-firestore, this raises `AttributeError: 'DocumentSnapshot' object has no attribute 'id'`.
|
0.0
|
f79c9b0034a39678be3ea1222c648d153f0d8206
|
[
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_create_time",
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_exists",
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_exists_documentDoesNotExist",
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_get_by_existing_field_path",
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_get_by_non_existing_field_path",
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_get_in_an_non_existing_document",
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_get_returns_a_copy_of_the_data_stored",
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_id",
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_read_time",
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_reference",
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_toDict",
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_toDict_isolation",
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_update_time",
"tests/test_mock_client.py::TestMockFirestore::test_client_get_all",
"tests/test_transaction.py::TestTransaction::test_transaction_delete_documentDoesNotExistAfterDelete",
"tests/test_transaction.py::TestTransaction::test_transaction_getAll",
"tests/test_transaction.py::TestTransaction::test_transaction_getDocument",
"tests/test_transaction.py::TestTransaction::test_transaction_getQuery",
"tests/test_transaction.py::TestTransaction::test_transaction_set_mergeNewValue",
"tests/test_transaction.py::TestTransaction::test_transaction_set_setsContentOfDocument",
"tests/test_transaction.py::TestTransaction::test_transaction_update_changeExistingValue"
] |
[] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-10-30 19:54:14+00:00
|
mit
| 3,845 |
|
mdowds__python-mock-firestore-28
|
diff --git a/mockfirestore/document.py b/mockfirestore/document.py
index 24c8a68..ccd3ba7 100644
--- a/mockfirestore/document.py
+++ b/mockfirestore/document.py
@@ -27,9 +27,15 @@ class DocumentSnapshot:
timestamp = Timestamp.from_now()
return timestamp
+ def get(self, field_path: str) -> Any:
+ if not self.exists:
+ return None
+ else:
+ return reduce(operator.getitem, field_path.split('.'), self._doc)
+
def _get_by_field_path(self, field_path: str) -> Any:
try:
- return reduce(operator.getitem, field_path.split('.'), self._doc)
+ return self.get(field_path)
except KeyError:
return None
|
mdowds/python-mock-firestore
|
3fe61b53ad0972d12ffc17e0bd22d262ec361789
|
diff --git a/tests/test_document_snapshot.py b/tests/test_document_snapshot.py
index 19cce06..7d60e4b 100644
--- a/tests/test_document_snapshot.py
+++ b/tests/test_document_snapshot.py
@@ -46,3 +46,45 @@ class TestDocumentSnapshot(TestCase):
doc_snapshot = doc_ref.get()
self.assertIs(doc_ref, doc_snapshot.reference)
+ def test_documentSnapshot_get_by_existing_field_path(self):
+ fs = MockFirestore()
+ fs._data = {'foo': {
+ 'first': {'id': 1, 'contact': {
+ 'email': '[email protected]'
+ }}
+ }}
+ doc = fs.collection('foo').document('first').get()
+ self.assertEqual(doc.get('contact.email'), '[email protected]')
+
+ def test_documentSnapshot_get_by_non_existing_field_path(self):
+ fs = MockFirestore()
+ fs._data = {'foo': {
+ 'first': {'id': 1, 'contact': {
+ 'email': '[email protected]'
+ }}
+ }}
+ doc = fs.collection('foo').document('first').get()
+ with self.assertRaises(KeyError):
+ doc.get('contact.phone')
+
+ def test_documentSnapshot_get_in_an_non_existing_document(self):
+ fs = MockFirestore()
+ fs._data = {'foo': {
+ 'first': {'id': 1, 'contact': {
+ 'email': '[email protected]'
+ }}
+ }}
+ doc = fs.collection('foo').document('second').get()
+ self.assertIsNone(doc.get('contact.email'))
+
+ def test_documentSnapshot_get_returns_a_copy_of_the_data_stored(self):
+ fs = MockFirestore()
+ fs._data = {'foo': {
+ 'first': {'id': 1, 'contact': {
+ 'email': '[email protected]'
+ }}
+ }}
+ doc = fs.collection('foo').document('first').get()
+ self.assertIsNot(
+ doc.get('contact'),fs._data['foo']['first']['contact']
+ )
|
DocumentSnapshot.get() missing
The DocumentSnapshot has a get function, which seems to be missing here. Large sections of our source code uses the get function. It would be great if this could be added.
|
0.0
|
3fe61b53ad0972d12ffc17e0bd22d262ec361789
|
[
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_get_by_existing_field_path",
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_get_by_non_existing_field_path",
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_get_in_an_non_existing_document",
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_get_returns_a_copy_of_the_data_stored"
] |
[
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_exists",
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_exists_documentDoesNotExist",
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_reference",
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_toDict",
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_toDict_isolation"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-08-02 20:13:29+00:00
|
mit
| 3,846 |
|
mdowds__python-mock-firestore-32
|
diff --git a/mockfirestore/document.py b/mockfirestore/document.py
index 24c8a68..ccd3ba7 100644
--- a/mockfirestore/document.py
+++ b/mockfirestore/document.py
@@ -27,9 +27,15 @@ class DocumentSnapshot:
timestamp = Timestamp.from_now()
return timestamp
+ def get(self, field_path: str) -> Any:
+ if not self.exists:
+ return None
+ else:
+ return reduce(operator.getitem, field_path.split('.'), self._doc)
+
def _get_by_field_path(self, field_path: str) -> Any:
try:
- return reduce(operator.getitem, field_path.split('.'), self._doc)
+ return self.get(field_path)
except KeyError:
return None
diff --git a/mockfirestore/query.py b/mockfirestore/query.py
index a17c12f..354df4b 100644
--- a/mockfirestore/query.py
+++ b/mockfirestore/query.py
@@ -103,7 +103,7 @@ class Query:
index = idx
else:
index = None
- if index:
+ if index is not None:
if before and start:
return islice(docs, index, None, None)
elif not before and start:
|
mdowds/python-mock-firestore
|
3fe61b53ad0972d12ffc17e0bd22d262ec361789
|
diff --git a/tests/test_collection_reference.py b/tests/test_collection_reference.py
index 0eeadaf..5507811 100644
--- a/tests/test_collection_reference.py
+++ b/tests/test_collection_reference.py
@@ -232,7 +232,7 @@ class TestCollectionReference(TestCase):
docs = list(fs.collection('foo').start_at({'id': 2}).stream())
self.assertEqual({'id': 2}, docs[0].to_dict())
self.assertEqual(2, len(docs))
-
+
def test_collection_start_at_order_by(self):
fs = MockFirestore()
fs._data = {'foo': {
@@ -251,9 +251,9 @@ class TestCollectionReference(TestCase):
'second': {'id': 2},
'third': {'id': 3}
}}
- docs = list(fs.collection('foo').start_after({'id': 2}).stream())
- self.assertEqual({'id': 3}, docs[0].to_dict())
- self.assertEqual(1, len(docs))
+ docs = list(fs.collection('foo').start_after({'id': 1}).stream())
+ self.assertEqual({'id': 2}, docs[0].to_dict())
+ self.assertEqual(2, len(docs))
def test_collection_start_after_order_by(self):
fs = MockFirestore()
@@ -298,7 +298,7 @@ class TestCollectionReference(TestCase):
docs = list(fs.collection('foo').end_at({'id': 2}).stream())
self.assertEqual({'id': 2}, docs[1].to_dict())
self.assertEqual(2, len(docs))
-
+
def test_collection_end_at_order_by(self):
fs = MockFirestore()
fs._data = {'foo': {
diff --git a/tests/test_document_snapshot.py b/tests/test_document_snapshot.py
index 19cce06..7d60e4b 100644
--- a/tests/test_document_snapshot.py
+++ b/tests/test_document_snapshot.py
@@ -46,3 +46,45 @@ class TestDocumentSnapshot(TestCase):
doc_snapshot = doc_ref.get()
self.assertIs(doc_ref, doc_snapshot.reference)
+ def test_documentSnapshot_get_by_existing_field_path(self):
+ fs = MockFirestore()
+ fs._data = {'foo': {
+ 'first': {'id': 1, 'contact': {
+ 'email': '[email protected]'
+ }}
+ }}
+ doc = fs.collection('foo').document('first').get()
+ self.assertEqual(doc.get('contact.email'), '[email protected]')
+
+ def test_documentSnapshot_get_by_non_existing_field_path(self):
+ fs = MockFirestore()
+ fs._data = {'foo': {
+ 'first': {'id': 1, 'contact': {
+ 'email': '[email protected]'
+ }}
+ }}
+ doc = fs.collection('foo').document('first').get()
+ with self.assertRaises(KeyError):
+ doc.get('contact.phone')
+
+ def test_documentSnapshot_get_in_an_non_existing_document(self):
+ fs = MockFirestore()
+ fs._data = {'foo': {
+ 'first': {'id': 1, 'contact': {
+ 'email': '[email protected]'
+ }}
+ }}
+ doc = fs.collection('foo').document('second').get()
+ self.assertIsNone(doc.get('contact.email'))
+
+ def test_documentSnapshot_get_returns_a_copy_of_the_data_stored(self):
+ fs = MockFirestore()
+ fs._data = {'foo': {
+ 'first': {'id': 1, 'contact': {
+ 'email': '[email protected]'
+ }}
+ }}
+ doc = fs.collection('foo').document('first').get()
+ self.assertIsNot(
+ doc.get('contact'),fs._data['foo']['first']['contact']
+ )
|
Query._apply_cursor is not working for first element
Hello,
Got a problem with `start_after`: after some digging I found that `Query._apply_cursor` is not working properly when the first doc is passed.
Let's take a look at problematic fragment:
```
for idx, doc in enumerate(doc_snapshot):
index = None
for k, v in document_fields.items():
if doc.to_dict().get(k, None) == v:
index = idx
else:
index = None
if index:
if before and start:
return islice(docs, index, None, None)
...
```
We search for the index of the matching document in the inner `for` loop, but if the found index equals 0 the code won't enter the `if index:` branch.
Following test will fail on current version:
```
def test_collection_start_after(self):
fs = MockFirestore()
fs._data = {'foo': {
'first': {'id': 1},
'second': {'id': 2},
'third': {'id': 3}
}}
docs = list(fs.collection('foo').start_after({'id': 1}).stream())
self.assertEqual({'id': 2}, docs[0].to_dict())
self.assertEqual(2, len(docs))
```
Solution:
In `_apply_cursor` add explicit check for None
`if index is not None:`
|
0.0
|
3fe61b53ad0972d12ffc17e0bd22d262ec361789
|
[
"tests/test_collection_reference.py::TestCollectionReference::test_collection_start_after",
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_get_by_existing_field_path",
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_get_by_non_existing_field_path",
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_get_in_an_non_existing_document",
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_get_returns_a_copy_of_the_data_stored"
] |
[
"tests/test_collection_reference.py::TestCollectionReference::test_collection_addDocument",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_end_at",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_end_at_order_by",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_end_before",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_end_before_order_by",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_get_collectionDoesNotExist",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_get_nestedCollection",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_get_nestedCollection_collectionDoesNotExist",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_get_ordersByAscendingDocumentId_byDefault",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_get_returnsDocuments",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_limit",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_limitAndOrderBy",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_listDocuments",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_offset",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_orderBy",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_orderBy_descending",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_orderby_offset",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_parent",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_start_after_order_by",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_start_at",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_start_at_order_by",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_stream",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereArrayContains",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereArrayContainsAny",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereEquals",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereGreaterThan",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereGreaterThanOrEqual",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereIn",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereLessThan",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereLessThanOrEqual",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereMissingField",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereNestedField",
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_exists",
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_exists_documentDoesNotExist",
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_reference",
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_toDict",
"tests/test_document_snapshot.py::TestDocumentSnapshot::test_documentSnapshot_toDict_isolation"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-08-27 14:32:56+00:00
|
mit
| 3,847 |
|
mdowds__python-mock-firestore-34
|
diff --git a/mockfirestore/query.py b/mockfirestore/query.py
index 354df4b..2e94f07 100644
--- a/mockfirestore/query.py
+++ b/mockfirestore/query.py
@@ -103,6 +103,7 @@ class Query:
index = idx
else:
index = None
+ break
if index is not None:
if before and start:
return islice(docs, index, None, None)
|
mdowds/python-mock-firestore
|
2e1ebb817ba98a5e34700940bdc285bf4c318662
|
diff --git a/tests/test_collection_reference.py b/tests/test_collection_reference.py
index 5507811..361e3e0 100644
--- a/tests/test_collection_reference.py
+++ b/tests/test_collection_reference.py
@@ -255,6 +255,18 @@ class TestCollectionReference(TestCase):
self.assertEqual({'id': 2}, docs[0].to_dict())
self.assertEqual(2, len(docs))
+ def test_collection_start_after_similar_objects(self):
+ fs = MockFirestore()
+ fs._data = {'foo': {
+ 'first': {'id': 1, 'value': 1},
+ 'second': {'id': 2, 'value': 2},
+ 'third': {'id': 3, 'value': 2},
+ 'fourth': {'id': 4, 'value': 3}
+ }}
+ docs = list(fs.collection('foo').order_by('id').start_after({'id': 3, 'value': 2}).stream())
+ self.assertEqual({'id': 4, 'value': 3}, docs[0].to_dict())
+ self.assertEqual(1, len(docs))
+
def test_collection_start_after_order_by(self):
fs = MockFirestore()
fs._data = {'foo': {
|
Query._apply_cursor is not working for similar objects
Following test will fail:
```
def test_collection_start_after_similar_objects(self):
fs = MockFirestore()
fs._data = {'foo': {
'first': {'id': 1, 'value': 1},
'second': {'id': 2, 'value': 2},
'third': {'id': 3, 'value': 2},
'fourth': {'id': 4, 'value': 3}
}}
docs = list(fs.collection('foo').order_by('id').start_after({'id': 3, 'value': 2}).stream())
self.assertEqual({'id': 4, 'value': 3}, docs[0].to_dict())
self.assertEqual(1, len(docs))
```
Only the last key in `document_fields` decides whether the index is set, because of a missing `break` in the search-compare loop inside `Query._apply_cursor`:
```
for idx, doc in enumerate(doc_snapshot):
index = None
for k, v in document_fields.items():
if doc.to_dict().get(k, None) == v:
index = idx
else:
index = None
# missing break
if index:
...
```
|
0.0
|
2e1ebb817ba98a5e34700940bdc285bf4c318662
|
[
"tests/test_collection_reference.py::TestCollectionReference::test_collection_start_after_similar_objects"
] |
[
"tests/test_collection_reference.py::TestCollectionReference::test_collection_addDocument",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_end_at",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_end_at_order_by",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_end_before",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_end_before_order_by",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_get_collectionDoesNotExist",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_get_nestedCollection",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_get_nestedCollection_collectionDoesNotExist",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_get_ordersByAscendingDocumentId_byDefault",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_get_returnsDocuments",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_limit",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_limitAndOrderBy",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_listDocuments",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_offset",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_orderBy",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_orderBy_descending",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_orderby_offset",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_parent",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_start_after",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_start_after_order_by",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_start_at",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_start_at_order_by",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_stream",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereArrayContains",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereArrayContainsAny",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereEquals",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereGreaterThan",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereGreaterThanOrEqual",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereIn",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereLessThan",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereLessThanOrEqual",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereMissingField",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereNestedField"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2020-08-27 15:29:11+00:00
|
mit
| 3,848 |
|
mdowds__python-mock-firestore-44
|
diff --git a/README.md b/README.md
index f4a7906..ea92013 100644
--- a/README.md
+++ b/README.md
@@ -31,6 +31,7 @@ mock_db.reset()
mock_db = MockFirestore()
# Collections
+mock_db.collections()
mock_db.collection('users')
mock_db.collection('users').get()
mock_db.collection('users').list_documents()
diff --git a/mockfirestore/client.py b/mockfirestore/client.py
index 07fd398..75943bd 100644
--- a/mockfirestore/client.py
+++ b/mockfirestore/client.py
@@ -1,4 +1,4 @@
-from typing import Iterable
+from typing import Iterable, Sequence
from mockfirestore.collection import CollectionReference
from mockfirestore.document import DocumentReference, DocumentSnapshot
from mockfirestore.transaction import Transaction
@@ -44,6 +44,9 @@ class MockFirestore:
self._data[name] = {}
return CollectionReference(self._data, [name])
+ def collections(self) -> Sequence[CollectionReference]:
+ return [CollectionReference(self._data, [collection_name]) for collection_name in self._data]
+
def reset(self):
self._data = {}
diff --git a/mockfirestore/query.py b/mockfirestore/query.py
index 4761a92..7a4618d 100644
--- a/mockfirestore/query.py
+++ b/mockfirestore/query.py
@@ -121,6 +121,8 @@ class Query:
def _compare_func(self, op: str) -> Callable[[T, T], bool]:
if op == '==':
return lambda x, y: x == y
+ elif op == '!=':
+ return lambda x, y: x != y
elif op == '<':
return lambda x, y: x < y
elif op == '<=':
|
mdowds/python-mock-firestore
|
d45d09d94c28ea0fa2a6840c48ba2f965be8d2b7
|
diff --git a/tests/test_collection_reference.py b/tests/test_collection_reference.py
index 59397be..ed34c8b 100644
--- a/tests/test_collection_reference.py
+++ b/tests/test_collection_reference.py
@@ -81,6 +81,16 @@ class TestCollectionReference(TestCase):
docs = list(fs.collection('foo').where('valid', '==', True).stream())
self.assertEqual({'valid': True}, docs[0].to_dict())
+ def test_collection_whereNotEquals(self):
+ fs = MockFirestore()
+ fs._data = {'foo': {
+ 'first': {'count': 1},
+ 'second': {'count': 5}
+ }}
+
+ docs = list(fs.collection('foo').where('count', '!=', 1).stream())
+ self.assertEqual({'count': 5}, docs[0].to_dict())
+
def test_collection_whereLessThan(self):
fs = MockFirestore()
fs._data = {'foo': {
diff --git a/tests/test_mock_client.py b/tests/test_mock_client.py
index 227c5c7..7f99414 100644
--- a/tests/test_mock_client.py
+++ b/tests/test_mock_client.py
@@ -16,4 +16,18 @@ class TestMockFirestore(TestCase):
expected_doc_snapshot = doc.get().to_dict()
self.assertEqual(returned_doc_snapshot, expected_doc_snapshot)
+ def test_client_collections(self):
+ fs = MockFirestore()
+ fs._data = {
+ 'foo': {
+ 'first': {'id': 1},
+ 'second': {'id': 2}
+ },
+ 'bar': {}
+ }
+ collections = fs.collections()
+ expected_collections = fs._data
+ self.assertEqual(len(collections), len(expected_collections))
+ for collection in collections:
+ self.assertTrue(collection._path[0] in expected_collections)
|
`!=` operator is not supported in queries
Firestore supports `!=` operators now: https://firebase.google.com/docs/firestore/query-data/queries#query_operators
I get the following error (which goes away if I use the `==` operator instead).
```
doc_snapshots = [doc_snapshot for doc_snapshot in doc_snapshots
> if compare(doc_snapshot._get_by_field_path(field), value)]
E TypeError: 'NoneType' object is not callable
```
I believe a simple condition just needs to be added here in `compare_func`: https://github.com/mdowds/python-mock-firestore/blob/master/mockfirestore/query.py#L122
|
0.0
|
d45d09d94c28ea0fa2a6840c48ba2f965be8d2b7
|
[
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereNotEquals",
"tests/test_mock_client.py::TestMockFirestore::test_client_collections"
] |
[
"tests/test_collection_reference.py::TestCollectionReference::test_collection_addDocument",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_end_at",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_end_at_doc_snapshot",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_end_at_order_by",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_end_before",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_end_before_doc_snapshot",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_end_before_order_by",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_get_collectionDoesNotExist",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_get_nestedCollection",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_get_nestedCollection_by_path",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_get_nestedCollection_by_path_collectionDoesNotExist",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_get_nestedCollection_collectionDoesNotExist",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_get_ordersByAscendingDocumentId_byDefault",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_get_returnsDocuments",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_limit",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_limitAndOrderBy",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_listDocuments",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_offset",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_orderBy",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_orderBy_descending",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_orderby_offset",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_parent",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_start_after",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_start_after_doc_snapshot",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_start_after_order_by",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_start_after_similar_objects",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_start_at",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_start_at_doc_snapshot",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_start_at_order_by",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_stream",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereArrayContains",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereArrayContainsAny",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereEquals",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereGreaterThan",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereGreaterThanOrEqual",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereIn",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereLessThan",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereLessThanOrEqual",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereMissingField",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereNestedField",
"tests/test_mock_client.py::TestMockFirestore::test_client_get_all"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-06-15 01:36:46+00:00
|
mit
| 3,849 |
|
mdowds__python-mock-firestore-6
|
diff --git a/mockfirestore/main.py b/mockfirestore/main.py
index b660af3..f028129 100644
--- a/mockfirestore/main.py
+++ b/mockfirestore/main.py
@@ -1,4 +1,6 @@
import operator
+import random
+import string
from collections import OrderedDict
from functools import reduce
from itertools import islice
@@ -28,11 +30,21 @@ class DocumentReference:
self._data = data
self._path = path
+ @property
+ def id(self):
+ return self._path[-1]
+
def get(self) -> DocumentSnapshot:
return DocumentSnapshot(get_by_path(self._data, self._path))
- def set(self, data: Document):
- set_by_path(self._data, self._path, data)
+ def delete(self):
+ delete_by_path(self._data, self._path)
+
+ def set(self, data: Dict, merge=False):
+ if merge:
+ self.update(data)
+ else:
+ set_by_path(self._data, self._path, data)
def update(self, data: Dict[str, Any]):
get_by_path(self._data, self._path).update(data)
@@ -86,8 +98,10 @@ class CollectionReference:
self._data = data
self._path = path
- def document(self, name: str) -> DocumentReference:
+ def document(self, name: Optional[str] = None) -> DocumentReference:
collection = get_by_path(self._data, self._path)
+ if name is None:
+ name = generate_random_string()
new_path = self._path + [name]
if name not in collection:
set_by_path(self._data, new_path, {})
@@ -109,6 +123,12 @@ class CollectionReference:
collection = get_by_path(self._data, self._path)
return Query(collection).limit(limit_amount)
+ def list_documents(self, page_size: Optional[int] = None) -> Sequence[DocumentReference]:
+ docs = []
+ for key in get_by_path(self._data, self._path):
+ docs.append(self.document(key))
+ return docs
+
class MockFirestore:
@@ -132,3 +152,12 @@ def get_by_path(data: Dict[str, T], path: Sequence[str]) -> T:
def set_by_path(data: Dict[str, T], path: Sequence[str], value: T):
"""Set a value in a nested object in root by item sequence."""
get_by_path(data, path[:-1])[path[-1]] = value
+
+
+def delete_by_path(data: Dict[str, T], path: Sequence[str]):
+ """Delete a value in a nested object in root by item sequence."""
+ del get_by_path(data, path[:-1])[path[-1]]
+
+
+def generate_random_string():
+ return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20))
|
mdowds/python-mock-firestore
|
590a1684c7c073879d74240685fe5a304afacfdd
|
diff --git a/tests/test_collection_reference.py b/tests/test_collection_reference.py
index 0d23227..7e14e1f 100644
--- a/tests/test_collection_reference.py
+++ b/tests/test_collection_reference.py
@@ -1,6 +1,6 @@
from unittest import TestCase
-from mockfirestore import MockFirestore
+from mockfirestore import MockFirestore, DocumentReference
class TestCollectionReference(TestCase):
@@ -146,3 +146,15 @@ class TestCollectionReference(TestCase):
docs = list(fs.collection('foo').order_by('order').limit(2).get())
self.assertEqual({'order': 1}, docs[0].to_dict())
self.assertEqual({'order': 2}, docs[1].to_dict())
+
+ def test_collection_listDocuments(self):
+ fs = MockFirestore()
+ fs._data = {'foo': {
+ 'first': {'order': 2},
+ 'second': {'order': 1},
+ 'third': {'order': 3}
+ }}
+ doc_refs = list(fs.collection('foo').list_documents())
+ self.assertEqual(3, len(doc_refs))
+ for doc_ref in doc_refs:
+ self.assertIsInstance(doc_ref, DocumentReference)
diff --git a/tests/test_document_reference.py b/tests/test_document_reference.py
index e1eecfc..3e5e7e5 100644
--- a/tests/test_document_reference.py
+++ b/tests/test_document_reference.py
@@ -12,6 +12,21 @@ class TestDocumentReference(TestCase):
doc = fs.collection('foo').document('first').get().to_dict()
self.assertEqual({'id': 1}, doc)
+ def test_document_get_documentIdEqualsKey(self):
+ fs = MockFirestore()
+ fs._data = {'foo': {
+ 'first': {'id': 1}
+ }}
+ doc_ref = fs.collection('foo').document('first')
+ self.assertEqual('first', doc_ref.id)
+
+ def test_document_get_newDocumentReturnsDefaultId(self):
+ fs = MockFirestore()
+ doc_ref = fs.collection('foo').document()
+ doc = doc_ref.get()
+ self.assertNotEqual(None, doc_ref.id)
+ self.assertFalse(doc.exists)
+
def test_document_get_documentDoesNotExist(self):
fs = MockFirestore()
fs._data = {'foo': {}}
@@ -60,6 +75,24 @@ class TestDocumentReference(TestCase):
doc = fs.collection('foo').document('bar').get().to_dict()
self.assertEqual(doc_content, doc)
+ def test_document_set_mergeNewValue(self):
+ fs = MockFirestore()
+ fs._data = {'foo': {
+ 'first': {'id': 1}
+ }}
+ fs.collection('foo').document('first').set({'updated': True}, merge=True)
+ doc = fs.collection('foo').document('first').get().to_dict()
+ self.assertEqual({'id': 1, 'updated': True}, doc)
+
+ def test_document_set_overwriteValue(self):
+ fs = MockFirestore()
+ fs._data = {'foo': {
+ 'first': {'id': 1}
+ }}
+ fs.collection('foo').document('first').set({'new_id': 1}, merge=False)
+ doc = fs.collection('foo').document('first').get().to_dict()
+ self.assertEqual({'new_id': 1}, doc)
+
def test_document_update_addNewValue(self):
fs = MockFirestore()
fs._data = {'foo': {
@@ -77,3 +110,12 @@ class TestDocumentReference(TestCase):
fs.collection('foo').document('first').update({'id': 2})
doc = fs.collection('foo').document('first').get().to_dict()
self.assertEqual({'id': 2}, doc)
+
+ def test_document_delete_documentDoesNotExistAfterDelete(self):
+ fs = MockFirestore()
+ fs._data = {'foo': {
+ 'first': {'id': 1}
+ }}
+ fs.collection('foo').document('first').delete()
+ doc = fs.collection('foo').document('first').get()
+ self.assertEqual(False, doc.exists)
|
Deleting documents
Hello again!
I also noticed that the delete() function for `DocumentReference` wasn't added.
I added the following to `main.py` as a quick fix but would love to hear your thoughts:
```
def delete(self):
get_by_path(self._data, self._path).clear()
```
I also tried:
```
def delete(self):
self.set({})
```
They both seemed to have similar behavior and passed my unit tests
|
0.0
|
590a1684c7c073879d74240685fe5a304afacfdd
|
[
"tests/test_collection_reference.py::TestCollectionReference::test_collection_listDocuments",
"tests/test_document_reference.py::TestDocumentReference::test_document_delete_documentDoesNotExistAfterDelete",
"tests/test_document_reference.py::TestDocumentReference::test_document_get_documentIdEqualsKey",
"tests/test_document_reference.py::TestDocumentReference::test_document_get_newDocumentReturnsDefaultId",
"tests/test_document_reference.py::TestDocumentReference::test_document_set_mergeNewValue",
"tests/test_document_reference.py::TestDocumentReference::test_document_set_overwriteValue"
] |
[
"tests/test_collection_reference.py::TestCollectionReference::test_collection_get_collectionDoesNotExist",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_get_nestedCollection",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_get_nestedCollection_collectionDoesNotExist",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_get_ordersByAscendingDocumentId_byDefault",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_get_returnsDocuments",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_limit",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_limitAndOrderBy",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_orderBy",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_orderBy_descending",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereEquals",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereGreaterThan",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereGreaterThanOrEqual",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereLessThan",
"tests/test_collection_reference.py::TestCollectionReference::test_collection_whereLessThanOrEqual",
"tests/test_document_reference.py::TestDocumentReference::test_document_get_documentDoesNotExist",
"tests/test_document_reference.py::TestDocumentReference::test_document_get_returnsDocument",
"tests/test_document_reference.py::TestDocumentReference::test_document_set_setsContentOfDocument",
"tests/test_document_reference.py::TestDocumentReference::test_document_update_addNewValue",
"tests/test_document_reference.py::TestDocumentReference::test_document_update_changeExistingValue",
"tests/test_document_reference.py::TestDocumentReference::test_get_nestedDocument",
"tests/test_document_reference.py::TestDocumentReference::test_get_nestedDocument_documentDoesNotExist"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-06-16 12:07:47+00:00
|
mit
| 3,850 |
|
medmunds__aws-cfn-ses-domain-11
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index f6726bc..dda2185 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -21,6 +21,11 @@
* Support using the [`Region`](README.md#region) property to provision an Amazon SES
domain in a different region from where you're running your CloudFormation stack.
(Thanks to @gfodor.)
+
+* Fix incorrect handling of `EnableSend: false` and other potential problems with
+ Boolean properties, by working around CloudFormation's non-standard YAML parsing.
+ (Thanks to @aajtodd.)
+
### Features
diff --git a/aws_cfn_ses_domain/ses_domain_identity.py b/aws_cfn_ses_domain/ses_domain_identity.py
index d57d77f..623558e 100644
--- a/aws_cfn_ses_domain/ses_domain_identity.py
+++ b/aws_cfn_ses_domain/ses_domain_identity.py
@@ -8,8 +8,7 @@ import boto3
from botocore.exceptions import BotoCoreError, ClientError
from .cfnresponse import FAILED, SUCCESS, send
-from .utils import format_arn
-
+from .utils import format_arn, to_bool
logger = logging.getLogger()
logger.setLevel(os.getenv("LOG_LEVEL", "WARNING"))
@@ -24,6 +23,7 @@ DEFAULT_PROPERTIES = {
"TTL": "1800",
"Region": os.getenv("AWS_REGION"), # where the stack (lambda fn) is running
}
+BOOLEAN_PROPERTIES = ("EnableSend", "EnableReceive")
def handle_domain_identity_request(event, context):
@@ -52,6 +52,17 @@ def handle_domain_identity_request(event, context):
resource_type="identity", resource_name=domain,
defaults_from=event["StackId"]) # current stack's ARN has account and partition
+ for prop in BOOLEAN_PROPERTIES:
+ # CloudFormation may convert YAML/JSON bools to strings, so reverse that
+ # https://github.com/medmunds/aws-cfn-ses-domain/issues/10
+ try:
+ properties[prop] = to_bool(properties[prop])
+ except ValueError:
+ return send(event, context, FAILED,
+ reason=f"The '{prop}' property must be 'true' or 'false',"
+ f" not '{properties[prop]}'.",
+ physical_resource_id=domain_arn)
+
if event["RequestType"] == "Delete" and event["PhysicalResourceId"] == domain:
# v0.3 backwards compatibility:
# Earlier versions used just the domain as the PhysicalResourceId.
diff --git a/aws_cfn_ses_domain/utils.py b/aws_cfn_ses_domain/utils.py
index a4787eb..a4937ee 100644
--- a/aws_cfn_ses_domain/utils.py
+++ b/aws_cfn_ses_domain/utils.py
@@ -25,3 +25,34 @@ def format_arn(partition=None, service=None, region=None, account=None,
resource = resource if resource is not None else _resource
return f"arn:{partition}:{service}:{region}:{account}:{resource}"
+
+
+def to_bool(val):
+ """Convert val to True or False.
+
+ Converts 'true' (case-insensitive) and 1, '1', or True to True.
+ Converts 'false', 'null' or 'none' (case-insensitive), the empty string '',
+ and 0, '0', or False to False.
+ Raises a ValueError for any other input.
+
+ >>> to_bool('true')
+ True
+ >>> to_bool('False')
+ False
+ >>> to_bool(0)
+ False
+ >>> to_bool('0')
+ False
+ >>> to_bool(None)
+ False
+ >>> to_bool('yes')
+ ValueError("Invalid boolean value 'yes'")
+ """
+ # (Loosely adapted from distutils.util.strtobool)
+ strval = str(val).lower()
+ if strval in ('true', '1'):
+ return True
+ elif strval in ('false', '0', 'null', 'none', ''):
+ return False
+ else:
+ raise ValueError(f"Invalid boolean value {val!r}")
|
medmunds/aws-cfn-ses-domain
|
5ec77ea37475c5e5790aafd8060317e3419f9cad
|
diff --git a/tests/base.py b/tests/base.py
index 98cdc61..ad2c04f 100644
--- a/tests/base.py
+++ b/tests/base.py
@@ -1,4 +1,4 @@
-from unittest.case import TestCase
+from unittest import TestCase
from unittest.mock import patch, ANY as MOCK_ANY
import boto3
diff --git a/tests/test_ses_domain_identity.py b/tests/test_ses_domain_identity.py
index 9664a39..7396429 100644
--- a/tests/test_ses_domain_identity.py
+++ b/tests/test_ses_domain_identity.py
@@ -98,8 +98,8 @@ class TestDomainIdentityHandler(HandlerTestCase):
"RequestType": "Create",
"ResourceProperties": {
"Domain": "example.com.",
- "EnableSend": True,
- "EnableReceive": True,
+ "EnableSend": "true",
+ "EnableReceive": "true",
"MailFromSubdomain": "bounce",
"CustomDMARC": '"v=DMARC1; p=quarantine; rua=mailto:[email protected];"',
"TTL": "300",
@@ -166,8 +166,8 @@ class TestDomainIdentityHandler(HandlerTestCase):
"RequestType": "Update",
"ResourceProperties": {
"Domain": "example.com.",
- "EnableSend": False,
- "EnableReceive": True,
+ "EnableSend": "false",
+ "EnableReceive": "true",
"CustomDMARC": None,
},
"StackId": self.mock_stack_id}
@@ -208,8 +208,8 @@ class TestDomainIdentityHandler(HandlerTestCase):
"PhysicalResourceId": "arn:aws:ses:mock-region:111111111111:identity/example.com",
"ResourceProperties": {
"Domain": "example.com.",
- "EnableSend": True,
- "EnableReceive": True,
+ "EnableSend": "true",
+ "EnableReceive": "true",
},
"StackId": self.mock_stack_id}
self.ses_stubber.add_response(
@@ -244,8 +244,8 @@ class TestDomainIdentityHandler(HandlerTestCase):
"PhysicalResourceId": "example.com", # old format: just the domain
"ResourceProperties": {
"Domain": "example.com.",
- "EnableSend": True,
- "EnableReceive": True,
+ "EnableSend": "true",
+ "EnableReceive": "true",
},
"StackId": self.mock_stack_id}
# self.ses_stubber.nothing: *no* SES ops should occur
@@ -280,3 +280,17 @@ class TestDomainIdentityHandler(HandlerTestCase):
'ERROR:root:Error updating SES: An error occurred (InvalidParameterValue) when'
' calling the VerifyDomainIdentity operation: Invalid domain name bad domain name.',
cm.output[0])
+
+ def test_invalid_boolean_property(self):
+ event = {
+ "RequestType": "Create",
+ "ResourceProperties": {
+ "Domain": "example.com",
+ "EnableSend": "yes",
+ },
+ "StackId": self.mock_stack_id}
+ handle_domain_identity_request(event, self.mock_context)
+ self.assertSentResponse(
+ event, status="FAILED",
+ reason="The 'EnableSend' property must be 'true' or 'false', not 'yes'.",
+ physical_resource_id=MOCK_ANY)
diff --git a/tests/test_utils.py b/tests/test_utils.py
new file mode 100644
index 0000000..2b49ed2
--- /dev/null
+++ b/tests/test_utils.py
@@ -0,0 +1,41 @@
+from unittest import TestCase
+
+from aws_cfn_ses_domain.utils import to_bool
+
+
+class TestToBool(TestCase):
+ TRUE_VALUES = (
+ 'true', 'True', 'TRUE', 'tRuE',
+ 1, '1',
+ True,
+ )
+
+ FALSE_VALUES = (
+ 'false', 'False', 'FALSE', 'fAlSe',
+ 0, '0',
+ None, 'None',
+ 'null', # JSON's None as a string
+ '', # empty string
+ False,
+ )
+
+ INVALID_VALUES = (
+ 'yes', 'no', 't', 'f', ' ',
+ 100, -1, 0.5,
+ )
+
+ def test_true(self):
+ for value in self.TRUE_VALUES:
+ with self.subTest(value=value):
+ self.assertIs(to_bool(value), True)
+
+ def test_false(self):
+ for value in self.FALSE_VALUES:
+ with self.subTest(value=value):
+ self.assertIs(to_bool(value), False)
+
+ def test_invalid(self):
+ for value in self.INVALID_VALUES:
+ with self.subTest(value=value):
+ with self.assertRaises(ValueError):
+ to_bool(value)
|
Boolean Options are not handled correctly
## Summary
The lambda function does not handle `EnableSend` or `EnableReceive` properly.
The `true`/`false` values are converted to textual `"true"`/`"false"` not actual Python boolean values.
You can see this in the log output of the lambda function:
```
Expanded properties to {'Domain': 'example.com, 'EnableSend': 'false', 'EnableReceive': 'true', 'MailFromSubdomain': 'mail', 'CustomDMARC': '', 'TTL': '1800', 'Region': 'us-east-1'}
```
Note I removed the service token and obviously changed the domain name in the output.
## Steps to reproduce
1. Create `test.cf.yaml`:
```yaml
SESDomain:
Type: Custom::SES_Domain
Properties:
ServiceToken: !GetAtt CfnSESDomain.Outputs.Arn
Domain: "example.com"
EnableSend: false
EnableReceive: true
MailFromSubdomain: "mail"
TTL: "1800"
CustomDMARC: ''
Region: !Ref "AWS::Region"
```
2. Run `aws cloudformation deploy --capabilities CAPABILITY_IAM --template-file test.cf.yaml ...`
3. Check stack status in CloudFormation dashboard
### Results
SES outbound sending was enabled.
### Expected results
SES outbound sending should not be enabled.
## System
* aws-cfn-ses-domain version: 0.2
* aws cli version: aws-cli/1.16.159 Python/3.7.3 Darwin/17.7.0 botocore/1.12.155
* OS: Mac
## Additional Info
The unit tests seem to cover this case but they use Python boolean values in the parameters which is not how the parameters are actually passed to the function at runtime.
Also thanks, nice library overall.
|
0.0
|
5ec77ea37475c5e5790aafd8060317e3419f9cad
|
[
"tests/test_ses_domain_identity.py::TestDomainIdentityHandler::test_boto_error",
"tests/test_ses_domain_identity.py::TestDomainIdentityHandler::test_create_all_options",
"tests/test_ses_domain_identity.py::TestDomainIdentityHandler::test_create_default",
"tests/test_ses_domain_identity.py::TestDomainIdentityHandler::test_delete",
"tests/test_ses_domain_identity.py::TestDomainIdentityHandler::test_domain_required",
"tests/test_ses_domain_identity.py::TestDomainIdentityHandler::test_invalid_boolean_property",
"tests/test_ses_domain_identity.py::TestDomainIdentityHandler::test_non_empty_domain_required",
"tests/test_ses_domain_identity.py::TestDomainIdentityHandler::test_update_receive_only",
"tests/test_ses_domain_identity.py::TestDomainIdentityHandler::test_v0_3_physical_id_change",
"tests/test_utils.py::TestToBool::test_false",
"tests/test_utils.py::TestToBool::test_invalid",
"tests/test_utils.py::TestToBool::test_true"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-01-07 21:39:32+00:00
|
apache-2.0
| 3,851 |
|
meejah__txtorcon-390
|
diff --git a/docs/releases.rst b/docs/releases.rst
index ea32618..517a41c 100644
--- a/docs/releases.rst
+++ b/docs/releases.rst
@@ -18,6 +18,7 @@ See also :ref:`api_stability`.
`git main <https://github.com/meejah/txtorcon>`_ *will likely become v23.6.0*
* Fix test-failures on Python 3.12
+ * Particular GETINFO hanging (`#389 <https://github.com/meejah/txtorcon/issues/389>`_)
v23.5.0
diff --git a/txtorcon/torcontrolprotocol.py b/txtorcon/torcontrolprotocol.py
index 887bd0a..e882160 100644
--- a/txtorcon/torcontrolprotocol.py
+++ b/txtorcon/torcontrolprotocol.py
@@ -232,6 +232,12 @@ class TorControlProtocol(LineOnlyReceiver):
:class:`txtorcon.TorState`, which is also the place to go if you
wish to add your own stream or circuit listeners.
"""
+ # override Twisted's LineOnlyReceiver maximum line-length. At
+ # least "GETINFO md/id/X" for some Xse exceeds 16384 (2**14, the
+ # default) and thus causes the control connection to
+ # fail. control.c defines MAX_COMMAND_LINE_LENGTH as 1024*1024 so
+ # we use that
+ MAX_LENGTH = 2 ** 20
def __init__(self, password_function=None):
"""
@@ -274,11 +280,6 @@ class TorControlProtocol(LineOnlyReceiver):
:func:`when_disconnected` instead)
"""
- self._when_disconnected = SingleObserver()
- """
- Internal use. A :class:`SingleObserver` for when_disconnected()
- """
-
self._when_disconnected = SingleObserver()
"""
Private. See :func:`.when_disconnected`
@@ -356,7 +357,7 @@ class TorControlProtocol(LineOnlyReceiver):
self.stop_debug()
def start_debug(self):
- self.debuglog = open('txtorcon-debug.log', 'w')
+ self.debuglog = open('txtorcon-debug.log', 'wb')
def stop_debug(self):
def noop(*args, **kw):
@@ -692,10 +693,14 @@ class TorControlProtocol(LineOnlyReceiver):
def connectionLost(self, reason):
"Protocol API"
txtorlog.msg('connection terminated: ' + str(reason))
- if reason.check(ConnectionDone):
- self._when_disconnected.fire(self)
- else:
- self._when_disconnected.fire(reason)
+ self._when_disconnected.fire(
+ Failure(
+ TorDisconnectError(
+ text="Tor connection terminated",
+ error=reason,
+ )
+ )
+ )
# ...and this is why we don't do on_disconnect = Deferred() :(
# and instead should have had on_disconnect() method that
@@ -712,8 +717,10 @@ class TorControlProtocol(LineOnlyReceiver):
else:
self.on_disconnect.errback(reason)
self.on_disconnect = None
- self._when_disconnected.fire(self)
+
outstanding = [self.command] + self.commands if self.command else self.commands
+ self.command = None
+ self.defer = None
for d, cmd, cmd_arg in outstanding:
if not d.called:
d.errback(
@@ -754,6 +761,10 @@ class TorControlProtocol(LineOnlyReceiver):
if len(self.commands):
self.command = self.commands.pop(0)
(d, cmd, cmd_arg) = self.command
+
+ if self._when_disconnected.already_fired(d):
+ return
+
self.defer = d
self.debuglog.write(cmd + b'\n')
diff --git a/txtorcon/util.py b/txtorcon/util.py
index 4b772e3..406a0f5 100644
--- a/txtorcon/util.py
+++ b/txtorcon/util.py
@@ -473,6 +473,19 @@ class SingleObserver(object):
self._observers = []
self._fired = self._NotFired
+ def has_fired(self):
+ return self._fired is not self._NotFired
+
+ def already_fired(self, d):
+ """
+ If we have already fired, callback `d` with our result.
+ :returns bool: True if we already fired, False otherwise
+ """
+ if self.has_fired():
+ d.callback(self._fired)
+ return True
+ return False
+
def when_fired(self):
d = defer.Deferred()
if self._fired is not self._NotFired:
|
meejah/txtorcon
|
c0c98ff4bb888b9e1e2b5b53e6a0ce5a8be3ba69
|
diff --git a/test/test_torcontrolprotocol.py b/test/test_torcontrolprotocol.py
index 23ddeec..e15bdf0 100644
--- a/test/test_torcontrolprotocol.py
+++ b/test/test_torcontrolprotocol.py
@@ -226,7 +226,7 @@ class DisconnectionTests(unittest.TestCase):
it_was_called.yes = False
d = self.protocol.when_disconnected()
- d.addCallback(it_was_called)
+ d.addBoth(it_was_called)
f = failure.Failure(error.ConnectionDone("It's all over"))
self.protocol.connectionLost(f)
self.assertTrue(it_was_called.yes)
@@ -284,6 +284,31 @@ class DisconnectionTests(unittest.TestCase):
self.protocol.connectionLost(f)
self.assertEqual(it_was_called.count, 2)
+ def test_disconnect_subsequent_commands(self):
+ """
+ commands issued after disconnect should errback
+ """
+
+ def it_was_called(f):
+ str(f)
+ it_was_called.count += 1
+ return None
+ it_was_called.count = 0
+
+ # one outstanding command
+ d0 = self.protocol.queue_command("some command0")
+ d0.addErrback(it_was_called)
+ self.protocol.on_disconnect.addErrback(lambda _: None)
+
+ f = failure.Failure(RuntimeError("The thing didn't do the stuff."))
+ self.protocol.connectionLost(f)
+
+ # one command issued _after_ we've disconnected
+ d1 = self.protocol.queue_command("some command1")
+ d1.addErrback(it_was_called)
+
+ self.assertEqual(it_was_called.count, 2)
+
class ProtocolTests(unittest.TestCase):
|
When tor process exits unexpectedly tor.protocol.get_info hangs
Here is a minimal reproduction for this issue:
```python
from twisted.internet.task import react
from twisted.internet.defer import ensureDeferred
import txtorcon
EXIT_RELAY_FP = [
'130CFCF38BA3327E3001A1DB2A4B5ACBDAE248D9', # this triggers the getinfo fail
'127F6358F68FFB7E437DBA51D6D4DAC47B9F78A7',
]
async def main(reactor):
try:
tor = await txtorcon.launch(
reactor,
kill_on_stderr=False,
progress_updates=lambda x, y, z: print(f"{x}%: {y} - {z}"),
)
except Exception as exc:
print(f"FAILED to start tor {exc}")
return
state = await tor.create_state()
for exit_fp in EXIT_RELAY_FP:
print(f"doing {exit_fp}")
try:
print("calling GETINFO")
info = await tor.protocol.get_info("md/id/" + exit_fp)
print(f"got {info}")
except Exception as exc:
print(f"FAILED to get info for {exit_fp} {exc}")
@react
def _main(reactor):
return ensureDeferred(main(reactor))
```
You can see that the last log lines are:
```
doing 130CFCF38BA3327E3001A1DB2A4B5ACBDAE248D9
calling GETINFO
FAILED to get info for 130CFCF38BA3327E3001A1DB2A4B5ACBDAE248D9 Tor unexpectedly disconnected while running: GETINFO md/id/130CFCF38BA3327E3001A1DB2A4B5ACBDAE248D9
doing 127F6358F68FFB7E437DBA51D6D4DAC47B9F78A7
calling GETINFO
Unhandled Error
Traceback (most recent call last):
Failure: builtins.RuntimeError: Tor exited with error-code 0
```
Where we are waiting on the `tor.protocol.get_info("md/id/" + exit_fp)` call.
I would expect to either get an errback, because the process is dead or have some other way to tell that I should not be calling it.
It would also be nice to be able to somehow know that tor has exited this way, as it's currently not possible to listen for the "tor exited" event.
This probably is also a tor bug, as it should not happen that the tor process exits when issuing this specific get_info command.
|
0.0
|
c0c98ff4bb888b9e1e2b5b53e6a0ce5a8be3ba69
|
[
"test/test_torcontrolprotocol.py::DisconnectionTests::test_disconnect_subsequent_commands"
] |
[
"test/test_torcontrolprotocol.py::InterfaceTests::test_implements",
"test/test_torcontrolprotocol.py::InterfaceTests::test_object_implements",
"test/test_torcontrolprotocol.py::LogicTests::test_set_conf_wrong_args",
"test/test_torcontrolprotocol.py::FactoryTests::test_create",
"test/test_torcontrolprotocol.py::AuthenticationTests::test_authenticate_cookie",
"test/test_torcontrolprotocol.py::AuthenticationTests::test_authenticate_no_password",
"test/test_torcontrolprotocol.py::AuthenticationTests::test_authenticate_null",
"test/test_torcontrolprotocol.py::AuthenticationTests::test_authenticate_password",
"test/test_torcontrolprotocol.py::AuthenticationTests::test_authenticate_password_deferred",
"test/test_torcontrolprotocol.py::AuthenticationTests::test_authenticate_password_deferred_but_no_password",
"test/test_torcontrolprotocol.py::AuthenticationTests::test_authenticate_password_not_bytes",
"test/test_torcontrolprotocol.py::DisconnectionTests::test_disconnect_callback",
"test/test_torcontrolprotocol.py::DisconnectionTests::test_disconnect_errback",
"test/test_torcontrolprotocol.py::DisconnectionTests::test_disconnect_outstanding_commands",
"test/test_torcontrolprotocol.py::DisconnectionTests::test_when_disconnect",
"test/test_torcontrolprotocol.py::DisconnectionTests::test_when_disconnect_error",
"test/test_torcontrolprotocol.py::ProtocolTests::test_650_after_authenticate",
"test/test_torcontrolprotocol.py::ProtocolTests::test_addevent",
"test/test_torcontrolprotocol.py::ProtocolTests::test_async",
"test/test_torcontrolprotocol.py::ProtocolTests::test_async_multiline",
"test/test_torcontrolprotocol.py::ProtocolTests::test_authenticate_cookie_without_reading",
"test/test_torcontrolprotocol.py::ProtocolTests::test_authenticate_dont_send_cookiefile",
"test/test_torcontrolprotocol.py::ProtocolTests::test_authenticate_fail",
"test/test_torcontrolprotocol.py::ProtocolTests::test_authenticate_no_auth_line",
"test/test_torcontrolprotocol.py::ProtocolTests::test_authenticate_not_enough_cookie_data",
"test/test_torcontrolprotocol.py::ProtocolTests::test_authenticate_not_enough_safecookie_data",
"test/test_torcontrolprotocol.py::ProtocolTests::test_authenticate_password_when_cookie_unavailable",
"test/test_torcontrolprotocol.py::ProtocolTests::test_authenticate_password_when_safecookie_unavailable",
"test/test_torcontrolprotocol.py::ProtocolTests::test_authenticate_safecookie",
"test/test_torcontrolprotocol.py::ProtocolTests::test_authenticate_safecookie_wrong_hash",
"test/test_torcontrolprotocol.py::ProtocolTests::test_authenticate_unexisting_cookie_file",
"test/test_torcontrolprotocol.py::ProtocolTests::test_authenticate_unexisting_safecookie_file",
"test/test_torcontrolprotocol.py::ProtocolTests::test_bootstrap_callback",
"test/test_torcontrolprotocol.py::ProtocolTests::test_bootstrap_tor_does_not_support_signal_names",
"test/test_torcontrolprotocol.py::ProtocolTests::test_continuation_line",
"test/test_torcontrolprotocol.py::ProtocolTests::test_debug",
"test/test_torcontrolprotocol.py::ProtocolTests::test_dot",
"test/test_torcontrolprotocol.py::ProtocolTests::test_eventlistener",
"test/test_torcontrolprotocol.py::ProtocolTests::test_eventlistener_error",
"test/test_torcontrolprotocol.py::ProtocolTests::test_getconf",
"test/test_torcontrolprotocol.py::ProtocolTests::test_getconf_raw",
"test/test_torcontrolprotocol.py::ProtocolTests::test_getconf_single",
"test/test_torcontrolprotocol.py::ProtocolTests::test_getinfo",
"test/test_torcontrolprotocol.py::ProtocolTests::test_getinfo_for_descriptor",
"test/test_torcontrolprotocol.py::ProtocolTests::test_getinfo_incremental",
"test/test_torcontrolprotocol.py::ProtocolTests::test_getinfo_incremental_continuation",
"test/test_torcontrolprotocol.py::ProtocolTests::test_getinfo_multiline",
"test/test_torcontrolprotocol.py::ProtocolTests::test_getinfo_one_line",
"test/test_torcontrolprotocol.py::ProtocolTests::test_getinfo_single",
"test/test_torcontrolprotocol.py::ProtocolTests::test_minus_line_no_command",
"test/test_torcontrolprotocol.py::ProtocolTests::test_multiline_plus",
"test/test_torcontrolprotocol.py::ProtocolTests::test_multiline_plus_embedded_equals",
"test/test_torcontrolprotocol.py::ProtocolTests::test_newdesc",
"test/test_torcontrolprotocol.py::ProtocolTests::test_notify_after_getinfo",
"test/test_torcontrolprotocol.py::ProtocolTests::test_notify_error",
"test/test_torcontrolprotocol.py::ProtocolTests::test_plus_line_no_command",
"test/test_torcontrolprotocol.py::ProtocolTests::test_quit",
"test/test_torcontrolprotocol.py::ProtocolTests::test_remove_eventlistener",
"test/test_torcontrolprotocol.py::ProtocolTests::test_remove_eventlistener_multiple",
"test/test_torcontrolprotocol.py::ProtocolTests::test_response_with_no_request",
"test/test_torcontrolprotocol.py::ProtocolTests::test_setconf",
"test/test_torcontrolprotocol.py::ProtocolTests::test_setconf_multi",
"test/test_torcontrolprotocol.py::ProtocolTests::test_setconf_with_space",
"test/test_torcontrolprotocol.py::ProtocolTests::test_signal",
"test/test_torcontrolprotocol.py::ProtocolTests::test_signal_error",
"test/test_torcontrolprotocol.py::ProtocolTests::test_statemachine_broadcast_no_code",
"test/test_torcontrolprotocol.py::ProtocolTests::test_statemachine_broadcast_unknown_code",
"test/test_torcontrolprotocol.py::ProtocolTests::test_statemachine_continuation",
"test/test_torcontrolprotocol.py::ProtocolTests::test_statemachine_is_finish",
"test/test_torcontrolprotocol.py::ProtocolTests::test_statemachine_multiline",
"test/test_torcontrolprotocol.py::ProtocolTests::test_statemachine_singleline",
"test/test_torcontrolprotocol.py::ProtocolTests::test_twocommands",
"test/test_torcontrolprotocol.py::ParseTests::test_circuit_status",
"test/test_torcontrolprotocol.py::ParseTests::test_default_keywords",
"test/test_torcontrolprotocol.py::ParseTests::test_keywords",
"test/test_torcontrolprotocol.py::ParseTests::test_keywords_mutli_equals",
"test/test_torcontrolprotocol.py::ParseTests::test_multientry_keywords_2",
"test/test_torcontrolprotocol.py::ParseTests::test_multientry_keywords_3",
"test/test_torcontrolprotocol.py::ParseTests::test_multientry_keywords_4",
"test/test_torcontrolprotocol.py::ParseTests::test_multiline_keywords",
"test/test_torcontrolprotocol.py::ParseTests::test_multiline_keywords_with_spaces",
"test/test_torcontrolprotocol.py::ParseTests::test_network_status",
"test/test_torcontrolprotocol.py::ParseTests::test_unquoted_keywords",
"test/test_torcontrolprotocol.py::ParseTests::test_unquoted_keywords_empty",
"test/test_torcontrolprotocol.py::ParseTests::test_unquoted_keywords_singlequote"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-08-29 22:24:50+00:00
|
mit
| 3,852 |
|
meerk40t__svgelements-158
|
diff --git a/setup.cfg b/setup.cfg
index 060baf4..ad41d40 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,6 @@
[metadata]
name = svgelements
-version = 1.6.5
+version = 1.6.6
description = Svg Elements Parsing
long_description_content_type=text/markdown
long_description = file: README.md
diff --git a/svgelements/svgelements.py b/svgelements/svgelements.py
index 433aedb..8c83c91 100644
--- a/svgelements/svgelements.py
+++ b/svgelements/svgelements.py
@@ -43,7 +43,7 @@ Though not required the Image class acquires new functionality if provided with
and the Arc can do exact arc calculations if scipy is installed.
"""
-SVGELEMENTS_VERSION = "1.6.5"
+SVGELEMENTS_VERSION = "1.6.6"
MIN_DEPTH = 5
ERROR = 1e-12
@@ -7834,6 +7834,7 @@ class Text(SVGElement, GraphicObject, Transformable):
def property_by_values(self, values):
Transformable.property_by_values(self, values)
GraphicObject.property_by_values(self, values)
+ SVGElement.property_by_values(self, values)
self.anchor = values.get(SVG_ATTR_TEXT_ANCHOR, self.anchor)
self.font_face = values.get("font_face")
self.font_face = values.get(SVG_ATTR_FONT_FACE, self.font_face)
@@ -8080,6 +8081,7 @@ class Image(SVGElement, GraphicObject, Transformable):
def render(self, **kwargs):
GraphicObject.render(self, **kwargs)
Transformable.render(self, **kwargs)
+ SVGElements.render(self, **kwargs)
width = kwargs.get("width", kwargs.get("relative_length"))
height = kwargs.get("height", kwargs.get("relative_length"))
try:
|
meerk40t/svgelements
|
57b8bda811b0b4112d1d47305f53ce43e82016e8
|
diff --git a/test/test_text.py b/test/test_text.py
new file mode 100644
index 0000000..860a081
--- /dev/null
+++ b/test/test_text.py
@@ -0,0 +1,26 @@
+import io
+import unittest
+
+from svgelements import *
+
+
+class TestElementText(unittest.TestCase):
+
+ def test_issue_157(self):
+ q = io.StringIO(u'''<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg>
+ <g id="layer1">
+ <text
+ style="font-size:18px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
+ id="textobject"><tspan
+ id="tspanobject"
+ x="0"
+ y="0">Test</tspan></text>
+ </g>
+</svg>
+ ''')
+ m = SVG.parse(q)
+ q = list(m.elements())
+ self.assertIsNotNone(q[1].id) # Group
+ self.assertIsNotNone(q[2].id) # Text
+ self.assertIsNotNone(q[3].id) # TSpan
|
Parsing Inkscape text elements results in missing element IDs
When trying to access text elements created in Inkscape, the parser does not get the element ids correctly. This might be due to ` id="text3785"><tspan` and a simple regular expression could solve the issue.
```py
import svgelements
# svgFileName = "path/to/svg"
layout = svgelements.SVG.parse(
source = svgFileName,
reify = True,
ppi = svgelements.svgelements.DEFAULT_PPI,
width = 1,
height = 1,
color = "black",
transform = None,
context = None
)
[element.id for element in layout.elements()]
# ['svg3040', 'namedview3042', 'layer1', None, None, None, None, None, None]
```
```xml
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
width="210mm"
height="297mm"
viewBox="0 0 210 297"
version="1.1"
id="svg3040"
inkscape:version="1.1.1 (3bf5ae0d25, 2021-09-20)"
sodipodi:docname="test12.svg"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg">
<sodipodi:namedview
id="namedview3042"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageshadow="2"
inkscape:pageopacity="0.0"
inkscape:pagecheckerboard="0"
inkscape:document-units="mm"
showgrid="false"
inkscape:zoom="0.72337262"
inkscape:cx="396.75265"
inkscape:cy="561.95105"
inkscape:window-width="1920"
inkscape:window-height="1001"
inkscape:window-x="-9"
inkscape:window-y="-9"
inkscape:window-maximized="1"
inkscape:current-layer="layer1" />
<defs
id="defs3037" />
<g
inkscape:label="Layer 1"
inkscape:groupmode="layer"
id="layer1">
<text
xml:space="preserve"
style="font-size:18px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="30.724138"
y="44.988914"
id="text3785"><tspan
sodipodi:role="line"
id="tspan3783"
style="stroke-width:0.264583"
x="30.724138"
y="44.988914">Test</tspan></text>
<text
xml:space="preserve"
style="font-size:18px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="67.300491"
y="110.46059"
id="text4119"><tspan
sodipodi:role="line"
id="tspan4117"
style="stroke-width:0.264583"
x="67.300491"
y="110.46059">TestTest</tspan></text>
<text
xml:space="preserve"
style="font-size:18px;line-height:1.25;font-family:sans-serif;stroke-width:0.264583"
x="67.300491"
y="172.6404"
id="text4519"><tspan
sodipodi:role="line"
id="tspan4517"
style="stroke-width:0.264583"
x="67.300491"
y="172.6404">123</tspan></text>
</g>
</svg>
```
|
0.0
|
57b8bda811b0b4112d1d47305f53ce43e82016e8
|
[
"test/test_text.py::TestElementText::test_issue_157"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-01-05 03:41:40+00:00
|
mit
| 3,853 |
|
meerk40t__svgelements-183
|
diff --git a/setup.cfg b/setup.cfg
index bf387e8..7b05ac3 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,6 @@
[metadata]
name = svgelements
-version = 1.6.15
+version = 1.6.16
description = Svg Elements Parsing
long_description_content_type=text/markdown
long_description = file: README.md
diff --git a/svgelements/svgelements.py b/svgelements/svgelements.py
index c919e9a..feb2d26 100644
--- a/svgelements/svgelements.py
+++ b/svgelements/svgelements.py
@@ -43,7 +43,7 @@ Though not required the Image class acquires new functionality if provided with
and the Arc can do exact arc calculations if scipy is installed.
"""
-SVGELEMENTS_VERSION = "1.6.15"
+SVGELEMENTS_VERSION = "1.6.16"
MIN_DEPTH = 5
ERROR = 1e-12
@@ -220,6 +220,7 @@ REGEX_COLOR_HSL = re.compile(
% (PATTERN_FLOAT, PATTERN_FLOAT, PATTERN_FLOAT, PATTERN_FLOAT)
)
REGEX_LENGTH = re.compile(r"(%s)([A-Za-z%%]*)" % PATTERN_FLOAT)
+REGEX_CSS_COMMENT = re.compile(r"\/\*[\s\S]*?\*\/|\/\/.*$", re.MULTILINE)
REGEX_CSS_STYLE = re.compile(r"([^{]+)\s*\{\s*([^}]+)\s*\}")
REGEX_CSS_FONT = re.compile(
r"(?:(normal|italic|oblique)\s|(normal|small-caps)\s|(normal|bold|bolder|lighter|\d{3})\s|(normal|ultra-condensed|extra-condensed|condensed|semi-condensed|semi-expanded|expanded|extra-expanded|ultra-expanded)\s)*\s*(xx-small|x-small|small|medium|large|x-large|xx-large|larger|smaller|\d+(?:em|pt|pc|px|%))(?:/(xx-small|x-small|small|medium|large|x-large|xx-large|larger|smaller|\d+(?:em|pt|pc|px|%)))?\s*(.*),?\s+(serif|sans-serif|cursive|fantasy|monospace);?"
@@ -4771,44 +4772,52 @@ class Arc(Curve):
if control is not None:
delta_a = control - self.start
delta_b = self.end - control
- if abs(delta_a.x) > 1e-12:
- slope_a = delta_a.y / delta_a.x
- else:
- slope_a = float("inf")
- if abs(delta_b.x) > 1e-12:
- slope_b = delta_b.y / delta_b.x
- else:
- slope_b = float("inf")
ab_mid = Point.towards(self.start, control, 0.5)
bc_mid = Point.towards(control, self.end, 0.5)
- if abs(delta_a.y) < 1e-12: # slope_a == 0
+ if self.start == self.end:
cx = ab_mid.x
- if abs(delta_b.x) < 1e-12: # slope_b == inf
- cy = bc_mid.y
+ cy = ab_mid.y
+ self.sweep = tau
+ else:
+ if abs(delta_a.x) > 1e-12:
+ slope_a = delta_a.y / delta_a.x
+ else:
+ slope_a = float("inf")
+ if abs(delta_b.x) > 1e-12:
+ slope_b = delta_b.y / delta_b.x
else:
- cy = bc_mid.y + (bc_mid.x - cx) / slope_b
- elif abs(delta_b.y) < 1e-12: # slope_b == 0
- cx = bc_mid.x
- if abs(delta_a.y) < 1e-12: # slope_a == inf
+ slope_b = float("inf")
+ if abs(delta_a.y) < 1e-12: # slope_a == 0
+ cx = ab_mid.x
+ if abs(delta_b.x) < 1e-12: # slope_b == inf
+ cy = bc_mid.y
+ else:
+ if abs(slope_b) > 1e-12:
+ cy = bc_mid.y + (bc_mid.x - cx) / slope_b
+ else:
+ cy = float("inf")
+ elif abs(delta_b.y) < 1e-12: # slope_b == 0
+ cx = bc_mid.x
+ if abs(delta_a.y) < 1e-12: # slope_a == inf
+ cy = ab_mid.y
+ else:
+ cy = ab_mid.y + (ab_mid.x - cx) / slope_a
+ elif abs(delta_a.x) < 1e-12: # slope_a == inf
+ cy = ab_mid.y
+ cx = slope_b * (bc_mid.y - cy) + bc_mid.x
+ elif abs(delta_b.x) < 1e-12: # slope_b == inf
+ cy = bc_mid.y
+ cx = slope_a * (ab_mid.y - cy) + ab_mid.x
+ elif abs(slope_a - slope_b) < 1e-12:
+ cx = ab_mid.x
cy = ab_mid.y
else:
- cy = ab_mid.y + (ab_mid.x - cx) / slope_a
- elif abs(delta_a.x) < 1e-12: # slope_a == inf
- cy = ab_mid.y
- cx = slope_b * (bc_mid.y - cy) + bc_mid.x
- elif abs(delta_b.x) < 1e-12: # slope_b == inf
- cy = bc_mid.y
- cx = slope_a * (ab_mid.y - cy) + ab_mid.x
- elif abs(slope_a - slope_b) < 1e-12:
- cx = ab_mid.x
- cy = ab_mid.y
- else:
- cx = (
- slope_a * slope_b * (ab_mid.y - bc_mid.y)
- - slope_a * bc_mid.x
- + slope_b * ab_mid.x
- ) / (slope_b - slope_a)
- cy = ab_mid.y - (cx - ab_mid.x) / slope_a
+ cx = (
+ slope_a * slope_b * (ab_mid.y - bc_mid.y)
+ - slope_a * bc_mid.x
+ + slope_b * ab_mid.x
+ ) / (slope_b - slope_a)
+ cy = ab_mid.y - (cx - ab_mid.x) / slope_a
self.center = Point(cx, cy)
cw = bool(Point.orientation(self.start, control, self.end) == 2)
elif "r" in kwargs:
@@ -8834,7 +8843,9 @@ class SVG(Group):
s = Title(values, title=elem.text)
context.append(s)
elif SVG_TAG_STYLE == tag:
- assignments = list(re.findall(REGEX_CSS_STYLE, elem.text))
+ textstyle = elem.text
+ textstyle = re.sub(REGEX_CSS_COMMENT, '', textstyle)
+ assignments = list(re.findall(REGEX_CSS_STYLE, textstyle.strip()))
for key, value in assignments:
key = key.strip()
value = value.strip()
|
meerk40t/svgelements
|
8faa1e1ac6c3d18f6756c9706106f32b3e7f71b0
|
diff --git a/test/test_css.py b/test/test_css.py
index b16939f..86442bc 100644
--- a/test/test_css.py
+++ b/test/test_css.py
@@ -36,3 +36,38 @@ class TestSVGCSS(unittest.TestCase):
self.assertEqual(circ2.fill, "none")
self.assertEqual(circ2.stroke, "red")
+
+ def test_issue_178(self):
+ """Testing Issue 178 css comment parsing
+ """
+
+ q = io.StringIO(u'''<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 80 80">
+ <defs>
+ <style>
+ //this is a comment.
+ .cls-1,.cls-2{fill:none;stroke-miterlimit:10;}.cls-1{stroke:blue;}.cls-2{stroke:red;}
+ //.cls-2{stroke:pink;}
+ /* Testing this should be functional */
+ </style>
+ </defs>
+ <g id="Layer_2" data-name="Layer 2">
+ <g id="Layer_1-2" data-name="Layer 1">
+ <polygon points="56.59 67.4 39.86 57.28 23.01 67.22 26.34 45.99 12.83 30.88 31.62 27.88 40.12 8.6 48.41 27.97 67.17 31.17 53.5 46.14 56.59 67.4"/>
+ <circle class="cls-1" cx="40" cy="40" r="35"/>
+ <circle class="cls-2" cx="40" cy="40" r="39.5"/>
+ </g>
+ </g>
+ </svg>''')
+ m = SVG.parse(q)
+ poly = m[0][0][0]
+ circ1 = m[0][0][1]
+ circ2 = m[0][0][2]
+
+ self.assertEqual(poly.fill, "black")
+ self.assertEqual(poly.stroke, "none")
+
+ self.assertEqual(circ1.fill, "none")
+ self.assertEqual(circ1.stroke, "blue")
+
+ self.assertEqual(circ2.fill, "none")
+ self.assertEqual(circ2.stroke, "red")
diff --git a/test/test_path_segments.py b/test/test_path_segments.py
index 5af098d..8656fdc 100644
--- a/test/test_path_segments.py
+++ b/test/test_path_segments.py
@@ -40,3 +40,21 @@ class TestBoundingBox(unittest.TestCase):
def test_null_arc_bbox(self):
self.assertEqual(Path("M0,0A0,0 0 0 0 0,0z").bbox(), (0,0,0,0))
+
+class TestArcControlPoints(unittest.TestCase):
+
+ def test_coincident_end_arc(self):
+ """
+ Tests the creation of a control point with a coincident start and end.
+ """
+ arc = Arc(start=(0,0), control=(50,0), end=(0,0))
+ self.assertAlmostEqual(arc.rx, 25)
+
+ def test_linear_arc(self):
+ """
+ Colinear Arcs should raise value errors.
+ """
+ arc_vertical = Arc(start=(0, 0), control=(25, 0), end=(50, 0))
+ # print(arc_vertical)
+ arc_horizontal = Arc(start=(0, 0), control=(0, 25), end=(0, 50))
+ # print(arc_horizontal)
|
Control Point Arc with Colinear Points Crashes in unhelpful way.
An Arc with Colinear control points should be a line segment, this should crash with a better error than divide by zero.
|
0.0
|
8faa1e1ac6c3d18f6756c9706106f32b3e7f71b0
|
[
"test/test_css.py::TestSVGCSS::test_issue_178",
"test/test_path_segments.py::TestArcControlPoints::test_coincident_end_arc",
"test/test_path_segments.py::TestArcControlPoints::test_linear_arc"
] |
[
"test/test_css.py::TestSVGCSS::test_issue_103",
"test/test_path_segments.py::TestElementLinear::test_linear_nearest",
"test/test_path_segments.py::TestBoundingBox::test_arc_bbox",
"test/test_path_segments.py::TestBoundingBox::test_cbezier_bbox",
"test/test_path_segments.py::TestBoundingBox::test_linear_bbox",
"test/test_path_segments.py::TestBoundingBox::test_null_arc_bbox",
"test/test_path_segments.py::TestBoundingBox::test_qbezier_bbox"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-06-12 10:40:34+00:00
|
mit
| 3,854 |
|
meerk40t__svgelements-216
|
diff --git a/svgelements/svgelements.py b/svgelements/svgelements.py
index 45a0336..e89a99d 100644
--- a/svgelements/svgelements.py
+++ b/svgelements/svgelements.py
@@ -4787,7 +4787,12 @@ class CubicBezier(Curve):
if 0 < r2 < 1:
local_extremizers.append(r2)
else:
- local_extremizers.append(0.5)
+ c = a[1] - a[0]
+ b = 2 * (a[0] - 2*a[1] + a[2])
+ if b != 0:
+ r0 = -c/b
+ if 0 < r0 < 1:
+ local_extremizers.append(r0)
local_extrema = [self.point(t)[v] for t in local_extremizers]
return min(local_extrema), max(local_extrema)
|
meerk40t/svgelements
|
61ac29097a8435458f1c6b02bc2091bcc170b4b7
|
diff --git a/test/test_cubic_bezier.py b/test/test_cubic_bezier.py
index e63893a..37ef038 100644
--- a/test/test_cubic_bezier.py
+++ b/test/test_cubic_bezier.py
@@ -1,3 +1,4 @@
+import random
import unittest
from random import *
@@ -5,12 +6,15 @@ from svgelements import *
def get_random_cubic_bezier():
- return CubicBezier((random() * 50, random() * 50), (random() * 50, random() * 50),
- (random() * 50, random() * 50), (random() * 50, random() * 50))
+ return CubicBezier(
+ (random() * 50, random() * 50),
+ (random() * 50, random() * 50),
+ (random() * 50, random() * 50),
+ (random() * 50, random() * 50),
+ )
class TestElementCubicBezierLength(unittest.TestCase):
-
def test_cubic_bezier_length(self):
n = 100
error = 0
@@ -25,18 +29,20 @@ class TestElementCubicBezierLength(unittest.TestCase):
class TestElementCubicBezierPoint(unittest.TestCase):
-
def test_cubic_bezier_point_start_stop(self):
import numpy as np
+
for _ in range(1000):
b = get_random_cubic_bezier()
self.assertEqual(b.start, b.point(0))
self.assertEqual(b.end, b.point(1))
- self.assertTrue(np.all(np.array([list(b.start), list(b.end)])
- == b.npoint([0, 1])))
+ self.assertTrue(
+ np.all(np.array([list(b.start), list(b.end)]) == b.npoint([0, 1]))
+ )
def test_cubic_bezier_point_implementations_match(self):
import numpy as np
+
for _ in range(1000):
b = get_random_cubic_bezier()
@@ -50,3 +56,21 @@ class TestElementCubicBezierPoint(unittest.TestCase):
for p, p1, p2 in zip(pos, v1, v2):
self.assertEqual(b.point(p), Point(p1))
self.assertEqual(Point(p1), Point(p2))
+
+ def test_cubic_bounds_issue_214(self):
+ cubic = CubicBezier(0, -2 - 3j, -1 - 4j, -3j)
+ bbox = cubic.bbox()
+ self.assertLess(bbox[1], -3)
+
+ def test_cubic_bounds_issue_214_random(self):
+ for i in range(100):
+ a = random() * 5
+ b = random() * 5
+ c = random() * 5
+ d = a - 3 * b + 3 * c
+ cubic1 = CubicBezier(a, b, c, d)
+ bbox1 = cubic1.bbox()
+ cubic2 = CubicBezier(a, b, c, d + 1e-11)
+ bbox2 = cubic2.bbox()
+ for a, b in zip(bbox1, bbox2):
+ self.assertAlmostEqual(a, b, delta=1e-5)
|
Cubic Bezier incorrect bounding box
This may be related to #186, but thought I'd bring it up in case it isn't known about. The computed bounding box of certain cubic beziers seems to be incorrect sometimes. Here's an example:
```python
from svgelements import CubicBezier
cubic = CubicBezier(0, -2-3j, -1-4j, -3j)
bbox = cubic.bbox()
print(bbox) # outputs (-1.1547005383792515, -3.0, 0.0, 0.0)
```
This outputs `(-1.1547005383792515, -3.0, 0.0, 0.0)`, but `ymin` should really be lower than `-3`. Here is a picture of the bezier with the incorrectly computed bounding box:

svgelements version: 1.9.0
Python version: 3.8.1
OS: Windows
|
0.0
|
61ac29097a8435458f1c6b02bc2091bcc170b4b7
|
[
"test/test_cubic_bezier.py::TestElementCubicBezierPoint::test_cubic_bounds_issue_214",
"test/test_cubic_bezier.py::TestElementCubicBezierPoint::test_cubic_bounds_issue_214_random"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_issue_reference",
"has_media"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-03-12 12:53:08+00:00
|
mit
| 3,855 |
|
meerk40t__svgelements-221
|
diff --git a/setup.cfg b/setup.cfg
index f1fdd39..ce62894 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,6 @@
[metadata]
name = svgelements
-version = 1.9.1
+version = 1.9.2
description = Svg Elements Parsing
long_description_content_type=text/markdown
long_description = file: README.md
@@ -10,14 +10,12 @@ classifiers =
License :: OSI Approved :: MIT License
Operating System :: OS Independent
Programming Language :: Python
- Programming Language :: Python :: 2.7
- Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3.10
- Programming Language :: Python :: Implementation :: PyPy
+ Programming Language :: Python :: 3.11
Topic :: Multimedia :: Graphics
Topic :: Multimedia :: Graphics :: Editors :: Vector-Based
Topic :: Software Development :: Libraries :: Python Modules
diff --git a/svgelements/svgelements.py b/svgelements/svgelements.py
index e89a99d..43f2a39 100644
--- a/svgelements/svgelements.py
+++ b/svgelements/svgelements.py
@@ -43,7 +43,7 @@ Though not required the Image class acquires new functionality if provided with
and the Arc can do exact arc calculations if scipy is installed.
"""
-SVGELEMENTS_VERSION = "1.9.1"
+SVGELEMENTS_VERSION = "1.9.2"
MIN_DEPTH = 5
ERROR = 1e-12
@@ -4773,7 +4773,7 @@ class CubicBezier(Curve):
local_extremizers = [0, 1]
a = [c[v] for c in self]
denom = a[0] - 3 * a[1] + 3 * a[2] - a[3]
- if abs(denom) >= 1e-12:
+ if abs(denom) >= 1e-8:
delta = (
a[1] * a[1] - (a[0] + a[1]) * a[2] + a[2] * a[2] + (a[0] - a[1]) * a[3]
)
|
meerk40t/svgelements
|
4e9922363c6ea9ce0a97079520975518dac38241
|
diff --git a/.github/workflows/unittests.yml b/.github/workflows/unittests.yml
index fb8bd86..f046eba 100644
--- a/.github/workflows/unittests.yml
+++ b/.github/workflows/unittests.yml
@@ -29,13 +29,14 @@ jobs:
strategy:
fail-fast: false
matrix:
- os: [ubuntu-18.04, macos-11]
- python-version: [3.9]
+ os: [ubuntu-20.04, ubuntu-latest, macos-11]
+ python-version: ['3.9', '3.11']
experimental: [false]
include:
- - os: ubuntu-18.04
+ - os: ubuntu-20.04
+ python-version: 3.6
+ - os: macos-11
python-version: 3.6
- experimental: false
steps:
@@ -43,7 +44,7 @@ jobs:
uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
diff --git a/test/test_cubic_bezier.py b/test/test_cubic_bezier.py
index 37ef038..6927487 100644
--- a/test/test_cubic_bezier.py
+++ b/test/test_cubic_bezier.py
@@ -74,3 +74,9 @@ class TestElementCubicBezierPoint(unittest.TestCase):
bbox2 = cubic2.bbox()
for a, b in zip(bbox1, bbox2):
self.assertAlmostEqual(a, b, delta=1e-5)
+
+ def test_cubic_bounds_issue_220(self):
+ p = Path(transform=Matrix(682.657124793113, 0.000000000003, -0.000000000003, 682.657124793113, 257913.248909660178, -507946.354527872754))
+ p += CubicBezier(start=Point(-117.139521365,1480.99923469), control1=Point(-41.342266634,1505.62725567), control2=Point(40.3422666342,1505.62725567), end=Point(116.139521365,1480.99923469))
+ bounds = p.bbox()
+ self.assertNotAlmostEquals(bounds[1], bounds[3], delta=100)
|
Numerical Instability above the 1e-12 level for cubic bounding box.
```python
def test_bbox_failure(self):
p = Path(transform=Matrix(682.657124793113, 0.000000000003, -0.000000000003, 682.657124793113, 257913.248909660178, -507946.354527872754))
p += CubicBezier(start=Point(-117.139521365,1480.99923469), control1=Point(-41.342266634,1505.62725567), control2=Point(40.3422666342,1505.62725567), end=Point(116.139521365,1480.99923469))
bounds = p.bbox()
self.assertNotAlmostEquals(bounds[1], bounds[3], delta=100)
```
This fails even though it's basically an arc and the y-range should be quite considerable.
See: meerk40t/meerk40t#1798
|
0.0
|
4e9922363c6ea9ce0a97079520975518dac38241
|
[
"test/test_cubic_bezier.py::TestElementCubicBezierPoint::test_cubic_bounds_issue_220"
] |
[
"test/test_cubic_bezier.py::TestElementCubicBezierPoint::test_cubic_bounds_issue_214",
"test/test_cubic_bezier.py::TestElementCubicBezierPoint::test_cubic_bounds_issue_214_random"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-04-16 22:23:40+00:00
|
mit
| 3,856 |
|
megajanlott__cbor-decoder-46
|
diff --git a/cbor/MajorType.py b/cbor/MajorType.py
index 2f43a8c..018d188 100644
--- a/cbor/MajorType.py
+++ b/cbor/MajorType.py
@@ -1,9 +1,10 @@
from cbor.CBORStream import CBORStream
from cbor.State import State
-from cbor.type.ByteString import ByteString
-from cbor.type.TextString import TextString
from cbor.type.Array import ArrayInfo
+from cbor.type.ByteString import ByteString
from cbor.type.Map import MapInfo
+from cbor.type.Tag import TagInfo
+from cbor.type.TextString import TextString
MAJOR_TYPE_MASK = 0b11100000
MAJOR_TYPE_SIZE = 3
@@ -29,5 +30,7 @@ class MajorType(State):
return ArrayInfo()
elif t == 5:
return MapInfo()
+ elif t == 6:
+ return TagInfo()
return
diff --git a/cbor/type/Array.py b/cbor/type/Array.py
index 28bb061..014cdfe 100644
--- a/cbor/type/Array.py
+++ b/cbor/type/Array.py
@@ -12,7 +12,7 @@ class ArrayInfo(cbor.State.State):
if length == 0:
handler(']')
elif length < 24:
- return [cbor.MajorType.MajorType(), ArrayRead(length)]
+ return [ArrayRead(length), cbor.MajorType.MajorType()]
elif length == 24:
return [ArrayLen(1)]
elif length == 25:
@@ -22,7 +22,7 @@ class ArrayInfo(cbor.State.State):
elif length == 27:
return [ArrayLen(8)]
elif length == 31:
- return [cbor.MajorType.MajorType(), ArrayInf()]
+ return [ArrayInf(), cbor.MajorType.MajorType()]
return []
@@ -37,7 +37,7 @@ class ArrayRead(cbor.State.State):
def run(self, stream: cbor.CBORStream.CBORStream, handler):
if self.n > 1:
handler(',')
- return [cbor.MajorType.MajorType(), ArrayRead(self.n - 1)]
+ return [ArrayRead(self.n - 1), cbor.MajorType.MajorType()]
handler(']')
return []
@@ -53,11 +53,11 @@ class ArrayLen(cbor.State.State):
def run(self, stream: cbor.CBORStream.CBORStream, handler):
info = stream.read(self.n)
length = int.from_bytes(info, byteorder='big')
- return [cbor.MajorType.MajorType(), ArrayRead(length)]
+ return [ArrayRead(length), cbor.MajorType.MajorType()]
class ArrayInf(cbor.State.State):
def run(self, stream: cbor.CBORStream.CBORStream, handler):
handler(',')
- return [cbor.MajorType.MajorType(), ArrayInf()]
+ return [ArrayInf(), cbor.MajorType.MajorType()]
diff --git a/cbor/type/Tag.py b/cbor/type/Tag.py
new file mode 100644
index 0000000..d0a1a27
--- /dev/null
+++ b/cbor/type/Tag.py
@@ -0,0 +1,32 @@
+import cbor.CBORStream
+import cbor.MajorType
+import cbor.State
+
+
+class TagInfo(cbor.State.State):
+
+ def run(self, stream: cbor.CBORStream.CBORStream, handler):
+ info = stream.read(1)
+ length = ord(info) & 0b00011111
+ if length == 24:
+ return [TagRead(1)]
+ elif length == 25:
+ return [TagRead(2)]
+ elif length == 26:
+ return [TagRead(4)]
+ elif length == 27:
+ return [TagRead(8)]
+ return [cbor.MajorType.MajorType()]
+
+
+class TagRead(cbor.State.State):
+
+ def __eq__(self, other):
+ return self.n == other.n
+
+ def __init__(self, n: int):
+ self.n = n
+
+ def run(self, stream: cbor.CBORStream.CBORStream, handler):
+ stream.read(self.n)
+ return [cbor.MajorType.MajorType()]
|
megajanlott/cbor-decoder
|
ada9419642bb83e8975a55879843cb32cb2f14e8
|
diff --git a/tests/test_Array.py b/tests/test_Array.py
index 2755ae0..61539fa 100644
--- a/tests/test_Array.py
+++ b/tests/test_Array.py
@@ -3,6 +3,7 @@ from cbor.MajorType import MajorType
from cbor.CBORStream import CBORStream
from cbor.type.Array import ArrayInfo, ArrayRead, ArrayLen
from tests.MockHandler import MockHandler
+from cbor.Decoder import Decoder
def ignore_handler(v):
@@ -19,9 +20,9 @@ def test_run_array_length():
data = CBORStream(BytesIO(bytes([0b10000011])))
stack = ArrayInfo().run(data, ignore_handler)
assert len(stack) == 2
- assert type(stack[0]) == MajorType
- assert type(stack[1]) == ArrayRead
- assert stack[1] == ArrayRead(3)
+ assert type(stack[0]) == ArrayRead
+ assert stack[0] == ArrayRead(3)
+ assert type(stack[1]) == MajorType
def test_run_array_length_multibyte():
@@ -35,9 +36,9 @@ def test_run_array_length_multibyte():
assert stack[0] == ArrayLen(1)
stack2 = stack[0].run(data, ignore_handler)
assert len(stack2) == 2
- assert type(stack2[0]) == MajorType
- assert type(stack2[1]) == ArrayRead
- assert stack2[1] == ArrayRead(1)
+ assert type(stack2[0]) == ArrayRead
+ assert stack2[0] == ArrayRead(1)
+ assert type(stack2[1]) == MajorType
# Array length on 2 bytes.
data = CBORStream(BytesIO(bytes([
@@ -49,9 +50,9 @@ def test_run_array_length_multibyte():
assert stack[0] == ArrayLen(2)
stack2 = stack[0].run(data, ignore_handler)
assert len(stack2) == 2
- assert type(stack2[0]) == MajorType
- assert type(stack2[1]) == ArrayRead
- assert stack2[1] == ArrayRead(1 << 8)
+ assert type(stack2[0]) == ArrayRead
+ assert stack2[0] == ArrayRead(1 << 8)
+ assert type(stack2[1]) == MajorType
# Array length on 4 bytes.
data = CBORStream(BytesIO(bytes([
@@ -63,9 +64,9 @@ def test_run_array_length_multibyte():
assert stack[0] == ArrayLen(4)
stack2 = stack[0].run(data, ignore_handler)
assert len(stack2) == 2
- assert type(stack2[0]) == MajorType
- assert type(stack2[1]) == ArrayRead
- assert stack2[1] == ArrayRead(1 << 24)
+ assert type(stack2[0]) == ArrayRead
+ assert stack2[0] == ArrayRead(1 << 24)
+ assert type(stack2[1]) == MajorType
# Array length on 8 bytes.
data = CBORStream(BytesIO(bytes([
@@ -77,9 +78,9 @@ def test_run_array_length_multibyte():
assert stack[0] == ArrayLen(8)
stack2 = stack[0].run(data, ignore_handler)
assert len(stack2) == 2
- assert type(stack2[0]) == MajorType
- assert type(stack2[1]) == ArrayRead
- assert stack2[1] == ArrayRead(1 << 56)
+ assert type(stack2[0]) == ArrayRead
+ assert stack2[0] == ArrayRead(1 << 56)
+ assert type(stack2[1]) == MajorType
def test_run_array_read():
@@ -90,3 +91,26 @@ def test_run_array_read():
stack = ArrayInfo().run(data, handler.handler)
assert len(stack) == 0
handler.assert_data('[]')
+
+
+def test_run_array_single_element():
+ handler = MockHandler()
+
+ # Empty array.
+ d = Decoder()
+ data = bytes([0b10000001, 0b10000000])
+ d.decode_array(data, handler.handler)
+ handler.assert_data('[[]]')
+
+
+def test_run_array_two_elements():
+ handler = MockHandler()
+
+ # Empty array.
+ d = Decoder()
+ data = bytes([
+ 0b10000010,
+ 0b10000000,
+ 0b10000000])
+ d.decode_array(data, handler.handler)
+ handler.assert_data('[[],[]]')
diff --git a/tests/test_Tag.py b/tests/test_Tag.py
new file mode 100644
index 0000000..c57d23d
--- /dev/null
+++ b/tests/test_Tag.py
@@ -0,0 +1,83 @@
+from io import BytesIO
+from cbor.MajorType import MajorType
+from cbor.CBORStream import CBORStream
+from cbor.type.Tag import TagInfo, TagRead
+from tests.MockHandler import MockHandler
+
+
+def ignore_handler(v):
+ return
+
+
+def test_run_tag_probe():
+ data = CBORStream(BytesIO(bytes([0b11000001])))
+ assert type(MajorType().run(data, None)) == TagInfo
+
+
+def test_run_tag_length():
+ # Tag length lower than 24.
+ data = CBORStream(BytesIO(bytes([0b11000011])))
+ stack = TagInfo().run(data, ignore_handler)
+ assert len(stack) == 1
+ assert type(stack[0]) == MajorType
+
+
+def test_run_tag_length_multibyte():
+ # Tag length on 1 byte.
+ data = CBORStream(BytesIO(bytes([
+ 0b11011000, 0b1
+ ])))
+ stack = TagInfo().run(data, ignore_handler)
+ assert len(stack) == 1
+ assert type(stack[0]) == TagRead
+ assert stack[0] == TagRead(1)
+ stack2 = stack[0].run(data, ignore_handler)
+ assert len(stack2) == 1
+ assert type(stack2[0]) == MajorType
+
+ # Tag length on 2 bytes.
+ data = CBORStream(BytesIO(bytes([
+ 0b11011001, 0b1, 0b0
+ ])))
+ stack = TagInfo().run(data, ignore_handler)
+ assert len(stack) == 1
+ assert type(stack[0]) == TagRead
+ assert stack[0] == TagRead(2)
+ stack2 = stack[0].run(data, ignore_handler)
+ assert len(stack2) == 1
+ assert type(stack2[0]) == MajorType
+
+ # Tag length on 4 bytes.
+ data = CBORStream(BytesIO(bytes([
+ 0b11011010, 0b1, 0b0, 0b0, 0b0
+ ])))
+ stack = TagInfo().run(data, ignore_handler)
+ assert len(stack) == 1
+ assert type(stack[0]) == TagRead
+ assert stack[0] == TagRead(4)
+ stack2 = stack[0].run(data, ignore_handler)
+ assert len(stack2) == 1
+ assert type(stack2[0]) == MajorType
+
+ # Tag length on 8 bytes.
+ data = CBORStream(BytesIO(bytes([
+ 0b11011011, 0b1, 0b0, 0b0, 0b0, 0b0, 0b0, 0b0, 0b0
+ ])))
+ stack = TagInfo().run(data, ignore_handler)
+ assert len(stack) == 1
+ assert type(stack[0]) == TagRead
+ assert stack[0] == TagRead(8)
+ stack2 = stack[0].run(data, ignore_handler)
+ assert len(stack2) == 1
+ assert type(stack2[0]) == MajorType
+
+
+def test_run_tag_read():
+ handler = MockHandler()
+
+ # No output expected.
+ data = CBORStream(BytesIO(bytes([0b11000000])))
+ stack = TagInfo().run(data, handler.handler)
+ assert len(stack) == 1
+ assert type(stack[0]) == MajorType
+ handler.assert_data('')
|
Handle/Decode tags
Major type 6 describes optional tagging of items
We should decide whether to decode or skip them
Optional Tagging states:
- [ ] Implement TagInfo
- [ ] Implement TagRead(n)
Additional info about states can be found here:
https://docs.google.com/document/d/1tvQJtJbYUcM2vI5H0RDukWqNYsLxjczZ30PiBFFVsV8/edit#
|
0.0
|
ada9419642bb83e8975a55879843cb32cb2f14e8
|
[
"tests/test_Array.py::test_run_array_probe",
"tests/test_Array.py::test_run_array_length",
"tests/test_Array.py::test_run_array_length_multibyte",
"tests/test_Array.py::test_run_array_read",
"tests/test_Array.py::test_run_array_single_element",
"tests/test_Array.py::test_run_array_two_elements",
"tests/test_Tag.py::test_run_tag_probe",
"tests/test_Tag.py::test_run_tag_length",
"tests/test_Tag.py::test_run_tag_length_multibyte",
"tests/test_Tag.py::test_run_tag_read"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-04-29 20:01:46+00:00
|
mit
| 3,857 |
|
megajanlott__cbor-decoder-47
|
diff --git a/cbor/MajorType.py b/cbor/MajorType.py
index d9b0d34..2f43a8c 100644
--- a/cbor/MajorType.py
+++ b/cbor/MajorType.py
@@ -3,6 +3,7 @@ from cbor.State import State
from cbor.type.ByteString import ByteString
from cbor.type.TextString import TextString
from cbor.type.Array import ArrayInfo
+from cbor.type.Map import MapInfo
MAJOR_TYPE_MASK = 0b11100000
MAJOR_TYPE_SIZE = 3
@@ -26,5 +27,7 @@ class MajorType(State):
return TextString()
elif t == 4:
return ArrayInfo()
+ elif t == 5:
+ return MapInfo()
return
diff --git a/cbor/type/Map.py b/cbor/type/Map.py
new file mode 100644
index 0000000..e46f64e
--- /dev/null
+++ b/cbor/type/Map.py
@@ -0,0 +1,84 @@
+import cbor.CBORStream
+import cbor.MajorType
+import cbor.State
+
+
+class MapInfo(cbor.State.State):
+
+ def run(self, stream: cbor.CBORStream.CBORStream, handler):
+ info = stream.read(1)
+ length = ord(info) & 0b00011111
+ handler('{')
+ if length == 0:
+ handler('}')
+ elif length < 24:
+ return [MapReadValue(length), cbor.MajorType.MajorType()]
+ elif length == 24:
+ return [MapLen(1)]
+ elif length == 25:
+ return [MapLen(2)]
+ elif length == 26:
+ return [MapLen(4)]
+ elif length == 27:
+ return [MapLen(8)]
+ elif length == 31:
+ return [MapInfValue(), cbor.MajorType.MajorType()]
+ return []
+
+
+class MapLen(cbor.State.State):
+
+ def __eq__(self, other):
+ return self.n == other.n
+
+ def __init__(self, n: int):
+ self.n = n
+
+ def run(self, stream: cbor.CBORStream.CBORStream, handler):
+ info = stream.read(self.n)
+ length = int.from_bytes(info, byteorder='big')
+ return [MapReadValue(length), cbor.MajorType.MajorType()]
+
+
+class MapReadKey(cbor.State.State):
+
+ def __eq__(self, other):
+ return self.n == other.n
+
+ def __init__(self, n: int):
+ self.n = n
+
+ def run(self, stream: cbor.CBORStream.CBORStream, handler):
+ if self.n == 0:
+ handler('}')
+ return []
+ if self.n > 0:
+ handler(',')
+ return [MapReadValue(self.n), cbor.MajorType.MajorType()]
+
+
+class MapReadValue(cbor.State.State):
+
+ def __eq__(self, other):
+ return self.n == other.n
+
+ def __init__(self, n: int):
+ self.n = n
+
+ def run(self, stream: cbor.CBORStream.CBORStream, handler):
+ handler(':')
+ return [MapReadKey(self.n-1), cbor.MajorType.MajorType()]
+
+
+class MapInfKey(cbor.State.State):
+
+ def run(self, stream: cbor.CBORStream.CBORStream, handler):
+ handler(',')
+ return [MapInfValue(), cbor.MajorType.MajorType()]
+
+
+class MapInfValue(cbor.State.State):
+
+ def run(self, stream: cbor.CBORStream.CBORStream, handler):
+ handler(':')
+ return [MapInfKey(), cbor.MajorType.MajorType()]
|
megajanlott/cbor-decoder
|
c2af49e12ad7fe36433ec013b176f4dda89a4b2e
|
diff --git a/tests/test_Map.py b/tests/test_Map.py
new file mode 100644
index 0000000..7497c8d
--- /dev/null
+++ b/tests/test_Map.py
@@ -0,0 +1,147 @@
+from io import BytesIO
+from cbor.MajorType import MajorType
+from cbor.CBORStream import CBORStream
+from cbor.type.Map import *
+from tests.MockHandler import MockHandler
+from cbor.Decoder import Decoder
+
+
+def ignore_handler(v):
+ return
+
+
+def test_run_map_probe():
+ data = CBORStream(BytesIO(bytes([0b10100001])))
+ assert type(MajorType().run(data, None)) == MapInfo
+
+
+def test_run_map_length():
+ # Map length lower than 24.
+ data = CBORStream(BytesIO(bytes([0b10100011])))
+ stack = MapInfo().run(data, ignore_handler)
+ assert len(stack) == 2
+ assert type(stack[0]) == MapReadValue
+ assert stack[0] == MapReadValue(3)
+ assert type(stack[1]) == MajorType
+
+
+def test_run_map_length_multibyte():
+ # Map length on 1 byte.
+ data = CBORStream(BytesIO(bytes([
+ 0b10111000, 0b1
+ ])))
+ stack = MapInfo().run(data, ignore_handler)
+ assert len(stack) == 1
+ assert type(stack[0]) == MapLen
+ assert stack[0] == MapLen(1)
+ stack2 = stack[0].run(data, ignore_handler)
+ assert len(stack2) == 2
+ assert type(stack2[0]) == MapReadValue
+ assert stack2[0] == MapReadValue(1)
+ assert type(stack2[1]) == MajorType
+
+ # Map length on 2 bytes.
+ data = CBORStream(BytesIO(bytes([
+ 0b10111001, 0b1, 0b0
+ ])))
+ stack = MapInfo().run(data, ignore_handler)
+ assert len(stack) == 1
+ assert type(stack[0]) == MapLen
+ assert stack[0] == MapLen(2)
+ stack2 = stack[0].run(data, ignore_handler)
+ assert len(stack2) == 2
+ assert type(stack2[0]) == MapReadValue
+ assert stack2[0] == MapReadValue(1 << 8)
+ assert type(stack2[1]) == MajorType
+
+ # Map length on 4 bytes.
+ data = CBORStream(BytesIO(bytes([
+ 0b10111010, 0b1, 0b0, 0b0, 0b0
+ ])))
+ stack = MapInfo().run(data, ignore_handler)
+ assert len(stack) == 1
+ assert type(stack[0]) == MapLen
+ assert stack[0] == MapLen(4)
+ stack2 = stack[0].run(data, ignore_handler)
+ assert len(stack2) == 2
+ assert type(stack2[0]) == MapReadValue
+ assert stack2[0] == MapReadValue(1 << 24)
+ assert type(stack2[1]) == MajorType
+
+ # Map length on 8 bytes.
+ data = CBORStream(BytesIO(bytes([
+ 0b10111011, 0b1, 0b0, 0b0, 0b0, 0b0, 0b0, 0b0, 0b0
+ ])))
+ stack = MapInfo().run(data, ignore_handler)
+ assert len(stack) == 1
+ assert type(stack[0]) == MapLen
+ assert stack[0] == MapLen(8)
+ stack2 = stack[0].run(data, ignore_handler)
+ assert len(stack2) == 2
+ assert type(stack2[0]) == MapReadValue
+ assert stack2[0] == MapReadValue(1 << 56)
+ assert type(stack2[1]) == MajorType
+
+
+def test_run_map_inf():
+ # Map with infinite length.
+ data = CBORStream(BytesIO(bytes([0b10111111])))
+ stack = MapInfo().run(data, ignore_handler)
+ assert len(stack) == 2
+ assert type(stack[0]) == MapInfValue
+ assert type(stack[1]) == MajorType
+
+
+def test_run_map_inf_key():
+ # Map with infinite length.
+ data = CBORStream(BytesIO(bytes([])))
+ stack = MapInfKey().run(data, ignore_handler)
+ assert len(stack) == 2
+ assert type(stack[0]) == MapInfValue
+ assert type(stack[1]) == MajorType
+
+
+def test_run_map_inf_value():
+ # Map with infinite length.
+ data = CBORStream(BytesIO(bytes([])))
+ stack = MapInfValue().run(data, ignore_handler)
+ assert len(stack) == 2
+ assert type(stack[0]) == MapInfKey
+ assert type(stack[1]) == MajorType
+
+
+def test_run_map_info_empty():
+ handler = MockHandler()
+
+ # Empty array.
+ data = CBORStream(BytesIO(bytes([0b10100000])))
+ stack = MapInfo().run(data, handler.handler)
+ assert len(stack) == 0
+ handler.assert_data('{}')
+
+
+def test_run_map_single_element():
+ handler = MockHandler()
+
+ # Empty array.
+ d = Decoder()
+ data = bytes([0b10100001, 0b10100000, 0b10100000])
+ d.decode_array(data, handler.handler)
+ print(handler.data)
+ handler.assert_data('{{}:{}}')
+
+
+def test_run_map_two_elements():
+ handler = MockHandler()
+
+ # Empty array.
+ d = Decoder()
+ data = bytes([
+ 0b10100010,
+ 0b10100000,
+ 0b10100000,
+ 0b10100000,
+ 0b10100000])
+ d.decode_array(data, handler.handler)
+ print(handler.data)
+ handler.assert_data('{{}:{},{}:{}}')
|
Map
Map states:
- [ ] Implement MapInfo
- [ ] Implement MapReadKey(n)
- [ ] Implement MapReadValue(n)
- [ ] Implement MapLen(n)
- [ ] Implement MapInfKey()
- [ ] Implement MapInfValue()
Additional info about states can be found here:
https://docs.google.com/document/d/1tvQJtJbYUcM2vI5H0RDukWqNYsLxjczZ30PiBFFVsV8/edit#
|
0.0
|
c2af49e12ad7fe36433ec013b176f4dda89a4b2e
|
[
"tests/test_Map.py::test_run_map_probe",
"tests/test_Map.py::test_run_map_length",
"tests/test_Map.py::test_run_map_length_multibyte",
"tests/test_Map.py::test_run_map_inf",
"tests/test_Map.py::test_run_map_inf_key",
"tests/test_Map.py::test_run_map_inf_value",
"tests/test_Map.py::test_run_map_info_empty",
"tests/test_Map.py::test_run_map_single_element",
"tests/test_Map.py::test_run_map_two_elements"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-05-02 10:56:59+00:00
|
mit
| 3,858 |
|
melexis__warnings-plugin-28
|
diff --git a/README.rst b/README.rst
index 7b37e9c..139e744 100644
--- a/README.rst
+++ b/README.rst
@@ -99,6 +99,11 @@ that case command will look like:
Help prints all currently supported commands and their usages.
+The command returns (shell $? variable):
+
+- value 0 when the number of counted warnings is within the supplied minimum and maximum limits: ok,
+- number of counted warnings (positive) when the counter number is not within those limit.
+
----------------------------
Parse for Sphinx warnings
----------------------------
diff --git a/src/mlx/warnings.py b/src/mlx/warnings.py
index 70a8474..ccbb45a 100644
--- a/src/mlx/warnings.py
+++ b/src/mlx/warnings.py
@@ -94,14 +94,14 @@ class WarningsChecker(object):
''' Function for checking whether the warning count is within the configured limits
Returns:
- int: 0 if the amount of warnings is within limits. 1 otherwise
+ int: 0 if the amount of warnings is within limits. the count of warnings otherwise
'''
if self.count > self.warn_max:
print("Number of warnings ({count}) is higher than the maximum limit ({max}). Returning error code 1.".format(count=self.count, max=self.warn_max))
- return 1
+ return self.count
elif self.count < self.warn_min:
print("Number of warnings ({count}) is lower than the minimum limit ({min}). Returning error code 1.".format(count=self.count, min=self.warn_min))
- return 1
+ return self.count
else:
print("Number of warnings ({count}) is between limits {min} and {max}. Well done.".format(count=self.count, min=self.warn_min, max=self.warn_max))
return 0
|
melexis/warnings-plugin
|
e45c72adea46a8595cc426368e38090a7553f40c
|
diff --git a/tests/test_limits.py b/tests/test_limits.py
index 7a6b1c9..9e477a8 100644
--- a/tests/test_limits.py
+++ b/tests/test_limits.py
@@ -45,7 +45,7 @@ class TestLimits(TestCase):
warnings.check('testfile.c:12: warning: group test: ignoring title "Some test functions" that does not match old title "Some freaky test functions"')
self.assertEqual(warnings.return_count(), 2)
warnings.set_maximum(1)
- self.assertEqual(warnings.return_check_limits(), 1)
+ self.assertEqual(warnings.return_check_limits(), 2)
warnings.set_maximum(2)
self.assertEqual(warnings.return_check_limits(), 0)
@@ -56,7 +56,7 @@ class TestLimits(TestCase):
warnings.check('testfile.c:6: warning: group test: ignoring title "Some test functions" that does not match old title "Some freaky test functions"')
self.assertEqual(warnings.return_count(), 2)
# default behavior
- self.assertEqual(warnings.return_check_limits(), 1)
+ self.assertEqual(warnings.return_check_limits(), 2)
# to set minimum we need to make maximum higher
warnings.set_maximum(10)
@@ -64,7 +64,7 @@ class TestLimits(TestCase):
if x <= 3:
self.assertEqual(warnings.return_check_limits(), 0)
else:
- self.assertEqual(warnings.return_check_limits(), 1)
+ self.assertEqual(warnings.return_check_limits(), 2)
warnings.set_minimum(x)
|
Return code of main() could/should be number of warnings
When count of warnings is not within limit min/max, the return code of main() could be the number of actual warnings found. This way programs can take the return value from shell and do something with it.
|
0.0
|
e45c72adea46a8595cc426368e38090a7553f40c
|
[
"tests/test_limits.py::TestLimits::test_return_values_maximum_increase",
"tests/test_limits.py::TestLimits::test_return_values_minimum_increase"
] |
[
"tests/test_limits.py::TestLimits::test_return_values_maximum_decrease",
"tests/test_limits.py::TestLimits::test_set_maximum",
"tests/test_limits.py::TestLimits::test_set_minimum",
"tests/test_limits.py::TestLimits::test_set_minimum_fail"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-06-30 08:18:03+00:00
|
apache-2.0
| 3,859 |
|
melexis__warnings-plugin-52
|
diff --git a/README.rst b/README.rst
index 139e744..e5af286 100644
--- a/README.rst
+++ b/README.rst
@@ -57,11 +57,16 @@ You can find more details in `Installation guide <docs/installation.rst>`_
Usage
=====
-Since warnings plugin parses log messages (so far), you will need to redirect
-your stderr to some text file. You can do that with shell pipes or with
+Warnings plugin parses log messages as well as direct command stream. In case you
+want to create log file, you will need to redirect your stderr to some text file.
+You can do that with shell pipes or with
command line arguments to command (if it supports outputting errors to file
instead of stderr). Be aware that some commands print warnings on stdout.
+Also warnings plugin log files need to be the last argument as otherwise the
+arguments after that are discarded, because they are considered as command
+arguments (with or without command flag).
+
------------
Pipe example
------------
@@ -73,6 +78,16 @@ file.
yourcommand 2>&1 | tee doc_log.txt
+---------------
+Command example
+---------------
+
+Below is the command example for the plugin (keep in mind that parse commands are
+required).
+
+.. code-block:: bash
+
+ mlx-warnings --command <yourcommand>
---------------
Running command
@@ -104,20 +119,26 @@ The command returns (shell $? variable):
- value 0 when the number of counted warnings is within the supplied minimum and maximum limits: ok,
- number of counted warnings (positive) when the counter number is not within those limit.
-----------------------------
+-------------------------
Parse for Sphinx warnings
-----------------------------
+-------------------------
After you saved your Sphinx warnings to the file, you can parse it with
command:
.. code-block:: bash
- # command line
+ # command line log file
mlx-warnings doc_log.txt --sphinx
+ # command line command execution
+ mlx-warnings --command --sphinx <commandforsphinx>
+
+ # explicitly as python module for log file
+ python3 -m mlx.warnings --sphinx doc_log.txt
+ python -m mlx.warnings --sphinx doc_log.txt
# explicitly as python module
- python3 -m mlx.warnings doc_log.txt --sphinx
- python -m mlx.warnings doc_log.txt --sphinx
+ python3 -m mlx.warnings --command --sphinx <commandforsphinx>
+ python -m mlx.warnings --command --sphinx <commandforsphinx>
--------------------------
@@ -129,11 +150,17 @@ command:
.. code-block:: bash
- # command line
+ # command line log file
mlx-warnings doc_log.txt --doxygen
+ # command line command execution
+ mlx-warnings --command --doxygen <commandfordoxygen>
+
+ # explicitly as python module for log file
+ python3 -m mlx.warnings --doxygen doc_log.txt
+ python -m mlx.warnings --doxygen doc_log.txt
# explicitly as python module
- python3 -m mlx.warnings doc_log.txt --doxygen
- python -m mlx.warnings doc_log.txt --doxygen
+ python3 -m mlx.warnings --command --doxygen <commandfordoxygen>
+ python -m mlx.warnings --command --doxygen <commandfordoxygen>
------------------------
@@ -145,11 +172,17 @@ command:
.. code-block:: bash
- # command line
+ # command line log file
mlx-warnings junit_output.xml --junit
+ # command line command execution
+ mlx-warnings --command --junit <commandforjunit>
+
+ # explicitly as python module for log file
+ python3 -m mlx.warnings --junit junit_output.xml
+ python -m mlx.warnings --junit junit_output.xml
# explicitly as python module
- python3 -m mlx.warnings junit_output.xml --junit
- python -m mlx.warnings junit_output.xml --junit
+ python3 -m mlx.warnings --command --junit <commandforjunit>
+ python -m mlx.warnings --command --junit <commandforjunit>
-------------
Other options
diff --git a/src/mlx/warnings.py b/src/mlx/warnings.py
index 240e13b..7f23c10 100644
--- a/src/mlx/warnings.py
+++ b/src/mlx/warnings.py
@@ -1,9 +1,13 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
+from __future__ import print_function
+
import argparse
+import os
import pkg_resources
import re
+import subprocess
import sys
import abc
from junitparser import JUnitXml, Failure, Error
@@ -313,33 +317,76 @@ def warnings_wrapper(args):
group.add_argument('-s', '--sphinx', dest='sphinx', action='store_true')
group.add_argument('-j', '--junit', dest='junit', action='store_true')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true')
+ parser.add_argument('--command', dest='command', action='store_true',
+ help='Treat program arguments as command to execute to obtain data')
parser.add_argument('-m', '--maxwarnings', type=int, required=False, default=0,
help='Maximum amount of warnings accepted')
parser.add_argument('--minwarnings', type=int, required=False, default=0,
help='Minimum amount of warnings accepted')
parser.add_argument('--version', action='version', version='%(prog)s {version}'.format(version=pkg_resources.require('mlx.warnings')[0].version))
- parser.add_argument('logfile', nargs='+', help='Logfile that might contain warnings')
+ parser.add_argument('logfile', nargs='+', help='Logfile (or command) that might contain warnings')
+ parser.add_argument('flags', nargs=argparse.REMAINDER, help='Possible not-used flags from above are considered as command flags')
+
args = parser.parse_args(args)
warnings = WarningsPlugin(sphinx=args.sphinx, doxygen=args.doxygen, junit=args.junit, verbose=args.verbose)
warnings.set_maximum(args.maxwarnings)
warnings.set_minimum(args.minwarnings)
+ if args.command:
+ cmd = args.logfile
+ if args.flags:
+ cmd.extend(args.flags)
+ warnings_command(warnings, cmd)
+ else:
+ warnings_logfile(warnings, args.logfile)
+
+ warnings.return_count()
+ return warnings.return_check_limits()
+
+
+def warnings_command(warnings, cmd):
+ try:
+ print("Executing: ", end='')
+ print(cmd)
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+ stdin=subprocess.PIPE, bufsize=1, universal_newlines=True)
+ out, err = proc.communicate()
+ # Check stdout
+ if out:
+ try:
+ print(out.decode(encoding="utf-8"))
+ warnings.check(out.decode(encoding="utf-8"))
+ except AttributeError as e:
+ warnings.check(out)
+ print(out)
+ # Check stderr
+ if err:
+ try:
+ warnings.check(err.decode(encoding="utf-8"))
+ print(err.decode(encoding="utf-8"), file=sys.stderr)
+ except AttributeError as e:
+ warnings.check(err)
+ print(err, file=sys.stderr)
+ except OSError as e:
+ if e.errno == os.errno.ENOENT:
+ print("It seems like program " + str(cmd) + " is not installed.")
+ raise
+
+
+def warnings_logfile(warnings, log):
# args.logfile doesn't necessarily contain wildcards, but just to be safe, we
# assume it does, and try to expand them.
# This mechanism is put in place to allow wildcards to be passed on even when
# executing the script on windows (in that case there is no shell expansion of wildcards)
# so that the script can be used in the exact same way even when moving from one
# OS to another.
- for file_wildcard in args.logfile:
+ for file_wildcard in log:
for logfile in glob.glob(file_wildcard):
with open(logfile, 'r') as loghandle:
warnings.check(loghandle.read())
- warnings.return_count()
- return warnings.return_check_limits()
-
def main():
sys.exit(warnings_wrapper(sys.argv[1:]))
diff --git a/tox.ini b/tox.ini
index 33199f3..61c2795 100644
--- a/tox.ini
+++ b/tox.ini
@@ -35,8 +35,9 @@ commands =
python -c 'import mlx.warnings;print(mlx.warnings.__version__)'
python -m mlx.warnings -h
python -m mlx.warnings --version
- python -m mlx.warnings -j tests/junit*.xml --maxwarnings 3 --minwarnings 3
- python -m mlx.warnings -j "tests/junit*.xml" --maxwarnings 3 --minwarnings 3 #emulate for windows (no shell expansion)
+ python -m mlx.warnings -j --maxwarnings 3 --minwarnings 3 tests/junit*.xml
+ python -m mlx.warnings -j --maxwarnings 3 --minwarnings 3 "tests/junit*.xml" #emulate for windows (no shell expansion)
+ python -m mlx.warnings -j --command --maxwarnings 2 --minwarnings 2 cat tests/junit_double_fail.xml
[testenv:bootstrap]
deps =
|
melexis/warnings-plugin
|
0c7e730a491d32ad90f258439715fb6507be37f2
|
diff --git a/tests/sphinx_double_warning.txt b/tests/sphinx_double_warning.txt
new file mode 100644
index 0000000..54d4f42
--- /dev/null
+++ b/tests/sphinx_double_warning.txt
@@ -0,0 +1,3 @@
+/home/bljah/test/index.rst:5: WARNING: toctree contains reference to nonexisting document u'installation'
+/home/bljah/test/index.rst:None: WARNING: toctree contains reference to nonexisting document u'installation'
+
diff --git a/tests/sphinx_single_warning.txt b/tests/sphinx_single_warning.txt
new file mode 100644
index 0000000..3dd77fc
--- /dev/null
+++ b/tests/sphinx_single_warning.txt
@@ -0,0 +1,2 @@
+/home/bljah/test/index.rst:5: WARNING: toctree contains reference to nonexisting document u'installation'
+
diff --git a/tests/test_integration.py b/tests/test_integration.py
index 0a06f45..4500dfb 100644
--- a/tests/test_integration.py
+++ b/tests/test_integration.py
@@ -30,6 +30,26 @@ class TestIntegration(TestCase):
retval = warnings_wrapper(['--junit', 'tests/junit_single_fail.xml', 'tests/junit_double_fail.xml'])
self.assertEqual(1 + 2, retval)
+ def test_single_command_argument(self):
+ retval = warnings_wrapper(['--junit', '--command', 'cat', 'tests/junit_single_fail.xml'])
+ self.assertEqual(1, retval)
+
+ def test_two_command_arguments(self):
+ retval = warnings_wrapper(['--sphinx', '--command', 'cat', 'tests/sphinx_single_warning.txt', 'tests/sphinx_double_warning.txt'])
+ self.assertEqual(1 + 2, retval)
+
+ def test_command_with_its_own_arguments(self):
+ retval = warnings_wrapper(['--sphinx', '--command', 'cat', '-A', 'tests/sphinx_single_warning.txt', 'tests/sphinx_double_warning.txt'])
+ self.assertEqual(1 + 2, retval)
+
+ def test_command_to_stderr(self):
+ retval = warnings_wrapper(['--sphinx', '--command', 'cat', 'tests/sphinx_single_warning.txt', '>&2'])
+ self.assertEqual(1, retval)
+
+ def test_faulty_command(self):
+ with self.assertRaises(OSError):
+ warnings_wrapper(['--sphinx', '--command', 'blahahahaha', 'tests/sphinx_single_warning.txt'])
+
def test_wildcarded_arguments(self):
# note: no shell expansion simulation (e.g. as in windows)
retval = warnings_wrapper(['--junit', 'tests/junit*.xml'])
|
Parse output stream of command
```
warning-plugin <plugin-thresholds> <command> <arguments>
```
should disregard the command return value and parse stdout and stderr for warnings in plugin thresholds or in some special json formatted conf file in the root of the project. This will make it a lot more useable than generating log files to parse through plugin
|
0.0
|
0c7e730a491d32ad90f258439715fb6507be37f2
|
[
"tests/test_integration.py::TestIntegration::test_command_to_stderr",
"tests/test_integration.py::TestIntegration::test_command_with_its_own_arguments",
"tests/test_integration.py::TestIntegration::test_single_command_argument",
"tests/test_integration.py::TestIntegration::test_two_command_arguments"
] |
[
"tests/test_integration.py::TestIntegration::test_help",
"tests/test_integration.py::TestIntegration::test_max",
"tests/test_integration.py::TestIntegration::test_max_but_still_ok",
"tests/test_integration.py::TestIntegration::test_min",
"tests/test_integration.py::TestIntegration::test_min_but_still_ok",
"tests/test_integration.py::TestIntegration::test_no_parser_selection",
"tests/test_integration.py::TestIntegration::test_single_argument",
"tests/test_integration.py::TestIntegration::test_two_arguments",
"tests/test_integration.py::TestIntegration::test_version",
"tests/test_integration.py::TestIntegration::test_wildcarded_arguments"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-03-02 14:08:39+00:00
|
apache-2.0
| 3,860 |
|
melexis__warnings-plugin-56
|
diff --git a/src/mlx/warnings.py b/src/mlx/warnings.py
index 7f23c10..894f799 100644
--- a/src/mlx/warnings.py
+++ b/src/mlx/warnings.py
@@ -213,6 +213,7 @@ class WarningsPlugin:
self.warn_min = 0
self.warn_max = 0
self.count = 0
+ self.printout = False
def activate_checker(self, checker):
'''
@@ -244,6 +245,8 @@ class WarningsPlugin:
if len(self.checkerList) == 0:
print("No checkers activated. Please use activate_checker function")
else:
+ if self.printout:
+ print(content)
for name, checker in self.checkerList.items():
checker.check(content)
@@ -309,6 +312,16 @@ class WarningsPlugin:
return 0
+ def toggle_printout(self, printout):
+ ''' Toggle printout of all the parsed content
+
+ Useful for command input where we want to print content as well
+
+ Args:
+ printout: True enables the printout, False provides more silent mode
+ '''
+ self.printout = printout
+
def warnings_wrapper(args):
parser = argparse.ArgumentParser(prog='mlx-warnings')
@@ -319,6 +332,8 @@ def warnings_wrapper(args):
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true')
parser.add_argument('--command', dest='command', action='store_true',
help='Treat program arguments as command to execute to obtain data')
+ parser.add_argument('--ignore-retval', dest='ignore', action='store_true',
+ help='Ignore return value of the executed command')
parser.add_argument('-m', '--maxwarnings', type=int, required=False, default=0,
help='Maximum amount of warnings accepted')
parser.add_argument('--minwarnings', type=int, required=False, default=0,
@@ -338,7 +353,11 @@ def warnings_wrapper(args):
cmd = args.logfile
if args.flags:
cmd.extend(args.flags)
- warnings_command(warnings, cmd)
+ warnings.toggle_printout(True)
+ retval = warnings_command(warnings, cmd)
+
+ if (not args.ignore) and (retval != 0):
+ return retval
else:
warnings_logfile(warnings, args.logfile)
@@ -347,6 +366,24 @@ def warnings_wrapper(args):
def warnings_command(warnings, cmd):
+ ''' Execute command to obtain input for parsing for warnings
+
+ Usually log files are output of the commands. To avoid this additional step
+ this function runs a command instead and parses the stderr and stdout of the
+ command for warnings.
+
+ Args:
+ warnings (WarningsPlugin): Object for warnings where errors should be logged
+ cmd: Command list, which should be executed to obtain input for parsing
+ ignore: Flag to ignore return value of the command
+
+ Return:
+ retval: Return value of executed command
+
+ Raises:
+ OSError: When program is not installed.
+ '''
+
try:
print("Executing: ", end='')
print(cmd)
@@ -356,19 +393,16 @@ def warnings_command(warnings, cmd):
# Check stdout
if out:
try:
- print(out.decode(encoding="utf-8"))
warnings.check(out.decode(encoding="utf-8"))
except AttributeError as e:
warnings.check(out)
- print(out)
# Check stderr
if err:
try:
warnings.check(err.decode(encoding="utf-8"))
- print(err.decode(encoding="utf-8"), file=sys.stderr)
except AttributeError as e:
warnings.check(err)
- print(err, file=sys.stderr)
+ return proc.returncode
except OSError as e:
if e.errno == os.errno.ENOENT:
print("It seems like program " + str(cmd) + " is not installed.")
|
melexis/warnings-plugin
|
1a913d2e8b2d770ed4ec98eeb4238893d69ef66d
|
diff --git a/tests/test_integration.py b/tests/test_integration.py
index 4500dfb..9bc771a 100644
--- a/tests/test_integration.py
+++ b/tests/test_integration.py
@@ -50,6 +50,14 @@ class TestIntegration(TestCase):
with self.assertRaises(OSError):
warnings_wrapper(['--sphinx', '--command', 'blahahahaha', 'tests/sphinx_single_warning.txt'])
+ def test_command_revtal_err(self):
+ retval = warnings_wrapper(['--sphinx', '--command', 'false'])
+ self.assertEqual(1, retval)
+
+ def test_command_revtal_err_supress(self):
+ retval = warnings_wrapper(['--sphinx', '--ignore-retval', '--command', 'false'])
+ self.assertEqual(0, retval)
+
def test_wildcarded_arguments(self):
# note: no shell expansion simulation (e.g. as in windows)
retval = warnings_wrapper(['--junit', 'tests/junit*.xml'])
|
Report command return value by default
If command exists with non 0 value, we just check if there are no warnings and we might mark a field as passed. Confirm the return value of the command and use that as a default return value. Provide flag to disregard that.
|
0.0
|
1a913d2e8b2d770ed4ec98eeb4238893d69ef66d
|
[
"tests/test_integration.py::TestIntegration::test_command_revtal_err",
"tests/test_integration.py::TestIntegration::test_command_revtal_err_supress"
] |
[
"tests/test_integration.py::TestIntegration::test_command_to_stderr",
"tests/test_integration.py::TestIntegration::test_command_with_its_own_arguments",
"tests/test_integration.py::TestIntegration::test_help",
"tests/test_integration.py::TestIntegration::test_max",
"tests/test_integration.py::TestIntegration::test_max_but_still_ok",
"tests/test_integration.py::TestIntegration::test_min",
"tests/test_integration.py::TestIntegration::test_min_but_still_ok",
"tests/test_integration.py::TestIntegration::test_no_parser_selection",
"tests/test_integration.py::TestIntegration::test_single_argument",
"tests/test_integration.py::TestIntegration::test_single_command_argument",
"tests/test_integration.py::TestIntegration::test_two_arguments",
"tests/test_integration.py::TestIntegration::test_two_command_arguments",
"tests/test_integration.py::TestIntegration::test_version",
"tests/test_integration.py::TestIntegration::test_wildcarded_arguments"
] |
{
"failed_lite_validators": [
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-03-05 16:11:19+00:00
|
apache-2.0
| 3,861 |
|
melexis__warnings-plugin-57
|
diff --git a/src/mlx/warnings.py b/src/mlx/warnings.py
index 894f799..165c447 100644
--- a/src/mlx/warnings.py
+++ b/src/mlx/warnings.py
@@ -359,7 +359,9 @@ def warnings_wrapper(args):
if (not args.ignore) and (retval != 0):
return retval
else:
- warnings_logfile(warnings, args.logfile)
+ retval = warnings_logfile(warnings, args.logfile)
+ if retval != 0:
+ return retval
warnings.return_count()
return warnings.return_check_limits()
@@ -383,7 +385,6 @@ def warnings_command(warnings, cmd):
Raises:
OSError: When program is not installed.
'''
-
try:
print("Executing: ", end='')
print(cmd)
@@ -410,6 +411,16 @@ def warnings_command(warnings, cmd):
def warnings_logfile(warnings, log):
+ ''' Parse logfile for warnings
+
+ Args:
+ warnings (WarningsPlugin): Object for warnings where errors should be logged
+ log: Logfile for parsing
+
+ Return:
+ 0: Log files existed and are parsed successfully
+ 1: Log files did not exist
+ '''
# args.logfile doesn't necessarily contain wildcards, but just to be safe, we
# assume it does, and try to expand them.
# This mechanism is put in place to allow wildcards to be passed on even when
@@ -417,9 +428,15 @@ def warnings_logfile(warnings, log):
# so that the script can be used in the exact same way even when moving from one
# OS to another.
for file_wildcard in log:
- for logfile in glob.glob(file_wildcard):
- with open(logfile, 'r') as loghandle:
- warnings.check(loghandle.read())
+ if glob.glob(file_wildcard):
+ for logfile in glob.glob(file_wildcard):
+ with open(logfile, 'r') as loghandle:
+ warnings.check(loghandle.read())
+ else:
+ print("FILE: %s does not exist" % file_wildcard)
+ return 1
+
+ return 0
def main():
|
melexis/warnings-plugin
|
f0767be9f616ff41982ee3858ffe99554882cee7
|
diff --git a/tests/test_integration.py b/tests/test_integration.py
index 9bc771a..e9556a3 100644
--- a/tests/test_integration.py
+++ b/tests/test_integration.py
@@ -30,6 +30,10 @@ class TestIntegration(TestCase):
retval = warnings_wrapper(['--junit', 'tests/junit_single_fail.xml', 'tests/junit_double_fail.xml'])
self.assertEqual(1 + 2, retval)
+ def test_non_existing_logfile(self):
+ retval = warnings_wrapper(['--sphinx', 'not-exist.log'])
+ self.assertEqual(1, retval)
+
def test_single_command_argument(self):
retval = warnings_wrapper(['--junit', '--command', 'cat', 'tests/junit_single_fail.xml'])
self.assertEqual(1, retval)
|
Check if we could open a file
Now that we have a command option there is possibility that we missed the `--command` flag. In that moment plugin should tell us that files X, X, X do not exist and return non-0 value.
Example that passes as it would be all ok (I assume also simpler stuff with empty and non existent log file passes as well):
```
mlx-warnings --sphinx --maxwarnings 0 --minwarnings 0 make -C example html
```
FYI: @SteinHeselmans @bavovanachte
|
0.0
|
f0767be9f616ff41982ee3858ffe99554882cee7
|
[
"tests/test_integration.py::TestIntegration::test_non_existing_logfile"
] |
[
"tests/test_integration.py::TestIntegration::test_command_revtal_err",
"tests/test_integration.py::TestIntegration::test_command_revtal_err_supress",
"tests/test_integration.py::TestIntegration::test_command_to_stderr",
"tests/test_integration.py::TestIntegration::test_command_with_its_own_arguments",
"tests/test_integration.py::TestIntegration::test_help",
"tests/test_integration.py::TestIntegration::test_max",
"tests/test_integration.py::TestIntegration::test_max_but_still_ok",
"tests/test_integration.py::TestIntegration::test_min",
"tests/test_integration.py::TestIntegration::test_min_but_still_ok",
"tests/test_integration.py::TestIntegration::test_no_parser_selection",
"tests/test_integration.py::TestIntegration::test_single_argument",
"tests/test_integration.py::TestIntegration::test_single_command_argument",
"tests/test_integration.py::TestIntegration::test_two_arguments",
"tests/test_integration.py::TestIntegration::test_two_command_arguments",
"tests/test_integration.py::TestIntegration::test_version",
"tests/test_integration.py::TestIntegration::test_wildcarded_arguments"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-03-06 09:23:42+00:00
|
apache-2.0
| 3,862 |
|
melexis__warnings-plugin-77
|
diff --git a/.travis.yml b/.travis.yml
index 6b6f935..98f4853 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -96,6 +96,8 @@ install:
- easy_install --version
- pip --version
- tox --version
+ - sudo apt-get install graphviz
+ - sudo apt-get install plantuml
script:
- tox -v
diff --git a/docs/conf.py b/docs/conf.py
index 9b44879..256847d 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -1,7 +1,10 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
-import sys, os, subprocess
+import errno
+import os
+import subprocess
+import sys
# Append src directory to path so that autodoc can find the python module
sys.path.append("src")
@@ -20,14 +23,14 @@ extensions = [
'sphinxcontrib.plantuml',
]
if os.getenv('SPELLCHECK'):
- extensions += 'sphinxcontrib.spelling',
+ extensions.append('sphinxcontrib.spelling')
spelling_show_suggestions = True
spelling_lang = 'en_US'
source_suffix = '.rst'
master_doc = 'index'
project = 'warning-plugin'
-year = '2017-2018'
+year = '2017-2019'
author = 'Bavo Van Achte'
copyright = '{0}, {1}'.format(year, author)
@@ -54,7 +57,7 @@ html_use_smartypants = True
html_last_updated_fmt = '%b %d, %Y'
html_split_index = False
html_sidebars = {
- '**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'],
+ '**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'],
}
html_short_title = '%s-%s' % (project, version)
@@ -66,7 +69,7 @@ napoleon_use_param = False
# confirm we have plantuml in the path
if 'nt' in os.name:
plantuml_path = subprocess.check_output(["where", "/F", "plantuml.jar"])
- if not plantuml_path :
+ if not plantuml_path:
print("Can't find 'plantuml.jar' file.")
print("You need to add path to 'plantuml.jar' file to your PATH variable.")
sys.exit(os.strerror(errno.EPERM))
@@ -77,9 +80,7 @@ if 'nt' in os.name:
plantuml = 'java -jar' + ' ' + plantuml
else:
plantuml_path = subprocess.check_output(["whereis", "-u", "plantuml"])
- if not plantuml_path :
+ if not plantuml_path:
print("Can't find 'plantuml.jar' file.")
print("You need to add path to 'plantuml.jar' file to your PATH variable.")
sys.exit(os.strerror(errno.EPERM))
-
-
diff --git a/src/mlx/warnings.py b/src/mlx/warnings.py
index 0455636..7cf710a 100644
--- a/src/mlx/warnings.py
+++ b/src/mlx/warnings.py
@@ -4,12 +4,14 @@
from __future__ import print_function
import argparse
+import errno
+import glob
import json
-import os
import subprocess
import sys
-import glob
-from mlx.warnings_checker import SphinxChecker, DoxyChecker, JUnitChecker, XMLRunnerChecker, CoverityChecker
+
+from mlx.warnings_checker import CoverityChecker, DoxyChecker, JUnitChecker, SphinxChecker, XMLRunnerChecker
+
from .__warnings_version__ import version as warnings_version
__version__ = warnings_version
@@ -17,22 +19,22 @@ __version__ = warnings_version
class WarningsPlugin:
- def __init__(self, verbose = False, configfile= None):
+ def __init__(self, verbose=False, config_file=None):
'''
Function for initializing the parsers
Args:
- verbose (bool, optional): enable verbose logging
- configfile (filename, optional): configuration file with setup
+ verbose (bool): optional - enable verbose logging
+ config_file (str): optional - configuration file with setup
'''
- self.checkerList = {}
+ self.checker_list = {}
self.verbose = verbose
- self.publicCheckers = [SphinxChecker(self.verbose), DoxyChecker(self.verbose), JUnitChecker(self.verbose),
- XMLRunnerChecker(self.verbose), CoverityChecker(self.verbose)]
+ self.public_checkers = [SphinxChecker(self.verbose), DoxyChecker(self.verbose), JUnitChecker(self.verbose),
+ XMLRunnerChecker(self.verbose), CoverityChecker(self.verbose)]
- if configfile is not None:
- with open(configfile, 'r') as f:
- config = json.load(f)
+ if config_file:
+ with open(config_file, 'r') as open_file:
+ config = json.load(open_file)
self.config_parser_json(config)
self.warn_min = 0
@@ -45,10 +47,10 @@ class WarningsPlugin:
Activate additional checkers after initialization
Args:
- checker (WarningsChecker): checker object
+ checker (WarningsChecker): checker object
'''
checker.reset()
- self.checkerList[checker.name] = checker
+ self.checker_list[checker.name] = checker
def activate_checker_name(self, name):
'''
@@ -57,7 +59,7 @@ class WarningsPlugin:
Args:
name (str): checker name
'''
- for checker in self.publicCheckers:
+ for checker in self.public_checkers:
if checker.name == name:
self.activate_checker(checker)
break
@@ -72,7 +74,7 @@ class WarningsPlugin:
Return:
checker object (WarningsChecker)
'''
- return self.checkerList[name]
+ return self.checker_list[name]
def check(self, content):
'''
@@ -84,10 +86,10 @@ class WarningsPlugin:
if self.printout:
print(content)
- if len(self.checkerList) == 0:
+ if not self.checker_list:
print("No checkers activated. Please use activate_checker function")
else:
- for name, checker in self.checkerList.items():
+ for checker in self.checker_list.values():
checker.check(content)
def set_maximum(self, maximum):
@@ -96,7 +98,7 @@ class WarningsPlugin:
Args:
maximum (int): maximum amount of warnings allowed
'''
- for name, checker in self.checkerList.items():
+ for checker in self.checker_list.values():
checker.set_maximum(maximum)
def set_minimum(self, minimum):
@@ -105,10 +107,10 @@ class WarningsPlugin:
Args:
minimum (int): minimum amount of warnings allowed
'''
- for name, checker in self.checkerList.items():
+ for checker in self.checker_list.values():
checker.set_minimum(minimum)
- def return_count(self, name = None):
+ def return_count(self, name=None):
''' Getter function for the amount of found warnings
If the name parameter is set, this function will return the amount of
@@ -123,32 +125,32 @@ class WarningsPlugin:
'''
self.count = 0
if name is None:
- for name, checker in self.checkerList.items():
+ for checker in self.checker_list.values():
self.count += checker.return_count()
else:
- self.count = self.checkerList[name].return_count()
+ self.count = self.checker_list[name].return_count()
return self.count
- def return_check_limits(self, name = None):
+ def return_check_limits(self, name=None):
''' Function for determining the return value of the script
If the name parameter is set, this function will check (and return) the
return value of that checker. If not, this function checks whether the
- warnings for each registred checker are within the configured limits.
+ warnings for each registered checker are within the configured limits.
Args:
name (WarningsChecker): The checker for which to check the return value
Return:
- int: 0 if the amount warnings are within limits otherwise 1
+ int: 0 if the amount of warnings is within limits, 1 otherwise
'''
if name is None:
- for name, checker in self.checkerList.items():
+ for checker in self.checker_list.values():
retval = checker.return_check_limits()
- if retval != 0:
+ if retval:
return retval
else:
- return self.checkerList[name].return_check_limits()
+ return self.checker_list[name].return_check_limits()
return 0
@@ -158,26 +160,26 @@ class WarningsPlugin:
Useful for command input where we want to print content as well
Args:
- printout: True enables the printout, False provides more silent mode
+ printout (bool): True enables the printout, False provides more silent mode
'''
self.printout = printout
def config_parser_json(self, config):
- ''' Parsing configuration dict extracted by previously opened json file
+ ''' Parsing configuration dict extracted by previously opened JSON file
Args:
- config (dict): json dump of the configuration
+ config (dict): JSON dump of the configuration
'''
# activate checker
- for checker in self.publicCheckers:
+ for checker in self.public_checkers:
try:
if bool(config[checker.name]['enabled']):
self.activate_checker(checker)
self.get_checker(checker.name).set_maximum(int(config[checker.name]['max']))
self.get_checker(checker.name).set_minimum(int(config[checker.name]['min']))
print("Config parsing for {name} completed".format(name=checker.name))
- except KeyError as e:
- print("Incomplete config. Missing: {key}".format(key=e))
+ except KeyError as err:
+ print("Incomplete config. Missing: {key}".format(key=err))
def warnings_wrapper(args):
@@ -193,7 +195,8 @@ def warnings_wrapper(args):
group1.add_argument('--minwarnings', type=int, required=False, default=0,
help='Minimum amount of warnings accepted')
group2 = parser.add_argument_group('Configuration file with options')
- group2.add_argument('--config', dest='configfile', action='store', required=False, help='Config file in JSON format provides toggle of checkers and their limits')
+ group2.add_argument('--config', dest='configfile', action='store', required=False,
+ help='Config file in JSON format provides toggle of checkers and their limits')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true')
parser.add_argument('--command', dest='command', action='store_true',
help='Treat program arguments as command to execute to obtain data')
@@ -201,7 +204,8 @@ def warnings_wrapper(args):
help='Ignore return value of the executed command')
parser.add_argument('--version', action='version', version='%(prog)s {version}'.format(version=__version__))
parser.add_argument('logfile', nargs='+', help='Logfile (or command) that might contain warnings')
- parser.add_argument('flags', nargs=argparse.REMAINDER, help='Possible not-used flags from above are considered as command flags')
+ parser.add_argument('flags', nargs=argparse.REMAINDER,
+ help='Possible not-used flags from above are considered as command flags')
args = parser.parse_args(args)
@@ -211,7 +215,7 @@ def warnings_wrapper(args):
if checkersflag or (args.maxwarnings != 0) or (args.minwarnings != 0):
print("Configfile cannot be provided with other arguments")
sys.exit(2)
- warnings = WarningsPlugin(verbose=args.verbose, configfile=args.configfile)
+ warnings = WarningsPlugin(verbose=args.verbose, config_file=args.configfile)
else:
warnings = WarningsPlugin(verbose=args.verbose)
if args.sphinx:
@@ -254,11 +258,10 @@ def warnings_command(warnings, cmd):
Args:
warnings (WarningsPlugin): Object for warnings where errors should be logged
- cmd: Command list, which should be executed to obtain input for parsing
- ignore: Flag to ignore return value of the command
+ cmd (list): List of commands (str), which should be executed to obtain input for parsing
Return:
- retval: Return value of executed command
+ int: Return value of executed command(s)
Raises:
OSError: When program is not installed.
@@ -282,8 +285,8 @@ def warnings_command(warnings, cmd):
except AttributeError:
warnings.check(err)
return proc.returncode
- except OSError as e:
- if e.errno == os.errno.ENOENT:
+ except OSError as err:
+ if err.errno == errno.ENOENT:
print("It seems like program " + str(cmd) + " is not installed.")
raise
@@ -323,4 +326,3 @@ def main():
if __name__ == '__main__':
main()
-
diff --git a/src/mlx/warnings_checker.py b/src/mlx/warnings_checker.py
index 2728533..e7af4ab 100644
--- a/src/mlx/warnings_checker.py
+++ b/src/mlx/warnings_checker.py
@@ -3,9 +3,9 @@
import abc
import re
-from junitparser import JUnitXml, Failure, Error
from xml.etree.ElementTree import ParseError
+from junitparser import Error, Failure, JUnitXml
DOXYGEN_WARNING_REGEX = r"(?:((?:[/.]|[A-Za-z]).+?):(-?\d+):\s*([Ww]arning|[Ee]rror)|<.+>:-?\d+(?::\s*([Ww]arning|[Ee]rror))?): (.+(?:(?!\s*(?:[Nn]otice|[Ww]arning|[Ee]rror): )[^/<\n][^:\n][^/\n].+)*)|\s*([Nn]otice|[Ww]arning|[Ee]rror): (.+)\n?"
doxy_pattern = re.compile(DOXYGEN_WARNING_REGEX)
@@ -59,7 +59,8 @@ class WarningsChecker(object):
ValueError: Invalid argument (min limit higher than max limit)
'''
if self.warn_min > maximum:
- raise ValueError("Invalid argument: mininum limit ({min}) is higher than maximum limit ({max}). Cannot enter {value}". format(min=self.warn_min, max=self.warn_max, value=maximum))
+ raise ValueError("Invalid argument: mininum limit ({0.warn_min}) is higher than maximum limit "
+ "({0.warn_max}). Cannot enter {value}". format(self, value=maximum))
else:
self.warn_max = maximum
@@ -81,7 +82,8 @@ class WarningsChecker(object):
ValueError: Invalid argument (min limit higher than max limit)
'''
if minimum > self.warn_max:
- raise ValueError("Invalid argument: mininum limit ({min}) is higher than maximum limit ({max}). Cannot enter {value}". format(min=self.warn_min, max=self.warn_max, value=minimum))
+ raise ValueError("Invalid argument: mininum limit ({0.warn_min}) is higher than maximum limit "
+ "({0.warn_max}). Cannot enter {value}".format(self, value=minimum))
else:
self.warn_min = minimum
@@ -99,7 +101,7 @@ class WarningsChecker(object):
Returns:
int: Number of warnings found
'''
- print("{count} {name} warnings found".format(count=self.count, name=self.name))
+ print("{0.count} {0.name} warnings found".format(self))
return self.count
def return_check_limits(self):
@@ -109,14 +111,15 @@ class WarningsChecker(object):
int: 0 if the amount of warnings is within limits. the count of warnings otherwise
'''
if self.count > self.warn_max:
- print("Number of warnings ({count}) is higher than the maximum limit ({max}). Returning error code 1.".format(count=self.count, max=self.warn_max))
+ print("Number of warnings ({0.count}) is higher than the maximum limit ({0.warn_max}). "
+ "Returning error code 1.".format(self))
return self.count
- elif self.count < self.warn_min:
- print("Number of warnings ({count}) is lower than the minimum limit ({min}). Returning error code 1.".format(count=self.count, min=self.warn_min))
+ if self.count < self.warn_min:
+ print("Number of warnings ({0.count}) is lower than the minimum limit ({0.warn_min}). "
+ "Returning error code 1.".format(self))
return self.count
- else:
- print("Number of warnings ({count}) is between limits {min} and {max}. Well done.".format(count=self.count, min=self.warn_min, max=self.warn_max))
- return 0
+ print("Number of warnings ({0.count}) is between limits {0.warn_min} and {0.warn_max}. Well done.".format(self))
+ return 0
class RegexChecker(WarningsChecker):
@@ -212,4 +215,3 @@ class CoverityChecker(RegexChecker):
self.count += 1
if self.verbose:
print(match.group(0).strip())
-
|
melexis/warnings-plugin
|
68ada70dceadf9ac48c8b108ad0998f243d74f27
|
diff --git a/tests/test_config.py b/tests/test_config.py
index 1091f48..c441659 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -5,7 +5,7 @@ from mlx.warnings import WarningsPlugin, SphinxChecker, DoxyChecker, JUnitChecke
class TestConfig(TestCase):
def test_configfile_parsing(self):
- warnings = WarningsPlugin(configfile="tests/config_example.json")
+ warnings = WarningsPlugin(config_file="tests/config_example.json")
warnings.check('testfile.c:6: warning: group test: ignoring title "Some test functions" that does not match old title "Some freaky test functions"')
self.assertEqual(warnings.return_count(), 0)
warnings.check('<testcase classname="dummy_class" name="dummy_name"><failure message="some random message from test case" /></testcase>')
|
Plantuml not found
I guess we missed the warning inside the CI (there we go for not dogfooding) for Sphinx
```
WARNING: plantuml command 'plantuml' cannot be run
```
We install sphinxcontrib-plantuml, but dont install plantuml itself I would assume. Needs investigation.
|
0.0
|
68ada70dceadf9ac48c8b108ad0998f243d74f27
|
[
"tests/test_config.py::TestConfig::test_configfile_parsing"
] |
[
"tests/test_config.py::TestConfig::test_all_config_max",
"tests/test_config.py::TestConfig::test_all_config_min",
"tests/test_config.py::TestConfig::test_doxy_junit_options_config_parsing",
"tests/test_config.py::TestConfig::test_doxygen_config_max",
"tests/test_config.py::TestConfig::test_doxygen_config_min",
"tests/test_config.py::TestConfig::test_junit_config_max",
"tests/test_config.py::TestConfig::test_junit_config_min",
"tests/test_config.py::TestConfig::test_partial_doxygen_config_parsing",
"tests/test_config.py::TestConfig::test_partial_junit_config_parsing",
"tests/test_config.py::TestConfig::test_partial_sphinx_config_parsing",
"tests/test_config.py::TestConfig::test_partial_xmlrunner_config_parsing",
"tests/test_config.py::TestConfig::test_sphinx_config_max",
"tests/test_config.py::TestConfig::test_sphinx_config_min",
"tests/test_config.py::TestConfig::test_sphinx_doxy_config_parsing",
"tests/test_config.py::TestConfig::test_xmlrunner_config_max",
"tests/test_config.py::TestConfig::test_xmlrunner_config_min"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-08-30 13:51:40+00:00
|
apache-2.0
| 3,863 |
|
melexis__warnings-plugin-88
|
diff --git a/README.rst b/README.rst
index 00a7092..fa9f802 100644
--- a/README.rst
+++ b/README.rst
@@ -6,11 +6,11 @@
:target: https://travis-ci.org/melexis/warnings-plugin
:alt: Build status
-.. image:: https://badge.fury.io/py/mlx.warnings.png
+.. image:: https://badge.fury.io/py/mlx.warnings.svg
:target: https://badge.fury.io/py/mlx.warnings
:alt: Pypi packaged release
-.. image:: https://img.shields.io/badge/Documentation-published-brightgreen.png
+.. image:: https://img.shields.io/badge/Documentation-published-brightgreen.svg
:target: https://melexis.github.io/warnings-plugin/
:alt: Documentation
@@ -307,9 +307,9 @@ Other options
-------------
Since the plugin is under active development there are new Features added fast.
-Important options currently include setting maximum number of warnings or
-minimum number of warnings, that are still acceptable to return 0 (success)
-return code. Look at scripts help, for more details about the options.
+Important options currently include setting a minimum and a maximum number of warnings
+that are still acceptable to return 0 (success). Requiring an exact amount of warnings
+using a single option is also possible. Look at scripts help for more details about the options.
Exclude matches with regexes
----------------------------
diff --git a/src/mlx/warnings.py b/src/mlx/warnings.py
index ef8c741..cbe08d0 100644
--- a/src/mlx/warnings.py
+++ b/src/mlx/warnings.py
@@ -190,10 +190,12 @@ def warnings_wrapper(args):
group1.add_argument('-s', '--sphinx', dest='sphinx', action='store_true')
group1.add_argument('-j', '--junit', dest='junit', action='store_true')
group1.add_argument('-x', '--xmlrunner', dest='xmlrunner', action='store_true')
- group1.add_argument('-m', '--maxwarnings', type=int, required=False, default=0,
+ group1.add_argument('-m', '--maxwarnings', '--max-warnings', type=int, default=0,
help='Maximum amount of warnings accepted')
- group1.add_argument('--minwarnings', type=int, required=False, default=0,
+ group1.add_argument('--minwarnings', '--min-warnings', type=int, default=0,
help='Minimum amount of warnings accepted')
+ group1.add_argument('--exact-warnings', type=int, default=0,
+ help='Exact amount of warnings expected')
group2 = parser.add_argument_group('Configuration file with options')
group2.add_argument('--config', dest='configfile', action='store', required=False,
help='Config file in JSON format provides toggle of checkers and their limits')
@@ -213,8 +215,9 @@ def warnings_wrapper(args):
# Read config file
if args.configfile is not None:
- checkersflag = args.sphinx or args.doxygen or args.junit or args.coverity or args.xmlrunner
- if checkersflag or (args.maxwarnings != 0) or (args.minwarnings != 0):
+ checker_flags = args.sphinx or args.doxygen or args.junit or args.coverity or args.xmlrunner
+ warning_args = (args.maxwarnings != 0) or (args.minwarnings != 0) or (args.exact_warnings != 0)
+ if checker_flags or warning_args:
print("Configfile cannot be provided with other arguments")
sys.exit(2)
warnings = WarningsPlugin(verbose=args.verbose, config_file=args.configfile)
@@ -230,8 +233,15 @@ def warnings_wrapper(args):
warnings.activate_checker_name('xmlrunner')
if args.coverity:
warnings.activate_checker_name('coverity')
- warnings.set_maximum(args.maxwarnings)
- warnings.set_minimum(args.minwarnings)
+ if args.exact_warnings:
+ if args.maxwarnings | args.minwarnings:
+ print("expected-warnings cannot be provided with maxwarnings or minwarnings")
+ sys.exit(2)
+ warnings.set_maximum(args.exact_warnings)
+ warnings.set_minimum(args.exact_warnings)
+ else:
+ warnings.set_maximum(args.maxwarnings)
+ warnings.set_minimum(args.minwarnings)
if args.include_sphinx_deprecation and 'sphinx' in warnings.activated_checkers.keys():
warnings.get_checker('sphinx').include_sphinx_deprecation()
diff --git a/src/mlx/warnings_checker.py b/src/mlx/warnings_checker.py
index d4a76c7..f044e3b 100644
--- a/src/mlx/warnings_checker.py
+++ b/src/mlx/warnings_checker.py
@@ -123,17 +123,22 @@ class WarningsChecker:
''' Function for checking whether the warning count is within the configured limits
Returns:
- int: 0 if the amount of warnings is within limits. the count of warnings otherwise
+ int: 0 if the amount of warnings is within limits, the count of warnings otherwise
'''
- if self.count > self.warn_max:
+ if self.warn_min == self.warn_max and self.count == self.warn_max:
+ print("Number of warnings ({0.count}) is exactly as expected. Well done."
+ .format(self))
+ elif self.count > self.warn_max:
print("Number of warnings ({0.count}) is higher than the maximum limit ({0.warn_max}). "
"Returning error code 1.".format(self))
return self.count
- if self.count < self.warn_min:
+ elif self.count < self.warn_min:
print("Number of warnings ({0.count}) is lower than the minimum limit ({0.warn_min}). "
"Returning error code 1.".format(self))
return self.count
- print("Number of warnings ({0.count}) is between limits {0.warn_min} and {0.warn_max}. Well done.".format(self))
+ else:
+ print("Number of warnings ({0.count}) is between limits {0.warn_min} and {0.warn_max}. Well done."
+ .format(self))
return 0
def print_when_verbose(self, message):
|
melexis/warnings-plugin
|
c2b1b00a48c3ca4a74238fea4c4a3e4cbf0f5f5e
|
diff --git a/tests/test_integration.py b/tests/test_integration.py
index 243acfe..f975e68 100644
--- a/tests/test_integration.py
+++ b/tests/test_integration.py
@@ -86,9 +86,33 @@ class TestIntegration(TestCase):
self.assertEqual(self.junit_warning_cnt, retval)
def test_min_but_still_ok(self):
- retval = warnings_wrapper(['--junit', '--maxwarnings', '100', '--minwarnings', '2', 'tests/junit*.xml'])
+ retval = warnings_wrapper(['--junit', '--max-warnings', '100', '--min-warnings', '2', 'tests/junit*.xml'])
self.assertEqual(0, retval)
+ def test_exact_sphinx(self):
+ retval = warnings_wrapper(['--sphinx', '--exact-warnings', '2', 'tests/sphinx_double_warning.txt'])
+ self.assertEqual(0, retval)
+
+ def test_exact_too_few(self):
+ retval = warnings_wrapper(['--sphinx', '--exact-warnings', '3', 'tests/sphinx_double_warning.txt'])
+ self.assertEqual(2, retval)
+
+ def test_exact_too_many(self):
+ retval = warnings_wrapper(['--sphinx', '--exact-warnings', '1', 'tests/sphinx_double_warning.txt'])
+ self.assertEqual(2, retval)
+
+ def test_exact_junit(self):
+ retval = warnings_wrapper(['--junit', '--exact-warnings', '3', 'tests/junit*.xml'])
+ self.assertEqual(0, retval)
+
+ def test_exact_with_min(self):
+ with self.assertRaises(SystemExit):
+ warnings_wrapper(['--junit', '--exact-warnings', '3', '--min-warnings', '3', 'tests/junit*.xml'])
+
+ def test_exact_with_max(self):
+ with self.assertRaises(SystemExit):
+ warnings_wrapper(['--junit', '--exact-warnings', '3', '--max-warnings', '3', 'tests/junit*.xml'])
+
def test_configfile_ok(self):
retval = warnings_wrapper(['--config', 'tests/config_example.json', 'tests/junit_single_fail.xml'])
self.assertEqual(0, retval)
|
Add exact number of warnings parameter
Now option is to have minimum and maximum number of warnings. If you use both parameters you can make the range, so that you get failure if number of warnings is different from what is expected. We could group this and use a `--expectedwarnings value` argument which would do this internally for us.
|
0.0
|
c2b1b00a48c3ca4a74238fea4c4a3e4cbf0f5f5e
|
[
"tests/test_integration.py::TestIntegration::test_exact_junit",
"tests/test_integration.py::TestIntegration::test_exact_sphinx",
"tests/test_integration.py::TestIntegration::test_exact_too_few",
"tests/test_integration.py::TestIntegration::test_exact_too_many",
"tests/test_integration.py::TestIntegration::test_min_but_still_ok"
] |
[
"tests/test_integration.py::TestIntegration::test_command_revtal_err",
"tests/test_integration.py::TestIntegration::test_command_revtal_err_supress",
"tests/test_integration.py::TestIntegration::test_command_to_stderr",
"tests/test_integration.py::TestIntegration::test_command_with_its_own_arguments",
"tests/test_integration.py::TestIntegration::test_configfile_exclude_commandline",
"tests/test_integration.py::TestIntegration::test_configfile_ok",
"tests/test_integration.py::TestIntegration::test_exact_with_max",
"tests/test_integration.py::TestIntegration::test_exact_with_min",
"tests/test_integration.py::TestIntegration::test_exclude_sphinx_deprecation",
"tests/test_integration.py::TestIntegration::test_faulty_command",
"tests/test_integration.py::TestIntegration::test_help",
"tests/test_integration.py::TestIntegration::test_ignore_sphinx_deprecation_flag",
"tests/test_integration.py::TestIntegration::test_max",
"tests/test_integration.py::TestIntegration::test_max_but_still_ok",
"tests/test_integration.py::TestIntegration::test_min",
"tests/test_integration.py::TestIntegration::test_no_parser_selection",
"tests/test_integration.py::TestIntegration::test_non_existing_logfile",
"tests/test_integration.py::TestIntegration::test_single_argument",
"tests/test_integration.py::TestIntegration::test_single_command_argument",
"tests/test_integration.py::TestIntegration::test_single_defect_coverity",
"tests/test_integration.py::TestIntegration::test_sphinx_deprecation",
"tests/test_integration.py::TestIntegration::test_two_arguments",
"tests/test_integration.py::TestIntegration::test_two_command_arguments",
"tests/test_integration.py::TestIntegration::test_version",
"tests/test_integration.py::TestIntegration::test_wildcarded_arguments"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-12-20 14:27:05+00:00
|
apache-2.0
| 3,864 |
|
melexis__warnings-plugin-9
|
diff --git a/setup.py b/setup.py
index 5672ae8..8f049c5 100644
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,7 @@ from os.path import basename, dirname, join, splitext
from setuptools import find_packages, setup
PROJECT_URL = 'https://github.com/melexis/warnings-plugin'
-VERSION = '0.0.4'
+VERSION = '0.0.5'
def read(*names, **kwargs):
diff --git a/src/mlx/warnings.py b/src/mlx/warnings.py
index 4c45974..3112a5f 100644
--- a/src/mlx/warnings.py
+++ b/src/mlx/warnings.py
@@ -6,7 +6,7 @@ import math
DOXYGEN_WARNING_REGEX = r"(?:(?:((?:[/.]|[A-Za-z]:).+?):(-?\d+):\s*([Ww]arning|[Ee]rror)|<.+>:-?\d+(?::\s*([Ww]arning|[Ee]rror))?): (.+(?:\n(?!\s*(?:[Nn]otice|[Ww]arning|[Ee]rror): )[^/<\n][^:\n][^/\n].+)*)|\s*([Nn]otice|[Ww]arning|[Ee]rror): (.+))$"
doxy_pattern = re.compile(DOXYGEN_WARNING_REGEX)
-SPHINX_WARNING_REGEX = r"^(.+?:\d+): (DEBUG|INFO|WARNING|ERROR|SEVERE): (.+)\n?$"
+SPHINX_WARNING_REGEX = r"^(.+?:(?:\d+|None)): (DEBUG|INFO|WARNING|ERROR|SEVERE): (.+)\n?$"
sphinx_pattern = re.compile(SPHINX_WARNING_REGEX)
|
melexis/warnings-plugin
|
63ce3d59b74e0cbd3d4b6795d57d0ce22d8ae112
|
diff --git a/tests/test_sphinx.py b/tests/test_sphinx.py
index a9977fe..354a255 100644
--- a/tests/test_sphinx.py
+++ b/tests/test_sphinx.py
@@ -15,6 +15,11 @@ class TestSphinxWarnings(TestCase):
self.warnings.check_sphinx_warnings("/home/bljah/test/index.rst:5: WARNING: toctree contains reference to nonexisting document u'installation'")
self.assertEqual(self.warnings.return_sphinx_warnings(), 1)
+ def test_single_warning_no_line_number(self):
+ self.warnings.check_sphinx_warnings("/home/bljah/test/index.rst:5: WARNING: toctree contains reference to nonexisting document u'installation'")
+ self.warnings.check_sphinx_warnings("/home/bljah/test/index.rst:None: WARNING: toctree contains reference to nonexisting document u'installation'")
+ self.assertEqual(self.warnings.return_sphinx_warnings(), 2)
+
def test_single_warning_mixed(self):
self.warnings.check_sphinx_warnings('This1 should not be treated as warning')
self.warnings.check_sphinx_warnings("/home/bljah/test/index.rst:5: WARNING: toctree contains reference to nonexisting document u'installation'")
|
Some RST warnings are not recognized
Example of warning not recognized:
```
/path/to/file.rst:None: WARNING: Traceability: cannot link to XXX, item is not defined
```
Rumours say it is because of a missing line number, and more rumours say that this regex would work
```
^(.+?:None): (DEBUG|INFO|WARNING|ERROR|SEVERE): (.+)\n?$
```
Warning is from mlx.traceability plugin, which could print a line number iso the None (see issue https://github.com/melexis/sphinx-traceability-extension/issues/2).
|
0.0
|
63ce3d59b74e0cbd3d4b6795d57d0ce22d8ae112
|
[
"tests/test_sphinx.py::TestSphinxWarnings::test_single_warning_no_line_number"
] |
[
"tests/test_sphinx.py::TestSphinxWarnings::test_no_warning",
"tests/test_sphinx.py::TestSphinxWarnings::test_single_warning",
"tests/test_sphinx.py::TestSphinxWarnings::test_single_warning_mixed"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-06-09 08:01:32+00:00
|
apache-2.0
| 3,865 |
|
mercadona__rele-142
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a99b266..1c24dc9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,10 @@
Changelog
=========
+`0.9.1` (2020-1-2)
+* Ack messages when data not json serializable (#141)
+* Use ThreadScheduler instead of ThreadPoolExecutor (#145)
+
`0.9.0` (2019-12-20)
* Flask support via middleware (#127)
* Add message attributes to metrics log (#128)
diff --git a/docs/api/settings.rst b/docs/api/settings.rst
index 31c8d75..73b8f64 100644
--- a/docs/api/settings.rst
+++ b/docs/api/settings.rst
@@ -123,6 +123,8 @@ Ack deadline for all subscribers in seconds.
passes, the message is no longer considered outstanding, and Cloud Pub/Sub will attempt
to redeliver the message.*
+.. _settings_publisher_timeout:
+
``PUBLISHER_TIMEOUT``
---------------------
@@ -136,7 +138,7 @@ Timeout that the publishing result will wait on the future to publish successful
<https://googleapis.dev/python/pubsub/1.1.0/publisher/api/futures.html?highlight=result#google.cloud.pubsub_v1.publisher.futures.Future.result>`_
``THREADS_PER_SUBSCRIPTION``
-------------------
+----------------------------
**Optional**
diff --git a/rele/__init__.py b/rele/__init__.py
index 9f2820e..71852af 100644
--- a/rele/__init__.py
+++ b/rele/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "0.9.0"
+__version__ = "0.9.1"
default_app_config = "rele.apps.ReleConfig"
from .client import Publisher, Subscriber # noqa
diff --git a/rele/client.py b/rele/client.py
index 53a02d1..062a82e 100644
--- a/rele/client.py
+++ b/rele/client.py
@@ -67,11 +67,14 @@ class Subscriber:
topic_path = self._client.topic_path(self._gc_project_id, topic)
with suppress(exceptions.AlreadyExists):
- self._client.create_subscription(
- name=subscription_path,
- topic=topic_path,
- ack_deadline_seconds=self._ack_deadline,
- )
+ try:
+ self._client.create_subscription(
+ name=subscription_path,
+ topic=topic_path,
+ ack_deadline_seconds=self._ack_deadline,
+ )
+ except exceptions.NotFound:
+ logger.error("Cannot subscribe to a topic that does not exist.")
def consume(self, subscription_name, callback, scheduler):
"""Begin listening to topic from the SubscriberClient.
@@ -104,7 +107,7 @@ class Publisher:
:param gc_project_id: string Google Cloud Project ID.
:param credentials: string Google Cloud Credentials.
:param encoder: A valid `json.encoder.JSONEncoder subclass <https://docs.python.org/3/library/json.html#json.JSONEncoder>`_ # noqa
- :param timeout: float
+ :param timeout: float, default :ref:`settings_publisher_timeout`
"""
def __init__(self, gc_project_id, credentials, encoder, timeout):
@@ -116,7 +119,7 @@ class Publisher:
else:
self._client = pubsub_v1.PublisherClient(credentials=credentials)
- def publish(self, topic, data, blocking=False, **attrs):
+ def publish(self, topic, data, blocking=False, timeout=None, **attrs):
"""Publishes message to Google PubSub topic.
Usage::
@@ -145,6 +148,7 @@ class Publisher:
:param topic: string topic to publish the data.
:param data: dict with the content of the message.
:param blocking: boolean
+ :param timeout: float, default None fallsback to :ref:`settings_publisher_timeout`
:param attrs: Extra parameters to be published.
:return: `Future <https://googleapis.github.io/google-cloud-python/latest/pubsub/subscriber/api/futures.html>`_ # noqa
"""
@@ -157,6 +161,6 @@ class Publisher:
if not blocking:
return future
- future.result(timeout=self._timeout)
+ future.result(timeout=timeout or self._timeout)
run_middleware_hook("post_publish", topic)
return future
|
mercadona/rele
|
81d45ad5c7964274c10483056fe489d00f563624
|
diff --git a/tests/test_client.py b/tests/test_client.py
index 954ff06..2e39828 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -3,6 +3,7 @@ import concurrent
from unittest.mock import ANY, patch
import pytest
+from google.api_core import exceptions
from google.cloud.pubsub_v1 import SubscriberClient
@@ -63,6 +64,21 @@ class TestPublisher:
)
mock_future.assert_called_once_with(timeout=100)
+ @patch.object(concurrent.futures.Future, "result")
+ def test_publishes_data_with_client_timeout_when_blocking_and_timeout_specified(
+ self, mock_future, publisher
+ ):
+ publisher._timeout = 100.0
+ publisher.publish(
+ topic="order-cancelled", data={"foo": "bar"}, blocking=True, timeout=50
+ )
+
+ publisher._client.publish.return_value = mock_future
+ publisher._client.publish.assert_called_with(
+ ANY, b'{"foo": "bar"}', published_at=ANY
+ )
+ mock_future.assert_called_once_with(timeout=50)
+
class TestSubscriber:
@patch.object(SubscriberClient, "create_subscription")
@@ -92,3 +108,34 @@ class TestSubscriber:
_mocked_client.assert_called_once_with(
ack_deadline_seconds=100, name=expected_subscription, topic=expected_topic
)
+
+ @patch.object(
+ SubscriberClient,
+ "create_subscription",
+ side_effect=exceptions.AlreadyExists("Subscription already exists"),
+ )
+ def test_does_not_raise_when_subscription_already_exists(
+ self, _mocked_client, project_id, subscriber
+ ):
+ subscriber.create_subscription(
+ subscription="test-topic", topic=f"{project_id}-test-topic"
+ )
+
+ _mocked_client.assert_called()
+
+ @patch.object(
+ SubscriberClient,
+ "create_subscription",
+ side_effect=exceptions.NotFound("Subscription topic does not exist"),
+ )
+ def test_logs_error_when_subscription_topic_does_not_exist(
+ self, _mocked_client, project_id, subscriber, caplog
+ ):
+ subscriber.create_subscription(
+ subscription="test-topic", topic=f"{project_id}-test-topic"
+ )
+
+ _mocked_client.assert_called()
+ log = caplog.records[0]
+ assert log.message == "Cannot subscribe to a topic that does not exist."
+ assert log.levelname == "ERROR"
|
Worker should not crash when a topic does not exist
When a worker starts it tries to create subscriptions for all topics. If a topic does not exist the worker will crash.
Proposed change:
catch the `Rendezvous` exception (404 Resource not found), log an error but gracefully create and consume the rest of the defined subscriptions
|
0.0
|
81d45ad5c7964274c10483056fe489d00f563624
|
[
"tests/test_client.py::TestPublisher::test_publishes_data_with_client_timeout_when_blocking_and_timeout_specified",
"tests/test_client.py::TestSubscriber::test_logs_error_when_subscription_topic_does_not_exist"
] |
[
"tests/test_client.py::TestPublisher::test_returns_future_when_published_called",
"tests/test_client.py::TestPublisher::test_publish_sets_published_at",
"tests/test_client.py::TestPublisher::test_publishes_data_with_custom_encoder",
"tests/test_client.py::TestPublisher::test_publishes_data_with_client_timeout_when_blocking",
"tests/test_client.py::TestSubscriber::test_creates_subscription_with_default_ack_deadline_when_none_provided",
"tests/test_client.py::TestSubscriber::test_creates_subscription_with_custom_ack_deadline_when_provided",
"tests/test_client.py::TestSubscriber::test_does_not_raise_when_subscription_already_exists"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-12-21 17:28:30+00:00
|
apache-2.0
| 3,866 |
|
mercadona__rele-187
|
diff --git a/rele/config.py b/rele/config.py
index 67f97df..da3ae8e 100644
--- a/rele/config.py
+++ b/rele/config.py
@@ -87,7 +87,7 @@ def subscription_from_attribute(attribute):
def load_subscriptions_from_paths(sub_module_paths, sub_prefix=None, filter_by=None):
- subscriptions = []
+ subscriptions = {}
for sub_module_path in sub_module_paths:
sub_module = importlib.import_module(sub_module_path)
for attr_name in dir(sub_module):
@@ -102,5 +102,13 @@ def load_subscriptions_from_paths(sub_module_paths, sub_prefix=None, filter_by=N
if filter_by and not subscription.filter_by:
subscription.set_filters(filter_by)
- subscriptions.append(subscription)
- return subscriptions
+ if subscription.name in subscriptions:
+ found_subscription = subscriptions[subscription.name]
+ raise RuntimeError(
+ f"Duplicate subscription name found: {subscription.name}. Subs "
+ f"{subscription._func.__module__}.{subscription._func.__name__} and "
+ f"{found_subscription._func.__module__}.{found_subscription._func.__name__} collide."
+ )
+
+ subscriptions[subscription.name] = subscription
+ return list(subscriptions.values())
|
mercadona/rele
|
a0f4b98408e7f0e1631d282627d9e27bb69bcb00
|
diff --git a/tests/test_config.py b/tests/test_config.py
index 00612f0..7a26be4 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -15,6 +15,11 @@ def sub_stub(data, **kwargs):
return data["id"]
+@sub(topic="another-cool-topic", prefix="rele")
+def another_sub_stub(data, **kwargs):
+ return data["id"]
+
+
class TestLoadSubscriptions:
@pytest.fixture
def subscriptions(self):
@@ -25,8 +30,8 @@ class TestLoadSubscriptions:
)
def test_load_subscriptions_in_a_module(self, subscriptions):
- assert len(subscriptions) == 1
- func_sub = subscriptions[0]
+ assert len(subscriptions) == 2
+ func_sub = subscriptions[-1]
assert isinstance(func_sub, Subscription)
assert func_sub.name == "rele-test-topic"
assert func_sub({"id": 4}, lang="en") == 4
@@ -44,6 +49,16 @@ class TestLoadSubscriptions:
assert klass_sub.name == "test-alternative-cool-topic"
assert klass_sub({"id": 4}, lang="en") == 4
+ def test_raises_error_when_subscription_is_duplicated(self):
+ with pytest.raises(RuntimeError) as excinfo:
+ load_subscriptions_from_paths(["tests.test_config", "tests.more_subs.subs"])
+
+ assert (
+ str(excinfo.value)
+ == "Duplicate subscription name found: rele-another-cool-topic. Subs "
+ "tests.more_subs.subs.another_sub_stub and tests.test_config.another_sub_stub collide."
+ )
+
def test_returns_sub_value_when_filtered_value_applied(self, subscriptions):
assert subscriptions[-1]({"id": 4}, lang="en") == 4
|
Worker initialization should raise an error on duplicate subscriptions
If two `subs` subscribe to the same topic and don't declare a suffix, stealing will happen where approximately half the messages will be processed by one subscription and half by the other. That's extremely difficult to test against and to debug in production, so a protection would be very useful.
I'd suggest raising an error straight away so the worker doesn't start. A warning log wouldn't be as effective because it could go unnoticed.
|
0.0
|
a0f4b98408e7f0e1631d282627d9e27bb69bcb00
|
[
"tests/test_config.py::TestLoadSubscriptions::test_raises_error_when_subscription_is_duplicated"
] |
[
"tests/test_config.py::TestLoadSubscriptions::test_load_subscriptions_in_a_module",
"tests/test_config.py::TestLoadSubscriptions::test_loads_subscriptions_when_they_are_class_based",
"tests/test_config.py::TestLoadSubscriptions::test_returns_sub_value_when_filtered_value_applied",
"tests/test_config.py::TestLoadSubscriptions::test_returns_none_when_filtered_value_does_not_apply",
"tests/test_config.py::TestConfig::test_parses_all_keys",
"tests/test_config.py::TestConfig::test_inits_service_account_creds_when_credential_path_given",
"tests/test_config.py::TestConfig::test_uses_project_id_from_creds_when_no_project_id_given",
"tests/test_config.py::TestConfig::test_sets_defaults",
"tests/test_config.py::TestConfig::test_sets_defaults_pulled_from_env"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2020-10-01 07:01:45+00:00
|
apache-2.0
| 3,867 |
|
mercadona__rele-188
|
diff --git a/rele/config.py b/rele/config.py
index 67f97df..da3ae8e 100644
--- a/rele/config.py
+++ b/rele/config.py
@@ -87,7 +87,7 @@ def subscription_from_attribute(attribute):
def load_subscriptions_from_paths(sub_module_paths, sub_prefix=None, filter_by=None):
- subscriptions = []
+ subscriptions = {}
for sub_module_path in sub_module_paths:
sub_module = importlib.import_module(sub_module_path)
for attr_name in dir(sub_module):
@@ -102,5 +102,13 @@ def load_subscriptions_from_paths(sub_module_paths, sub_prefix=None, filter_by=N
if filter_by and not subscription.filter_by:
subscription.set_filters(filter_by)
- subscriptions.append(subscription)
- return subscriptions
+ if subscription.name in subscriptions:
+ found_subscription = subscriptions[subscription.name]
+ raise RuntimeError(
+ f"Duplicate subscription name found: {subscription.name}. Subs "
+ f"{subscription._func.__module__}.{subscription._func.__name__} and "
+ f"{found_subscription._func.__module__}.{found_subscription._func.__name__} collide."
+ )
+
+ subscriptions[subscription.name] = subscription
+ return list(subscriptions.values())
diff --git a/rele/publishing.py b/rele/publishing.py
index e2bf1d4..3368548 100644
--- a/rele/publishing.py
+++ b/rele/publishing.py
@@ -1,3 +1,5 @@
+from rele import config, discover
+
from .client import Publisher
_publisher = None
@@ -41,5 +43,10 @@ def publish(topic, data, **kwargs):
:return: None
"""
if not _publisher:
- raise ValueError("init_global_publisher must be called first.")
+ settings, _ = discover.sub_modules()
+ if not hasattr(settings, "RELE"):
+ raise ValueError("Config setup not called and settings module not found.")
+
+ config.setup(settings.RELE)
+
_publisher.publish(topic, data, **kwargs)
|
mercadona/rele
|
a0f4b98408e7f0e1631d282627d9e27bb69bcb00
|
diff --git a/tests/test_config.py b/tests/test_config.py
index 00612f0..7a26be4 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -15,6 +15,11 @@ def sub_stub(data, **kwargs):
return data["id"]
+@sub(topic="another-cool-topic", prefix="rele")
+def another_sub_stub(data, **kwargs):
+ return data["id"]
+
+
class TestLoadSubscriptions:
@pytest.fixture
def subscriptions(self):
@@ -25,8 +30,8 @@ class TestLoadSubscriptions:
)
def test_load_subscriptions_in_a_module(self, subscriptions):
- assert len(subscriptions) == 1
- func_sub = subscriptions[0]
+ assert len(subscriptions) == 2
+ func_sub = subscriptions[-1]
assert isinstance(func_sub, Subscription)
assert func_sub.name == "rele-test-topic"
assert func_sub({"id": 4}, lang="en") == 4
@@ -44,6 +49,16 @@ class TestLoadSubscriptions:
assert klass_sub.name == "test-alternative-cool-topic"
assert klass_sub({"id": 4}, lang="en") == 4
+ def test_raises_error_when_subscription_is_duplicated(self):
+ with pytest.raises(RuntimeError) as excinfo:
+ load_subscriptions_from_paths(["tests.test_config", "tests.more_subs.subs"])
+
+ assert (
+ str(excinfo.value)
+ == "Duplicate subscription name found: rele-another-cool-topic. Subs "
+ "tests.more_subs.subs.another_sub_stub and tests.test_config.another_sub_stub collide."
+ )
+
def test_returns_sub_value_when_filtered_value_applied(self, subscriptions):
assert subscriptions[-1]({"id": 4}, lang="en") == 4
diff --git a/tests/test_publishing.py b/tests/test_publishing.py
index b8bc32f..4c7d4f8 100644
--- a/tests/test_publishing.py
+++ b/tests/test_publishing.py
@@ -1,12 +1,28 @@
from unittest.mock import MagicMock, patch
import pytest
+from tests import settings
from rele import Publisher, publishing
class TestPublish:
- def test_raises_when_global_publisher_does_not_exist(self):
+ @patch("rele.publishing.Publisher", autospec=True)
+ def test_instantiates_publisher_and_publishes_when_does_not_exist(
+ self, mock_publisher
+ ):
+ with patch("rele.publishing.discover") as mock_discover:
+ mock_discover.sub_modules.return_value = settings, []
+
+ message = {"foo": "bar"}
+ publishing.publish(topic="order-cancelled", data=message, myattr="hello")
+
+ mock_publisher.return_value.publish.assert_called_with(
+ "order-cancelled", {"foo": "bar"}, myattr="hello"
+ )
+
+ def test_raises_error_when_publisher_does_not_exists_and_settings_not_found(self):
+ publishing._publisher = None
message = {"foo": "bar"}
with pytest.raises(ValueError):
@@ -18,6 +34,7 @@ class TestInitGlobalPublisher:
def test_creates_global_publisher_when_published_called(
self, mock_publisher, config
):
+ publishing._publisher = None
mock_publisher.return_value = MagicMock(spec=Publisher)
publishing.init_global_publisher(config)
message = {"foo": "bar"}
|
Publishing without boilerplate
## Current Behaviour
Right now, if you want to call `rele.publish()`, you must call `config.setup()` before. This is so that
`init_global_publisher` is called and setup properly. Otherwise a `ValueError` is raised.
## Proposal
I propose that a user should be able to call `rele.publish` without the boilerplate of `config.setup`. Instead, I think we can call `init_global_publisher` if publishing and there is no global publisher. The tough part will be getting the settings so that the credentials can be configured properly.
But I do believe this can be solved, to make the UX more elegant and avoid boilerplate.
|
0.0
|
a0f4b98408e7f0e1631d282627d9e27bb69bcb00
|
[
"tests/test_config.py::TestLoadSubscriptions::test_raises_error_when_subscription_is_duplicated"
] |
[
"tests/test_config.py::TestLoadSubscriptions::test_load_subscriptions_in_a_module",
"tests/test_config.py::TestLoadSubscriptions::test_loads_subscriptions_when_they_are_class_based",
"tests/test_config.py::TestLoadSubscriptions::test_returns_sub_value_when_filtered_value_applied",
"tests/test_config.py::TestLoadSubscriptions::test_returns_none_when_filtered_value_does_not_apply",
"tests/test_config.py::TestConfig::test_parses_all_keys",
"tests/test_config.py::TestConfig::test_inits_service_account_creds_when_credential_path_given",
"tests/test_config.py::TestConfig::test_uses_project_id_from_creds_when_no_project_id_given",
"tests/test_config.py::TestConfig::test_sets_defaults",
"tests/test_config.py::TestConfig::test_sets_defaults_pulled_from_env",
"tests/test_publishing.py::TestPublish::test_raises_error_when_publisher_does_not_exists_and_settings_not_found",
"tests/test_publishing.py::TestInitGlobalPublisher::test_creates_global_publisher_when_published_called"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-10-01 13:16:17+00:00
|
apache-2.0
| 3,868 |
|
mercadona__rele-190
|
diff --git a/rele/client.py b/rele/client.py
index 8ba1be1..bcb3af5 100644
--- a/rele/client.py
+++ b/rele/client.py
@@ -167,6 +167,9 @@ class Publisher:
if raise_exception:
raise e
else:
+ run_middleware_hook("post_publish_success", topic)
+
+ # DEPRECATED
run_middleware_hook("post_publish", topic)
return future
diff --git a/rele/middleware.py b/rele/middleware.py
index 84fb8da..dceb84d 100644
--- a/rele/middleware.py
+++ b/rele/middleware.py
@@ -1,4 +1,5 @@
import importlib
+import warnings
_middlewares = []
@@ -24,7 +25,20 @@ def run_middleware_hook(hook_name, *args, **kwargs):
getattr(middleware, hook_name)(*args, **kwargs)
-class BaseMiddleware:
+class WarnDeprecatedHooks(type):
+ def __new__(cls, *args, **kwargs):
+ x = super().__new__(cls, *args, **kwargs)
+ if hasattr(x, "post_publish"):
+ warnings.warn(
+ "The post_publish hook in the middleware is deprecated "
+ "and will be removed in future versions. Please substitute it with "
+ "the post_publish_success hook instead.",
+ DeprecationWarning,
+ )
+ return x
+
+
+class BaseMiddleware(metaclass=WarnDeprecatedHooks):
"""Base class for middleware. The default implementations
for all hooks are no-ops and subclasses may implement whatever
subset of hooks they like.
@@ -43,6 +57,11 @@ class BaseMiddleware:
"""
def post_publish(self, topic):
+ """DEPRECATED: Called after Publisher sends message.
+ :param topic:
+ """
+
+ def post_publish_success(self, topic):
"""Called after Publisher sends message.
:param topic:
"""
diff --git a/rele/publishing.py b/rele/publishing.py
index e2bf1d4..3368548 100644
--- a/rele/publishing.py
+++ b/rele/publishing.py
@@ -1,3 +1,5 @@
+from rele import config, discover
+
from .client import Publisher
_publisher = None
@@ -41,5 +43,10 @@ def publish(topic, data, **kwargs):
:return: None
"""
if not _publisher:
- raise ValueError("init_global_publisher must be called first.")
+ settings, _ = discover.sub_modules()
+ if not hasattr(settings, "RELE"):
+ raise ValueError("Config setup not called and settings module not found.")
+
+ config.setup(settings.RELE)
+
_publisher.publish(topic, data, **kwargs)
|
mercadona/rele
|
bed9ec31edc4f6d33f82dfefe7090016be1947cc
|
diff --git a/tests/test_middleware.py b/tests/test_middleware.py
index b13364d..57037bc 100644
--- a/tests/test_middleware.py
+++ b/tests/test_middleware.py
@@ -3,6 +3,7 @@ from unittest.mock import patch
import pytest
import rele
+from rele.middleware import BaseMiddleware
class TestMiddleware:
@@ -23,3 +24,11 @@ class TestMiddleware:
rele.setup(settings, foo="bar")
assert mock_middleware_setup.called
assert mock_middleware_setup.call_args_list[0][-1] == {"foo": "bar"}
+
+ def test_warns_about_deprecated_hooks(self):
+
+ with pytest.warns(DeprecationWarning):
+
+ class TestMiddleware(BaseMiddleware):
+ def post_publish(self, topic):
+ pass
diff --git a/tests/test_publishing.py b/tests/test_publishing.py
index b8bc32f..4c7d4f8 100644
--- a/tests/test_publishing.py
+++ b/tests/test_publishing.py
@@ -1,12 +1,28 @@
from unittest.mock import MagicMock, patch
import pytest
+from tests import settings
from rele import Publisher, publishing
class TestPublish:
- def test_raises_when_global_publisher_does_not_exist(self):
+ @patch("rele.publishing.Publisher", autospec=True)
+ def test_instantiates_publisher_and_publishes_when_does_not_exist(
+ self, mock_publisher
+ ):
+ with patch("rele.publishing.discover") as mock_discover:
+ mock_discover.sub_modules.return_value = settings, []
+
+ message = {"foo": "bar"}
+ publishing.publish(topic="order-cancelled", data=message, myattr="hello")
+
+ mock_publisher.return_value.publish.assert_called_with(
+ "order-cancelled", {"foo": "bar"}, myattr="hello"
+ )
+
+ def test_raises_error_when_publisher_does_not_exists_and_settings_not_found(self):
+ publishing._publisher = None
message = {"foo": "bar"}
with pytest.raises(ValueError):
@@ -18,6 +34,7 @@ class TestInitGlobalPublisher:
def test_creates_global_publisher_when_published_called(
self, mock_publisher, config
):
+ publishing._publisher = None
mock_publisher.return_value = MagicMock(spec=Publisher)
publishing.init_global_publisher(config)
message = {"foo": "bar"}
|
Rename post_publish hook to post_publish_success
We want to be more explicit with this middleware hook, just like we do with`post_process_message_failure` and `post_process_message_success`.
For backwards compatibility we'll Initially allow both `post_publish` and `post_publish_success` and eventually we'll deprecate `post_publish`.
|
0.0
|
bed9ec31edc4f6d33f82dfefe7090016be1947cc
|
[
"tests/test_middleware.py::TestMiddleware::test_warns_about_deprecated_hooks"
] |
[
"tests/test_middleware.py::TestMiddleware::test_setup_fn_is_called_with_kwargs",
"tests/test_publishing.py::TestPublish::test_raises_error_when_publisher_does_not_exists_and_settings_not_found",
"tests/test_publishing.py::TestInitGlobalPublisher::test_creates_global_publisher_when_published_called"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-10-03 10:30:05+00:00
|
apache-2.0
| 3,869 |
|
mercadona__rele-49
|
diff --git a/docs/index.rst b/docs/index.rst
index 648a4cb..659092f 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -42,18 +42,10 @@ _______
.. toctree::
:maxdepth: 2
- subscription
+ modules/subscription
modules/client
-
-.. toctree::
- :maxdepth: 2
-
- publish
-
-.. toctree::
- :maxdepth: 2
-
- worker
+ modules/publish
+ modules/worker
Indices and tables
==================
diff --git a/docs/publish.rst b/docs/modules/publish.rst
similarity index 88%
rename from docs/publish.rst
rename to docs/modules/publish.rst
index 1b87350..866fb91 100644
--- a/docs/publish.rst
+++ b/docs/modules/publish.rst
@@ -1,4 +1,4 @@
-.. _publish
+.. _ publish
Publish
=======
diff --git a/docs/subscription.rst b/docs/modules/subscription.rst
similarity index 85%
rename from docs/subscription.rst
rename to docs/modules/subscription.rst
index ef3f215..f80da4b 100644
--- a/docs/subscription.rst
+++ b/docs/modules/subscription.rst
@@ -1,8 +1,9 @@
-.. _subscription
+.. _ subscription
Subscription
============
+
.. automodule:: rele
:members:
diff --git a/docs/worker.rst b/docs/modules/worker.rst
similarity index 88%
rename from docs/worker.rst
rename to docs/modules/worker.rst
index c068446..65b3eae 100644
--- a/docs/worker.rst
+++ b/docs/modules/worker.rst
@@ -1,4 +1,4 @@
-.. _worker
+.. _ worker
Worker
=======
diff --git a/rele/client.py b/rele/client.py
index dda57fe..f67514d 100644
--- a/rele/client.py
+++ b/rele/client.py
@@ -15,6 +15,7 @@ USE_EMULATOR = True if os.environ.get('PUBSUB_EMULATOR_HOST') else False
class Subscriber:
+ DEFAULT_ACK_DEADLINE = 60
def __init__(self, gc_project_id, credentials):
self._gc_project_id = gc_project_id
@@ -23,14 +24,21 @@ class Subscriber:
else:
self._client = pubsub_v1.SubscriberClient(credentials=credentials)
- def create_subscription(self, subscription, topic):
+ def get_default_ack_deadline(self):
+ return int(os.environ.get('DEFAULT_ACK_DEADLINE', self.DEFAULT_ACK_DEADLINE))
+
+ def create_subscription(self, subscription, topic, ack_deadline_seconds=None):
+ ack_deadline_seconds = ack_deadline_seconds or self.get_default_ack_deadline()
+
subscription_path = self._client.subscription_path(
self._gc_project_id, subscription)
topic_path = self._client.topic_path(self._gc_project_id, topic)
with suppress(exceptions.AlreadyExists):
self._client.create_subscription(
- name=subscription_path, topic=topic_path)
+ name=subscription_path,
+ topic=topic_path,
+ ack_deadline_seconds=ack_deadline_seconds)
def consume(self, subscription_name, callback):
subscription_path = self._client.subscription_path(
|
mercadona/rele
|
9db6037529309f81a8b13a1d980e4d7d3c316e4b
|
diff --git a/tests/conftest.py b/tests/conftest.py
index 9277d4f..3a21b7a 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -3,9 +3,25 @@ import concurrent
from unittest.mock import MagicMock, patch
from google.cloud.pubsub_v1 import PublisherClient
from rele import Publisher
+from rele.client import Subscriber
from tests import settings
[email protected]()
+def project_id():
+ return 'test-project-id'
+
+
[email protected]()
+def credentials():
+ return 'my-credentials'
+
+
[email protected]()
+def subscriber(project_id, credentials):
+ return Subscriber(project_id, credentials)
+
+
@pytest.fixture(scope='class')
def publisher():
publisher = Publisher(settings.RELE_GC_PROJECT_ID,
diff --git a/tests/test_client.py b/tests/test_client.py
index deb0603..dc462bc 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -1,6 +1,9 @@
-import pytest
+import os
import concurrent
-from unittest.mock import ANY
+from unittest.mock import ANY, patch
+
+import pytest
+from google.cloud.pubsub_v1 import SubscriberClient
@pytest.mark.usefixtures('publisher', 'time_mock')
@@ -48,3 +51,60 @@ class TestPublisher:
publisher._client.publish.assert_called_with(
ANY, b'{"foo": "bar"}', published_at=str(published_at))
+
+
+class TestSubscriber:
+ @patch.object(SubscriberClient, 'create_subscription')
+ def test_creates_subscription_with_default_ack_deadline_when_none_provided(
+ self, _mocked_client, project_id, subscriber):
+ expected_subscription = (f'projects/{project_id}/subscriptions/'
+ f'test-topic')
+ expected_topic = (f'projects/{project_id}/topics/'
+ f'{project_id}-test-topic')
+
+ subscriber.create_subscription('test-topic',
+ f'{project_id}-test-topic')
+
+ _mocked_client.assert_called_once_with(ack_deadline_seconds=60,
+ name=expected_subscription,
+ topic=expected_topic)
+
+ @patch.object(SubscriberClient, 'create_subscription')
+ def test_creates_subscription_with_custom_ack_deadline_when_provided(
+ self, _mocked_client, project_id, subscriber):
+ expected_subscription = (f'projects/{project_id}/subscriptions/'
+ f'test-topic')
+ expected_topic = (f'projects/{project_id}/topics/'
+ f'{project_id}-test-topic')
+
+ subscriber.create_subscription('test-topic',
+ f'{project_id}-test-topic',
+ ack_deadline_seconds=100)
+
+ _mocked_client.assert_called_once_with(ack_deadline_seconds=100,
+ name=expected_subscription,
+ topic=expected_topic)
+
+ @patch.object(SubscriberClient, 'create_subscription')
+ def test_creates_subscription_with_custom_ack_deadline_from_environment(
+ self, _mocked_client, project_id, subscriber):
+ expected_subscription = (f'projects/{project_id}/subscriptions/'
+ f'test-topic')
+ expected_topic = (f'projects/{project_id}/topics/'
+ f'{project_id}-test-topic')
+
+ with patch.dict(os.environ, {'DEFAULT_ACK_DEADLINE': '200'}):
+ subscriber.create_subscription('test-topic',
+ f'{project_id}-test-topic')
+
+ _mocked_client.assert_called_once_with(ack_deadline_seconds=200,
+ name=expected_subscription,
+ topic=expected_topic)
+
+ def test_get_default_ack_deadline(self, subscriber):
+ assert subscriber.get_default_ack_deadline() == 60
+
+ def test_get_default_ack_deadline_from_environment_variable(
+ self, subscriber):
+ with patch.dict(os.environ, {'DEFAULT_ACK_DEADLINE': '200'}):
+ assert subscriber.get_default_ack_deadline() == 200
|
Parametrize subscription ack deadline
For any message for a topic (rele's subscription), use a custom ack deadline value instead of default 10 seconds using `ack_deadline_seconds` parameter in `SubscriberClient.create_subscription()` when creating a subscription.
It has been seen, that in some cases, there is a requirement to expend more than default 10 seconds to consume (late acknowledge) a message.
## Related
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/modifyAckDeadline
|
0.0
|
9db6037529309f81a8b13a1d980e4d7d3c316e4b
|
[
"tests/test_client.py::TestSubscriber::test_creates_subscription_with_default_ack_deadline_when_none_provided",
"tests/test_client.py::TestSubscriber::test_creates_subscription_with_custom_ack_deadline_when_provided",
"tests/test_client.py::TestSubscriber::test_creates_subscription_with_custom_ack_deadline_from_environment",
"tests/test_client.py::TestSubscriber::test_get_default_ack_deadline",
"tests/test_client.py::TestSubscriber::test_get_default_ack_deadline_from_environment_variable"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-06-12 15:44:17+00:00
|
apache-2.0
| 3,870 |
|
metno__py-mms-26
|
diff --git a/pymms/productevent.py b/pymms/productevent.py
index f017152..11318f2 100644
--- a/pymms/productevent.py
+++ b/pymms/productevent.py
@@ -33,7 +33,8 @@ class ProductEvent():
"""
def __init__(
- self, jobName="", product="", productionHub="", productLocation="", eventInterval=0
+ self, jobName="", product="", productionHub="", productLocation="", refTime="",
+ eventInterval=0, counter=1, totalCount=1
):
# Event properties
@@ -42,6 +43,9 @@ class ProductEvent():
self._eventProductionHub = productionHub
self._eventProductLocation = productLocation
self._eventInterval = eventInterval
+ self._refTime = refTime
+ self._counter = counter
+ self._totalCount = totalCount
return
@@ -69,6 +73,18 @@ class ProductEvent():
def eventInterval(self):
return self._eventInterval
+ @property
+ def refTime(self):
+ return self._refTime
+
+ @property
+ def counter(self):
+ return self._counter
+
+ @property
+ def totalCount(self):
+ return self._totalCount
+
##
# Setters
##
@@ -118,6 +134,33 @@ class ProductEvent():
raise ValueError("ProductEvent.eventInterval must be an integer")
return
+ @refTime.setter
+ def refTime(self, value):
+ if isinstance(value, str):
+ self._refTime = value
+ else:
+ self._refTime = ""
+ raise ValueError("ProductEvent.refTime must be a string")
+ return
+
+ @counter.setter
+ def counter(self, value):
+ if isinstance(value, int):
+ self._counter = value
+ else:
+ self._counter = ""
+ raise ValueError("ProductEvent.eventInterval must be an integer")
+ return
+
+ @totalCount.setter
+ def totalCount(self, value):
+ if isinstance(value, int):
+ self._totalCount = value
+ else:
+ self._totalCount = ""
+ raise ValueError("ProductEvent.eventInterval must be an integer")
+ return
+
##
# Methods
##
@@ -135,6 +178,9 @@ class ProductEvent():
"ProductLocation": str(self._eventProductLocation),
"CreatedAt": nowTime.isoformat(),
"NextEventAt": nextTime.isoformat(),
+ "RefTime": str(self._refTime),
+ "Counter": str(self.counter),
+ "TotalCount": str(self.totalCount)
})
apiURL = self._eventProductionHub + "/api/v1/events"
|
metno/py-mms
|
25f47af8327aef69405a1e1c8a78334234dd0122
|
diff --git a/tests/test_postevent.py b/tests/test_postevent.py
index da46d46..63787c6 100644
--- a/tests/test_postevent.py
+++ b/tests/test_postevent.py
@@ -15,6 +15,9 @@ def testCreateProductEvent():
productionHub="FirstC",
productLocation="FirstD",
eventInterval=42,
+ refTime="2021-06-22T12:00:00Z",
+ counter=1,
+ totalCount=1
)
assert pEvent.jobName == "FirstA"
@@ -22,6 +25,9 @@ def testCreateProductEvent():
assert pEvent.productionHub == "FirstC"
assert pEvent.productLocation == "FirstD"
assert pEvent.eventInterval == 42
+ assert pEvent.refTime == "2021-06-22T12:00:00Z"
+ assert pEvent.counter == 1
+ assert pEvent.totalCount == 1
with pytest.raises(ValueError):
pEvent.jobName = 0
@@ -38,27 +44,45 @@ def testCreateProductEvent():
with pytest.raises(ValueError):
pEvent.eventInterval = "2"
+ with pytest.raises(ValueError):
+ pEvent.refTime = 0
+
+ with pytest.raises(ValueError):
+ pEvent.counter = "1"
+
+ with pytest.raises(ValueError):
+ pEvent.totalCount = "1"
+
pEvent.jobName = "SecondA"
pEvent.product = "SecondB"
pEvent.productionHub = "SecondC"
pEvent.productLocation = "SecondD"
pEvent.eventInterval = 43
+ pEvent.refTime = "2000-01-01T00:00:00Z"
+ pEvent.counter = 2
+ pEvent.totalCount = 3
assert pEvent.jobName == "SecondA"
assert pEvent.product == "SecondB"
assert pEvent.productionHub == "SecondC"
assert pEvent.productLocation == "SecondD"
assert pEvent.eventInterval == 43
+ assert pEvent.refTime == "2000-01-01T00:00:00Z"
+ assert pEvent.counter == 2
+ assert pEvent.totalCount == 3
@pytest.mark.events
def testSendProductEvent(monkeypatch):
# Valid Event
pEvent = ProductEvent(
+ eventInterval=3600,
jobName="TestJob",
product="TestProduct",
productionHub="http://localhost:8080",
- productLocation="/tmp",
- eventInterval=3600,
+ productLocation="/tmp/testproduct.ext",
+ refTime="2021-06-22T12:00:00Z",
+ counter=1,
+ totalCount=1,
)
monkeypatch.setattr(request, "urlopen", lambda *args, **kwargs: None)
assert pEvent.send() is None
|
Sync with go-mms api
Py-mms hasn't been updated since before last sprint, should be updated to integrate the new functionality added in last sprint..
|
0.0
|
25f47af8327aef69405a1e1c8a78334234dd0122
|
[
"tests/test_postevent.py::testCreateProductEvent",
"tests/test_postevent.py::testSendProductEvent"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-06-22 09:55:31+00:00
|
apache-2.0
| 3,871 |
|
metov__openeditor-4
|
diff --git a/src/lib.py b/src/lib.py
index 9d0eb86..9a0e2f0 100644
--- a/src/lib.py
+++ b/src/lib.py
@@ -37,3 +37,10 @@ def editor() -> str:
"Both $VISUAL and $EDITOR are unset, could not pick "
"an appropriate editor."
)
+
+
+def write_file(path, contents=""):
+ """Create a file. If contents are given, they will be written to it."""
+ with open(path, 'w') as f:
+ if contents:
+ f.write(contents)
diff --git a/src/openeditor.py b/src/openeditor.py
index 51efe9a..ea94349 100644
--- a/src/openeditor.py
+++ b/src/openeditor.py
@@ -3,7 +3,7 @@
import tempfile
from pathlib import Path
-from lib import edit
+from lib import edit, write_file
DEFAULT_TEMPFILE = "tempfile"
@@ -31,14 +31,10 @@ def edit_temp(contents="", name=""):
:return: Contents of the file when the editor is closed.
"""
- # Create a temp file with requested name, if any
+ # Create a temp file, ensure it has requested name and contents
td = tempfile.TemporaryDirectory()
tfpath = Path(td.name) / (name or DEFAULT_TEMPFILE)
-
- # Populate contents if needed
- if contents:
- with tfpath.open("w") as f:
- f.write(contents)
+ write_file(tfpath, contents)
# Edit interactively
return edit(tfpath)
|
metov/openeditor
|
9045b027b6255a107640a5bb1bacdd8222ad28ed
|
diff --git a/tests/lib/test_write_file.py b/tests/lib/test_write_file.py
new file mode 100644
index 0000000..45acbb1
--- /dev/null
+++ b/tests/lib/test_write_file.py
@@ -0,0 +1,10 @@
+from unittest.mock import mock_open, patch
+
+from lib import write_file
+
+
+def test_empty_file_created(monkeypatch):
+ fake_open = mock_open()
+ with patch('lib.open', fake_open):
+ write_file('/tmp/nosuchfile.txt', contents="")
+ fake_open.assert_called_once()
diff --git a/tests/openeditor/test_edit_temp.py b/tests/openeditor/test_edit_temp.py
index e5ccd8d..e6e7233 100644
--- a/tests/openeditor/test_edit_temp.py
+++ b/tests/openeditor/test_edit_temp.py
@@ -1,6 +1,5 @@
import tempfile
from pathlib import Path
-from unittest.mock import mock_open
import openeditor
@@ -9,23 +8,17 @@ class MockDir:
name = "/fake/temp/dir"
-class Recorder:
- """A class for recording calls."""
-
- last_arg = None
-
- def record(self, arg):
- self.last_arg = arg
+class MockWrite:
+ def record(self, path, contents):
+ self.path = path
def test_default_name_on_empty_name(monkeypatch):
- # Don't actually create a temp dir
monkeypatch.setattr(tempfile, "TemporaryDirectory", MockDir)
- mock_open()
- fake_edit = Recorder()
- monkeypatch.setattr(openeditor, "edit", fake_edit.record)
+ fake_write = MockWrite()
+ monkeypatch.setattr(openeditor, "write_file", fake_write.record)
+ monkeypatch.setattr(openeditor, "edit", lambda fn: None)
openeditor.edit_temp(name="")
- exp_args = str(Path(MockDir.name) / openeditor.DEFAULT_TEMPFILE)
- obs_args = str(fake_edit.last_arg)
- assert exp_args == obs_args
+ exp = str(Path(MockDir.name) / openeditor.DEFAULT_TEMPFILE)
+ assert exp == str(fake_write.path)
|
FileNotFoundError when editing a file without contents
The following succeeds:
```
python -c "import openeditor; print(openeditor.edit_temp(contents='hello'))"
```
but this gives FileNotFoundError:
```
python -c "import openeditor; print(openeditor.edit_temp())"
```
|
0.0
|
9045b027b6255a107640a5bb1bacdd8222ad28ed
|
[
"tests/lib/test_write_file.py::test_empty_file_created",
"tests/openeditor/test_edit_temp.py::test_default_name_on_empty_name"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-04-30 05:30:46+00:00
|
mit
| 3,872 |
|
mfuentesg__SyncSettings-151
|
diff --git a/.gitignore b/.gitignore
index 8babfb9..2c1bd69 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,3 +15,5 @@ tests/options.json
.idea
cover
.coverage
+
+pyproject.toml
diff --git a/sync_settings/commands/download.py b/sync_settings/commands/download.py
index d0f2985..5a00a8e 100644
--- a/sync_settings/commands/download.py
+++ b/sync_settings/commands/download.py
@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
-import json
import os
import sublime
import sublime_plugin
@@ -50,7 +49,7 @@ class SyncSettingsDownloadCommand(sublime_plugin.WindowCommand):
file_content = manager.get_content(
path.join(self.temp_folder, path.encode('Package Control.sublime-settings'))
)
- package_settings = json.loads('{}' if file_content == '' else file_content)
+ package_settings = sublime.decode_value('{}' if file_content == '' else file_content)
# read installed_packages from remote reference and merge it with the local version
local_settings = sublime.load_settings('Package Control.sublime-settings')
setting = 'installed_packages'
diff --git a/sync_settings/libs/gist.py b/sync_settings/libs/gist.py
index a6cdc70..ec6a5d2 100644
--- a/sync_settings/libs/gist.py
+++ b/sync_settings/libs/gist.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
-import json
+import sublime
import re
import requests
from functools import wraps
@@ -63,14 +63,14 @@ class Gist:
def create(self, data):
if not isinstance(data, dict) or not len(data):
raise ValueError('Gist can`t be created without data')
- return self.__do_request('post', self.make_uri(), data=json.dumps(data)).json()
+ return self.__do_request('post', self.make_uri(), data=sublime.encode_value(data, True)).json()
@auth
@with_gid
def update(self, gid, data):
if not isinstance(data, dict) or not len(data):
raise ValueError('Gist can`t be updated without data')
- return self.__do_request('patch', self.make_uri(gid), data=json.dumps(data)).json()
+ return self.__do_request('patch', self.make_uri(gid), data=sublime.encode_value(data, True)).json()
@auth
@with_gid
diff --git a/sync_settings/sync_version.py b/sync_settings/sync_version.py
index 2f28eda..7e37570 100644
--- a/sync_settings/sync_version.py
+++ b/sync_settings/sync_version.py
@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*
import sublime
-import json
import os
from .libs.gist import Gist
from .libs import settings, path
@@ -14,7 +13,7 @@ def get_local_version():
return {}
try:
with open(file_path) as f:
- return json.load(f)
+ return sublime.decode_value(f.read())
except: # noqa: E722
pass
return {}
@@ -37,7 +36,7 @@ def get_remote_version():
def update_config_file(info):
with open(file_path, 'w') as f:
- json.dump(info, f)
+ f.write(sublime.encode_value(info, True))
def show_update_dialog(on_yes=None):
|
mfuentesg/SyncSettings
|
5c7fd2595b3c4fe8672d33bc9c6043f4b192613d
|
diff --git a/tests/mocks/sublime_mock.py b/tests/mocks/sublime_mock.py
index c7d5c25..d22a014 100644
--- a/tests/mocks/sublime_mock.py
+++ b/tests/mocks/sublime_mock.py
@@ -1,3 +1,6 @@
+import json
+import re
+
DIALOG_YES = 1
@@ -29,3 +32,12 @@ def load_settings(*args):
'included_files': [],
'excluded_files': []
})
+
+
+def encode_value(data, pretty):
+ return json.dumps(data)
+
+
+def decode_value(content):
+ decoded = re.sub(re.compile(r"/\*.*?\*/", re.DOTALL), "", content)
+ return json.loads(re.sub(re.compile(r"//.*?\n"), "", decoded))
diff --git a/tests/test_sync_version.py b/tests/test_sync_version.py
index 7a4930d..e518ce8 100644
--- a/tests/test_sync_version.py
+++ b/tests/test_sync_version.py
@@ -42,6 +42,17 @@ class TestSyncVersion(unittest.TestCase):
v = version.get_local_version()
self.assertDictEqual({'hash': '123123123', 'created_at': '2019-01-11T02:15:15Z'}, v)
+ @mock.patch('sync_settings.libs.path.exists', mock.MagicMock(return_value=True))
+ @mock.patch(
+ 'sync_settings.sync_version.open',
+ mock.mock_open(
+ read_data='{"created_at": "2019-01-11T02:15:15Z", /* some comment */"hash": "123123123"}'
+ ),
+ )
+ def test_get_local_version_with_commented_content(self):
+ v = version.get_local_version()
+ self.assertDictEqual({"hash": "123123123", "created_at": "2019-01-11T02:15:15Z"}, v)
+
@mock.patch('sublime.yes_no_cancel_dialog', mock.MagicMock(return_value=1))
def test_show_update_dialog(self):
def on_done():
|
No JSON object could be decoded
Hey! Faced a problem.
Maybe it correlates with the new version of sublime text 4070 alpha

```
WARNING:Sync Settings.sync_settings.libs.logger:{'message': 'Not Found', 'documentation_url': 'https://developer.github.com/v3/gists/#delete-a-gist'}
ERROR:Sync Settings.sync_settings.libs.logger:No JSON object could be decoded
Traceback (most recent call last):
File "./python3.3/json/decoder.py", line 367, in raw_decode
StopIteration
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\krupi\AppData\Roaming\Sublime Text 3\Installed Packages\Sync Settings.sublime-package\sync_settings/commands/download.py", line 53, in download
package_settings = json.loads('{}' if file_content == '' else file_content)
File "./python3.3/json/__init__.py", line 316, in loads
File "./python3.3/json/decoder.py", line 351, in decode
File "./python3.3/json/decoder.py", line 369, in raw_decode
ValueError: No JSON object could be decoded
ERROR:Sync Settings.sync_settings.libs.logger:No JSON object could be decoded
Traceback (most recent call last):
File "./python3.3/json/decoder.py", line 367, in raw_decode
StopIteration
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\krupi\AppData\Roaming\Sublime Text 3\Installed Packages\Sync Settings.sublime-package\sync_settings/commands/download.py", line 53, in download
package_settings = json.loads('{}' if file_content == '' else file_content)
File "./python3.3/json/__init__.py", line 316, in loads
File "./python3.3/json/decoder.py", line 351, in decode
File "./python3.3/json/decoder.py", line 369, in raw_decode
ValueError: No JSON object could be decoded
```
My gist
https://gist.github.com/krupitskas/b272fea836faffa356c8f0cce9a121b3
|
0.0
|
5c7fd2595b3c4fe8672d33bc9c6043f4b192613d
|
[
"tests/test_sync_version.py::TestSyncVersion::test_get_local_version_with_commented_content"
] |
[
"tests/test_sync_version.py::TestSyncVersion::test_get_local_version_empty_json",
"tests/test_sync_version.py::TestSyncVersion::test_get_local_version_invalid_content",
"tests/test_sync_version.py::TestSyncVersion::test_get_local_version_no_file",
"tests/test_sync_version.py::TestSyncVersion::test_get_local_version_with_content",
"tests/test_sync_version.py::TestSyncVersion::test_get_remote_version",
"tests/test_sync_version.py::TestSyncVersion::test_get_remote_version_failed",
"tests/test_sync_version.py::TestSyncVersion::test_show_update_dialog",
"tests/test_sync_version.py::TestSyncVersion::test_upgrade_outdated_version",
"tests/test_sync_version.py::TestSyncVersion::test_upgrade_same_version",
"tests/test_sync_version.py::TestSyncVersion::test_upgrade_without_local_version",
"tests/test_sync_version.py::TestSyncVersion::test_upgrade_without_remote_version"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-05-10 20:39:47+00:00
|
mit
| 3,873 |
|
mgcth__mysgen-49
|
diff --git a/src/mysgen/mysgen.py b/src/mysgen/mysgen.py
index f6698c0..a230943 100644
--- a/src/mysgen/mysgen.py
+++ b/src/mysgen/mysgen.py
@@ -396,14 +396,14 @@ class MySGEN:
meta, content = self._parse(item_path)
if item_type == "pages":
- if "data" in meta:
+ if "data" in meta and meta["data"] is not False:
self.pages[item] = DataPage(meta, content, src_path, build_path)
else:
self.pages[item] = Page(meta, content, src_path, build_path)
else:
- if "image" in meta:
+ if "image" in meta and meta["image"] is not False:
self.posts[item] = ImagePost(meta, content, src_path, build_path)
- elif "data" in meta:
+ elif "data" in meta and meta["data"] is not False:
self.posts[item] = DataPost(meta, content, src_path, build_path)
else:
self.posts[item] = Post(meta, content, src_path, build_path)
@@ -471,6 +471,10 @@ class MySGEN:
if value == "":
continue
+ if (key == "data" or key == "image") and value == "false":
+ meta[key] = False
+ continue
+
if key == "date":
meta[key] = datetime.strptime(value.pop(), "%Y-%m-%d")
continue
|
mgcth/mysgen
|
72621078c35fd6bd153158d45496b6a961cc1a6c
|
diff --git a/tests/test_unit.py b/tests/test_unit.py
index 6031ccb..a2d018d 100644
--- a/tests/test_unit.py
+++ b/tests/test_unit.py
@@ -163,8 +163,11 @@ class TestUnitMySGEN:
("posts", [], {}),
("pages", ["file"], {}),
("pages", ["file"], {"data": "data"}),
+ ("pages", ["file"], {"data": False}),
("posts", ["file"], {"image": "image"}),
+ ("posts", ["file"], {"image": False}),
("posts", ["file"], {"data": "data"}),
+ ("posts", ["file"], {"data": False}),
("posts", ["file"], {}),
],
)
@@ -208,14 +211,14 @@ class TestUnitMySGEN:
mysgen.find_and_parse(item_type)
if item_type == "pages":
- if "data" in meta:
+ if "data" in meta and meta["data"] is not False:
mock_datapage.assert_called_once()
else:
mock_page.assert_called_once()
else:
- if "image" in meta:
+ if "image" in meta and meta["image"] is not False:
mock_imagepost.assert_called_once()
- elif "data" in meta:
+ elif "data" in meta and meta["data"] is not False:
mock_datapost.assert_called_once()
else:
mock_post.assert_called_once()
@@ -297,6 +300,8 @@ class TestUnitMySGEN:
"tags": ["a, b"],
"category": ["c"],
"test": "",
+ "data": "false",
+ "image": "false",
}
meta_return = mysgen._format_metadata(meta)
@@ -305,6 +310,8 @@ class TestUnitMySGEN:
"tags": ["a", " b"],
"category": "c",
"test": "",
+ "data": False,
+ "image": False,
}
assert meta_return == meta_answer
|
Allow image and data metadata to be false
## Description
If the `data` or `image` metadata fields are present but empty mysgen expects there to be some data for that post. Allow them to be `false`.
|
0.0
|
72621078c35fd6bd153158d45496b6a961cc1a6c
|
[
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[pages-files5-meta5]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[posts-files7-meta7]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[posts-files9-meta9]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_format_metadata"
] |
[
"tests/test_unit.py::test_unit_build",
"tests/test_unit.py::TestUnitMySGEN::test_unit_mysgen_init",
"tests/test_unit.py::TestUnitMySGEN::test_unit_mysgen_build[bucket]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_mysgen_build[False]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_mysgen_build[None]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_mysgen_set_base_config",
"tests/test_unit.py::TestUnitMySGEN::test_unit_build_menu",
"tests/test_unit.py::TestUnitMySGEN::test_unit_define_environment",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[unknown-files0-meta0]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[pages-files1-meta1]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[posts-files2-meta2]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[pages-files3-meta3]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[pages-files4-meta4]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[posts-files6-meta6]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[posts-files8-meta8]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[posts-files10-meta10]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_process[unknown-data0]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_process[pages-data1]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_process[pages-data2]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_process[posts-data3]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_process[posts-data4]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_copy_s3",
"tests/test_unit.py::TestUnitMySGEN::test_unit_parse",
"tests/test_unit.py::TestUnitItem::test_unit_item_init",
"tests/test_unit.py::TestUnitItem::test_unit_item_abstract_process",
"tests/test_unit.py::TestUnitItem::test_unit_item_patch_content",
"tests/test_unit.py::TestUnitItem::test_unit_item_copy",
"tests/test_unit.py::TestUnitPost::test_unit_post_init",
"tests/test_unit.py::TestUnitPost::test_unit_post_process[meta0-content-base0-template0]",
"tests/test_unit.py::TestUnitPost::test_unit_post_copy_raises",
"tests/test_unit.py::TestUnitImagePost::test_unit_imagepost_init",
"tests/test_unit.py::TestUnitImagePost::test_unit_imagepost_process[True]",
"tests/test_unit.py::TestUnitImagePost::test_unit_imagepost_process[False]",
"tests/test_unit.py::TestUnitImagePost::test_unit_imagepost_resize_image[posts/path/images/image1.jpg-image_size0-thumbnail_size0-image1_small.jpg]",
"tests/test_unit.py::TestUnitDataPost::test_unit_datapost_init",
"tests/test_unit.py::TestUnitDataPost::test_unit_datapost_process",
"tests/test_unit.py::TestUnitPage::test_unit_page_init",
"tests/test_unit.py::TestUnitPage::test_unit_page_process[pages/home--home]",
"tests/test_unit.py::TestUnitPage::test_unit_page_process[pages/archive-archive-archive]",
"tests/test_unit.py::TestUnitDataPage::test_unit_datapage_init",
"tests/test_unit.py::TestUnitDataPage::test_unit_datapage_process"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-12-16 18:55:45+00:00
|
mit
| 3,874 |
|
mgcth__mysgen-53
|
diff --git a/pyproject.toml b/pyproject.toml
index fd5217a..7e57d98 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "mysgen"
-version = "0.4.0"
+version = "0.4.1"
description = "My simple static site generator."
license = { text = "MIT" }
readme = "README.md"
diff --git a/src/mysgen/mysgen.py b/src/mysgen/mysgen.py
index a230943..de03e15 100644
--- a/src/mysgen/mysgen.py
+++ b/src/mysgen/mysgen.py
@@ -432,6 +432,7 @@ class MySGEN:
)
base["pages"] = self.pages
base["articles"] = posts_metadata
+ base["all_posts"] = self.posts
else:
raise NotImplementedError(
"Item type {item_type} not implemented.".format(item_type=item_type)
|
mgcth/mysgen
|
827a06bfd067e1df26451fe86d6cfc888a3bf5af
|
diff --git a/tests/test_unit.py b/tests/test_unit.py
index a2d018d..9dccccd 100644
--- a/tests/test_unit.py
+++ b/tests/test_unit.py
@@ -261,7 +261,8 @@ class TestUnitMySGEN:
mysgen.process(item_type)
if data["page"].meta["status"] == "published":
data["page"].process.assert_called_once_with(
- {"pages": data, "articles": "posts_metadata"}, "template"
+ {"pages": data, "articles": "posts_metadata", "all_posts": {}},
+ "template",
)
mock_sorted.assert_called_once()
|
Forward all posts dict to pages template
## Description
For more flexibility, forward all posts_dict to page template.
|
0.0
|
827a06bfd067e1df26451fe86d6cfc888a3bf5af
|
[
"tests/test_unit.py::TestUnitMySGEN::test_unit_process[pages-data2]"
] |
[
"tests/test_unit.py::test_unit_build",
"tests/test_unit.py::TestUnitMySGEN::test_unit_mysgen_init",
"tests/test_unit.py::TestUnitMySGEN::test_unit_mysgen_build[bucket]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_mysgen_build[False]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_mysgen_build[None]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_mysgen_set_base_config",
"tests/test_unit.py::TestUnitMySGEN::test_unit_build_menu",
"tests/test_unit.py::TestUnitMySGEN::test_unit_define_environment",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[unknown-files0-meta0]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[pages-files1-meta1]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[posts-files2-meta2]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[pages-files3-meta3]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[pages-files4-meta4]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[pages-files5-meta5]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[posts-files6-meta6]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[posts-files7-meta7]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[posts-files8-meta8]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[posts-files9-meta9]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[posts-files10-meta10]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_process[unknown-data0]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_process[pages-data1]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_process[posts-data3]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_process[posts-data4]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_copy_s3",
"tests/test_unit.py::TestUnitMySGEN::test_unit_format_metadata",
"tests/test_unit.py::TestUnitMySGEN::test_unit_parse",
"tests/test_unit.py::TestUnitItem::test_unit_item_init",
"tests/test_unit.py::TestUnitItem::test_unit_item_abstract_process",
"tests/test_unit.py::TestUnitItem::test_unit_item_patch_content",
"tests/test_unit.py::TestUnitItem::test_unit_item_copy",
"tests/test_unit.py::TestUnitPost::test_unit_post_init",
"tests/test_unit.py::TestUnitPost::test_unit_post_process[meta0-content-base0-template0]",
"tests/test_unit.py::TestUnitPost::test_unit_post_copy_raises",
"tests/test_unit.py::TestUnitImagePost::test_unit_imagepost_init",
"tests/test_unit.py::TestUnitImagePost::test_unit_imagepost_process[True]",
"tests/test_unit.py::TestUnitImagePost::test_unit_imagepost_process[False]",
"tests/test_unit.py::TestUnitImagePost::test_unit_imagepost_resize_image[posts/path/images/image1.jpg-image_size0-thumbnail_size0-image1_small.jpg]",
"tests/test_unit.py::TestUnitDataPost::test_unit_datapost_init",
"tests/test_unit.py::TestUnitDataPost::test_unit_datapost_process",
"tests/test_unit.py::TestUnitPage::test_unit_page_init",
"tests/test_unit.py::TestUnitPage::test_unit_page_process[pages/home--home]",
"tests/test_unit.py::TestUnitPage::test_unit_page_process[pages/archive-archive-archive]",
"tests/test_unit.py::TestUnitDataPage::test_unit_datapage_init",
"tests/test_unit.py::TestUnitDataPage::test_unit_datapage_process"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-01-08 11:38:17+00:00
|
mit
| 3,875 |
|
mgcth__mysgen-59
|
diff --git a/README.md b/README.md
index 0c5fd30..5ff2ff9 100644
--- a/README.md
+++ b/README.md
@@ -57,7 +57,8 @@ The configuration file `config.json` should contain the following
"fenced_code",
"mdx_math"
],
- "s3-bucket": "bucket"
+ "s3-bucket": "bucket",
+ "mangle_image_name": true
}
```
diff --git a/src/mysgen/mysgen.py b/src/mysgen/mysgen.py
index 86a694e..a460fc8 100644
--- a/src/mysgen/mysgen.py
+++ b/src/mysgen/mysgen.py
@@ -4,6 +4,8 @@ import os
import json
import glob
import boto3
+import shutil
+import hashlib
import markdown
from PIL import Image
from typing import Any
@@ -159,10 +161,32 @@ class ImagePost(Post):
self.meta["thumbnail_size"] = base["thumbnail_size"]
self.meta["thumbnails"] = []
self.meta["image_paths"] = []
- for to_image in glob.glob(join(self.to_path, "*.*")):
- if not isfile(to_image):
- continue
+ images = [
+ to_image
+ for to_image in glob.glob(join(self.to_path, "*.*"))
+ if isfile(to_image)
+ ]
+
+ if base["mangle_image_name"]:
+ sorted_images = sorted(images)
+ images = [
+ join(
+ *split(to_image)[:-2],
+ str(i)
+ + "-"
+ + hashlib.sha256(
+ bytearray(split(to_image)[-1].split(".")[0], "utf-8")
+ ).hexdigest()[:7],
+ )
+ + "."
+ + to_image.split(".")[-1]
+ for i, to_image in enumerate(sorted_images)
+ ]
+ for im, im_sha in zip(sorted_images, images):
+ shutil.move(im, im_sha)
+
+ for to_image in images:
self.meta["image_paths"].append(split(to_image)[-1])
self._resize_image(to_image)
|
mgcth/mysgen
|
1dc0fa88ac5f4cb99d2bd0c18a90f168e3c1e55d
|
diff --git a/tests/fixtures/test_config.json b/tests/fixtures/test_config.json
index bb04cfa..aa2fa08 100644
--- a/tests/fixtures/test_config.json
+++ b/tests/fixtures/test_config.json
@@ -23,5 +23,6 @@
"fenced_code",
"mdx_math"
],
- "s3-bucket": false
+ "s3-bucket": false,
+ "mangle_image_name": false
}
\ No newline at end of file
diff --git a/tests/test_unit.py b/tests/test_unit.py
index 9dccccd..065921e 100644
--- a/tests/test_unit.py
+++ b/tests/test_unit.py
@@ -4,6 +4,7 @@ Functions to test mysgen.
import os
import json
import pytest
+import hashlib
from datetime import datetime
from collections import OrderedDict
from distutils.errors import DistutilsFileError
@@ -465,7 +466,10 @@ class TestUnitImagePost:
assert post.from_path == "src/images/post"
assert post.to_path == "build/posts/post/images"
- @pytest.mark.parametrize("isfile", [(True), (False)])
+ @pytest.mark.parametrize(
+ "isfile, mangle_image_name", [(True, False), (False, False), (True, True)]
+ )
+ @patch("mysgen.mysgen.shutil.move")
@patch("mysgen.mysgen.ImagePost._resize_image")
@patch("mysgen.mysgen.isfile")
@patch("mysgen.mysgen.glob.glob")
@@ -478,17 +482,19 @@ class TestUnitImagePost:
mock_glob,
mock_isfile,
mock_resize_image,
+ mock_move,
isfile,
+ mangle_image_name,
):
"""
Unit test of ImagePost process method.
"""
- mock_base = MagicMock()
+ mock_base = {"mangle_image_name": mangle_image_name, "thumbnail_size": 0}
mock_template = MagicMock()
post = ImagePost(
{"path": "posts/post1.md"}, MagicMock(), MagicMock(), MagicMock()
)
- mock_glob.return_value = (g for g in ["path/image1.jpg", "path/image2.jpg"])
+ mock_glob.return_value = (g for g in ["path/image2.jpg", "path/image1.jpg"])
mock_isfile.return_value = isfile
post.process(mock_base, mock_template)
@@ -500,8 +506,19 @@ class TestUnitImagePost:
if isfile:
assert mock_resize_image.call_count == 2
- assert post.meta["image_paths"] == ["image1.jpg", "image2.jpg"]
mock_item_process.assert_called_once_with(mock_base, mock_template)
+
+ if mangle_image_name:
+ assert post.meta["image_paths"] == [
+ "0-"
+ + hashlib.sha256(bytearray("image1", "utf-8")).hexdigest()[:7]
+ + ".jpg",
+ "1-"
+ + hashlib.sha256(bytearray("image2", "utf-8")).hexdigest()[:7]
+ + ".jpg",
+ ]
+ else:
+ assert post.meta["image_paths"] == ["image2.jpg", "image1.jpg"]
else:
assert mock_resize_image.call_count == 0
assert post.meta["image_paths"] == []
|
Mangle image names
## Description
Add option to mangel image names and sort them by date. Requires that image name is the date, as the code expects images with stripped metadata.
|
0.0
|
1dc0fa88ac5f4cb99d2bd0c18a90f168e3c1e55d
|
[
"tests/test_unit.py::TestUnitImagePost::test_unit_imagepost_process[True-False]",
"tests/test_unit.py::TestUnitImagePost::test_unit_imagepost_process[False-False]",
"tests/test_unit.py::TestUnitImagePost::test_unit_imagepost_process[True-True]"
] |
[
"tests/test_unit.py::test_unit_build",
"tests/test_unit.py::TestUnitMySGEN::test_unit_mysgen_init",
"tests/test_unit.py::TestUnitMySGEN::test_unit_mysgen_build[bucket]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_mysgen_build[False]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_mysgen_build[None]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_mysgen_set_base_config",
"tests/test_unit.py::TestUnitMySGEN::test_unit_build_menu",
"tests/test_unit.py::TestUnitMySGEN::test_unit_define_environment",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[unknown-files0-meta0]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[pages-files1-meta1]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[posts-files2-meta2]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[pages-files3-meta3]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[pages-files4-meta4]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[pages-files5-meta5]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[posts-files6-meta6]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[posts-files7-meta7]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[posts-files8-meta8]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[posts-files9-meta9]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_find_and_parse[posts-files10-meta10]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_process[unknown-data0]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_process[pages-data1]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_process[pages-data2]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_process[posts-data3]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_process[posts-data4]",
"tests/test_unit.py::TestUnitMySGEN::test_unit_copy_s3",
"tests/test_unit.py::TestUnitMySGEN::test_unit_format_metadata",
"tests/test_unit.py::TestUnitMySGEN::test_unit_parse",
"tests/test_unit.py::TestUnitItem::test_unit_item_init",
"tests/test_unit.py::TestUnitItem::test_unit_item_abstract_process",
"tests/test_unit.py::TestUnitItem::test_unit_item_patch_content",
"tests/test_unit.py::TestUnitItem::test_unit_item_copy",
"tests/test_unit.py::TestUnitPost::test_unit_post_init",
"tests/test_unit.py::TestUnitPost::test_unit_post_process[meta0-content-base0-template0]",
"tests/test_unit.py::TestUnitPost::test_unit_post_copy_raises",
"tests/test_unit.py::TestUnitImagePost::test_unit_imagepost_init",
"tests/test_unit.py::TestUnitImagePost::test_unit_imagepost_resize_image[posts/path/images/image1.jpg-image_size0-thumbnail_size0-image1_small.jpg]",
"tests/test_unit.py::TestUnitDataPost::test_unit_datapost_init",
"tests/test_unit.py::TestUnitDataPost::test_unit_datapost_process",
"tests/test_unit.py::TestUnitPage::test_unit_page_init",
"tests/test_unit.py::TestUnitPage::test_unit_page_process[pages/home--home]",
"tests/test_unit.py::TestUnitPage::test_unit_page_process[pages/archive-archive-archive]",
"tests/test_unit.py::TestUnitDataPage::test_unit_datapage_init",
"tests/test_unit.py::TestUnitDataPage::test_unit_datapage_process"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-01-10 14:41:43+00:00
|
mit
| 3,876 |
|
mgedmin__ghcloneall-10
|
diff --git a/CHANGES.rst b/CHANGES.rst
index a128502..23c9f9f 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -5,8 +5,8 @@ Changelog
1.9.3 (unreleased)
------------------
+- Allow authentication with github token
- Add support for Python 3.9.
-
- Drop support for Python 3.5.
diff --git a/README.rst b/README.rst
index d7f7175..149408f 100644
--- a/README.rst
+++ b/README.rst
@@ -80,11 +80,13 @@ Other command-line options::
$ ghcloneall --help
usage: ghcloneall [-h] [--version] [-c CONCURRENCY] [-n] [-q] [-v]
[--start-from REPO] [--organization ORGANIZATION]
- [--user USER] [--gists] [--repositories] [--pattern PATTERN]
- [--include-forks] [--exclude-forks] [--include-archived]
- [--exclude-archived] [--include-private] [--exclude-private]
- [--include-disabled] [--exclude-disabled] [--init]
- [--http-cache DBNAME] [--no-http-cache]
+ [--user USER] [--github-token GITHUB_TOKEN] [--gists]
+ [--repositories] [--pattern PATTERN] [--include-forks]
+ [--exclude-forks] [--include-archived] [--exclude-archived]
+ [--include-private] [--exclude-private] [--include-disabled]
+ [--exclude-disabled] [--init] [--http-cache DBNAME]
+ [--no-http-cache]
+
Clone/update all user/org repositories from GitHub.
@@ -101,6 +103,8 @@ Other command-line options::
--organization ORGANIZATION
specify the GitHub organization
--user USER specify the GitHub user
+ --github-token GITHUB_TOKEN
+ specify the GitHub token
--gists clone user's gists
--repositories clone user's or organisation's repositories (default)
--pattern PATTERN specify repository name glob pattern to filter
@@ -130,6 +134,8 @@ should look like this::
# github_org = ZopeFoundation
github_user = mgedmin
pattern = *.vim
+ # Provide github token for authentication
+ # github_token = <my-github-token>
# You can also uncomment and change these if you wish
# gists = False
# include_forks = False
diff --git a/ghcloneall.py b/ghcloneall.py
index 43f7cd0..e5bd1e2 100755
--- a/ghcloneall.py
+++ b/ghcloneall.py
@@ -83,7 +83,7 @@ def get_github_list(url, batch_size=100, progress_callback=None, session=None):
while 'next' in links:
if progress_callback:
progress_callback(len(res))
- more, links = get_json_and_links(links['next']['url'])
+ more, links = get_json_and_links(links['next']['url'], session)
res += more
return res
@@ -383,7 +383,8 @@ class Repo(object):
class RepoWrangler(object):
- def __init__(self, dry_run=False, verbose=0, progress=None, quiet=False):
+ def __init__(self, dry_run=False, verbose=0, progress=None, quiet=False,
+ token=None):
self.n_repos = 0
self.n_updated = 0
self.n_new = 0
@@ -394,13 +395,18 @@ class RepoWrangler(object):
self.progress = progress if progress else Progress()
self.lock = threading.Lock()
+ self.session = requests.Session()
+ if token:
+ self.session.auth = ('', token)
+
def get_github_list(self, list_url, message):
self.progress.status(message)
def progress_callback(n):
self.progress.status("{} ({})".format(message, n))
- return get_github_list(list_url, progress_callback=progress_callback)
+ return get_github_list(list_url, progress_callback=progress_callback,
+ session=self.session)
def list_gists(self, user, pattern=None):
list_url = 'https://api.github.com/users/{}/gists'.format(user)
@@ -777,6 +783,9 @@ def _main():
parser.add_argument(
'--user',
help='specify the GitHub user')
+ parser.add_argument(
+ '--github-token',
+ help='specify the GitHub token')
parser.add_argument(
'--gists', action='store_true', default=None,
help="clone user's gists")
@@ -834,6 +843,9 @@ def _main():
args.user = config.get(CONFIG_SECTION, 'github_user')
if config.has_option(CONFIG_SECTION, 'github_org'):
args.organization = config.get(CONFIG_SECTION, 'github_org')
+ if not args.github_token:
+ if config.has_option(CONFIG_SECTION, 'github_token'):
+ args.github_token = config.get(CONFIG_SECTION, 'github_token')
if not args.pattern:
if config.has_option(CONFIG_SECTION, 'pattern'):
args.pattern = config.get(CONFIG_SECTION, 'pattern')
@@ -874,6 +886,8 @@ def _main():
config.set(CONFIG_SECTION, 'github_user', args.user)
if args.organization:
config.set(CONFIG_SECTION, 'github_org', args.organization)
+ if args.github_token:
+ config.set(CONFIG_SECTION, 'github_token', args.github_token)
if args.pattern:
config.set(CONFIG_SECTION, 'pattern', args.pattern)
if args.gists is not None:
@@ -913,7 +927,8 @@ def _main():
with Progress() as progress:
wrangler = RepoWrangler(dry_run=args.dry_run, verbose=args.verbose,
- progress=progress, quiet=args.quiet)
+ progress=progress, quiet=args.quiet,
+ token=args.github_token)
if args.gists:
repos = wrangler.list_gists(
user=args.user,
|
mgedmin/ghcloneall
|
5dbe065908a4baaf7067b61e77a02e30e6151c77
|
diff --git a/tests.py b/tests.py
index 4542bdf..c5f04c6 100644
--- a/tests.py
+++ b/tests.py
@@ -695,6 +695,12 @@ def Gist(name, **kwargs):
return ghcloneall.Repo.from_gist(gist(name, **kwargs))
+def test_RepoWrangler_auth():
+ token = 'UNITTEST'
+ wrangler = ghcloneall.RepoWrangler(token=token)
+ assert wrangler.session.auth == ('', token)
+
+
def test_RepoWrangler_list_gists(mock_requests_get):
mock_requests_get.update(mock_multi_page_api_responses(
url='https://api.github.com/users/test_user/gists',
@@ -1417,6 +1423,23 @@ def test_main_init_org(monkeypatch, capsys, config_writes_allowed):
)
+def test_main_init_org_token(monkeypatch, capsys, config_writes_allowed):
+ monkeypatch.setattr(sys, 'argv', [
+ 'ghcloneall', '--init', '--org', 'gtimelog', '--github-token',
+ 'UNITTEST'
+ ])
+ ghcloneall.main()
+ assert capsys.readouterr().out == (
+ 'Wrote .ghcloneallrc\n'
+ )
+ assert config_writes_allowed.read_text() == (
+ '[ghcloneall]\n'
+ 'github_org = gtimelog\n'
+ 'github_token = UNITTEST\n'
+ '\n'
+ )
+
+
def test_main_init_filter_flags(monkeypatch, capsys, config_writes_allowed):
monkeypatch.setattr(sys, 'argv', [
'ghcloneall', '--init', '--org', 'gtimelog',
@@ -1443,6 +1466,7 @@ def test_main_reads_config_file(monkeypatch, capsys, config_writes_allowed):
u'[ghcloneall]\n'
u'github_user = mgedmin\n'
u'github_org = gtimelog\n'
+ u'github_token = UNITTEST\n'
u'gists = False\n'
u'pattern = *.vim\n'
u'include_forks = True\n'
|
clone private repositories
In #6 you wrote...
> No, I'm using anonymous access. This means, of course, that ghcloneall is only suitable for cloning open source repositories that are publicly available.
1. oh no!
2. how does this go together with the optional argument "--include-private include private repositories (default)"? (see readme)
|
0.0
|
5dbe065908a4baaf7067b61e77a02e30e6151c77
|
[
"tests.py::test_RepoWrangler_auth",
"tests.py::test_main_init_org_token"
] |
[
"tests.py::test_get_json_and_links",
"tests.py::test_get_json_and_links_failure",
"tests.py::test_get_github_list",
"tests.py::test_Progress",
"tests.py::test_Progress_no_output_after_finish",
"tests.py::test_Progress_progress",
"tests.py::test_Progress_context_manager",
"tests.py::test_Progress_item_details",
"tests.py::test_Progress_item_failure",
"tests.py::test_Progress_item_finished",
"tests.py::test_Progress_item_finished_and_hidden",
"tests.py::test_Progress_item_once_hidden_stays_hidden",
"tests.py::test_Progress_extra_info",
"tests.py::test_Progress_error_info",
"tests.py::test_Progress_extra_info_but_not_really",
"tests.py::test_Progress_extra_info_multiple_lines",
"tests.py::test_Progress_extra_info_not_last_item",
"tests.py::test_Progress_extra_info_not_last_item_redraws_all_below",
"tests.py::test_Repo",
"tests.py::test_RepoWrangler_list_gists",
"tests.py::test_RepoWrangler_list_gists_filtering",
"tests.py::test_RepoWrangler_list_repos_for_user",
"tests.py::test_RepoWrangler_list_repos_for_org",
"tests.py::test_RepoWrangler_list_repos_filter_by_name",
"tests.py::test_RepoWrangler_list_repos_filter_by_status",
"tests.py::test_RepoWrangler_list_repos_progress_bar",
"tests.py::test_RepoWrangler_list_repos_missing_arguments",
"tests.py::test_RepoWrangler_repo_task",
"tests.py::test_RepoTask_run_updates",
"tests.py::test_RepoTask_run_handles_errors",
"tests.py::test_RepoTask_run_in_quiet_mode",
"tests.py::test_RepoTask_aborted",
"tests.py::test_RepoTask_verify",
"tests.py::test_RepoTask_verify_unknown_files",
"tests.py::test_RepoTask_branch_name",
"tests.py::test_RepoTask_call_status_handling",
"tests.py::test_RepoTask_call_error_handling",
"tests.py::test_RepoTask_call_error_handling_verbose",
"tests.py::test_RepoTask_check_call_status_handling",
"tests.py::test_RepoTask_check_call_output_is_shown",
"tests.py::test_RepoTask_check_call_status_and_output",
"tests.py::test_RepoTask_check_output_error_handling",
"tests.py::test_RepoTask_check_output_stderr_without_rc",
"tests.py::test_RepoTask_get_current_branch",
"tests.py::test_RepoTask_get_remote_url",
"tests.py::test_SequentialJobQueue",
"tests.py::test_ConcurrentJobQueue",
"tests.py::test_ConcurrentJobQueue_can_be_interrupted",
"tests.py::test_main_version",
"tests.py::test_main_help",
"tests.py::test_main_keyboard_interrupt",
"tests.py::test_main_missing_args",
"tests.py::test_main_conflicting_args",
"tests.py::test_main_no_org_gists",
"tests.py::test_main_run_error_handling",
"tests.py::test_main_run",
"tests.py::test_main_run_start_from",
"tests.py::test_main_run_gists",
"tests.py::test_main_init_dry_run",
"tests.py::test_main_init",
"tests.py::test_main_init_gists",
"tests.py::test_main_init_org",
"tests.py::test_main_init_filter_flags",
"tests.py::test_main_reads_config_file"
] |
{
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-07-24 06:36:38+00:00
|
mit
| 3,877 |
|
mggg__GerryChain-289
|
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 12d5575..0000000
--- a/.travis.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-# Config file for automatic testing at travis-ci.org
-
-language: python
- - "2.7"
- - "3.6"
-sudo: required
-before_install:
- - sudo apt-get update
-install:
- - if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then
- wget https://repo.continuum.io/miniconda/Miniconda2-latest-Linux-x86_64.sh -O miniconda.sh;
- else
- wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh;
- fi
- - bash miniconda.sh -b -p $HOME/miniconda
- - export PATH="$HOME/miniconda/bin:$PATH"
- - hash -r
- - conda config --set always_yes yes
- - conda update -q conda
- - conda install -n conda-build
- - conda install anaconda-client
- - conda install pytest-cov
- - pip install coveralls
- - conda build .
- - conda install --use-local gerrychain
- - conda info -a
-script:
- - pytest -v --cov=gerrychain tests
-after_success:
- - coveralls
diff --git a/gerrychain/partition/assignment.py b/gerrychain/partition/assignment.py
index e1c4a43..fd4f967 100644
--- a/gerrychain/partition/assignment.py
+++ b/gerrychain/partition/assignment.py
@@ -1,11 +1,13 @@
from collections import defaultdict
+from itertools import chain, repeat
+from collections.abc import Mapping
import pandas
from ..updaters.flows import flows_from_changes
-class Assignment:
+class Assignment(Mapping):
"""An assignment of nodes into parts.
The goal of :class:`Assignment` is to provide an interface that mirrors a
@@ -16,20 +18,21 @@ class Assignment:
``{part: <frozenset of nodes in part>}``.
"""
- def __init__(self, parts: dict):
+ def __init__(self, parts, validate=True):
+ if validate:
+ number_of_keys = sum(len(keys) for keys in parts.values())
+ number_of_unique_keys = len(set().union(*parts.values()))
+ if number_of_keys != number_of_unique_keys:
+ raise ValueError("Keys must have unique assignments.")
+ if not all(isinstance(keys, frozenset) for keys in parts.values()):
+ raise TypeError("Level sets must be frozensets")
self.parts = parts
- @classmethod
- def from_dict(cls, assignment):
- """Create an Assignment from a dictionary. This is probably the method you want
- to use to create a new assignment.
+ def __iter__(self):
+ return self.keys()
- This also works for pandas Series.
- """
- parts = {
- part: frozenset(nodes) for part, nodes in level_sets(assignment).items()
- }
- return cls(parts)
+ def __len__(self):
+ return sum(len(keys) for keys in self.parts.values())
def __getitem__(self, node):
for part, nodes in self.parts.items():
@@ -41,9 +44,9 @@ class Assignment:
"""Returns a copy of the assignment.
Does not duplicate the frozensets of nodes, just the parts dictionary.
"""
- return Assignment(self.parts.copy())
+ return Assignment(self.parts.copy(), validate=False)
- def update(self, mapping: dict):
+ def update(self, mapping):
"""Update the assignment for some nodes using the given mapping.
"""
flows = flows_from_changes(self, mapping)
@@ -59,6 +62,14 @@ class Assignment:
for node in nodes:
yield (node, part)
+ def keys(self):
+ return chain(*self.parts.values())
+
+ def values(self):
+ return chain(
+ *(repeat(value, times=len(keys)) for value, keys in self.parts.items())
+ )
+
def update_parts(self, new_parts):
"""Update some parts of the assignment. Does not check that every node is
still assigned to a part.
@@ -69,12 +80,6 @@ class Assignment:
for part, nodes in new_parts.items():
self.parts[part] = frozenset(nodes)
- def get(self, key, default=None):
- try:
- return self[key]
- except KeyError:
- return default
-
def to_series(self):
"""Convert the assignment to a :class:`pandas.Series`."""
groups = [
@@ -83,10 +88,21 @@ class Assignment:
return pandas.concat(groups)
def to_dict(self):
- """Convert the assignment to a {node: part} dictionary.
+ """Convert the assignment to a ``{node: part}`` dictionary.
This is expensive and should be used rarely."""
return {node: part for part, nodes in self.parts.items() for node in nodes}
+ @classmethod
+ def from_dict(cls, assignment):
+ """Create an :class:`Assignment` from a dictionary. This is probably the method you want
+ to use to create a new assignment.
+
+ This also works for :class:`pandas.Series`.
+ """
+ parts = {part: frozenset(keys) for part, keys in level_sets(assignment).items()}
+
+ return cls(parts)
+
def get_assignment(assignment, graph=None):
if isinstance(assignment, str):
diff --git a/gerrychain/partition/partition.py b/gerrychain/partition/partition.py
index b9a9e5d..f263e5e 100644
--- a/gerrychain/partition/partition.py
+++ b/gerrychain/partition/partition.py
@@ -41,8 +41,8 @@ class Partition:
self.assignment = get_assignment(assignment, graph)
- if not self.validate_assignment():
- raise NameError("Graph's nodes' names do not match the Assignment's geo units' names.")
+ if set(self.assignment) != set(graph):
+ raise KeyError("The graph's node labels do not match the Assignment's keys")
if updaters is None:
updaters = dict()
@@ -54,14 +54,6 @@ class Partition:
self.flows = None
self.edge_flows = None
- def validate_assignment(self):
- node_names = set(self.graph.nodes)
- if len(node_names) != sum(len(dist) for dist in self.assignment.parts.values()):
- return False
-
- assgn_names = set(name for dist in self.assignment.parts.values() for name in dist)
- return node_names == assgn_names
-
def _from_parent(self, parent, flips):
self.parent = parent
self.flips = flips
|
mggg/GerryChain
|
bf11fe51ae5237e9b889b754b80896edaef998d7
|
diff --git a/tests/partition/test_assignment.py b/tests/partition/test_assignment.py
index 334cbe1..bdbe7e5 100644
--- a/tests/partition/test_assignment.py
+++ b/tests/partition/test_assignment.py
@@ -2,6 +2,7 @@ import pandas
import pytest
from gerrychain.partition.assignment import Assignment, get_assignment
+from collections.abc import Mapping
@pytest.fixture
@@ -43,6 +44,50 @@ class TestAssignment:
assignment.update_parts({2: {2}, 3: {3}})
assert assignment.to_dict() == {1: 1, 2: 2, 3: 3}
+ def test_implements_Mapping_abc(self, assignment):
+ # __iter__
+ assert list(assignment) == [1, 2, 3]
+
+ # __contains__
+ for i in [1, 2, 3]:
+ assert i in assignment
+
+ # __len__
+ assert len(assignment) == 3
+
+ # __getitem__
+ assert assignment[1] == 1
+ assert assignment[3] == 2
+
+ # keys()
+ keys = list(assignment.keys())
+ assert len(keys) == 3
+ assert set(keys) == {1, 2, 3}
+
+ # values()
+ values = list(assignment.values())
+ assert len(values) == 3
+ assert set(values) == {1, 2}
+
+ # items()
+ items = list(assignment.items())
+ assert len(items) == 3
+ assert set(items) == {(1, 1), (2, 2), (3, 2)}
+
+ # __eq__
+ assert assignment == {1: 1, 2: 2, 3: 2}
+
+ assert isinstance(assignment, Mapping)
+
+ def test_assignment_raises_if_a_key_has_two_assignments(self):
+ with pytest.raises(ValueError):
+ Assignment({"one": {1, 2, 3}, "two": {1, 4, 5}})
+
+ def test_assignment_can_be_instantiated_from_series(self):
+ series = pandas.Series([1, 2, 1, 2], index=[1, 2, 3, 4])
+ assignment = Assignment.from_dict(series)
+ assert assignment == {1: 1, 2: 2, 3: 1, 4: 2}
+
def test_get_assignment_accepts_assignment(assignment):
created = assignment
@@ -53,3 +98,10 @@ def test_get_assignment_accepts_assignment(assignment):
def test_get_assignment_raises_typeerror_for_unexpected_input():
with pytest.raises(TypeError):
get_assignment(None)
+
+
+def test_get_assignment_with_series():
+ series = pandas.Series([1, 2, 1, 2], index=[1, 2, 3, 4])
+ assignment = get_assignment(series)
+ assert isinstance(assignment, Assignment)
+ assert assignment == {1: 1, 2: 2, 3: 1, 4: 2}
diff --git a/tests/partition/test_partition.py b/tests/partition/test_partition.py
index ddfb2c8..4695043 100644
--- a/tests/partition/test_partition.py
+++ b/tests/partition/test_partition.py
@@ -24,22 +24,18 @@ def test_Partition_can_be_flipped(example_partition):
assert new_partition.assignment[1] == 2
-def test_Partition_misnamed_vertices_raises_namerror():
+def test_Partition_misnamed_vertices_raises_keyerror():
graph = networkx.complete_graph(3)
- assignment = {'0': 1, '1': 1, '2': 2}
- with pytest.raises(NameError):
- partition = Partition(graph, assignment, {"cut_edges": cut_edges})
+ assignment = {"0": 1, "1": 1, "2": 2}
+ with pytest.raises(KeyError):
+ Partition(graph, assignment, {"cut_edges": cut_edges})
-def test_Partition_unlabelled_vertices_raises_namerror():
+
+def test_Partition_unlabelled_vertices_raises_keyerror():
graph = networkx.complete_graph(3)
assignment = {0: 1, 2: 2}
- with pytest.raises(NameError):
- partition = Partition(graph, assignment, {"cut_edges": cut_edges})
-
-
-def test_Partition_validate_vertex_in_unique_district(example_partition):
- example_partition.assignment.parts[1] = frozenset([0,1,2])
- assert example_partition.validate_assignment() == False
+ with pytest.raises(KeyError):
+ Partition(graph, assignment, {"cut_edges": cut_edges})
def test_Partition_knows_cut_edges_K3(example_partition):
|
Should Assignment implement collections.abc.Mapping?
Creating this in case we want to continue with the conversation started in #280 .
Abstractly, I feel like the answer is yes, but in practice this might mean adding a bunch of methods that we'll never use...
|
0.0
|
bf11fe51ae5237e9b889b754b80896edaef998d7
|
[
"tests/partition/test_assignment.py::TestAssignment::test_implements_Mapping_abc",
"tests/partition/test_assignment.py::TestAssignment::test_assignment_raises_if_a_key_has_two_assignments",
"tests/partition/test_assignment.py::TestAssignment::test_assignment_can_be_instantiated_from_series",
"tests/partition/test_assignment.py::test_get_assignment_with_series",
"tests/partition/test_partition.py::test_Partition_misnamed_vertices_raises_keyerror",
"tests/partition/test_partition.py::test_Partition_unlabelled_vertices_raises_keyerror"
] |
[
"tests/partition/test_assignment.py::TestAssignment::test_assignment_can_be_updated",
"tests/partition/test_assignment.py::TestAssignment::test_assignment_copy_does_not_copy_the_node_sets",
"tests/partition/test_assignment.py::TestAssignment::test_to_series",
"tests/partition/test_assignment.py::TestAssignment::test_to_dict",
"tests/partition/test_assignment.py::TestAssignment::test_has_get_method_like_a_dict",
"tests/partition/test_assignment.py::TestAssignment::test_raises_keyerror_for_missing_nodes",
"tests/partition/test_assignment.py::TestAssignment::test_can_update_parts",
"tests/partition/test_assignment.py::test_get_assignment_accepts_assignment",
"tests/partition/test_assignment.py::test_get_assignment_raises_typeerror_for_unexpected_input",
"tests/partition/test_partition.py::test_Partition_can_be_flipped",
"tests/partition/test_partition.py::test_Partition_knows_cut_edges_K3",
"tests/partition/test_partition.py::test_propose_random_flip_proposes_a_partition",
"tests/partition/test_partition.py::test_Partition_parts_is_a_dictionary_of_parts_to_nodes",
"tests/partition/test_partition.py::test_Partition_has_subgraphs",
"tests/partition/test_partition.py::test_Partition_caches_subgraphs",
"tests/partition/test_partition.py::test_partition_implements_getattr_for_updater_access",
"tests/partition/test_partition.py::test_can_be_created_from_a_districtr_file",
"tests/partition/test_partition.py::test_from_districtr_plan_raises_if_id_column_missing"
] |
{
"failed_lite_validators": [
"has_issue_reference",
"has_removed_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-04-01 20:32:09+00:00
|
bsd-3-clause
| 3,878 |
|
mggg__GerryChain-316
|
diff --git a/gerrychain/constraints/validity.py b/gerrychain/constraints/validity.py
index 4a1f663..cf6e352 100644
--- a/gerrychain/constraints/validity.py
+++ b/gerrychain/constraints/validity.py
@@ -1,5 +1,6 @@
from ..updaters import CountySplit
from .bounds import Bounds
+import numpy
class Validator:
@@ -31,6 +32,10 @@ class Validator:
# check each constraint function and fail when a constraint test fails
for constraint in self.constraints:
is_valid = constraint(partition)
+ # Coerce NumPy booleans
+ if isinstance(is_valid, numpy.bool_):
+ is_valid = bool(is_valid)
+
if is_valid is False:
return False
elif is_valid is True:
diff --git a/gerrychain/partition/partition.py b/gerrychain/partition/partition.py
index 83c4b9a..b893aee 100644
--- a/gerrychain/partition/partition.py
+++ b/gerrychain/partition/partition.py
@@ -6,6 +6,7 @@ from ..graph import Graph
from ..updaters import compute_edge_flows, flows_from_changes
from .assignment import get_assignment
from .subgraphs import SubgraphView
+from ..updaters import cut_edges
class Partition:
@@ -15,7 +16,9 @@ class Partition:
aggregations and calculations that we want to optimize.
"""
- default_updaters = {}
+ default_updaters = {
+ "cut_edges": cut_edges
+ }
def __init__(
self, graph=None, assignment=None, updaters=None, parent=None, flips=None
@@ -46,6 +49,7 @@ class Partition:
if updaters is None:
updaters = dict()
+
self.updaters = self.default_updaters.copy()
self.updaters.update(updaters)
diff --git a/gerrychain/tree.py b/gerrychain/tree.py
index 60e0f2f..c56761a 100644
--- a/gerrychain/tree.py
+++ b/gerrychain/tree.py
@@ -202,20 +202,31 @@ def recursive_tree_part(
"""
flips = {}
remaining_nodes = set(graph.nodes)
+ # We keep a running tally of deviation from ``epsilon`` at each partition
+ # and use it to tighten the population constraints on a per-partition
+ # basis such that every partition, including the last partition, has a
+ # population within +/-``epsilon`` of the target population.
+ # For instance, if district n's population exceeds the target by 2%
+ # with a +/-2% epsilon, then district n+1's population should be between
+ # 98% of the target population and the target population.
+ debt = 0
for part in parts[:-1]:
+ min_pop = max(pop_target * (1 - epsilon), pop_target * (1 - epsilon) - debt)
+ max_pop = min(pop_target * (1 + epsilon), pop_target * (1 + epsilon) - debt)
nodes = method(
graph.subgraph(remaining_nodes),
pop_col=pop_col,
- pop_target=pop_target,
- epsilon=epsilon,
+ pop_target=(min_pop + max_pop) / 2,
+ epsilon=(max_pop - min_pop) / (2 * pop_target),
node_repeats=node_repeats,
)
+ part_pop = 0
for node in nodes:
flips[node] = part
- # update pop_target?
-
+ part_pop += graph.nodes[node][pop_col]
+ debt += part_pop - pop_target
remaining_nodes -= nodes
# All of the remaining nodes go in the last part
|
mggg/GerryChain
|
0ac0c6ac09c618c6c834223c5aca6d8a192d7b75
|
diff --git a/tests/constraints/test_validity.py b/tests/constraints/test_validity.py
index d4061a0..006b563 100644
--- a/tests/constraints/test_validity.py
+++ b/tests/constraints/test_validity.py
@@ -1,6 +1,7 @@
from unittest.mock import MagicMock
import networkx as nx
+import numpy
import pytest
from gerrychain.constraints import (SelfConfiguringLowerBound, Validator,
@@ -140,6 +141,17 @@ def test_validator_raises_TypeError_if_constraint_returns_non_boolean():
validator(mock_partition)
+def test_validator_accepts_numpy_booleans():
+ mock_partition = MagicMock()
+
+ mock_constraint = MagicMock()
+ mock_constraint.return_value = numpy.bool_(True)
+ mock_constraint.__name__ = "mock_constraint"
+
+ is_valid = Validator([mock_constraint])
+ assert is_valid(mock_partition)
+
+
def test_no_vanishing_districts_works():
parent = MagicMock()
parent.assignment = get_assignment({1: 1, 2: 2}, MagicMock())
diff --git a/tests/partition/test_partition.py b/tests/partition/test_partition.py
index 6b44a41..56b0b16 100644
--- a/tests/partition/test_partition.py
+++ b/tests/partition/test_partition.py
@@ -137,6 +137,16 @@ def districtr_plan_file():
json.dump(districtr_plan, f)
yield filename
-
def test_repr(example_partition):
assert repr(example_partition) == "<Partition [2 parts]>"
+
+def test_partition_has_default_updaters(example_partition):
+ partition = example_partition
+ default_updaters = partition.default_updaters
+ should_have_updaters = {
+ "cut_edges": cut_edges
+ }
+
+ for updater in should_have_updaters:
+ assert default_updaters.get(updater, None) is not None
+ assert should_have_updaters[updater](partition) == partition[updater]
diff --git a/tests/test_tree.py b/tests/test_tree.py
index 1cab047..1acefaf 100644
--- a/tests/test_tree.py
+++ b/tests/test_tree.py
@@ -11,6 +11,7 @@ from gerrychain.tree import (
bipartition_tree,
random_spanning_tree,
find_balanced_edge_cuts,
+ recursive_tree_part,
PopulatedGraph,
)
from gerrychain.updaters import Tally, cut_edges
@@ -32,6 +33,16 @@ def partition_with_pop(graph_with_pop):
)
[email protected]
+def twelve_by_twelve_with_pop():
+ xy_grid = networkx.grid_graph([12, 12])
+ nodes = {node: node[1] + 12 * node[0] for node in xy_grid}
+ grid = networkx.relabel_nodes(xy_grid, nodes)
+ for node in grid:
+ grid.nodes[node]["pop"] = 1
+ return grid
+
+
def test_bipartition_tree_returns_a_subset_of_nodes(graph_with_pop):
ideal_pop = sum(graph_with_pop.nodes[node]["pop"] for node in graph_with_pop) / 2
result = bipartition_tree(graph_with_pop, "pop", ideal_pop, 0.25, 10)
@@ -48,6 +59,19 @@ def test_bipartition_tree_returns_within_epsilon_of_target_pop(graph_with_pop):
assert abs(part_pop - ideal_pop) / ideal_pop < epsilon
+def test_recursive_tree_part_returns_within_epsilon_of_target_pop(twelve_by_twelve_with_pop):
+ n_districts = 7 # 144/7 ≈ 20.5 nodes/subgraph (1 person/node)
+ ideal_pop = (sum(twelve_by_twelve_with_pop.nodes[node]["pop"]
+ for node in twelve_by_twelve_with_pop)) / n_districts
+ epsilon = 0.05
+ result = recursive_tree_part(twelve_by_twelve_with_pop, range(n_districts),
+ ideal_pop, "pop", epsilon)
+ partition = Partition(twelve_by_twelve_with_pop, result,
+ updaters={"pop": Tally("pop")})
+ return all(abs(part_pop - ideal_pop) / ideal_pop < epsilon
+ for part_pop in partition['pop'].values())
+
+
def test_random_spanning_tree_returns_tree_with_pop_attribute(graph_with_pop):
tree = random_spanning_tree(graph_with_pop)
assert networkx.is_tree(tree)
|
Four of six default proposal functions require a `cut_edges` updater
The `propose_chunk_flip`, `propose_random_flip`, `slow_reversible_propose_bi`, and `slow_reversible_propose` proposal functions all require that a `cut_edges` updater is present on the `partition` parameter passed to the function.
Returns a `KeyError`.
|
0.0
|
0ac0c6ac09c618c6c834223c5aca6d8a192d7b75
|
[
"tests/constraints/test_validity.py::test_validator_accepts_numpy_booleans",
"tests/partition/test_partition.py::test_partition_has_default_updaters"
] |
[
"tests/constraints/test_validity.py::test_contiguous_with_contiguity_no_flips_is_true",
"tests/constraints/test_validity.py::test_contiguous_with_contiguity_flips_is_true",
"tests/constraints/test_validity.py::test_discontiguous_with_contiguous_no_flips_is_false",
"tests/constraints/test_validity.py::test_discontiguous_with_single_flip_contiguous_no_flips_is_false",
"tests/constraints/test_validity.py::test_discontiguous_with_contiguous_bfs_no_flips_is_false",
"tests/constraints/test_validity.py::test_discontiguous_with_contiguous_flips_is_false",
"tests/constraints/test_validity.py::test_discontiguous_with_contiguous_bfs_flips_is_false",
"tests/constraints/test_validity.py::test_districts_within_tolerance_returns_false_if_districts_are_not_within_tolerance",
"tests/constraints/test_validity.py::test_districts_within_tolerance_returns_true_if_districts_are_within_tolerance",
"tests/constraints/test_validity.py::test_self_configuring_lower_bound_always_allows_the_first_argument_it_gets",
"tests/constraints/test_validity.py::test_validator_raises_TypeError_if_constraint_returns_non_boolean",
"tests/constraints/test_validity.py::test_no_vanishing_districts_works",
"tests/partition/test_partition.py::test_Partition_can_be_flipped",
"tests/partition/test_partition.py::test_Partition_misnamed_vertices_raises_keyerror",
"tests/partition/test_partition.py::test_Partition_unlabelled_vertices_raises_keyerror",
"tests/partition/test_partition.py::test_Partition_knows_cut_edges_K3",
"tests/partition/test_partition.py::test_propose_random_flip_proposes_a_partition",
"tests/partition/test_partition.py::test_Partition_parts_is_a_dictionary_of_parts_to_nodes",
"tests/partition/test_partition.py::test_Partition_has_subgraphs",
"tests/partition/test_partition.py::test_Partition_caches_subgraphs",
"tests/partition/test_partition.py::test_partition_implements_getattr_for_updater_access",
"tests/partition/test_partition.py::test_can_be_created_from_a_districtr_file",
"tests/partition/test_partition.py::test_from_districtr_plan_raises_if_id_column_missing",
"tests/partition/test_partition.py::test_repr",
"tests/test_tree.py::test_bipartition_tree_returns_a_subset_of_nodes",
"tests/test_tree.py::test_bipartition_tree_returns_within_epsilon_of_target_pop",
"tests/test_tree.py::test_recursive_tree_part_returns_within_epsilon_of_target_pop",
"tests/test_tree.py::test_random_spanning_tree_returns_tree_with_pop_attribute",
"tests/test_tree.py::test_bipartition_tree_returns_a_tree",
"tests/test_tree.py::test_recom_works_as_a_proposal",
"tests/test_tree.py::test_find_balanced_cuts"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-06-30 21:49:55+00:00
|
mit
| 3,879 |
|
mgxd__etelemetry-client-4
|
diff --git a/etelemetry/client.py b/etelemetry/client.py
index 04094ba..0f8ff0c 100644
--- a/etelemetry/client.py
+++ b/etelemetry/client.py
@@ -1,18 +1,26 @@
-from requests import request, ConnectionError
+from requests import request, ConnectionError, ReadTimeout
from .config import ET_PROJECTS
def _etrequest(endpoint, method="get", **kwargs):
+ if kwargs.get('timeout') is None:
+ kwargs['timeout'] = 5
try:
res = request(method, endpoint, **kwargs)
except ConnectionError:
raise RuntimeError("Connection to server could not be made")
+ except ReadTimeout:
+ raise RuntimeError(
+ "No response from server in {timeout} seconds".format(
+ timeout=kwargs.get('timeout')
+ )
+ )
res.raise_for_status()
return res
-def get_project(repo):
+def get_project(repo, **rargs):
"""
Fetch latest version from server.
@@ -20,6 +28,8 @@ def get_project(repo):
==========
repo : str
GitHub repository as <owner>/<project>
+ **rargs
+ Request keyword arguments
Returns
=======
@@ -28,5 +38,5 @@ def get_project(repo):
"""
if "/" not in repo:
raise ValueError("Invalid repository")
- res = _etrequest(ET_PROJECTS + repo)
+ res = _etrequest(ET_PROJECTS.format(repo=repo), **rargs)
return res.json(encoding="utf-8")
diff --git a/etelemetry/config/__init__.py b/etelemetry/config/__init__.py
index 0e1f954..1f2a1a1 100644
--- a/etelemetry/config/__init__.py
+++ b/etelemetry/config/__init__.py
@@ -1,12 +1,13 @@
-hostname = "rig.mit.edu/"
+hostname = "rig.mit.edu"
https = True
if https is True:
- ET_ROOT = "https://" + hostname
+ prefix = "https"
else:
- ET_ROOT = "http://" + hostname
+ prefix = "http"
-root_endpoint = "et/"
-ET_ROOT += root_endpoint
+ET_ROOT = "{prefix}://{hostname}/et/".format(
+ prefix=prefix, hostname=hostname
+)
-ET_PROJECTS = ET_ROOT + "projects/"
+ET_PROJECTS = ET_ROOT + "projects/{repo}"
|
mgxd/etelemetry-client
|
a46b922ba071f06a1f978d9d196a03b8c49f63f8
|
diff --git a/etelemetry/tests/test_client.py b/etelemetry/tests/test_client.py
index f2368d4..9bf1790 100644
--- a/etelemetry/tests/test_client.py
+++ b/etelemetry/tests/test_client.py
@@ -4,11 +4,16 @@ from ..config import ET_ROOT
from ..client import _etrequest, get_project
-def test_request():
+def test_etrequest():
endpoint = "http://fakeendpoint/"
- with pytest.raises(Exception):
+ with pytest.raises(RuntimeError):
_etrequest(endpoint, method="get")
assert _etrequest(ET_ROOT)
+ # ensure timeout is working properly
+ endpoint = "https://google.com"
+ with pytest.raises(RuntimeError):
+ _etrequest(endpoint, timeout=0.01)
+ assert _etrequest(endpoint)
def test_get_project():
|
add a timeout variable
when http calls misbehave because of dns or routing, it would be good to return a response within a specific period of time.
|
0.0
|
a46b922ba071f06a1f978d9d196a03b8c49f63f8
|
[
"etelemetry/tests/test_client.py::test_etrequest"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-09-16 14:45:37+00:00
|
apache-2.0
| 3,880 |
|
mhe__pynrrd-103
|
diff --git a/nrrd/writer.py b/nrrd/writer.py
index b9187f7..24d78b0 100644
--- a/nrrd/writer.py
+++ b/nrrd/writer.py
@@ -180,6 +180,11 @@ def write(filename, data, header=None, detached_header=False, relative_data_path
if 'encoding' not in header:
header['encoding'] = 'gzip'
+ # If 'datafile' is specified, then we rename to 'data file'
+ # The standard seems to advocate for 'data file' OVER 'datafile'
+ if 'datafile' in header:
+ header['data file'] = header.pop('datafile')
+
# A bit of magic in handling options here.
# If *.nhdr filename provided, this overrides `detached_header=False`
# If *.nrrd filename provided AND detached_header=True, separate header and data files written.
@@ -188,7 +193,9 @@ def write(filename, data, header=None, detached_header=False, relative_data_path
if filename.endswith('.nhdr'):
detached_header = True
- if 'data file' not in header:
+ # TODO This will cause issues for relative data files because it will not save in the correct spot
+ data_filename = header.get('datafile', None)
+ if not data_filename:
# Get the base filename without the extension
base_filename = os.path.splitext(filename)[0]
@@ -207,9 +214,6 @@ def write(filename, data, header=None, detached_header=False, relative_data_path
header['data file'] = os.path.basename(data_filename) \
if relative_data_path else os.path.abspath(data_filename)
- else:
- # TODO This will cause issues for relative data files because it will not save in the correct spot
- data_filename = header['data file']
elif filename.endswith('.nrrd') and detached_header:
data_filename = filename
header['data file'] = os.path.basename(data_filename) \
|
mhe/pynrrd
|
b500fbce20b4c7126587714b4a3bcefe9fa1d645
|
diff --git a/nrrd/tests/test_writing.py b/nrrd/tests/test_writing.py
index 535b75d..7c0c1c7 100644
--- a/nrrd/tests/test_writing.py
+++ b/nrrd/tests/test_writing.py
@@ -310,6 +310,26 @@ class TestWritingFunctions(object):
self.assertTrue('space units: "mm" "cm" "in"' in lines)
self.assertTrue('labels: "X" "Y" "f(log(X, 10), Y)"' in lines)
+ def test_write_detached_datafile_check(self):
+ output_filename = os.path.join(self.temp_write_dir, 'testfile_detached.nhdr')
+
+ nrrd.write(output_filename, self.data_input, {'datafile': 'testfile_detached.gz'}, detached_header=True,
+ index_order=self.index_order)
+
+ # Read back the same file
+ data, header = nrrd.read(output_filename, index_order=self.index_order)
+ self.assertEqual(header['data file'], 'testfile_detached.raw.gz')
+
+ def test_write_detached_datafile_check2(self):
+ output_filename = os.path.join(self.temp_write_dir, 'testfile_detached.nhdr')
+
+ nrrd.write(output_filename, self.data_input, {'data file': 'testfile_detached.gz'}, detached_header=True,
+ index_order=self.index_order)
+
+ # Read back the same file
+ data, header = nrrd.read(output_filename, index_order=self.index_order)
+ self.assertEqual(header['data file'], 'testfile_detached.raw.gz')
+
class TestWritingFunctionsFortran(TestWritingFunctions, unittest.TestCase):
index_order = 'F'
|
Need to account for both 'datafile' and 'data file'
We should account for both 'data file' and 'datafile' in [Ln 190](https://github.com/mhe/pynrrd/blob/master/nrrd/writer.py#L190) and [Ln 211](https://github.com/mhe/pynrrd/blob/master/nrrd/writer.py#L211) just like:
https://github.com/mhe/pynrrd/blob/master/nrrd/reader.py#L354
|
0.0
|
b500fbce20b4c7126587714b4a3bcefe9fa1d645
|
[
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_detached_datafile_check",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_detached_datafile_check2",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_detached_datafile_check",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_detached_datafile_check2"
] |
[
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_invalid_custom_field",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_invalid_index_order",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_quoted_string_list_header",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_remove_endianness",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_unsupported_encoding",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_ascii_1d",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_ascii_2d",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_ascii_3d",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_bzip2",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_bzip2_level1",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_custom_fields_with_custom_field_map",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_custom_fields_without_custom_field_map",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_default_header",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_detached_ascii",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_detached_bz2",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_detached_gz",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_detached_raw",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_detached_raw_as_nrrd",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_detached_raw_odd_extension",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_fake_encoding",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_gz",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_gz_level1",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_raw",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_invalid_custom_field",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_invalid_index_order",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_quoted_string_list_header",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_remove_endianness",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_unsupported_encoding",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_ascii_1d",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_ascii_2d",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_ascii_3d",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_bzip2",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_bzip2_level1",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_custom_fields_with_custom_field_map",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_custom_fields_without_custom_field_map",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_default_header",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_detached_ascii",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_detached_bz2",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_detached_gz",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_detached_raw",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_detached_raw_as_nrrd",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_detached_raw_odd_extension",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_fake_encoding",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_gz",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_gz_level1",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_raw"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-11-16 17:18:04+00:00
|
mit
| 3,881 |
|
mhe__pynrrd-104
|
diff --git a/nrrd/writer.py b/nrrd/writer.py
index 24d78b0..f459451 100644
--- a/nrrd/writer.py
+++ b/nrrd/writer.py
@@ -109,9 +109,9 @@ def write(filename, data, header=None, detached_header=False, relative_data_path
.. note::
The following fields are automatically generated based on the :obj:`data` parameter ignoring these values
- in the :obj:`header`: 'type', 'endian', 'dimension', 'sizes'. In addition, the generated fields will be
- added to the given :obj:`header`. Thus, one can check the generated fields by viewing the passed
- :obj:`header`.
+ in the :obj:`header`: 'type', 'endian', 'dimension', 'sizes', and 'data file'. In addition, the generated
+ fields will be added to the given :obj:`header`. Thus, one can check the generated fields by viewing the
+ passed :obj:`header`.
.. note::
The default encoding field used if not specified in :obj:`header` is 'gzip'.
@@ -129,8 +129,10 @@ def write(filename, data, header=None, detached_header=False, relative_data_path
Filename of the NRRD file
data : :class:`numpy.ndarray`
Data to save to the NRRD file
- detached_header : :obj:`bool`, optional
- Whether the header and data should be saved in separate files. Defaults to :obj:`False`
+ detached_header : :obj:`bool` or :obj:`str`, optional
+ Whether the header and data should be saved in separate files. Defaults to :obj:`False`. If a :obj:`str` is
+ given this specifies the path to the datafile. This path will ONLY be used if the given filename ends with nhdr
+ (i.e. the file is a header)
relative_data_path : :class:`bool`
Whether the data filename in detached header is saved with a relative path or absolute path.
This parameter is ignored if there is no detached header. Defaults to :obj:`True`
@@ -180,22 +182,24 @@ def write(filename, data, header=None, detached_header=False, relative_data_path
if 'encoding' not in header:
header['encoding'] = 'gzip'
- # If 'datafile' is specified, then we rename to 'data file'
- # The standard seems to advocate for 'data file' OVER 'datafile'
+ # Remove detached data filename from the header
if 'datafile' in header:
- header['data file'] = header.pop('datafile')
+ header.pop('datafile')
+
+ if 'data file' in header:
+ header.pop('data file')
# A bit of magic in handling options here.
# If *.nhdr filename provided, this overrides `detached_header=False`
# If *.nrrd filename provided AND detached_header=True, separate header and data files written.
- # If detached_header=True and data file is present, then write the files separately
# For all other cases, header & data written to same file.
if filename.endswith('.nhdr'):
- detached_header = True
-
- # TODO This will cause issues for relative data files because it will not save in the correct spot
- data_filename = header.get('datafile', None)
- if not data_filename:
+ if isinstance(detached_header, str):
+ # Utilize the detached_header if a string was given as the path
+ # Note: An absolute path is obtained and assumed to be relative to the current path of the running Python
+ # program
+ data_filename = os.path.abspath(detached_header)
+ else:
# Get the base filename without the extension
base_filename = os.path.splitext(filename)[0]
@@ -212,13 +216,17 @@ def write(filename, data, header=None, detached_header=False, relative_data_path
else:
raise NRRDError('Invalid encoding specification while writing NRRD file: %s' % header['encoding'])
- header['data file'] = os.path.basename(data_filename) \
- if relative_data_path else os.path.abspath(data_filename)
+ # Update the data file field in the header with the path of the detached data
+ # TODO This will cause problems when the user specifies a relative data path and gives a custom path OUTSIDE
+ # of the current directory.
+ header['data file'] = os.path.basename(data_filename) \
+ if relative_data_path else os.path.abspath(data_filename)
+ detached_header = True
elif filename.endswith('.nrrd') and detached_header:
data_filename = filename
+ filename = '%s.nhdr' % os.path.splitext(filename)[0]
header['data file'] = os.path.basename(data_filename) \
if relative_data_path else os.path.abspath(data_filename)
- filename = '%s.nhdr' % os.path.splitext(filename)[0]
else:
# Write header & data as one file
data_filename = filename
|
mhe/pynrrd
|
6faf838d36094955adafc6c60c8e115dff2f2939
|
diff --git a/nrrd/tests/test_writing.py b/nrrd/tests/test_writing.py
index 7c0c1c7..7da1019 100644
--- a/nrrd/tests/test_writing.py
+++ b/nrrd/tests/test_writing.py
@@ -313,7 +313,7 @@ class TestWritingFunctions(object):
def test_write_detached_datafile_check(self):
output_filename = os.path.join(self.temp_write_dir, 'testfile_detached.nhdr')
- nrrd.write(output_filename, self.data_input, {'datafile': 'testfile_detached.gz'}, detached_header=True,
+ nrrd.write(output_filename, self.data_input, {'datafile': 'testfile_detachedWRONG.gz'}, detached_header=True,
index_order=self.index_order)
# Read back the same file
@@ -323,13 +323,36 @@ class TestWritingFunctions(object):
def test_write_detached_datafile_check2(self):
output_filename = os.path.join(self.temp_write_dir, 'testfile_detached.nhdr')
- nrrd.write(output_filename, self.data_input, {'data file': 'testfile_detached.gz'}, detached_header=True,
+ nrrd.write(output_filename, self.data_input, {'data file': 'testfile_detachedWRONG.gz'}, detached_header=True,
index_order=self.index_order)
# Read back the same file
data, header = nrrd.read(output_filename, index_order=self.index_order)
self.assertEqual(header['data file'], 'testfile_detached.raw.gz')
+ def test_write_detached_datafile_custom_name(self):
+ output_filename = os.path.join(self.temp_write_dir, 'testfile_detached.nhdr')
+ # Specify a custom path to write the
+ output_header_filename = os.path.join(self.temp_write_dir, 'testfile_detachedDifferent.gz')
+
+ nrrd.write(output_filename, self.data_input, detached_header=output_header_filename,
+ index_order=self.index_order)
+
+ # Read back the same file
+ data, header = nrrd.read(output_filename, index_order=self.index_order)
+ self.assertEqual(header['data file'], 'testfile_detachedDifferent.gz')
+
+ def test_write_check_remove_datafile(self):
+ output_filename = os.path.join(self.temp_write_dir, 'testfile.nrrd')
+
+ nrrd.write(output_filename, self.data_input, {'data file': 'testfile_detached.gz'}, detached_header=False,
+ index_order=self.index_order)
+
+ # Read back the same file
+ # The 'data file' parameter should be missing since this is NOT a detached file
+ data, header = nrrd.read(output_filename, index_order=self.index_order)
+ self.assertFalse('data file' in header)
+
class TestWritingFunctionsFortran(TestWritingFunctions, unittest.TestCase):
index_order = 'F'
|
Modify 'data file' whenever detached_header=True
We should omit the if-else below:
https://github.com/mhe/pynrrd/blob/master/nrrd/writer.py#L190
Omitting the condition should also address the TODO below:
https://github.com/mhe/pynrrd/blob/master/nrrd/writer.py#L210
I experienced this while modifying an NHDR and saving it as another NHDR. In this case, the modified detached data is not saved where my modified NHDR is.
Correction:
Detached data is saved in the working directory.
|
0.0
|
6faf838d36094955adafc6c60c8e115dff2f2939
|
[
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_check_remove_datafile",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_detached_datafile_custom_name",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_check_remove_datafile",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_detached_datafile_custom_name"
] |
[
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_invalid_custom_field",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_invalid_index_order",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_quoted_string_list_header",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_remove_endianness",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_unsupported_encoding",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_ascii_1d",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_ascii_2d",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_ascii_3d",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_bzip2",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_bzip2_level1",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_custom_fields_with_custom_field_map",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_custom_fields_without_custom_field_map",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_default_header",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_detached_ascii",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_detached_bz2",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_detached_datafile_check",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_detached_datafile_check2",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_detached_gz",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_detached_raw",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_detached_raw_as_nrrd",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_detached_raw_odd_extension",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_fake_encoding",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_gz",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_gz_level1",
"nrrd/tests/test_writing.py::TestWritingFunctionsFortran::test_write_raw",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_invalid_custom_field",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_invalid_index_order",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_quoted_string_list_header",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_remove_endianness",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_unsupported_encoding",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_ascii_1d",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_ascii_2d",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_ascii_3d",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_bzip2",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_bzip2_level1",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_custom_fields_with_custom_field_map",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_custom_fields_without_custom_field_map",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_default_header",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_detached_ascii",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_detached_bz2",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_detached_datafile_check",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_detached_datafile_check2",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_detached_gz",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_detached_raw",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_detached_raw_as_nrrd",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_detached_raw_odd_extension",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_fake_encoding",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_gz",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_gz_level1",
"nrrd/tests/test_writing.py::TestWritingFunctionsC::test_write_raw"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-11-16 18:06:48+00:00
|
mit
| 3,882 |
|
mhe__pynrrd-79
|
diff --git a/nrrd/writer.py b/nrrd/writer.py
index d8c0b1c..0a63f59 100644
--- a/nrrd/writer.py
+++ b/nrrd/writer.py
@@ -93,7 +93,7 @@ def _format_field_value(value, field_type):
raise NRRDError('Invalid field type given: %s' % field_type)
-def write(filename, data, header={}, detached_header=False, custom_field_map=None,
+def write(filename, data, header={}, detached_header=False, relative_data_path=True, custom_field_map=None,
compression_level = 9):
"""Write :class:`numpy.ndarray` to NRRD file
@@ -121,6 +121,9 @@ def write(filename, data, header={}, detached_header=False, custom_field_map=Non
Data to save to the NRRD file
detached_header : :obj:`bool`, optional
Whether the header and data should be saved in separate files. Defaults to :obj:`False`
+ relative_data_path : :class:`bool`
+ Whether the data filename in detached header is saved with a relative path or absolute path.
+ This parameter is ignored if there is no detached header. Defaults to :obj:`True`
custom_field_map : :class:`dict` (:class:`str`, :class:`str`), optional
Dictionary used for parsing custom field types where the key is the custom field name and the value is a
string identifying datatype for the custom field.
@@ -183,12 +186,14 @@ def write(filename, data, header={}, detached_header=False, custom_field_map=Non
else:
raise NRRDError('Invalid encoding specification while writing NRRD file: %s' % header['encoding'])
- header['data file'] = data_filename
+ header['data file'] = os.path.basename(data_filename) \
+ if relative_data_path else os.path.abspath(data_filename)
else:
data_filename = header['data file']
elif filename.endswith('.nrrd') and detached_header:
data_filename = filename
- header['data file'] = data_filename
+ header['data file'] = os.path.basename(data_filename) \
+ if relative_data_path else os.path.abspath(data_filename)
filename = '%s.nhdr' % os.path.splitext(filename)[0]
else:
# Write header & data as one file
|
mhe/pynrrd
|
c1c42d97f74ff2230b50df019d033a010f797918
|
diff --git a/nrrd/tests/test_writing.py b/nrrd/tests/test_writing.py
index 2b8274e..db1b507 100644
--- a/nrrd/tests/test_writing.py
+++ b/nrrd/tests/test_writing.py
@@ -158,7 +158,8 @@ class TestWritingFunctions(unittest.TestCase):
output_filename = os.path.join(self.temp_write_dir, 'testfile_detached_raw.nhdr')
output_data_filename = os.path.join(self.temp_write_dir, 'testfile_detached_raw.nrrd')
- nrrd.write(output_data_filename, self.data_input, {u'encoding': 'raw'}, detached_header=True)
+ nrrd.write(output_data_filename, self.data_input, {u'encoding': 'raw'}, detached_header=True,
+ relative_data_path=False)
# Read back the same file
data, header = nrrd.read(output_filename)
@@ -198,7 +199,8 @@ class TestWritingFunctions(unittest.TestCase):
output_filename = os.path.join(self.temp_write_dir, 'testfile_detached_raw.nhdr')
output_data_filename = os.path.join(self.temp_write_dir, 'testfile_detached_raw.raw.gz')
- nrrd.write(output_filename, self.data_input, {u'encoding': 'gz'}, detached_header=False)
+ nrrd.write(output_filename, self.data_input, {u'encoding': 'gz'}, detached_header=False,
+ relative_data_path=False)
# Read back the same file
data, header = nrrd.read(output_filename)
@@ -208,7 +210,6 @@ class TestWritingFunctions(unittest.TestCase):
def test_write_detached_bz2(self):
output_filename = os.path.join(self.temp_write_dir, 'testfile_detached_raw.nhdr')
- output_data_filename = os.path.join(self.temp_write_dir, 'testfile_detached_raw.raw.bz2')
nrrd.write(output_filename, self.data_input, {u'encoding': 'bz2'}, detached_header=False)
@@ -216,11 +217,10 @@ class TestWritingFunctions(unittest.TestCase):
data, header = nrrd.read(output_filename)
self.assertEqual(self.expected_data, data.tostring(order='F'))
self.assertEqual(header['encoding'], 'bz2')
- self.assertEqual(header['data file'], output_data_filename)
+ self.assertEqual(header['data file'], 'testfile_detached_raw.raw.bz2')
def test_write_detached_ascii(self):
output_filename = os.path.join(self.temp_write_dir, 'testfile_detached_raw.nhdr')
- output_data_filename = os.path.join(self.temp_write_dir, 'testfile_detached_raw.txt')
nrrd.write(output_filename, self.data_input, {u'encoding': 'txt'}, detached_header=False)
@@ -228,7 +228,7 @@ class TestWritingFunctions(unittest.TestCase):
data, header = nrrd.read(output_filename)
self.assertEqual(self.expected_data, data.tostring(order='F'))
self.assertEqual(header['encoding'], 'txt')
- self.assertEqual(header['data file'], output_data_filename)
+ self.assertEqual(header['data file'], 'testfile_detached_raw.txt')
if __name__ == '__main__':
|
Print relative data file name in the header
In the detached header, the data file name is printed with abspath. That means, if someone moves the data around, than data file location is invalid. Instead, how about we write the data file with basename?
https://github.com/mhe/pynrrd/blob/master/nrrd/writer.py#L186 and https://github.com/mhe/pynrrd/blob/master/nrrd/writer.py#L191
I think the specification also suggests that:
> Breaking the dataset into a header and one or more data files raises a new concerns, namely that the header file can't know if the data file has been erased, renamed, or moved. NRRD provides no means to overcome these problems once they've been created. On the other hand, moving the header and data files together to a new place is a common operation, and is supported by the special semantics associated with the data filename:
If the filename (given either directly from "filename", or generated from "format", or listed in a "LIST") does not begins with "/", it is taken to be the filename of the data file, relative to the location of the detached header file. The NRRD reader is responsible for constructing the full data filename from the filename of the detached header and "filename". Obviously, if the detached header has been passed to the reader not as a file name, but as a FILE* (such as stdin), this filename construction is impossible.
Otherwise, if "filename" does start with "/", then filename gives the full path of the data file. It is passed directly to fopen() without any interpretation.
Note: as of NRRD0004, the signifier of a header-relative file changed from the presence (at the beginning of the filename) of "./", to the absence of "/". Since essentially all uses of detached headers assumed header-relative data files, the explicit "./" flag was deemed unnecessary. With this change, it becomes impossible for a header to refer to the data file relative to the current working directory of the reader (which may be different that the directory of the header), but that's probably a good thing.
Suggestion:
> header['data file']= os.path.basename(data_filename)
Happy to submit a PR.
cc: @ihnorton
|
0.0
|
c1c42d97f74ff2230b50df019d033a010f797918
|
[
"nrrd/tests/test_writing.py::TestWritingFunctions::test_write_detached_ascii",
"nrrd/tests/test_writing.py::TestWritingFunctions::test_write_detached_bz2",
"nrrd/tests/test_writing.py::TestWritingFunctions::test_write_detached_gz",
"nrrd/tests/test_writing.py::TestWritingFunctions::test_write_detached_raw_as_nrrd"
] |
[
"nrrd/tests/test_writing.py::TestWritingFunctions::test_write_ascii_1d",
"nrrd/tests/test_writing.py::TestWritingFunctions::test_write_ascii_2d",
"nrrd/tests/test_writing.py::TestWritingFunctions::test_write_ascii_3d",
"nrrd/tests/test_writing.py::TestWritingFunctions::test_write_bzip2",
"nrrd/tests/test_writing.py::TestWritingFunctions::test_write_bzip2_level1",
"nrrd/tests/test_writing.py::TestWritingFunctions::test_write_custom_fields_with_custom_field_map",
"nrrd/tests/test_writing.py::TestWritingFunctions::test_write_custom_fields_without_custom_field_map",
"nrrd/tests/test_writing.py::TestWritingFunctions::test_write_detached_raw_odd_extension",
"nrrd/tests/test_writing.py::TestWritingFunctions::test_write_fake_encoding",
"nrrd/tests/test_writing.py::TestWritingFunctions::test_write_gz",
"nrrd/tests/test_writing.py::TestWritingFunctions::test_write_gz_level1",
"nrrd/tests/test_writing.py::TestWritingFunctions::test_write_raw"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-01-24 20:53:52+00:00
|
mit
| 3,883 |
|
mherrmann__gitignore_parser-17
|
diff --git a/gitignore_parser.py b/gitignore_parser.py
index c8a7771..5ce75f8 100644
--- a/gitignore_parser.py
+++ b/gitignore_parser.py
@@ -102,7 +102,7 @@ def rule_from_pattern(pattern, base_path=None, source=None):
if striptrailingspaces:
pattern = pattern[:i]
i = i - 1
- regex = fnmatch_pathname_to_regex(pattern)
+ regex = fnmatch_pathname_to_regex(pattern, directory_only)
if anchored:
regex = ''.join(['^', regex])
return IgnoreRule(
@@ -147,7 +147,7 @@ class IgnoreRule(collections.namedtuple('IgnoreRule_', IGNORE_RULE_FIELDS)):
# Frustratingly, python's fnmatch doesn't provide the FNM_PATHNAME
# option that .gitignore's behavior depends on.
-def fnmatch_pathname_to_regex(pattern):
+def fnmatch_pathname_to_regex(pattern, directory_only: bool):
"""
Implements fnmatch style-behavior, as though with FNM_PATHNAME flagged;
the path separator will not match shell-style '*' and '.' wildcards.
@@ -201,5 +201,6 @@ def fnmatch_pathname_to_regex(pattern):
else:
res.append(re.escape(c))
res.insert(0, '(?ms)')
- res.append('$')
- return ''.join(res)
\ No newline at end of file
+ if not directory_only:
+ res.append('$')
+ return ''.join(res)
|
mherrmann/gitignore_parser
|
43b39d04366038924239f7057ab5d8fcb0e24b42
|
diff --git a/tests.py b/tests.py
index 6b28b64..c1b6aad 100644
--- a/tests.py
+++ b/tests.py
@@ -74,6 +74,18 @@ class Test(TestCase):
self.assertTrue(matches('/home/michael/othermatch'))
self.assertTrue(matches('/home/michael/#imnocomment'))
+ def test_ignore_directory(self):
+ matches = _parse_gitignore_string('.venv/', fake_base_dir='/home/michael')
+ self.assertTrue(matches('/home/michael/.venv'))
+ self.assertTrue(matches('/home/michael/.venv/folder'))
+ self.assertTrue(matches('/home/michael/.venv/file.txt'))
+
+ def test_ignore_directory_astrix(self):
+ matches = _parse_gitignore_string('.venv/*', fake_base_dir='/home/michael')
+ self.assertFalse(matches('/home/michael/.venv'))
+ self.assertTrue(matches('/home/michael/.venv/folder'))
+ self.assertTrue(matches('/home/michael/.venv/file.txt'))
+
def test_negation(self):
matches = _parse_gitignore_string(
'''
|
Bug with matching paths within .gitignore directory
Is this correct? It feels like a bug to me since it works when you specify the rule in `.gitignore`.
Imagine `.gitignore` contents including
```
.venv/
```
And a script evaluating this
```
gitignore_matcher('/path/to/repo/.venv/')
True
gitignore_matcher('/path/to/repo/.venv/bin')
False
```
To me the second one should return `True`
|
0.0
|
43b39d04366038924239f7057ab5d8fcb0e24b42
|
[
"tests.py::Test::test_ignore_directory"
] |
[
"tests.py::Test::test_anchored_wildcard",
"tests.py::Test::test_comment",
"tests.py::Test::test_ignore_directory_astrix",
"tests.py::Test::test_negation",
"tests.py::Test::test_simple",
"tests.py::Test::test_trailingspaces",
"tests.py::Test::test_wildcard"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2020-08-10 20:29:05+00:00
|
mit
| 3,884 |
|
mherrmann__gitignore_parser-38
|
diff --git a/gitignore_parser.py b/gitignore_parser.py
index f78d86f..0c79c46 100644
--- a/gitignore_parser.py
+++ b/gitignore_parser.py
@@ -4,6 +4,7 @@ import re
from os.path import dirname
from pathlib import Path
+from typing import Union
def handle_negation(file_path, rules):
matched = False
@@ -132,7 +133,7 @@ class IgnoreRule(collections.namedtuple('IgnoreRule_', IGNORE_RULE_FIELDS)):
def __repr__(self):
return ''.join(['IgnoreRule(\'', self.pattern, '\')'])
- def match(self, abs_path):
+ def match(self, abs_path: Union[str, Path]):
matched = False
if self.base_path:
rel_path = str(Path(abs_path).resolve().relative_to(self.base_path))
@@ -140,7 +141,7 @@ class IgnoreRule(collections.namedtuple('IgnoreRule_', IGNORE_RULE_FIELDS)):
rel_path = str(Path(abs_path))
# Path() strips the trailing slash, so we need to preserve it
# in case of directory-only negation
- if self.negation and abs_path[-1] == '/':
+ if self.negation and type(abs_path) == str and abs_path[-1] == '/':
rel_path += '/'
if rel_path.startswith('./'):
rel_path = rel_path[2:]
|
mherrmann/gitignore_parser
|
eeaeb72b0e376e74f01cfffbf1cb048ac99fc1bd
|
diff --git a/tests.py b/tests.py
index 1340401..2934841 100644
--- a/tests.py
+++ b/tests.py
@@ -1,4 +1,5 @@
from unittest.mock import patch, mock_open
+from pathlib import Path
from gitignore_parser import parse_gitignore
@@ -127,6 +128,10 @@ data/**
self.assertTrue(matches('/home/michael/directory'))
self.assertTrue(matches('/home/michael/directory-trailing/'))
+ def test_supports_path_type_argument(self):
+ matches = _parse_gitignore_string('file1\n!file2', fake_base_dir='/home/michael')
+ self.assertTrue(matches(Path('/home/michael/file1')))
+ self.assertFalse(matches(Path('/home/michael/file2')))
def _parse_gitignore_string(data: str, fake_base_dir: str = None):
with patch('builtins.open', mock_open(read_data=data)):
|
Unsafe string slicing in IgnoreRule.match method. It fails with Path type.
## Synopsis
After upgrade from `0.1.0` to `0.1.1` I faced an issue with existing test. Looks like there is unsafe operation with `abs_path` which supposed to be a string only. That's a wrong assuming, because most libraries support the both types: [str, Path].
Code impacted: https://github.com/mherrmann/gitignore_parser/blob/v0.1.1/gitignore_parser.py#L143
### Steps to reproduce
```python
import tempfile
from pathlib import Path
from gitignore_parser import parse_gitignore
def reproduce_issue():
with tempfile.TemporaryDirectory() as git_dir:
git_dir = Path(git_dir)
gitignore_file_path = git_dir / ".gitignore"
gitignore_file_path.write_text("file1\n!file2")
matches = parse_gitignore(gitignore_file_path)
assert matches(git_dir / "file1")
assert not matches(git_dir / "file2")
if __name__ == '__main__':
reproduce_issue()
```
### Observed behaviour
```
Traceback (most recent call last):
File "/Users/kukusan2/Workspace/gitignore_issue/reproduce_issue.py", line 20, in <module>
reproduce_issue()
File "/Users/kukusan2/Workspace/gitignore_issue/reproduce_issue.py", line 15, in reproduce_issue
assert matches(git_dir / "file1")
File "/Users/kukusan2/Workspace/gitignore_issue/venv2/lib/python3.9/site-packages/gitignore_parser.py", line 36, in <lambda>
return lambda file_path: handle_negation(file_path, rules)
File "/Users/kukusan2/Workspace/gitignore_issue/venv2/lib/python3.9/site-packages/gitignore_parser.py", line 11, in handle_negation
if rule.match(file_path):
File "/Users/kukusan2/Workspace/gitignore_issue/venv2/lib/python3.9/site-packages/gitignore_parser.py", line 143, in match
if self.negation and abs_path[-1] == '/':
TypeError: 'PosixPath' object is not subscriptable
```
|
0.0
|
eeaeb72b0e376e74f01cfffbf1cb048ac99fc1bd
|
[
"tests.py::Test::test_supports_path_type_argument"
] |
[
"tests.py::Test::test_anchored_wildcard",
"tests.py::Test::test_comment",
"tests.py::Test::test_directory_only_negation",
"tests.py::Test::test_double_asterisks",
"tests.py::Test::test_ignore_directory",
"tests.py::Test::test_ignore_directory_asterisk",
"tests.py::Test::test_negation",
"tests.py::Test::test_simple",
"tests.py::Test::test_single_asterisk",
"tests.py::Test::test_trailingspaces",
"tests.py::Test::test_wildcard"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2022-11-24 13:17:30+00:00
|
mit
| 3,885 |
|
mhostetter__galois-206
|
diff --git a/galois/_codes/_bch.py b/galois/_codes/_bch.py
index 2278eae7e..48c95131c 100644
--- a/galois/_codes/_bch.py
+++ b/galois/_codes/_bch.py
@@ -89,7 +89,7 @@ def bch_valid_codes(n, t_min=1):
while True:
c = 1
roots = alpha**(c + np.arange(0, 2*t))
- powers = GF.characteristic**np.arange(0, GF.degree)
+ powers = GF.characteristic**np.arange(0, GF.degree, dtype=GF.dtypes[-1])
conjugates = np.unique(np.power.outer(roots, powers))
g_degree = len(conjugates)
k = n - g_degree
@@ -201,7 +201,7 @@ class BCH:
# minimal polynomial and then doing an LCM, we will compute all the unique conjugates of all the roots
# and then compute (x - c1)*(x - c2)*...*(x - cn), which is equivalent.
roots = alpha**(c + np.arange(0, 2*t))
- powers = GF.characteristic**np.arange(0, GF.degree)
+ powers = GF.characteristic**np.arange(0, GF.degree, dtype=GF.dtypes[-1])
conjugates = np.unique(np.power.outer(roots, powers))
g_degree = len(conjugates)
diff --git a/galois/_codes/_cyclic.py b/galois/_codes/_cyclic.py
index 289b5527a..19da32611 100644
--- a/galois/_codes/_cyclic.py
+++ b/galois/_codes/_cyclic.py
@@ -109,6 +109,7 @@ def roots_to_parity_check_matrix(n, roots):
if not isinstance(roots, FieldArray):
raise TypeError(f"Argument `roots` must be a galois.FieldArray, not {type(roots)}.")
- H = np.power.outer(roots, np.arange(n - 1, -1, -1))
+ GF = type(roots)
+ H = np.power.outer(roots, np.arange(n - 1, -1, -1, dtype=GF.dtypes[-1]))
return H
diff --git a/galois/_fields/_array.py b/galois/_fields/_array.py
index dcfc80c2b..d7554ea94 100644
--- a/galois/_fields/_array.py
+++ b/galois/_fields/_array.py
@@ -765,7 +765,7 @@ class FieldArray(np.ndarray, metaclass=FieldClass):
subfield = field.prime_subfield
p = field.characteristic
m = field.degree
- conjugates = np.power.outer(self, p**np.arange(0, m, dtype=self.dtype))
+ conjugates = np.power.outer(self, p**np.arange(0, m, dtype=field.dtypes[-1]))
trace = np.add.reduce(conjugates, axis=-1)
return subfield(trace)
diff --git a/galois/_fields/_class.py b/galois/_fields/_class.py
index fe72b56fb..746214926 100644
--- a/galois/_fields/_class.py
+++ b/galois/_fields/_class.py
@@ -313,7 +313,7 @@ class FieldClass(FunctionMeta, UfuncMeta, PropertiesMeta):
if cls.display_mode == "power":
# Order elements by powers of the primitive element
- x_default = np.concatenate((np.atleast_1d(cls(0)), cls.primitive_element**np.arange(0, cls.order - 1)))
+ x_default = np.concatenate((np.atleast_1d(cls(0)), cls.primitive_element**np.arange(0, cls.order - 1, dtype=cls.dtypes[-1])))
else:
x_default = cls.Elements()
y_default = x_default if operation != "/" else x_default[1:]
diff --git a/galois/_polys/_functions.py b/galois/_polys/_functions.py
index 8fd3d83de..34a50c27b 100644
--- a/galois/_polys/_functions.py
+++ b/galois/_polys/_functions.py
@@ -113,7 +113,7 @@ def minimal_poly(element):
if field.is_prime_field:
return x - element
else:
- conjugates = np.unique(element**(field.characteristic**np.arange(0, field.degree)))
+ conjugates = np.unique(element**(field.characteristic**np.arange(0, field.degree, dtype=field.dtypes[-1])))
poly = Poly.Roots(conjugates, field=field)
poly = Poly(poly.coeffs, field=field.prime_subfield)
return poly
|
mhostetter/galois
|
158f5ff81fad4aa89bf4d55d0e05d57a57d54b70
|
diff --git a/tests/polys/test_minimal_polys.py b/tests/polys/test_minimal_polys.py
index 814d5507f..daec41c4c 100644
--- a/tests/polys/test_minimal_polys.py
+++ b/tests/polys/test_minimal_polys.py
@@ -380,3 +380,11 @@ def test_minimal_poly(characteristic, degree):
e = GFpm(item[0])
poly = galois.Poly(item[1], field=GFp)
assert galois.minimal_poly(e) == poly
+
+
+def test_minimal_poly_large_field():
+ # Test vectors generated with SageMath
+ GF = galois.GF(2**100)
+ galois.minimal_poly(GF(2)) == galois.Poly.String("x^100 + x^57 + x^56 + x^55 + x^52 + x^48 + x^47 + x^46 + x^45 + x^44 + x^43 + x^41 + x^37 + x^36 + x^35 + x^34 + x^31 + x^30 + x^27 + x^25 + x^24 + x^22 + x^20 + x^19 + x^16 + x^15 + x^11 + x^9 + x^8 + x^6 + x^5 + x^3 + 1")
+ galois.minimal_poly(GF(3)) == galois.Poly.String("x^100 + x^96 + x^68 + x^64 + x^57 + x^55 + x^54 + x^53 + x^51 + x^50 + x^48 + x^47 + x^42 + x^41 + x^38 + x^36 + x^31 + x^29 + x^26 + x^24 + x^15 + x^14 + x^12 + x^9 + x^8 + x^5 + x^2 + x + 1")
+ galois.minimal_poly(GF(6)) == galois.Poly.String("x^100 + x^78 + x^76 + x^74 + x^73 + x^71 + x^67 + x^66 + x^65 + x^62 + x^60 + x^55 + x^52 + x^51 + x^50 + x^48 + x^47 + x^45 + x^42 + x^41 + x^35 + x^34 + x^33 + x^31 + x^30 + x^29 + x^28 + x^27 + x^26 + x^23 + x^22 + x^21 + x^20 + x^19 + x^16 + x^14 + x^13 + x^12 + x^10 + x^9 + x^8 + x^6 + x^3 + x + 1")
|
Error in `minimal_poly()` with large fields
As discovered [here](https://github.com/mhostetter/galois/issues/185#issuecomment-969938384), `galois.minimal_poly()` fails for large extension fields.
Here's a reproducible example:
```python
In [1]: import galois
In [2]: poly = galois.Poly.Degrees([256, 241, 178, 121, 0]); poly
Out[2]: Poly(x^256 + x^241 + x^178 + x^121 + 1, GF(2))
In [3]: GF = galois.GF((2**256), irreducible_poly=poly, primitive_element=2, verify=False)
In [4]: print(GF.properties)
GF(2^256):
characteristic: 2
degree: 256
order: 115792089237316195423570985008687907853269984665640564039457584007913129639936
irreducible_poly: x^256 + x^241 + x^178 + x^121 + 1
is_primitive_poly: True
primitive_element: x
In [5]: x = GF(4); x
Out[5]: GF(4, order=2^256)
In [6]: galois.minimal_poly(x)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-6-976c7185bfb2> in <module>
----> 1 galois.minimal_poly(x)
/mnt/c/Users/matth/repos/galois/galois/_polys/_functions.py in minimal_poly(element)
116 conjugates = np.unique(element**(field.characteristic**np.arange(0, field.degree)))
117 poly = Poly.Roots(conjugates, field=field)
--> 118 poly = Poly(poly.coeffs, field=field.prime_subfield)
119 return poly
/mnt/c/Users/matth/repos/galois/galois/_polys/_poly.py in __new__(cls, coeffs, field, order)
109 coeffs = coeffs[::-1] # Ensure it's in descending-degree order
110
--> 111 coeffs, field = cls._convert_coeffs(coeffs, field)
112
113 if field is GF2:
/mnt/c/Users/matth/repos/galois/galois/_polys/_poly.py in _convert_coeffs(cls, coeffs, field)
136 # Ensure coeffs is an iterable
137 coeffs = coeffs.tolist()
--> 138 coeffs = field([int(-field(abs(c))) if c < 0 else c for c in coeffs])
139
140 return coeffs, field
/mnt/c/Users/matth/repos/galois/galois/_fields/_array.py in __new__(cls, array, dtype, copy, order, ndmin)
116 if cls is FieldArray:
117 raise NotImplementedError("FieldArray is an abstract base class that cannot be directly instantiated. Instead, create a FieldArray subclass for GF(p^m) arithmetic using `GF = galois.GF(p**m)` and instantiate an array using `x = GF(array_like)`.")
--> 118 return cls._array(array, dtype=dtype, copy=copy, order=order, ndmin=ndmin)
119
120 def __init__(self, array, dtype=None, copy=True, order="K", ndmin=0):
/mnt/c/Users/matth/repos/galois/galois/_fields/_array.py in _array(cls, array_like, dtype, copy, order, ndmin)
171 def _array(cls, array_like, dtype=None, copy=True, order="K", ndmin=0):
172 dtype = cls._get_dtype(dtype)
--> 173 array_like = cls._check_array_like_object(array_like)
174 array = np.array(array_like, dtype=dtype, copy=copy, order=order, ndmin=ndmin)
175 return array.view(cls)
/mnt/c/Users/matth/repos/galois/galois/_fields/_array.py in _check_array_like_object(cls, array_like)
191 # Recursively check the items in the iterable to ensure they're of the correct type
192 # and that their values are in range
--> 193 array_like = cls._check_iterable_types_and_values(array_like)
194 elif isinstance(array_like, np.ndarray):
195 # If this a NumPy array, but not a FieldArray, verify the array
/mnt/c/Users/matth/repos/galois/galois/_fields/_array.py in _check_iterable_types_and_values(cls, iterable)
218 raise TypeError(f"When {cls.name} arrays are created/assigned with an iterable, each element must be an integer. Found type {type(item)}.")
219
--> 220 cls._check_array_values(item)
221 # if not 0 <= item < cls.order:
222 # raise ValueError(f"{cls.name} arrays must have elements in 0 <= x < {cls.order}, not {item}.")
/mnt/c/Users/matth/repos/galois/galois/_fields/_array.py in _check_array_values(cls, array)
257 idxs = np.logical_or(array < 0, array >= cls.order)
258 values = array if array.ndim == 0 else array[idxs]
--> 259 raise ValueError(f"{cls.name} arrays must have elements in `0 <= x < {cls.order}`, not {values}.")
260
261 @classmethod
ValueError: GF(2) arrays must have elements in `0 <= x < 2`, not 107745580774404846491629807443664560507035291931888949065819222618122380761054.
```
After this PR, the example runs as follows:
```python
In [1]: import galois
In [2]: poly = galois.Poly.Degrees([256, 241, 178, 121, 0]); poly
Out[2]: Poly(x^256 + x^241 + x^178 + x^121 + 1, GF(2))
In [3]: GF = galois.GF((2**256), irreducible_poly=poly, primitive_element=2, verify=False)
In [4]: print(GF.properties)
GF(2^256):
characteristic: 2
degree: 256
order: 115792089237316195423570985008687907853269984665640564039457584007913129639936
irreducible_poly: x^256 + x^241 + x^178 + x^121 + 1
is_primitive_poly: True
primitive_element: x
In [5]: x = GF(4); x
Out[5]: GF(4, order=2^256)
In [6]: galois.minimal_poly(x)
Out[6]: Poly(x^256 + x^241 + x^178 + x^121 + 1, GF(2))
```
|
0.0
|
158f5ff81fad4aa89bf4d55d0e05d57a57d54b70
|
[
"tests/polys/test_minimal_polys.py::test_minimal_poly_large_field"
] |
[
"tests/polys/test_minimal_polys.py::test_minimal_poly_exceptions",
"tests/polys/test_minimal_polys.py::test_minimal_poly[2-1]",
"tests/polys/test_minimal_polys.py::test_minimal_poly[2-2]",
"tests/polys/test_minimal_polys.py::test_minimal_poly[2-3]",
"tests/polys/test_minimal_polys.py::test_minimal_poly[2-4]",
"tests/polys/test_minimal_polys.py::test_minimal_poly[3-1]",
"tests/polys/test_minimal_polys.py::test_minimal_poly[3-2]",
"tests/polys/test_minimal_polys.py::test_minimal_poly[3-3]",
"tests/polys/test_minimal_polys.py::test_minimal_poly[3-4]",
"tests/polys/test_minimal_polys.py::test_minimal_poly[5-1]",
"tests/polys/test_minimal_polys.py::test_minimal_poly[5-2]",
"tests/polys/test_minimal_polys.py::test_minimal_poly[5-3]"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-11-16 15:43:04+00:00
|
mit
| 3,886 |
|
mhostetter__galois-219
|
diff --git a/galois/_polys/_factor.py b/galois/_polys/_factor.py
index 6d25fd82d..693e271ae 100644
--- a/galois/_polys/_factor.py
+++ b/galois/_polys/_factor.py
@@ -129,8 +129,8 @@ def square_free_factorization(poly):
# Step 2: Find all remaining factors (their multiplicities are divisible by p)
if d != one:
- degrees = [degree // p for degree in d.degrees]
- coeffs = d.coeffs ** (field.characteristic**(field.degree - 1)) # The inverse Frobenius automorphism of the coefficients
+ degrees = [degree // p for degree in d.nonzero_degrees]
+ coeffs = d.nonzero_coeffs ** (field.characteristic**(field.degree - 1)) # The inverse Frobenius automorphism of the coefficients
delta = Poly.Degrees(degrees, coeffs=coeffs, field=field) # The p-th root of d(x)
f, m = square_free_factorization(delta)
factors_.extend(f)
diff --git a/galois/_polys/_poly.py b/galois/_polys/_poly.py
index f0082ba15..55161b5e6 100644
--- a/galois/_polys/_poly.py
+++ b/galois/_polys/_poly.py
@@ -133,12 +133,13 @@ class Poly:
# Use the field of the coefficients
field = type(coeffs)
else:
- # Convert coefficients to the specified field (or GF2 if unspecified)
+ # Convert coefficients to the specified field (or GF2 if unspecified), taking into
+ # account negative coefficients
field = GF2 if field is None else field
- if isinstance(coeffs, np.ndarray):
- # Ensure coeffs is an iterable
- coeffs = coeffs.tolist()
- coeffs = field([int(-field(abs(c))) if c < 0 else c for c in coeffs])
+ coeffs = np.array(coeffs, dtype=field.dtypes[-1])
+ idxs = coeffs < 0
+ coeffs = field(np.abs(coeffs))
+ coeffs[idxs] *= -1
return coeffs, field
@@ -451,29 +452,31 @@ class Poly:
GF = galois.GF(2**8)
galois.Poly.Degrees([3,1,0], coeffs=[251,73,185], field=GF)
"""
- coeffs = [1,]*len(degrees) if coeffs is None else coeffs
if not isinstance(degrees, (list, tuple, np.ndarray)):
raise TypeError(f"Argument `degrees` must array-like, not {type(degrees)}.")
- if not isinstance(coeffs, (list, tuple, np.ndarray, FieldArray)):
+ if not isinstance(coeffs, (type(None), list, tuple, np.ndarray, FieldArray)):
raise TypeError(f"Argument `coeffs` must array-like, not {type(coeffs)}.")
if not isinstance(field, (type(None), FieldClass)):
- raise TypeError(f"Argument `field` must be a Galois field array class, not {field}.")
- if isinstance(degrees, (np.ndarray, FieldArray)) and not degrees.ndim <= 1:
+ raise TypeError(f"Argument `field` must be a Galois field array class, not {type(field)}.")
+
+ degrees = np.array(degrees, dtype=np.int64)
+ coeffs = [1,]*len(degrees) if coeffs is None else coeffs
+ coeffs, field = cls._convert_coeffs(coeffs, field)
+
+ if not degrees.ndim <= 1:
raise ValueError(f"Argument `degrees` can have dimension at most 1, not {degrees.ndim}.")
- if isinstance(coeffs, (np.ndarray, FieldArray)) and not coeffs.ndim <= 1:
- raise ValueError(f"Argument `coeffs` can have dimension at most 1, not {coeffs.ndim}.")
- if not all(degree >= 0 for degree in degrees):
+ if not degrees.size == np.unique(degrees).size:
+ raise ValueError(f"Argument `degrees` must have unique entries, not {degrees}.")
+ if not np.all(degrees >= 0):
raise ValueError(f"Argument `degrees` must have non-negative values, not {degrees}.")
- if not len(degrees) == len(coeffs):
- raise ValueError(f"Arguments `degrees` and `coeffs` must have the same length, not {len(degrees)} and {len(coeffs)}.")
-
+ if not coeffs.ndim <= 1:
+ raise ValueError(f"Argument `coeffs` can have dimension at most 1, not {coeffs.ndim}.")
+ if not degrees.size == coeffs.size:
+ raise ValueError(f"Arguments `degrees` and `coeffs` must have the same length, not {degrees.size} and {coeffs.size}.")
+ # No nonzero degrees means it's the zero polynomial
if len(degrees) == 0:
- degrees = [0]
- coeffs = [0]
- dtype = np.int64 if max(degrees) <= np.iinfo(np.int64).max else np.object_
- degrees = np.array(degrees, dtype=dtype)
- coeffs, field = cls._convert_coeffs(coeffs, field)
+ degrees, coeffs = np.array([0]), field([0])
if field is GF2:
if len(degrees) < SPARSE_VS_BINARY_POLY_FACTOR*max(degrees):
|
mhostetter/galois
|
a89fd868478949fe249ff5705d9acf6123441e3c
|
diff --git a/tests/polys/test_constructors.py b/tests/polys/test_constructors.py
index c7146da19..15eaa3a58 100644
--- a/tests/polys/test_constructors.py
+++ b/tests/polys/test_constructors.py
@@ -180,12 +180,14 @@ def test_degrees_exceptions():
with pytest.raises(ValueError):
galois.Poly.Degrees(np.atleast_2d(degrees), coeffs=coeffs, field=GF)
+ with pytest.raises(ValueError):
+ galois.Poly.Degrees([5, 1, 1, 0], coeffs=[1, 2, 2, 1], field=GF)
+ with pytest.raises(ValueError):
+ galois.Poly.Degrees([5, -3, 0], coeffs=coeffs, field=GF)
with pytest.raises(ValueError):
galois.Poly.Degrees(degrees, coeffs=np.atleast_2d(coeffs), field=GF)
with pytest.raises(ValueError):
galois.Poly.Degrees([7] + degrees, coeffs=coeffs, field=GF)
- with pytest.raises(ValueError):
- galois.Poly.Degrees([5, -3, 0], coeffs=coeffs, field=GF)
@pytest.mark.parametrize("field", FIELDS)
|
`Poly.Degrees` with duplicate degrees produces incorrect polynomial
An issue was peripherally discovered in #212 where `Poly.Degrees` produces the wrong polynomial if duplicate degrees are provided.
```python
# Wrong answer
In [5]: galois.Poly.Degrees([256, 1, 1, 1, 0])
Out[5]: Poly(x^256 + x^2 + x + 1, GF(2))
In [6]: galois.Poly.Degrees([256, 1, 0])
Out[6]: Poly(x^256 + x + 1, GF(2))
```
The solution is to throw a `ValueError` if duplicates are passed in.
|
0.0
|
a89fd868478949fe249ff5705d9acf6123441e3c
|
[
"tests/polys/test_constructors.py::test_degrees_exceptions"
] |
[
"tests/polys/test_constructors.py::test_zero_exceptions",
"tests/polys/test_constructors.py::test_zero[GF2]",
"tests/polys/test_constructors.py::test_zero[GF31]",
"tests/polys/test_constructors.py::test_zero[GF36893488147419103183]",
"tests/polys/test_constructors.py::test_zero[GF2_8]",
"tests/polys/test_constructors.py::test_zero[GF2_100]",
"tests/polys/test_constructors.py::test_zero[GF7_3]",
"tests/polys/test_constructors.py::test_zero[GF109987_4]",
"tests/polys/test_constructors.py::test_one_exceptions",
"tests/polys/test_constructors.py::test_one[GF2]",
"tests/polys/test_constructors.py::test_one[GF31]",
"tests/polys/test_constructors.py::test_one[GF36893488147419103183]",
"tests/polys/test_constructors.py::test_one[GF2_8]",
"tests/polys/test_constructors.py::test_one[GF2_100]",
"tests/polys/test_constructors.py::test_one[GF7_3]",
"tests/polys/test_constructors.py::test_one[GF109987_4]",
"tests/polys/test_constructors.py::test_identity_exceptions",
"tests/polys/test_constructors.py::test_identity[GF2]",
"tests/polys/test_constructors.py::test_identity[GF31]",
"tests/polys/test_constructors.py::test_identity[GF36893488147419103183]",
"tests/polys/test_constructors.py::test_identity[GF2_8]",
"tests/polys/test_constructors.py::test_identity[GF2_100]",
"tests/polys/test_constructors.py::test_identity[GF7_3]",
"tests/polys/test_constructors.py::test_identity[GF109987_4]",
"tests/polys/test_constructors.py::test_random_exceptions",
"tests/polys/test_constructors.py::test_random[None-GF2]",
"tests/polys/test_constructors.py::test_random[None-GF31]",
"tests/polys/test_constructors.py::test_random[None-GF36893488147419103183]",
"tests/polys/test_constructors.py::test_random[None-GF2_8]",
"tests/polys/test_constructors.py::test_random[None-GF2_100]",
"tests/polys/test_constructors.py::test_random[None-GF7_3]",
"tests/polys/test_constructors.py::test_random[None-GF109987_4]",
"tests/polys/test_constructors.py::test_random[42-GF2]",
"tests/polys/test_constructors.py::test_random[42-GF31]",
"tests/polys/test_constructors.py::test_random[42-GF36893488147419103183]",
"tests/polys/test_constructors.py::test_random[42-GF2_8]",
"tests/polys/test_constructors.py::test_random[42-GF2_100]",
"tests/polys/test_constructors.py::test_random[42-GF7_3]",
"tests/polys/test_constructors.py::test_random[42-GF109987_4]",
"tests/polys/test_constructors.py::test_random[seed2-GF2]",
"tests/polys/test_constructors.py::test_random[seed2-GF31]",
"tests/polys/test_constructors.py::test_random[seed2-GF36893488147419103183]",
"tests/polys/test_constructors.py::test_random[seed2-GF2_8]",
"tests/polys/test_constructors.py::test_random[seed2-GF2_100]",
"tests/polys/test_constructors.py::test_random[seed2-GF7_3]",
"tests/polys/test_constructors.py::test_random[seed2-GF109987_4]",
"tests/polys/test_constructors.py::test_random[seed3-GF2]",
"tests/polys/test_constructors.py::test_random[seed3-GF31]",
"tests/polys/test_constructors.py::test_random[seed3-GF36893488147419103183]",
"tests/polys/test_constructors.py::test_random[seed3-GF2_8]",
"tests/polys/test_constructors.py::test_random[seed3-GF2_100]",
"tests/polys/test_constructors.py::test_random[seed3-GF7_3]",
"tests/polys/test_constructors.py::test_random[seed3-GF109987_4]",
"tests/polys/test_constructors.py::test_random[seed4-GF2]",
"tests/polys/test_constructors.py::test_random[seed4-GF31]",
"tests/polys/test_constructors.py::test_random[seed4-GF36893488147419103183]",
"tests/polys/test_constructors.py::test_random[seed4-GF2_8]",
"tests/polys/test_constructors.py::test_random[seed4-GF2_100]",
"tests/polys/test_constructors.py::test_random[seed4-GF7_3]",
"tests/polys/test_constructors.py::test_random[seed4-GF109987_4]",
"tests/polys/test_constructors.py::test_integer_exceptions",
"tests/polys/test_constructors.py::test_integer[GF2]",
"tests/polys/test_constructors.py::test_integer[GF31]",
"tests/polys/test_constructors.py::test_integer[GF36893488147419103183]",
"tests/polys/test_constructors.py::test_integer[GF2_8]",
"tests/polys/test_constructors.py::test_integer[GF2_100]",
"tests/polys/test_constructors.py::test_integer[GF7_3]",
"tests/polys/test_constructors.py::test_integer[GF109987_4]",
"tests/polys/test_constructors.py::test_string_exceptions",
"tests/polys/test_constructors.py::test_string[GF2]",
"tests/polys/test_constructors.py::test_string[GF31]",
"tests/polys/test_constructors.py::test_string[GF36893488147419103183]",
"tests/polys/test_constructors.py::test_string[GF2_8]",
"tests/polys/test_constructors.py::test_string[GF2_100]",
"tests/polys/test_constructors.py::test_string[GF7_3]",
"tests/polys/test_constructors.py::test_string[GF109987_4]",
"tests/polys/test_constructors.py::test_string_large",
"tests/polys/test_constructors.py::test_degrees[GF2]",
"tests/polys/test_constructors.py::test_degrees[GF31]",
"tests/polys/test_constructors.py::test_degrees[GF36893488147419103183]",
"tests/polys/test_constructors.py::test_degrees[GF2_8]",
"tests/polys/test_constructors.py::test_degrees[GF2_100]",
"tests/polys/test_constructors.py::test_degrees[GF7_3]",
"tests/polys/test_constructors.py::test_degrees[GF109987_4]",
"tests/polys/test_constructors.py::test_degrees_empty[GF2]",
"tests/polys/test_constructors.py::test_degrees_empty[GF31]",
"tests/polys/test_constructors.py::test_degrees_empty[GF36893488147419103183]",
"tests/polys/test_constructors.py::test_degrees_empty[GF2_8]",
"tests/polys/test_constructors.py::test_degrees_empty[GF2_100]",
"tests/polys/test_constructors.py::test_degrees_empty[GF7_3]",
"tests/polys/test_constructors.py::test_degrees_empty[GF109987_4]",
"tests/polys/test_constructors.py::test_roots_exceptions",
"tests/polys/test_constructors.py::test_roots[GF2]",
"tests/polys/test_constructors.py::test_roots[GF31]",
"tests/polys/test_constructors.py::test_roots[GF36893488147419103183]",
"tests/polys/test_constructors.py::test_roots[GF2_8]",
"tests/polys/test_constructors.py::test_roots[GF2_100]",
"tests/polys/test_constructors.py::test_roots[GF7_3]",
"tests/polys/test_constructors.py::test_roots[GF109987_4]",
"tests/polys/test_constructors.py::test_roots_with_multiplicity[GF2]",
"tests/polys/test_constructors.py::test_roots_with_multiplicity[GF31]",
"tests/polys/test_constructors.py::test_roots_with_multiplicity[GF36893488147419103183]",
"tests/polys/test_constructors.py::test_roots_with_multiplicity[GF2_8]",
"tests/polys/test_constructors.py::test_roots_with_multiplicity[GF2_100]",
"tests/polys/test_constructors.py::test_roots_with_multiplicity[GF7_3]",
"tests/polys/test_constructors.py::test_roots_with_multiplicity[GF109987_4]",
"tests/polys/test_constructors.py::test_roots_field_override"
] |
{
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-01-06 20:30:12+00:00
|
mit
| 3,887 |
|
mhostetter__galois-323
|
diff --git a/galois/_fields/_gf2m.py b/galois/_fields/_gf2m.py
index 299b13362..833b2e3d6 100644
--- a/galois/_fields/_gf2m.py
+++ b/galois/_fields/_gf2m.py
@@ -58,13 +58,6 @@ class GF2mMeta(FieldClass, DirMeta):
###############################################################################
# Arithmetic functions using explicit calculation
- #
- # NOTE: The ufunc inputs a and b are cast to integers at the beginning of each
- # ufunc to prevent the non-JIT-compiled invocations (used in "large"
- # fields with dtype=object) from performing infintely recursive
- # arithmetic. Instead, the intended arithmetic inside the ufuncs is
- # integer arithmetic.
- # See https://github.com/mhostetter/galois/issues/253.
###############################################################################
@staticmethod
@@ -72,9 +65,6 @@ class GF2mMeta(FieldClass, DirMeta):
"""
Not actually used. `np.bitwise_xor()` is faster.
"""
- a = int(a)
- b = int(b)
-
return a ^ b
@staticmethod
@@ -82,8 +72,6 @@ class GF2mMeta(FieldClass, DirMeta):
"""
Not actually used. `np.positive()` is faster.
"""
- a = int(a)
-
return a
@staticmethod
@@ -91,9 +79,6 @@ class GF2mMeta(FieldClass, DirMeta):
"""
Not actually used. `np.bitwise_xor()` is faster.
"""
- a = int(a)
- b = int(b)
-
return a ^ b
@staticmethod
@@ -105,13 +90,11 @@ class GF2mMeta(FieldClass, DirMeta):
p(x) in GF(2)[x] with degree m is the irreducible polynomial of GF(2^m)
a * b = c
- = (a(x) * b(x)) % p(x) in GF(2)
- = c(x)
- = c
+ = (a(x) * b(x)) % p(x) in GF(2)
+ = c(x)
+ = c
"""
ORDER = CHARACTERISTIC**DEGREE
- a = int(a)
- b = int(b)
# Re-order operands such that a > b so the while loop has less loops
if b > a:
@@ -145,7 +128,6 @@ class GF2mMeta(FieldClass, DirMeta):
raise ZeroDivisionError("Cannot compute the multiplicative inverse of 0 in a Galois field.")
ORDER = CHARACTERISTIC**DEGREE
- a = int(a)
exponent = ORDER - 2
result_s = a # The "squaring" part
@@ -169,9 +151,6 @@ class GF2mMeta(FieldClass, DirMeta):
if b == 0:
raise ZeroDivisionError("Cannot compute the multiplicative inverse of 0 in a Galois field.")
- a = int(a)
- b = int(b)
-
if a == 0:
c = 0
else:
@@ -197,9 +176,6 @@ class GF2mMeta(FieldClass, DirMeta):
if a == 0 and b < 0:
raise ZeroDivisionError("Cannot compute the multiplicative inverse of 0 in a Galois field.")
- a = int(a)
- b = int(b)
-
if b == 0:
return 1
elif b < 0:
@@ -236,8 +212,6 @@ class GF2mMeta(FieldClass, DirMeta):
raise ArithmeticError("Cannot compute the discrete logarithm of 0 in a Galois field.")
ORDER = CHARACTERISTIC**DEGREE
- a = int(a)
- b = int(b)
# Naive algorithm
result = 1
diff --git a/galois/_fields/_gfp.py b/galois/_fields/_gfp.py
index 0638a4aa8..d41238e74 100644
--- a/galois/_fields/_gfp.py
+++ b/galois/_fields/_gfp.py
@@ -56,21 +56,11 @@ class GFpMeta(FieldClass, DirMeta):
###############################################################################
# Arithmetic functions using explicit calculation
- #
- # NOTE: The ufunc inputs a and b are cast to integers at the beginning of each
- # ufunc to prevent the non-JIT-compiled invocations (used in "large"
- # fields with dtype=object) from performing infintely recursive
- # arithmetic. Instead, the intended arithmetic inside the ufuncs is
- # integer arithmetic.
- # See https://github.com/mhostetter/galois/issues/253.
###############################################################################
@staticmethod
@numba.extending.register_jitable
def _add_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
- a = int(a)
- b = int(b)
-
c = a + b
if c >= CHARACTERISTIC:
c -= CHARACTERISTIC
@@ -80,8 +70,6 @@ class GFpMeta(FieldClass, DirMeta):
@staticmethod
@numba.extending.register_jitable
def _negative_calculate(a, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
- a = int(a)
-
if a == 0:
c = 0
else:
@@ -92,9 +80,6 @@ class GFpMeta(FieldClass, DirMeta):
@staticmethod
@numba.extending.register_jitable
def _subtract_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
- a = int(a)
- b = int(b)
-
if a >= b:
c = a - b
else:
@@ -105,9 +90,6 @@ class GFpMeta(FieldClass, DirMeta):
@staticmethod
@numba.extending.register_jitable
def _multiply_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
- a = int(a)
- b = int(b)
-
c = (a * b) % CHARACTERISTIC
return c
@@ -124,8 +106,6 @@ class GFpMeta(FieldClass, DirMeta):
if a == 0:
raise ZeroDivisionError("Cannot compute the multiplicative inverse of 0 in a Galois field.")
- a = int(a)
-
r2, r1 = CHARACTERISTIC, a
t2, t1 = 0, 1
@@ -145,9 +125,6 @@ class GFpMeta(FieldClass, DirMeta):
if b == 0:
raise ZeroDivisionError("Cannot compute the multiplicative inverse of 0 in a Galois field.")
- a = int(a)
- b = int(b)
-
if a == 0:
c = 0
else:
@@ -163,19 +140,16 @@ class GFpMeta(FieldClass, DirMeta):
Square and Multiply Algorithm
a^13 = (1) * (a)^13
- = (a) * (a)^12
- = (a) * (a^2)^6
- = (a) * (a^4)^3
- = (a * a^4) * (a^4)^2
- = (a * a^4) * (a^8)
- = result_m * result_s
+ = (a) * (a)^12
+ = (a) * (a^2)^6
+ = (a) * (a^4)^3
+ = (a * a^4) * (a^4)^2
+ = (a * a^4) * (a^8)
+ = result_m * result_s
"""
if a == 0 and b < 0:
raise ZeroDivisionError("Cannot compute the multiplicative inverse of 0 in a Galois field.")
- a = int(a)
- b = int(b)
-
if b == 0:
return 1
elif b < 0:
@@ -212,8 +186,6 @@ class GFpMeta(FieldClass, DirMeta):
raise ArithmeticError("Cannot compute the discrete logarithm of 0 in a Galois field.")
ORDER = CHARACTERISTIC**DEGREE
- a = int(a)
- b = int(b)
# Naive algorithm
result = 1
diff --git a/galois/_fields/_gfpm.py b/galois/_fields/_gfpm.py
index 207d8f905..ca772fae9 100644
--- a/galois/_fields/_gfpm.py
+++ b/galois/_fields/_gfpm.py
@@ -154,13 +154,6 @@ class GFpmMeta(FieldClass, DirMeta):
###############################################################################
# Arithmetic functions using explicit calculation
- #
- # NOTE: The ufunc inputs a and b are cast to integers at the beginning of each
- # ufunc to prevent the non-JIT-compiled invocations (used in "large"
- # fields with dtype=object) from performing infintely recursive
- # arithmetic. Instead, the intended arithmetic inside the ufuncs is
- # integer arithmetic.
- # See https://github.com/mhostetter/galois/issues/253.
###############################################################################
@staticmethod
@@ -169,8 +162,6 @@ class GFpmMeta(FieldClass, DirMeta):
"""
Convert the integer representation to vector/polynomial representation
"""
- a = int(a)
-
a_vec = np.zeros(DEGREE, dtype=DTYPE)
for i in range(DEGREE - 1, -1, -1):
q, r = divmod(a, CHARACTERISTIC)
@@ -196,9 +187,6 @@ class GFpmMeta(FieldClass, DirMeta):
@staticmethod
@numba.extending.register_jitable
def _add_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
- a = int(a)
- b = int(b)
-
a_vec = INT_TO_POLY(a, CHARACTERISTIC, DEGREE)
b_vec = INT_TO_POLY(b, CHARACTERISTIC, DEGREE)
c_vec = (a_vec + b_vec) % CHARACTERISTIC
@@ -209,8 +197,6 @@ class GFpmMeta(FieldClass, DirMeta):
@staticmethod
@numba.extending.register_jitable
def _negative_calculate(a, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
- a = int(a)
-
a_vec = INT_TO_POLY(a, CHARACTERISTIC, DEGREE)
a_vec = (-a_vec) % CHARACTERISTIC
c = POLY_TO_INT(a_vec, CHARACTERISTIC, DEGREE)
@@ -220,9 +206,6 @@ class GFpmMeta(FieldClass, DirMeta):
@staticmethod
@numba.extending.register_jitable
def _subtract_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
- a = int(a)
- b = int(b)
-
a_vec = INT_TO_POLY(a, CHARACTERISTIC, DEGREE)
b_vec = INT_TO_POLY(b, CHARACTERISTIC, DEGREE)
c_vec = (a_vec - b_vec) % CHARACTERISTIC
@@ -233,9 +216,6 @@ class GFpmMeta(FieldClass, DirMeta):
@staticmethod
@numba.extending.register_jitable
def _multiply_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
- a = int(a)
- b = int(b)
-
a_vec = INT_TO_POLY(a, CHARACTERISTIC, DEGREE)
b_vec = INT_TO_POLY(b, CHARACTERISTIC, DEGREE)
@@ -279,7 +259,6 @@ class GFpmMeta(FieldClass, DirMeta):
raise ZeroDivisionError("Cannot compute the multiplicative inverse of 0 in a Galois field.")
ORDER = CHARACTERISTIC**DEGREE
- a = int(a)
exponent = ORDER - 2
result_s = a # The "squaring" part
@@ -303,9 +282,6 @@ class GFpmMeta(FieldClass, DirMeta):
if b == 0:
raise ZeroDivisionError("Cannot compute the multiplicative inverse of 0 in a Galois field.")
- a = int(a)
- b = int(b)
-
if a == 0:
c = 0
else:
@@ -321,19 +297,16 @@ class GFpmMeta(FieldClass, DirMeta):
Square and Multiply Algorithm
a^13 = (1) * (a)^13
- = (a) * (a)^12
- = (a) * (a^2)^6
- = (a) * (a^4)^3
- = (a * a^4) * (a^4)^2
- = (a * a^4) * (a^8)
- = result_m * result_s
+ = (a) * (a)^12
+ = (a) * (a^2)^6
+ = (a) * (a^4)^3
+ = (a * a^4) * (a^4)^2
+ = (a * a^4) * (a^8)
+ = result_m * result_s
"""
if a == 0 and b < 0:
raise ZeroDivisionError("Cannot compute the multiplicative inverse of 0 in a Galois field.")
- a = int(a)
- b = int(b)
-
if b == 0:
return 1
elif b < 0:
@@ -370,8 +343,6 @@ class GFpmMeta(FieldClass, DirMeta):
raise ArithmeticError("Cannot compute the discrete logarithm of 0 in a Galois field.")
ORDER = CHARACTERISTIC**DEGREE
- a = int(a)
- b = int(b)
# Naive algorithm
result = 1
diff --git a/galois/_fields/_main.py b/galois/_fields/_main.py
index bf5b187e7..d08808c02 100644
--- a/galois/_fields/_main.py
+++ b/galois/_fields/_main.py
@@ -1171,7 +1171,10 @@ class FieldArray(np.ndarray, metaclass=FieldClass):
cls._verify_scalar_value(array_like)
elif isinstance(array_like, cls):
# This was a previously-created and vetted array -- there's no need to re-verify
- pass
+ if array_like.ndim == 0:
+ # Ensure that in "large" fields with dtype=object that FieldArray objects aren't assigned to the array. The arithmetic
+ # functions are designed to operate on Python ints.
+ array_like = int(array_like)
elif isinstance(array_like, str):
array_like = cls._convert_to_element(array_like)
cls._verify_scalar_value(array_like)
@@ -2731,13 +2734,6 @@ class GF2Meta(FieldClass, DirMeta):
###############################################################################
# Arithmetic functions using explicit calculation
- #
- # NOTE: The ufunc inputs a and b are cast to integers at the beginning of each
- # ufunc to prevent the non-JIT-compiled invocations (used in "large"
- # fields with dtype=object) from performing infintely recursive
- # arithmetic. Instead, the intended arithmetic inside the ufuncs is
- # integer arithmetic.
- # See https://github.com/mhostetter/galois/issues/253.
###############################################################################
@staticmethod
@@ -2745,9 +2741,6 @@ class GF2Meta(FieldClass, DirMeta):
"""
Not actually used. `np.bitwise_xor()` is faster.
"""
- a = int(a)
- b = int(b)
-
return a ^ b
@staticmethod
@@ -2755,8 +2748,6 @@ class GF2Meta(FieldClass, DirMeta):
"""
Not actually used. `np.positive()` is faster.
"""
- a = int(a)
-
return a
@staticmethod
@@ -2764,9 +2755,6 @@ class GF2Meta(FieldClass, DirMeta):
"""
Not actually used. `np.bitwise_xor()` is faster.
"""
- a = int(a)
- b = int(b)
-
return a ^ b
@staticmethod
@@ -2774,9 +2762,6 @@ class GF2Meta(FieldClass, DirMeta):
"""
Not actually used. `np.bitwise_and()` is faster.
"""
- a = int(a)
- b = int(b)
-
return a & b
@staticmethod
@@ -2791,9 +2776,6 @@ class GF2Meta(FieldClass, DirMeta):
if b == 0:
raise ZeroDivisionError("Cannot compute the multiplicative inverse of 0 in a Galois field.")
- a = int(a)
- b = int(b)
-
return a & b
@staticmethod
@@ -2802,9 +2784,6 @@ class GF2Meta(FieldClass, DirMeta):
if a == 0 and b < 0:
raise ZeroDivisionError("Cannot compute the multiplicative inverse of 0 in a Galois field.")
- a = int(a)
- b = int(b)
-
if b == 0:
return 1
|
mhostetter/galois
|
1d30fb95c6881b8c216afeb4efe2446ffcd3089c
|
diff --git a/tests/fields/test_assignment.py b/tests/fields/test_assignment.py
index aee3d3ad0..5124eb208 100644
--- a/tests/fields/test_assignment.py
+++ b/tests/fields/test_assignment.py
@@ -22,6 +22,14 @@ class TestConstantIndex:
with pytest.raises(ValueError):
a[0] = field.order
+ def test_always_int_object(self):
+ # Ensure when assigning FieldArray elements to an array they are converted to ints
+ GF = galois.GF(2**100)
+ a = GF.Random(10)
+ assert np.all(is_int(a))
+ a[0] = GF(10)
+ assert np.all(is_int(a))
+
class TestSliceIndex:
def test_constant_valid(self, field):
@@ -70,6 +78,14 @@ class TestSliceIndex:
with pytest.raises(ValueError):
a[0:2] = np.array([field.order, 1])
+ def test_always_int_object(self):
+ # Ensure when assigning FieldArray elements to an array they are converted to ints
+ GF = galois.GF(2**100)
+ a = GF.Random(10)
+ assert np.all(is_int(a))
+ a[0:3] = [GF(10), GF(20), GF(30)]
+ assert np.all(is_int(a))
+
class Test2DSliceIndex:
def test_list_valid(self, field):
@@ -103,3 +119,14 @@ class Test2DSliceIndex:
a = field.Random((10,10))
with pytest.raises(ValueError):
a[0:2, 0:2] = np.array([[field.order, 1], [1, 1]])
+
+ def test_always_int_object(self):
+ # Ensure when assigning FieldArray elements to an array they are converted to ints
+ GF = galois.GF(2**100)
+ a = GF.Random((10,10))
+ assert np.all(is_int(a))
+ a[0:2, 0:2] = [[GF(10), GF(20)], [GF(30), GF(40)]]
+ assert np.all(is_int(a))
+
+
+is_int = np.vectorize(lambda element: isinstance(element, int))
|
Better item assignment for "large" fields
I may be able to remove the requirement to cast elements to ints before arithmetic. This is required because "large" fields, which use `dtype=object` and Python ints, can get `FieldArray` objects in their arrays through assignment. For example, `x[0] = GF(10)` would assign a 0-D `FieldArray` scalar to index 0, *not* the integer 10.
With a smarter implementation of `__setitem__()`, I may be able to move the integer casting logic there and improve arithmetic performance.
See #253.
```python
@staticmethod
@numba.extending.register_jitable
def _multiply_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):
"""
a in GF(2^m), can be represented as a degree m-1 polynomial a(x) in GF(2)[x]
b in GF(2^m), can be represented as a degree m-1 polynomial b(x) in GF(2)[x]
p(x) in GF(2)[x] with degree m is the irreducible polynomial of GF(2^m)
a * b = c
= (a(x) * b(x)) % p(x) in GF(2)
= c(x)
= c
"""
ORDER = CHARACTERISTIC**DEGREE
a = int(a) # Remove this!
b = int(b) # Remove this!
# Re-order operands such that a > b so the while loop has less loops
if b > a:
a, b = b, a
c = 0
while b > 0:
if b & 0b1:
c ^= a # Add a(x) to c(x)
b >>= 1 # Divide b(x) by x
a <<= 1 # Multiply a(x) by x
if a >= ORDER:
a ^= IRREDUCIBLE_POLY # Compute a(x) % p(x)
return c
```
|
0.0
|
1d30fb95c6881b8c216afeb4efe2446ffcd3089c
|
[
"tests/fields/test_assignment.py::TestConstantIndex::test_always_int_object"
] |
[
"tests/fields/test_assignment.py::TestConstantIndex::test_valid[GF(2)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_invalid_type[GF(2)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_out_of_range[GF(2)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_valid[GF(2)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_invalid_type[GF(2)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_out_of_range[GF(2)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_valid[GF(2)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_invalid_type[GF(2)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_out_of_range[GF(2)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_valid[GF(2)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_valid_small_dtype[GF(2)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_invalid_type[GF(2)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_out_of_range[GF(2)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_valid[GF(2)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_invalid_type[GF(2)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_out_of_range[GF(2)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_valid[GF(2)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_valid_small_dtype[GF(2)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_invalid_type[GF(2)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_out_of_range[GF(2)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_valid[GF(2^2)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_invalid_type[GF(2^2)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_out_of_range[GF(2^2)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_valid[GF(2^2)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_invalid_type[GF(2^2)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_out_of_range[GF(2^2)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_valid[GF(2^2)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_invalid_type[GF(2^2)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_out_of_range[GF(2^2)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_valid[GF(2^2)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_valid_small_dtype[GF(2^2)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_invalid_type[GF(2^2)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_out_of_range[GF(2^2)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_valid[GF(2^2)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_invalid_type[GF(2^2)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_out_of_range[GF(2^2)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_valid[GF(2^2)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_valid_small_dtype[GF(2^2)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_invalid_type[GF(2^2)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_out_of_range[GF(2^2)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_valid[GF(2^3)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_invalid_type[GF(2^3)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_out_of_range[GF(2^3)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_valid[GF(2^3)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_invalid_type[GF(2^3)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_out_of_range[GF(2^3)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_valid[GF(2^3)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_invalid_type[GF(2^3)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_out_of_range[GF(2^3)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_valid[GF(2^3)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_valid_small_dtype[GF(2^3)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_invalid_type[GF(2^3)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_out_of_range[GF(2^3)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_valid[GF(2^3)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_invalid_type[GF(2^3)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_out_of_range[GF(2^3)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_valid[GF(2^3)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_valid_small_dtype[GF(2^3)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_invalid_type[GF(2^3)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_out_of_range[GF(2^3)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_valid[GF(2^8)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_invalid_type[GF(2^8)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_out_of_range[GF(2^8)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_valid[GF(2^8)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_invalid_type[GF(2^8)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_out_of_range[GF(2^8)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_valid[GF(2^8)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_invalid_type[GF(2^8)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_out_of_range[GF(2^8)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_valid[GF(2^8)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_valid_small_dtype[GF(2^8)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_invalid_type[GF(2^8)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_out_of_range[GF(2^8)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_valid[GF(2^8)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_invalid_type[GF(2^8)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_out_of_range[GF(2^8)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_valid[GF(2^8)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_valid_small_dtype[GF(2^8)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_invalid_type[GF(2^8)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_out_of_range[GF(2^8)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_valid[GF(2^32)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_invalid_type[GF(2^32)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_out_of_range[GF(2^32)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_valid[GF(2^32)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_invalid_type[GF(2^32)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_out_of_range[GF(2^32)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_valid[GF(2^32)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_invalid_type[GF(2^32)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_out_of_range[GF(2^32)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_valid[GF(2^32)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_valid_small_dtype[GF(2^32)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_invalid_type[GF(2^32)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_out_of_range[GF(2^32)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_valid[GF(2^32)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_invalid_type[GF(2^32)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_out_of_range[GF(2^32)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_valid[GF(2^32)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_valid_small_dtype[GF(2^32)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_invalid_type[GF(2^32)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_out_of_range[GF(2^32)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_valid[GF(2^100)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_invalid_type[GF(2^100)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_out_of_range[GF(2^100)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_valid[GF(2^100)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_invalid_type[GF(2^100)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_out_of_range[GF(2^100)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_valid[GF(2^100)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_invalid_type[GF(2^100)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_out_of_range[GF(2^100)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_valid[GF(2^100)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_valid_small_dtype[GF(2^100)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_invalid_type[GF(2^100)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_out_of_range[GF(2^100)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_valid[GF(2^100)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_invalid_type[GF(2^100)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_out_of_range[GF(2^100)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_valid[GF(2^100)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_valid_small_dtype[GF(2^100)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_invalid_type[GF(2^100)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_out_of_range[GF(2^100)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_valid[GF(5)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_invalid_type[GF(5)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_out_of_range[GF(5)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_valid[GF(5)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_invalid_type[GF(5)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_out_of_range[GF(5)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_valid[GF(5)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_invalid_type[GF(5)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_out_of_range[GF(5)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_valid[GF(5)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_valid_small_dtype[GF(5)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_invalid_type[GF(5)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_out_of_range[GF(5)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_valid[GF(5)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_invalid_type[GF(5)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_out_of_range[GF(5)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_valid[GF(5)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_valid_small_dtype[GF(5)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_invalid_type[GF(5)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_out_of_range[GF(5)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_valid[GF(7)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_invalid_type[GF(7)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_out_of_range[GF(7)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_valid[GF(7)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_invalid_type[GF(7)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_out_of_range[GF(7)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_valid[GF(7)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_invalid_type[GF(7)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_out_of_range[GF(7)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_valid[GF(7)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_valid_small_dtype[GF(7)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_invalid_type[GF(7)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_out_of_range[GF(7)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_valid[GF(7)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_invalid_type[GF(7)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_out_of_range[GF(7)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_valid[GF(7)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_valid_small_dtype[GF(7)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_invalid_type[GF(7)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_out_of_range[GF(7)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_valid[GF(31)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_invalid_type[GF(31)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_out_of_range[GF(31)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_valid[GF(31)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_invalid_type[GF(31)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_out_of_range[GF(31)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_valid[GF(31)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_invalid_type[GF(31)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_out_of_range[GF(31)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_valid[GF(31)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_valid_small_dtype[GF(31)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_invalid_type[GF(31)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_out_of_range[GF(31)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_valid[GF(31)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_invalid_type[GF(31)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_out_of_range[GF(31)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_valid[GF(31)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_valid_small_dtype[GF(31)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_invalid_type[GF(31)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_out_of_range[GF(31)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_valid[GF(3191)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_invalid_type[GF(3191)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_out_of_range[GF(3191)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_valid[GF(3191)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_invalid_type[GF(3191)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_out_of_range[GF(3191)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_valid[GF(3191)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_invalid_type[GF(3191)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_out_of_range[GF(3191)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_valid[GF(3191)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_valid_small_dtype[GF(3191)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_invalid_type[GF(3191)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_out_of_range[GF(3191)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_valid[GF(3191)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_invalid_type[GF(3191)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_out_of_range[GF(3191)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_valid[GF(3191)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_valid_small_dtype[GF(3191)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_invalid_type[GF(3191)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_out_of_range[GF(3191)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_valid[GF(2147483647)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_invalid_type[GF(2147483647)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_out_of_range[GF(2147483647)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_valid[GF(2147483647)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_invalid_type[GF(2147483647)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_out_of_range[GF(2147483647)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_valid[GF(2147483647)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_invalid_type[GF(2147483647)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_out_of_range[GF(2147483647)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_valid[GF(2147483647)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_valid_small_dtype[GF(2147483647)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_invalid_type[GF(2147483647)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_out_of_range[GF(2147483647)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_valid[GF(2147483647)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_invalid_type[GF(2147483647)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_out_of_range[GF(2147483647)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_valid[GF(2147483647)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_valid_small_dtype[GF(2147483647)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_invalid_type[GF(2147483647)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_out_of_range[GF(2147483647)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_valid[GF(36893488147419103183)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_invalid_type[GF(36893488147419103183)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_out_of_range[GF(36893488147419103183)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_valid[GF(36893488147419103183)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_invalid_type[GF(36893488147419103183)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_out_of_range[GF(36893488147419103183)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_valid[GF(36893488147419103183)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_invalid_type[GF(36893488147419103183)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_out_of_range[GF(36893488147419103183)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_valid[GF(36893488147419103183)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_valid_small_dtype[GF(36893488147419103183)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_invalid_type[GF(36893488147419103183)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_out_of_range[GF(36893488147419103183)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_valid[GF(36893488147419103183)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_invalid_type[GF(36893488147419103183)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_out_of_range[GF(36893488147419103183)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_valid[GF(36893488147419103183)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_valid_small_dtype[GF(36893488147419103183)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_invalid_type[GF(36893488147419103183)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_out_of_range[GF(36893488147419103183)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_valid[GF(7^3)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_invalid_type[GF(7^3)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_out_of_range[GF(7^3)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_valid[GF(7^3)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_invalid_type[GF(7^3)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_out_of_range[GF(7^3)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_valid[GF(7^3)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_invalid_type[GF(7^3)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_out_of_range[GF(7^3)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_valid[GF(7^3)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_valid_small_dtype[GF(7^3)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_invalid_type[GF(7^3)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_out_of_range[GF(7^3)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_valid[GF(7^3)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_invalid_type[GF(7^3)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_out_of_range[GF(7^3)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_valid[GF(7^3)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_valid_small_dtype[GF(7^3)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_invalid_type[GF(7^3)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_out_of_range[GF(7^3)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_valid[GF(109987^4)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_invalid_type[GF(109987^4)]",
"tests/fields/test_assignment.py::TestConstantIndex::test_out_of_range[GF(109987^4)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_valid[GF(109987^4)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_invalid_type[GF(109987^4)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_constant_out_of_range[GF(109987^4)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_valid[GF(109987^4)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_invalid_type[GF(109987^4)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_list_out_of_range[GF(109987^4)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_valid[GF(109987^4)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_valid_small_dtype[GF(109987^4)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_invalid_type[GF(109987^4)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_array_out_of_range[GF(109987^4)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_valid[GF(109987^4)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_invalid_type[GF(109987^4)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_list_out_of_range[GF(109987^4)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_valid[GF(109987^4)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_valid_small_dtype[GF(109987^4)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_invalid_type[GF(109987^4)]",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_array_out_of_range[GF(109987^4)]",
"tests/fields/test_assignment.py::TestSliceIndex::test_always_int_object",
"tests/fields/test_assignment.py::Test2DSliceIndex::test_always_int_object"
] |
{
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-03-28 15:20:20+00:00
|
mit
| 3,888 |
|
mhostetter__galois-324
|
diff --git a/galois/_prime.py b/galois/_prime.py
index b713298ec..490cfb652 100644
--- a/galois/_prime.py
+++ b/galois/_prime.py
@@ -791,10 +791,15 @@ def factors(n: int) -> Tuple[List[int], List[int]]:
# Step 4
while n > 1 and not is_prime(n):
- f = pollard_rho(n) # A non-trivial factor
- while f is None:
- # Try again with a different random function f(x)
- f = pollard_rho(n, c=random.randint(2, n // 2))
+ while True:
+ c = 1
+ try:
+ f = pollard_rho(n, c=c) # A non-trivial factor
+ break # Found a factor
+ except RuntimeError:
+ # Could not find one -- keep searching
+ c = random.randint(2, n // 2)
+
if is_prime(f):
degree = 0
while n % f == 0:
@@ -975,7 +980,7 @@ def trial_division(n: int, B: Optional[int] = None) -> Tuple[List[int], List[int
@set_module("galois")
-def pollard_p1(n: int, B: int, B2: Optional[int] = None) -> Optional[int]:
+def pollard_p1(n: int, B: int, B2: Optional[int] = None) -> int:
r"""
Attempts to find a non-trivial factor of :math:`n` if it has a prime factor :math:`p` such that
:math:`p-1` is :math:`B`-smooth.
@@ -995,6 +1000,11 @@ def pollard_p1(n: int, B: int, B2: Optional[int] = None) -> Optional[int]:
:
A non-trivial factor of :math:`n`, if found. `None` if not found.
+ Raises
+ ------
+ RuntimeError
+ If a non-trivial factor cannot be found.
+
Notes
-----
For a given odd composite :math:`n` with a prime factor :math:`p`, Pollard's :math:`p-1` algorithm can discover a non-trivial factor
@@ -1022,6 +1032,7 @@ def pollard_p1(n: int, B: int, B2: Optional[int] = None) -> Optional[int]:
Searching with :math:`B=15` will not recover a prime factor.
.. ipython:: python
+ :okexcept:
galois.pollard_p1(p*q, 15)
@@ -1076,7 +1087,7 @@ def pollard_p1(n: int, B: int, B2: Optional[int] = None) -> Optional[int]:
if d not in [1, n]:
return d
if d == n:
- return None
+ raise RuntimeError(f"A non-trivial factor of {n} could not be found using the Pollard p-1 algorithm with smoothness bound {B} and secondary bound {B2}.")
# Try to find p such that p - 1 has a single prime factor larger than B
if B2 is not None:
@@ -1094,11 +1105,11 @@ def pollard_p1(n: int, B: int, B2: Optional[int] = None) -> Optional[int]:
if d not in [1, n]:
return d
- return None
+ raise RuntimeError(f"A non-trivial factor of {n} could not be found using the Pollard p-1 algorithm with smoothness bound {B} and secondary bound {B2}.")
# @functools.lru_cache(maxsize=1024)
-def pollard_rho(n: int, c: int = 1) -> Optional[int]:
+def pollard_rho(n: int, c: int = 1) -> int:
r"""
Attempts to find a non-trivial factor of :math:`n` using cycle detection.
@@ -1115,6 +1126,11 @@ def pollard_rho(n: int, c: int = 1) -> Optional[int]:
:
A non-trivial factor :math:`m` of :math:`n`, if found. `None` if not found.
+ Raises
+ ------
+ RuntimeError
+ If a non-trivial factor cannot be found.
+
Notes
-----
Pollard's :math:`\rho` algorithm seeks to find a non-trivial factor of :math:`n` by finding a cycle in a sequence
@@ -1162,7 +1178,7 @@ def pollard_rho(n: int, c: int = 1) -> Optional[int]:
d = math.gcd(a - b, n)
if d == n:
- return None
+ raise RuntimeError(f"A non-trivial factor of {n} could not be found using the Pollard Rho algorithm with f(x) = x^2 + {c}.")
return d
|
mhostetter/galois
|
9a3849841a592bc100999e78fd51b89baea4d3e8
|
diff --git a/tests/test_factor.py b/tests/test_factor.py
index 9ffc6cb96..cf1d28f22 100644
--- a/tests/test_factor.py
+++ b/tests/test_factor.py
@@ -174,19 +174,23 @@ def test_trial_division():
def test_pollard_p1():
p = 1458757 # p - 1 factors: [2, 3, 13, 1039], [2, 3, 1, 1]
q = 1326001 # q - 1 factors: [2, 3, 5, 13, 17], [4, 1, 3, 1, 1]
- assert galois.pollard_p1(p*q, 15) is None
+ with pytest.raises(RuntimeError):
+ assert galois.pollard_p1(p*q, 15)
assert galois.pollard_p1(p*q, 19) == q
assert galois.pollard_p1(p*q, 15, B2=100) == q
p = 1598442007 # p - 1 factors: [2, 3, 7, 38058143], [1, 1, 1, 1]
q = 1316659213 # q - 1 factors: [2, 3, 11, 83, 4451], [2, 4, 1, 1, 1]
- assert galois.pollard_p1(p*q, 31) is None
+ with pytest.raises(RuntimeError):
+ assert galois.pollard_p1(p*q, 31)
assert galois.pollard_p1(p*q, 31, B2=5000) == q
p = 1636344139 # p - 1 factors: [2, 3, 11, 13, 1381], [1, 1, 1, 1, 2]
q = 1476638609 # q - 1 factors: [2, 137, 673649], [4, 1, 1]
- assert galois.pollard_p1(p*q, 100) is None
- assert galois.pollard_p1(p*q, 100, B2=10_000) is None
+ with pytest.raises(RuntimeError):
+ assert galois.pollard_p1(p*q, 100)
+ with pytest.raises(RuntimeError):
+ assert galois.pollard_p1(p*q, 100, B2=10_000)
n = 2133861346249 # n factors: [37, 41, 5471, 257107], [1, 1, 1, 1]
assert galois.pollard_p1(n, 10) == 1517
|
Replace optional return values with raising `ValueError`s
There's no real reason to return `None` upon a function failing to find an answer. Instead, a `ValueError` may be raised. This way the type hints are more consistent.
Currently, a user needs to `if ret is not None:`. With raising an error, the user would need to `try`/`except`. Essentially it's the same thing.
|
0.0
|
9a3849841a592bc100999e78fd51b89baea4d3e8
|
[
"tests/test_factor.py::test_pollard_p1"
] |
[
"tests/test_factor.py::test_factors_small",
"tests/test_factor.py::test_factors_medium",
"tests/test_factor.py::test_factors_large",
"tests/test_factor.py::test_factors_extremely_large",
"tests/test_factor.py::test_perfect_power",
"tests/test_factor.py::test_trial_division",
"tests/test_factor.py::test_pollard_rho",
"tests/test_factor.py::test_divisors[0]",
"tests/test_factor.py::test_divisors[1]",
"tests/test_factor.py::test_divisors[2]",
"tests/test_factor.py::test_divisors[3]",
"tests/test_factor.py::test_divisors[4]",
"tests/test_factor.py::test_divisors[5]",
"tests/test_factor.py::test_divisors[31]",
"tests/test_factor.py::test_divisors[91]",
"tests/test_factor.py::test_divisors[120]",
"tests/test_factor.py::test_divisors[840]",
"tests/test_factor.py::test_divisors_random",
"tests/test_factor.py::test_divisors_number",
"tests/test_factor.py::test_divisor_sigma"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-03-28 17:00:01+00:00
|
mit
| 3,889 |
|
mhostetter__galois-361
|
diff --git a/galois/_polys/_poly.py b/galois/_polys/_poly.py
index 182703a7d..3d7c9bf76 100644
--- a/galois/_polys/_poly.py
+++ b/galois/_polys/_poly.py
@@ -1513,7 +1513,7 @@ class Poly:
if self._nonzero_degrees.size == 0:
self._degree = 0
else:
- self._degree = max(self._nonzero_degrees)
+ self._degree = int(max(self._nonzero_degrees))
elif hasattr(self, "_integer"):
if self._integer == 0:
self._degree = 0
|
mhostetter/galois
|
c5d930dad1cddad7d708dbe39b8ebd7e245717af
|
diff --git a/tests/polys/test_irreducible_polys.py b/tests/polys/test_irreducible_polys.py
index f3aefef69..de10d0934 100644
--- a/tests/polys/test_irreducible_polys.py
+++ b/tests/polys/test_irreducible_polys.py
@@ -111,3 +111,11 @@ def test_irreducible_polys_exceptions():
def test_irreducible_polys(order, degree):
LUT = eval(f"IRREDUCIBLE_POLYS_{order}_{degree}")
assert [f.coeffs.tolist() for f in galois.irreducible_polys(order, degree)] == LUT
+
+
+def test_large_degree():
+ """
+ See https://github.com/mhostetter/galois/issues/360.
+ """
+ f = galois.Poly.Degrees([233, 74, 0])
+ assert galois.is_irreducible(f)
|
galois.is_irreducible(poly) seems to return some false negatives in large odd exponent GFs
Hi Matt, hope all is well.
Even though it works fine for other groups (e.g. even groups likes GF(2^256) work well), still I 've been getting the following false negative result when using the galois.is_irreducible(poly) over odd groups tο produce irreducible polynomials over GF(2^233).
For example, we know from previous publications and research that x^233+x^74+1 is irreducible over GF(2) for creating the quotient ring of GF(2^233) but the following code returns Null for every polynomial (including the aforementioned one):
```
def loop_init_trinomial():
#initial detection loop
for k in range(1, 233):
poly = galois.Poly.Degrees([233, k, 0])
if galois.is_irreducible(poly):
print("Found one: ", k, poly, poly.string)
```
Care to share some thoughts?
|
0.0
|
c5d930dad1cddad7d708dbe39b8ebd7e245717af
|
[
"tests/polys/test_irreducible_polys.py::test_large_degree"
] |
[
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_exceptions",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_min[2-1]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_min[2-2]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_min[2-3]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_min[2-4]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_min[2-5]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_min[2-6]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_min[2-7]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_min[2-8]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_min[4-1]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_min[4-2]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_min[4-3]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_min[3-1]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_min[3-2]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_min[3-3]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_min[3-4]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_min[3-5]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_min[3-6]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_min[9-1]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_min[9-2]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_min[9-3]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_min[5-1]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_min[5-2]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_min[5-3]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_min[5-4]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_min[25-1]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_min[25-2]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_max[2-1]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_max[2-2]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_max[2-3]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_max[2-4]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_max[2-5]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_max[2-6]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_max[2-7]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_max[2-8]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_max[4-1]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_max[4-2]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_max[4-3]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_max[3-1]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_max[3-2]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_max[3-3]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_max[3-4]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_max[3-5]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_max[3-6]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_max[9-1]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_max[9-2]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_max[9-3]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_max[5-1]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_max[5-2]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_max[5-3]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_max[5-4]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_max[25-1]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_max[25-2]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_random[2-1]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_random[2-2]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_random[2-3]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_random[2-4]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_random[2-5]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_random[2-6]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_random[2-7]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_random[2-8]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_random[4-1]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_random[4-2]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_random[4-3]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_random[3-1]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_random[3-2]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_random[3-3]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_random[3-4]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_random[3-5]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_random[3-6]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_random[9-1]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_random[9-2]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_random[9-3]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_random[5-1]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_random[5-2]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_random[5-3]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_random[5-4]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_random[25-1]",
"tests/polys/test_irreducible_polys.py::test_irreducible_poly_random[25-2]",
"tests/polys/test_irreducible_polys.py::test_irreducible_polys_exceptions",
"tests/polys/test_irreducible_polys.py::test_irreducible_polys[2-1]",
"tests/polys/test_irreducible_polys.py::test_irreducible_polys[2-2]",
"tests/polys/test_irreducible_polys.py::test_irreducible_polys[2-3]",
"tests/polys/test_irreducible_polys.py::test_irreducible_polys[2-4]",
"tests/polys/test_irreducible_polys.py::test_irreducible_polys[2-5]",
"tests/polys/test_irreducible_polys.py::test_irreducible_polys[2-6]",
"tests/polys/test_irreducible_polys.py::test_irreducible_polys[2-7]",
"tests/polys/test_irreducible_polys.py::test_irreducible_polys[2-8]",
"tests/polys/test_irreducible_polys.py::test_irreducible_polys[4-1]",
"tests/polys/test_irreducible_polys.py::test_irreducible_polys[4-2]",
"tests/polys/test_irreducible_polys.py::test_irreducible_polys[4-3]",
"tests/polys/test_irreducible_polys.py::test_irreducible_polys[3-1]",
"tests/polys/test_irreducible_polys.py::test_irreducible_polys[3-2]",
"tests/polys/test_irreducible_polys.py::test_irreducible_polys[3-3]",
"tests/polys/test_irreducible_polys.py::test_irreducible_polys[3-4]",
"tests/polys/test_irreducible_polys.py::test_irreducible_polys[3-5]",
"tests/polys/test_irreducible_polys.py::test_irreducible_polys[3-6]",
"tests/polys/test_irreducible_polys.py::test_irreducible_polys[9-1]",
"tests/polys/test_irreducible_polys.py::test_irreducible_polys[9-2]",
"tests/polys/test_irreducible_polys.py::test_irreducible_polys[9-3]",
"tests/polys/test_irreducible_polys.py::test_irreducible_polys[5-1]",
"tests/polys/test_irreducible_polys.py::test_irreducible_polys[5-2]",
"tests/polys/test_irreducible_polys.py::test_irreducible_polys[5-3]",
"tests/polys/test_irreducible_polys.py::test_irreducible_polys[5-4]",
"tests/polys/test_irreducible_polys.py::test_irreducible_polys[25-1]",
"tests/polys/test_irreducible_polys.py::test_irreducible_polys[25-2]"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2022-05-16 14:39:31+00:00
|
mit
| 3,890 |
|
mhostetter__galois-393
|
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 73e0f1216..4fa65a650 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,5 +1,5 @@
sphinx==5
-git+https://github.com/jbms/sphinx-immaterial@5e70669e8f42bdd1dfa87c2a5f1879b7ba73d25a
+git+https://github.com/jbms/sphinx-immaterial@bf169751fba59362bcd06da96bfa65b2728cbbae
myst-parser
sphinx-design
sphinxcontrib-details-directive
diff --git a/galois/_fields/_array.py b/galois/_fields/_array.py
index d8fb46cee..7dda7c9ae 100644
--- a/galois/_fields/_array.py
+++ b/galois/_fields/_array.py
@@ -47,6 +47,13 @@ class FieldArrayMeta(ArrayMeta):
# Construct the irreducible polynomial from its integer representation
cls._irreducible_poly = Poly.Int(cls._irreducible_poly_int, field=cls._prime_subfield)
+ def __repr__(cls) -> str:
+ # When FieldArray instances are created they are added to the `galois._fields._factory` module with a name
+ # like `FieldArray_<p>_<primitive_element>` or `FieldArray_<p>_<m>_<primitive_element>_<irreducible_poly>`.
+ # This is visually unappealing. So here we override the repr() to be more succinct and indicate how the class
+ # was created. So galois._fields._factory.FieldArray_31_3 is converted to galois.GF(31).
+ return f"<class 'galois.{cls.name}'>"
+
###############################################################################
# Class properties
###############################################################################
diff --git a/galois/_fields/_factory.py b/galois/_fields/_factory.py
index ef4dc113e..18f7579a3 100644
--- a/galois/_fields/_factory.py
+++ b/galois/_fields/_factory.py
@@ -3,6 +3,7 @@ A module to implement the Galois field class factory `GF()`.
"""
from __future__ import annotations
+import sys
import types
from typing import Union, Optional, Type
from typing_extensions import Literal
@@ -251,8 +252,6 @@ def _GF_prime(
"""
Class factory for prime fields GF(p).
"""
- name = f"GF({p})"
-
# Get default primitive element
if alpha is None:
alpha = primitive_root(p)
@@ -262,6 +261,7 @@ def _GF_prime(
raise ValueError(f"Argument `primitive_element` must be non-zero in the field 0 < x < {p}, not {alpha}.")
# If the requested field has already been constructed, return it
+ name = f"FieldArray_{p}_{alpha}"
key = (p, alpha)
if key in _GF_prime._classes:
field = _GF_prime._classes[key]
@@ -287,8 +287,9 @@ def _GF_prime(
"primitive_element": alpha,
})
- # Add the class to the "galois" namespace
- field.__module__ = "galois"
+ # Add the class to this module's namespace
+ field.__module__ = __name__
+ setattr(sys.modules[__name__], name, field)
# Since this is a new class, compile the ufuncs and set the display mode
field.compile("auto" if compile is None else compile)
@@ -317,7 +318,6 @@ def _GF_extension(
Class factory for extension fields GF(p^m).
"""
# pylint: disable=too-many-statements
- name = f"GF({p}^{m})"
prime_subfield = _GF_prime(p)
is_primitive_poly = None
verify_poly = verify
@@ -352,6 +352,7 @@ def _GF_extension(
raise ValueError(f"Argument `primitive_element` must have degree strictly less than {m}, not {alpha.degree}.")
# If the requested field has already been constructed, return it
+ name = f"FieldArray_{p}_{m}_{int(alpha)}_{int(irreducible_poly_)}"
key = (p, m, int(alpha), int(irreducible_poly_))
if key in _GF_extension._classes:
field = _GF_extension._classes[key]
@@ -379,8 +380,9 @@ def _GF_extension(
"prime_subfield": prime_subfield,
})
- # Add the class to the "galois" namespace
- field.__module__ = "galois"
+ # Add the class to this module's namespace
+ field.__module__ = __name__
+ setattr(sys.modules[__name__], name, field)
# Since this is a new class, compile the ufuncs and set the display mode
field.compile("auto" if compile is None else compile)
|
mhostetter/galois
|
d94c3b61dfb31127990d5811077e33861bc4953a
|
diff --git a/tests/fields/test_classes.py b/tests/fields/test_classes.py
index 740bb5ba3..f62ba0a0d 100644
--- a/tests/fields/test_classes.py
+++ b/tests/fields/test_classes.py
@@ -1,6 +1,8 @@
"""
A pytest module to test the class attributes of FieldArray subclasses.
"""
+import pickle
+
import pytest
import numpy as np
@@ -97,3 +99,37 @@ def test_is_primitive_poly():
poly = galois.conway_poly(3, 101)
GF = galois.GF(3**101, irreducible_poly=poly, primitive_element="x", verify=False)
assert GF.is_primitive_poly == True
+
+
+def test_pickle_class(tmp_path):
+ GF = galois.GF(13)
+ with open(tmp_path / "class.pkl", "wb") as f:
+ pickle.dump(GF, f)
+ with open(tmp_path / "class.pkl", "rb") as f:
+ GF_loaded = pickle.load(f)
+ assert GF is GF_loaded
+
+ GF = galois.GF(3**5)
+ with open(tmp_path / "class.pkl", "wb") as f:
+ pickle.dump(GF, f)
+ with open(tmp_path / "class.pkl", "rb") as f:
+ GF_loaded = pickle.load(f)
+ assert GF is GF_loaded
+
+
+def test_pickle_array(tmp_path):
+ GF = galois.GF(13)
+ x = GF.Random(10)
+ with open(tmp_path / "array.pkl", "wb") as f:
+ pickle.dump(x, f)
+ with open(tmp_path / "array.pkl", "rb") as f:
+ x_loaded = pickle.load(f)
+ assert np.array_equal(x, x_loaded)
+
+ GF = galois.GF(3**5)
+ x = GF.Random(10)
+ with open(tmp_path / "array.pkl", "wb") as f:
+ pickle.dump(x, f)
+ with open(tmp_path / "array.pkl", "rb") as f:
+ x_loaded = pickle.load(f)
+ assert np.array_equal(x, x_loaded)
|
Allow serialization of Galois field
I recently tried playing around with multithreading and noticed, that I can not pass the Galois field object to a thread, since the Galois class can not be pickled. An example error would be
PicklingError: Can't pickle <class 'galois.GF(47)'>: attribute lookup GF(47) on galois failed
|
0.0
|
d94c3b61dfb31127990d5811077e33861bc4953a
|
[
"tests/fields/test_classes.py::test_pickle_class",
"tests/fields/test_classes.py::test_pickle_array"
] |
[
"tests/fields/test_classes.py::test_repr_str",
"tests/fields/test_classes.py::test_properties",
"tests/fields/test_classes.py::test_dtypes[GF(2)]",
"tests/fields/test_classes.py::test_dtypes[GF(2^2)]",
"tests/fields/test_classes.py::test_dtypes[GF(2^3)]",
"tests/fields/test_classes.py::test_dtypes[GF(2^8)]",
"tests/fields/test_classes.py::test_dtypes[GF(2^32)]",
"tests/fields/test_classes.py::test_dtypes[GF(2^100)]",
"tests/fields/test_classes.py::test_dtypes[GF(5)]",
"tests/fields/test_classes.py::test_dtypes[GF(7)]",
"tests/fields/test_classes.py::test_dtypes[GF(31)]",
"tests/fields/test_classes.py::test_dtypes[GF(3191)]",
"tests/fields/test_classes.py::test_dtypes[GF(2147483647)]",
"tests/fields/test_classes.py::test_dtypes[GF(36893488147419103183)]",
"tests/fields/test_classes.py::test_dtypes[GF(7^3)]",
"tests/fields/test_classes.py::test_dtypes[GF(109987^4)]",
"tests/fields/test_classes.py::test_cant_set_attribute[name]",
"tests/fields/test_classes.py::test_cant_set_attribute[characteristic]",
"tests/fields/test_classes.py::test_cant_set_attribute[degree]",
"tests/fields/test_classes.py::test_cant_set_attribute[order]",
"tests/fields/test_classes.py::test_cant_set_attribute[irreducible_poly]",
"tests/fields/test_classes.py::test_cant_set_attribute[is_primitive_poly]",
"tests/fields/test_classes.py::test_cant_set_attribute[primitive_element]",
"tests/fields/test_classes.py::test_cant_set_attribute[primitive_elements]",
"tests/fields/test_classes.py::test_cant_set_attribute[is_prime_field]",
"tests/fields/test_classes.py::test_cant_set_attribute[is_extension_field]",
"tests/fields/test_classes.py::test_cant_set_attribute[prime_subfield]",
"tests/fields/test_classes.py::test_cant_set_attribute[dtypes]",
"tests/fields/test_classes.py::test_cant_set_attribute[display_mode]",
"tests/fields/test_classes.py::test_cant_set_attribute[ufunc_mode]",
"tests/fields/test_classes.py::test_cant_set_attribute[ufunc_modes]",
"tests/fields/test_classes.py::test_is_primitive_poly"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-07-26 19:52:01+00:00
|
mit
| 3,891 |
|
michaelbukachi__flask-vuesfc-4
|
diff --git a/README.rst b/README.rst
index 9f1ab59..5cd44a6 100644
--- a/README.rst
+++ b/README.rst
@@ -122,6 +122,13 @@ escape it. So be sure to always use the ``safe`` filter when rendering the compo
Feel free to checkout the examples folder for other examples.
+Escaping
+--------
+Occasionally, you might need a variable to be parsed by ``jinja`` instead of ``vue``. To achieve this, use triple
+parenthesis i.e ``{{{ variable }}}``.
+
+Checkout **examples/app_with_escaping.py**.
+
--------------
Configuration
--------------
diff --git a/example/app_with_escaping.py b/example/app_with_escaping.py
new file mode 100644
index 0000000..9be1b30
--- /dev/null
+++ b/example/app_with_escaping.py
@@ -0,0 +1,27 @@
+from flask import Flask, render_template
+
+from flask_vue_sfc import VueSFC
+from flask_vue_sfc.helpers import render_vue_component
+
+
+class Config:
+ SECRET_KEY = 'some-very-long-secret'
+
+
+def create_app():
+ app = Flask(__name__)
+ app.config.from_object(Config)
+
+ VueSFC(app)
+
+ @app.route('/')
+ def example1():
+ component = render_vue_component('index4.vue', message='This is rendered by flask')
+ return render_template('example.html', component=component)
+
+ return app
+
+
+if __name__ == '__main__':
+ application = create_app()
+ application.run(debug=True)
diff --git a/example/templates/index4.vue b/example/templates/index4.vue
new file mode 100644
index 0000000..5702999
--- /dev/null
+++ b/example/templates/index4.vue
@@ -0,0 +1,15 @@
+<template>
+ <div>{{ message }}</div>
+ <div>{{{ message }}}</div>
+</template>
+<script>
+
+export default {
+ name: 'App',
+ data() {
+ return {
+ message: 'This is rendered by Vue',
+ }
+ }
+}
+</script>
\ No newline at end of file
diff --git a/flask_vue_sfc/helpers.py b/flask_vue_sfc/helpers.py
index b7b6bb0..ecc10de 100644
--- a/flask_vue_sfc/helpers.py
+++ b/flask_vue_sfc/helpers.py
@@ -10,10 +10,11 @@ def _create_random_id():
return 'vue-sfc-' + secrets.token_hex(6)
-def _load_template(template_name):
+def _load_template(template_name, **context):
ctx = _app_ctx_stack.top
+ ctx.app.update_template_context(context)
t = ctx.app.jinja_env.get_or_select_template(template_name)
- vue = t.render()
+ vue = t.render(context)
parsed = ctx.g.v8.call('VueTemplateCompiler.parseComponent', vue)
component = {
@@ -25,7 +26,7 @@ def _load_template(template_name):
return component
-def _render_component(template_name):
+def _render_component(template_name, **context):
ctx = _app_ctx_stack.top
if 'sfc_cache' in ctx.g:
@@ -33,7 +34,7 @@ def _render_component(template_name):
if sfc:
return sfc
- src = _load_template(template_name)
+ src = _load_template(template_name, **context)
component = VueComponent(src, _create_random_id, _load_template)
sfc = component.render(ctx.g.v8)
sfc = str(sfc)
@@ -46,11 +47,12 @@ def _render_component(template_name):
def render_vue_component(template_name, **context):
is_page = context.get('is_page', False)
- component = _render_component(template_name)
+ component = _render_component(template_name, **context)
if is_page:
return render_template('page.html', component=component)
return component
-def render_vue_page(template_name):
- return render_vue_component(template_name, is_page=True)
+def render_vue_page(template_name, **context):
+ context['is_page'] = True
+ return render_vue_component(template_name, **context)
diff --git a/flask_vue_sfc/utils.py b/flask_vue_sfc/utils.py
index 81871d9..f4c6bbc 100644
--- a/flask_vue_sfc/utils.py
+++ b/flask_vue_sfc/utils.py
@@ -17,7 +17,9 @@ class VueLoader(FileSystemLoader):
if template and template.lower().endswith('.vue'):
# We don't want jinja to touch {{ }}
contents, filename, uptodate = super(VueLoader, self).get_source(environment, template)
- contents = '{% raw %}\n' + contents.replace('</template>', '</template>\n{% endraw %}')
+ contents = _change_delimiters(contents)
+ # contents = '{% raw %}\n' + contents.replace('</template>', '</template>\n{% endraw %}')
+ # print(contents)
return contents, filename, uptodate
return super(VueLoader, self).get_source(environment, template)
@@ -234,10 +236,8 @@ class HtmlTemplate:
)
html = html_minify(html)
# Handler delimiters replacement to prevent conflicts with jinja
- if '{{' in html:
- html = html.replace('{{', '[[')
- html = html.replace('}}', ']]')
return html
+ # return _change_delimiters(html)
class ChildHtmlTemplate(HtmlTemplate):
@@ -255,10 +255,8 @@ class ChildHtmlTemplate(HtmlTemplate):
)
html = html_minify(html)
# Handler delimiters replacement to prevent conflicts with jinja
- if '{{' in html:
- html = html.replace('{{', '[[')
- html = html.replace('}}', ']]')
return html
+ # return _change_delimiters(html)
class CssStyling:
@@ -330,3 +328,17 @@ class VueChildComponent(ChildVueScript, ChildHtmlTemplate, CssStyling):
def _get_file_contents(path):
with open(path, 'r') as fp:
return fp.read()
+
+
+def _change_delimiters(html):
+ if '{{' in html:
+ html = html.replace('{{{', 'op_par')
+ html = html.replace('}}}', 'cl_par')
+
+ html = html.replace('{{', '[[')
+ html = html.replace('}}', ']]')
+
+ html = html.replace('op_par', '{{')
+ html = html.replace('cl_par', '}}')
+
+ return html
|
michaelbukachi/flask-vuesfc
|
6871322cb72f44bc45c8b88a358935289139e7f7
|
diff --git a/tests/test_utils.py b/tests/test_utils.py
index c4ba0d6..7044407 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -1,4 +1,4 @@
-from flask_vue_sfc.utils import VueComponent, VueChildComponent, SFC
+from flask_vue_sfc.utils import VueComponent, VueChildComponent, SFC, _change_delimiters
def test_render_html():
@@ -185,3 +185,9 @@ def test_render_sfc__with_child():
"</style>\n"
)
assert str(sfc) == expected
+
+
+def test_change_delimiters__escape_syntax():
+ expected = '{{ test }}'
+ html = _change_delimiters('{{{ test }}}')
+ assert expected == html
|
Add support for escaping variable placeholders
Currently, the extension automatically converts `{{ }}` to `[[ ]]` so that it doesn't get parsed by flask's template engine. This is convenient most of the time, however, there are times when we the placeholder to parsed by flask instead of Vue, This issue proposes the introduction of triple parenthesis `{{{ }}}` for such cases. When the converter meets triple parenthesis it converts them to double parenthesis where it will be parsed by the flask parser.
|
0.0
|
6871322cb72f44bc45c8b88a358935289139e7f7
|
[
"tests/test_utils.py::test_render_html",
"tests/test_utils.py::test_render_child_html",
"tests/test_utils.py::test_render_css",
"tests/test_utils.py::test_render_sfc__no_child",
"tests/test_utils.py::test_render_sfc__with_child",
"tests/test_utils.py::test_change_delimiters__escape_syntax"
] |
[] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-01-11 12:37:41+00:00
|
mit
| 3,892 |
|
microsoft__debugpy-1147
|
diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_save_locals.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_save_locals.py
index fa1a1252..c6bc3754 100644
--- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_save_locals.py
+++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_save_locals.py
@@ -70,6 +70,8 @@ def make_save_locals_impl():
save_locals_impl = make_save_locals_impl()
+_SENTINEL = [] # Any mutable will do.
+
def update_globals_and_locals(updated_globals, initial_globals, frame):
# We don't have the locals and passed all in globals, so, we have to
@@ -83,8 +85,11 @@ def update_globals_and_locals(updated_globals, initial_globals, frame):
# one that enabled creating and using variables during the same evaluation.
assert updated_globals is not None
f_locals = None
+
+ removed = set(initial_globals).difference(updated_globals)
+
for key, val in updated_globals.items():
- if initial_globals.get(key) is not val:
+ if val is not initial_globals.get(key, _SENTINEL):
if f_locals is None:
# Note: we call f_locals only once because each time
# we call it the values may be reset.
@@ -92,5 +97,17 @@ def update_globals_and_locals(updated_globals, initial_globals, frame):
f_locals[key] = val
+ if removed:
+ if f_locals is None:
+ # Note: we call f_locals only once because each time
+ # we call it the values may be reset.
+ f_locals = frame.f_locals
+
+ for key in removed:
+ try:
+ del f_locals[key]
+ except KeyError:
+ pass
+
if f_locals is not None:
save_locals(frame)
diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_vars.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_vars.py
index 2634714a..cdbe3085 100644
--- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_vars.py
+++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_vars.py
@@ -523,6 +523,10 @@ def evaluate_expression(py_db, frame, expression, is_exec):
updated_globals = {}
updated_globals.update(frame.f_globals)
updated_globals.update(frame.f_locals)
+ if 'globals' not in updated_globals:
+ # If the user explicitly uses 'globals()' then we provide the
+ # frame globals (unless he has shadowed it already).
+ updated_globals['globals'] = lambda: frame.f_globals
initial_globals = updated_globals.copy()
|
microsoft/debugpy
|
bf5c644bc39a1040d83ea74a40ef87b0d6ae9344
|
diff --git a/src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py b/src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py
index adc4ba26..123fb639 100644
--- a/src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py
+++ b/src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py
@@ -122,6 +122,47 @@ def test_evaluate_expression_4(disable_critical_log):
assert 'email' not in sys._getframe().f_globals
+def test_evaluate_expression_access_globals(disable_critical_log):
+ from _pydevd_bundle.pydevd_vars import evaluate_expression
+
+ def check(frame):
+ eval_txt = '''globals()['global_variable'] = 22'''
+ evaluate_expression(None, frame, eval_txt, is_exec=True)
+ assert 'global_variable' not in frame.f_locals
+ assert 'global_variable' in frame.f_globals
+
+ check(next(iter(obtain_frame())))
+ assert 'global_variable' in sys._getframe().f_globals
+ assert 'global_variable' not in sys._getframe().f_locals
+
+
+def test_evaluate_expression_create_none(disable_critical_log):
+ from _pydevd_bundle.pydevd_vars import evaluate_expression
+
+ def check(frame):
+ eval_txt = 'x = None'
+ evaluate_expression(None, frame, eval_txt, is_exec=True)
+ assert 'x' in frame.f_locals
+ assert 'x' not in frame.f_globals
+
+ check(next(iter(obtain_frame())))
+
+
+def test_evaluate_expression_delete_var(disable_critical_log):
+ from _pydevd_bundle.pydevd_vars import evaluate_expression
+
+ def check(frame):
+ eval_txt = 'x = 22'
+ evaluate_expression(None, frame, eval_txt, is_exec=True)
+ assert 'x' in frame.f_locals
+
+ eval_txt = 'del x'
+ evaluate_expression(None, frame, eval_txt, is_exec=True)
+ assert 'x' not in frame.f_locals
+
+ check(next(iter(obtain_frame())))
+
+
def test_evaluate_expression_5(disable_critical_log):
from _pydevd_bundle.pydevd_vars import evaluate_expression
|
Set a variable from a different frame
Is there a way to copy a local variable (in a function) to the global scope (module), in order to inspect it after debugging ?
In my experience, sending a `setExpression` request with a global variable as expression does not create a global variable:
```
type: "request",
command: "setExpression",
arguments: {
expression: "globals()['global_variable']",
value: "local_variable",
frameId: local_function_frame_id
},
seq: XXX,
```
The above request will create a variable within the frame of the function only. This variable will not be accessible from the frame of the module.
I may have misunderstood how frames and scopes work, I'd be happy to have some clarification if this is the case.
|
0.0
|
bf5c644bc39a1040d83ea74a40ef87b0d6ae9344
|
[
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_access_globals",
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_create_none",
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_delete_var"
] |
[
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_basic",
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_1",
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_2",
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_3",
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_4",
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_5",
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_async_exec",
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_async_exec_as_eval",
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_async_exec_error",
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_async_eval",
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_async_eval_error",
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_name_mangling"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-12-08 14:17:52+00:00
|
mit
| 3,893 |
|
microsoft__debugpy-570
|
diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_safe_repr.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_safe_repr.py
index be1e61e9..56b384a6 100644
--- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_safe_repr.py
+++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_safe_repr.py
@@ -4,7 +4,7 @@
# Gotten from ptvsd for supporting the format expected there.
import sys
-from _pydevd_bundle.pydevd_constants import IS_PY2
+from _pydevd_bundle.pydevd_constants import IS_PY2, IS_PY36_OR_GREATER
import locale
from _pydev_bundle import pydev_log
@@ -248,10 +248,15 @@ class SafeRepr(object):
count = self.maxcollection[level]
yield_comma = False
- try:
- sorted_keys = sorted(obj)
- except Exception:
+ if IS_PY36_OR_GREATER:
+ # On Python 3.6 (onwards) dictionaries now keep
+ # insertion order.
sorted_keys = list(obj)
+ else:
+ try:
+ sorted_keys = sorted(obj)
+ except Exception:
+ sorted_keys = list(obj)
for key in sorted_keys:
if yield_comma:
|
microsoft/debugpy
|
2ebc4e71b50a0dfb636bb0649417ee2d31697bcf
|
diff --git a/src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py b/src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py
index c03a07c1..95d4370e 100644
--- a/src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py
+++ b/src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py
@@ -5,7 +5,7 @@ import re
import pytest
from _pydevd_bundle.pydevd_safe_repr import SafeRepr
import json
-from _pydevd_bundle.pydevd_constants import IS_JYTHON, IS_PY2
+from _pydevd_bundle.pydevd_constants import IS_JYTHON, IS_PY2, IS_PY36_OR_GREATER
try:
import numpy as np
@@ -400,7 +400,10 @@ class TestDicts(SafeReprTestBase):
d1['c'] = None
d1['b'] = None
d1['a'] = None
- self.assert_saferepr(d1, "{'a': None, 'b': None, 'c': None}")
+ if IS_PY36_OR_GREATER:
+ self.assert_saferepr(d1, "{'c': None, 'b': None, 'a': None}")
+ else:
+ self.assert_saferepr(d1, "{'a': None, 'b': None, 'c': None}")
@pytest.mark.skipif(sys.version_info < (3, 0), reason='Py3 specific test')
def test_unsortable_keys(self):
|
VS Code shows wrong order of dict
The issue has been closed, but the bug is still not resolved:
https://github.com/microsoft/vscode-python/issues/3236
https://github.com/microsoft/vscode-python/issues/7787
## Environment data
- VS Code version: 1.54.3
- Extension version (available under the Extensions sidebar): v2021.3.658691958
- OS and version: Win 10
- Python version (& distribution if applicable, e.g. Anaconda): 3.6
- Type of virtual environment used (N/A | venv | virtualenv | conda | ...): conda
- Value of the `python.languageServer` setting: pylance v2021.3.2
## Expected behaviour
The debugger shows the dict element in the right order.
## Actual behaviour
The debugger shows the dict elements in a wrong order.
## Steps to reproduce:
Debug the code by inserting a breakpoint at the print statement:
```python
atoms = {"H": 0, "He": 1, "Li": 2, "Be": 3}
print(atoms)
```
The debugger will show an alphabetic order. The function print produces the right result though.

|
0.0
|
2ebc4e71b50a0dfb636bb0649417ee2d31697bcf
|
[
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestDicts::test_sorted"
] |
[
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestSafeRepr::test_collection_types",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestSafeRepr::test_largest_repr",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestStrings::test_str_small",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestStrings::test_str_large",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestStrings::test_str_largest_unchanged",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestStrings::test_str_smallest_changed",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestStrings::test_str_list_largest_unchanged",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestStrings::test_str_list_smallest_changed",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestStrings::test_bytes_small",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestStrings::test_bytes_large",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestTuples::test_large_flat",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestTuples::test_large_nested",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestTuples::test_boundary",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestTuples::test_nested",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestLists::test_large_flat",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestLists::test_large_nested",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestLists::test_boundary",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestLists::test_nested",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestLists::test_directly_recursive",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestLists::test_indirectly_recursive",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestFrozensets::test_nested",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestSets::test_large_flat",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestSets::test_boundary",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestDicts::test_large_key",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestDicts::test_large_value",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestDicts::test_large_both",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestDicts::test_nested_value",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestDicts::test_empty",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestDicts::test_unsortable_keys",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestDicts::test_directly_recursive",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestDicts::test_indirectly_recursive",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestOtherPythonTypes::test_range_small",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestOtherPythonTypes::test_range_large_stop_only",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestOtherPythonTypes::test_range_large_with_start",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestUserDefinedObjects::test_broken_repr",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestUserDefinedObjects::test_large",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestUserDefinedObjects::test_inherit_repr",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestUserDefinedObjects::test_custom_repr",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestUserDefinedObjects::test_custom_repr_many_items",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::TestUserDefinedObjects::test_custom_repr_large_item",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::test_py3_str_slicing[True-params0]",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::test_py3_str_slicing[True-params1]",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::test_py3_str_slicing[True-params2]",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::test_py3_str_slicing[False-params0]",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::test_py3_str_slicing[False-params1]",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::test_py3_str_slicing[False-params2]",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::test_raw_bytes",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::test_raw_unicode",
"src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py::test_no_repr"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_media"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-03-25 14:14:00+00:00
|
mit
| 3,894 |
|
microsoft__debugpy-683
|
diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_reload.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_reload.py
index b7b897c4..b279eda2 100644
--- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_reload.py
+++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_reload.py
@@ -193,8 +193,16 @@ class Reload:
def __init__(self, mod, mod_name=None, mod_filename=None):
self.mod = mod
- self.mod_name = (mod_name or mod.__name__) if mod_name else None
- self.mod_filename = (mod_filename or mod.__file__) if mod else None
+ if mod_name:
+ self.mod_name = mod_name
+ else:
+ self.mod_name = mod.__name__ if mod is not None else None
+
+ if mod_filename:
+ self.mod_filename = mod_filename
+ else:
+ self.mod_filename = mod.__file__ if mod is not None else None
+
self.found_change = False
def apply(self):
|
microsoft/debugpy
|
8aa184e0040dc1a1782e0a181795e20428ccfdda
|
diff --git a/src/debugpy/_vendored/pydevd/test_pydevd_reload/test_pydevd_reload.py b/src/debugpy/_vendored/pydevd/test_pydevd_reload/test_pydevd_reload.py
index aad8c66e..067f3da6 100644
--- a/src/debugpy/_vendored/pydevd/test_pydevd_reload/test_pydevd_reload.py
+++ b/src/debugpy/_vendored/pydevd/test_pydevd_reload/test_pydevd_reload.py
@@ -50,7 +50,17 @@ class Test(unittest.TestCase):
pass
def make_mod(self, name="x", repl=None, subst=None, sample=SAMPLE_CODE):
- fn = os.path.join(self.tempdir, name + ".py")
+ basedir = self.tempdir
+ if '.' in name:
+ splitted = name.split('.')
+ basedir = os.path.join(self.tempdir, *splitted[:-1])
+ name = splitted[-1]
+ try:
+ os.makedirs(basedir)
+ except:
+ pass
+
+ fn = os.path.join(basedir, name + ".py")
f = open(fn, "w")
if repl is not None and subst is not None:
sample = sample.replace(repl, subst)
@@ -531,3 +541,33 @@ def method():
# do it).
assert str(x.global_numpy) == '[1 2 3]'
+ def test_reload_relative(self):
+ MODULE_CODE = """
+def add_text(s):
+ return s + " module"
+"""
+ MODULE1_CODE = """
+from . import module
+
+def add_more_text(s):
+ s = module.add_text(s)
+ return s + ' module1'
+"""
+
+ MODULE1_CODE_V2 = """
+from . import module
+
+def add_more_text(s):
+ s = module.add_text(s)
+ return s + ' module1V2'
+"""
+
+ self.make_mod(sample='', name='package.__init__')
+ self.make_mod(sample=MODULE_CODE, name='package.module')
+ self.make_mod(sample=MODULE1_CODE, name='package.module1')
+ from package import module1 # @UnresolvedImport
+ assert module1.add_more_text('1') == '1 module module1'
+
+ self.make_mod(sample=MODULE1_CODE_V2, name='package.module1')
+ pydevd_reload.xreload(module1)
+ assert module1.add_more_text('1') == '1 module module1V2'
|
Reloading modules with relative imports fails
Structure to reproduce:
```
main.py
package/
module.py
module1.py
```
main.py:
-----
```
from package.module1 import add_more_text
def main():
while True:
s = input()
print(add_more_text(s))
if __name__=="__main__":
main()
```
module.py
------
```
def add_text(s):
return s + " foo bar"
```
module1.py:
------
```
from . import module
def add_more_text(s):
s = module.add_text(s)
return s + ' zoom'
```
|
0.0
|
8aa184e0040dc1a1782e0a181795e20428ccfdda
|
[
"src/debugpy/_vendored/pydevd/test_pydevd_reload/test_pydevd_reload.py::Test::test_reload_relative"
] |
[
"src/debugpy/_vendored/pydevd/test_pydevd_reload/test_pydevd_reload.py::Test::test_change_hierarchy",
"src/debugpy/_vendored/pydevd/test_pydevd_reload/test_pydevd_reload.py::Test::test_change_hierarchy_old_style",
"src/debugpy/_vendored/pydevd/test_pydevd_reload/test_pydevd_reload.py::Test::test_create_class",
"src/debugpy/_vendored/pydevd/test_pydevd_reload/test_pydevd_reload.py::Test::test_create_class2",
"src/debugpy/_vendored/pydevd/test_pydevd_reload/test_pydevd_reload.py::Test::test_if_code_obj_equals",
"src/debugpy/_vendored/pydevd/test_pydevd_reload/test_pydevd_reload.py::Test::test_metaclass",
"src/debugpy/_vendored/pydevd/test_pydevd_reload/test_pydevd_reload.py::Test::test_parent_function",
"src/debugpy/_vendored/pydevd/test_pydevd_reload/test_pydevd_reload.py::Test::test_pydevd_reload",
"src/debugpy/_vendored/pydevd/test_pydevd_reload/test_pydevd_reload.py::Test::test_pydevd_reload2",
"src/debugpy/_vendored/pydevd/test_pydevd_reload/test_pydevd_reload.py::Test::test_pydevd_reload3",
"src/debugpy/_vendored/pydevd/test_pydevd_reload/test_pydevd_reload.py::Test::test_pydevd_reload4",
"src/debugpy/_vendored/pydevd/test_pydevd_reload/test_pydevd_reload.py::Test::test_reload_custom_code_after_changes",
"src/debugpy/_vendored/pydevd/test_pydevd_reload/test_pydevd_reload.py::Test::test_reload_custom_code_after_changes_in_class",
"src/debugpy/_vendored/pydevd/test_pydevd_reload/test_pydevd_reload.py::Test::test_update_constant",
"src/debugpy/_vendored/pydevd/test_pydevd_reload/test_pydevd_reload.py::Test::test_update_constant_with_custom_code",
"src/debugpy/_vendored/pydevd/test_pydevd_reload/test_pydevd_reload.py::Test::test_update_constant_with_custom_code2",
"src/debugpy/_vendored/pydevd/test_pydevd_reload/test_pydevd_reload.py::Test::test_update_with_slots"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2021-07-30 13:55:06+00:00
|
mit
| 3,895 |
|
microsoft__debugpy-918
|
diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_vars.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_vars.py
index f214fa7c..1ed89636 100644
--- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_vars.py
+++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_vars.py
@@ -377,13 +377,17 @@ def _update_globals_and_locals(updated_globals, initial_globals, frame):
# Still, the approach to have a single namespace was chosen because it was the only
# one that enabled creating and using variables during the same evaluation.
assert updated_globals is not None
- changed = False
+ f_locals = None
for key, val in updated_globals.items():
if initial_globals.get(key) is not val:
- changed = True
- frame.f_locals[key] = val
+ if f_locals is None:
+ # Note: we call f_locals only once because each time
+ # we call it the values may be reset.
+ f_locals = frame.f_locals
- if changed:
+ f_locals[key] = val
+
+ if f_locals is not None:
pydevd_save_locals.save_locals(frame)
|
microsoft/debugpy
|
7be59933b1df69d22e838f99e54fcdb5afc8cff8
|
diff --git a/src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py b/src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py
index 881f55a5..1adcce34 100644
--- a/src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py
+++ b/src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py
@@ -9,6 +9,8 @@ global_frame = sys._getframe()
def obtain_frame():
+ A = 1
+ B = 2
yield sys._getframe()
@@ -116,3 +118,15 @@ def test_evaluate_expression_4(disable_critical_log):
assert 'email' in sys._getframe().f_globals
del sys._getframe().f_globals['email']
assert 'email' not in sys._getframe().f_globals
+
+
+def test_evaluate_expression_5(disable_critical_log):
+ from _pydevd_bundle.pydevd_vars import evaluate_expression
+
+ def check(frame):
+ eval_txt = 'A, B = 5, 6'
+ evaluate_expression(None, frame, eval_txt, is_exec=True)
+ assert frame.f_locals['A'] == 5
+ assert frame.f_locals['B'] == 6
+
+ check(next(iter(obtain_frame())))
|
Some variables not assigned when using debug console
Issue Type: <b>Bug</b>
Tested on VSCode 1.66.x. Does not seem to occur with 1.65.x
1. Run some code
2. stop on a break point.
3. some variables assigned by code prior to beakpoint (e.g. A = 10, B = 30)
4. execute a function that returns multiple variable and put into existing variables in local context (e.g. A, B = some_fun())
5. A does not get assigned with a new value.
Here is some code that reproduces the problem (with line numbers). Set a breakpoint on line
```
import numpy as np
def some_fun():
return np.random.randn(), np.random.randn()
def some_fun_2():
return np.random.randn()
def main():
A = 10
B = 34
C = 11
A, B = some_fun() # this assignment works when stepping
print(A) # break on this line
```
Once the code has stopped on breakpoint, execute A, B = some_fun() in the Debug Console to experiment the bug.
These have the same erroneous behaviour:
`A, B = np.random.randn(), np.random.randn() `
`B, A = np.random.randn(), np.random.randn() `
`B, A = some_fun() `
However, `A = some_fun_2()` works fine.
python 3.8.10
Extension version: 2022.4.1
VS Code version: Code 1.66.2 (dfd34e8260c270da74b5c2d86d61aee4b6d56977, 2022-04-11T07:49:20.994Z)
OS version: Darwin x64 20.6.0
Restricted Mode: No
<details>
<summary>System Info</summary>
|Item|Value|
|---|---|
|CPUs|Intel(R) Core(TM) i7-8850H CPU @ 2.60GHz (12 x 2600)|
|GPU Status|2d_canvas: enabled<br>canvas_oop_rasterization: disabled_off<br>direct_rendering_display_compositor: disabled_off_ok<br>gpu_compositing: enabled<br>metal: disabled_off<br>multiple_raster_threads: enabled_on<br>oop_rasterization: enabled<br>opengl: enabled_on<br>rasterization: enabled<br>raw_draw: disabled_off_ok<br>skia_renderer: enabled_on<br>video_decode: enabled<br>video_encode: enabled<br>webgl: enabled<br>webgl2: enabled|
|Load (avg)|3, 3, 3|
|Memory (System)|32.00GB (0.06GB free)|
|Process Argv|--crash-reporter-id 2a273040-cc5d-49bc-b505-46f4293dc6a3 --crash-reporter-id 2a273040-cc5d-49bc-b505-46f4293dc6a3|
|Screen Reader|no|
|VM|0%|
</details><details>
<summary>A/B Experiments</summary>
```
vsliv368cf:30146710
vsreu685:30147344
python383cf:30185419
vspor879:30202332
vspor708:30202333
vspor363:30204092
pythonvspyl392:30443607
pythontb:30283811
pythonptprofiler:30281270
vshan820:30294714
vstes263:30335439
vscoreces:30445986
pythondataviewer:30285071
vscod805cf:30301675
pythonvspyt200:30340761
binariesv615:30325510
bridge0708:30335490
bridge0723:30353136
vsaa593cf:30376535
vsc1dst:30438360
pythonvs932:30410667
wslgetstarted:30449410
pythonvsnew555:30457759
vscscmwlcmt:30465135
vscaat:30438848
```
</details>
<!-- generated by issue reporter -->
|
0.0
|
7be59933b1df69d22e838f99e54fcdb5afc8cff8
|
[
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_5"
] |
[
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_basic",
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_1",
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_2",
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_3",
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_4"
] |
{
"failed_lite_validators": [
"has_git_commit_hash"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-04-21 19:17:16+00:00
|
mit
| 3,896 |
|
microsoft__electionguard-python-156
|
diff --git a/setup.py b/setup.py
index 185688b..9e2d308 100644
--- a/setup.py
+++ b/setup.py
@@ -14,7 +14,7 @@ with open(join(current_directory, "README.md"), encoding="utf-8") as readme_file
LONG_DESCRIPTION = readme_file.read()
NAME = "electionguard"
-VERSION = "1.1.2"
+VERSION = "1.1.3"
LICENSE = "MIT"
DESCRIPTION = "ElectionGuard: Support for e2e verified elections."
LONG_DESCRIPTION_CONTENT_TYPE = "text/markdown"
diff --git a/src/electionguard/serializable.py b/src/electionguard/serializable.py
index c795bb8..ffd69ca 100644
--- a/src/electionguard/serializable.py
+++ b/src/electionguard/serializable.py
@@ -1,11 +1,13 @@
from dataclasses import dataclass
from datetime import datetime
from os import path
-from typing import cast, TypeVar, Generic
+from typing import Any, cast, TypeVar, Generic
from jsons import (
+ dump,
dumps,
NoneType,
+ load,
loads,
JsonsError,
set_deserializer,
@@ -22,7 +24,7 @@ WRITE: str = "w"
READ: str = "r"
JSON_PARSE_ERROR = '{"error": "Object could not be parsed due to json issue"}'
# TODO Issue #??: Jsons library incorrectly dumps class method
-FROM_JSON_FILE = '"from_json_file": {}, '
+KEYS_TO_REMOVE = ["from_json", "from_json_file", "from_json_object"]
@dataclass
@@ -33,18 +35,19 @@ class Serializable(Generic[T]):
def to_json(self, strip_privates: bool = True) -> str:
"""
- Serialize to json
+ Serialize to json string
+ :param strip_privates: strip private variables
+ :return: the json string representation of this object
+ """
+ return write_json(self, strip_privates)
+
+ def to_json_object(self, strip_privates: bool = True) -> Any:
+ """
+ Serialize to json object
:param strip_privates: strip private variables
:return: the json representation of this object
"""
- set_serializers()
- suppress_warnings()
- try:
- return cast(
- str, dumps(self, strip_privates=strip_privates, strip_nulls=True)
- ).replace(FROM_JSON_FILE, "")
- except JsonsError:
- return JSON_PARSE_ERROR
+ return write_json_object(self, strip_privates)
def to_json_file(
self, file_name: str, file_path: str = "", strip_privates: bool = True
@@ -55,12 +58,32 @@ class Serializable(Generic[T]):
:param file_path: File path
:param strip_privates: Strip private variables
"""
- write_json_file(self.to_json(strip_privates), file_name, file_path)
+ write_json_file(self, file_name, file_path, strip_privates)
+
+ @classmethod
+ def from_json(cls, data: str) -> T:
+ """
+ Deserialize the provided data string into the specified instance
+ :param data: JSON string
+ """
+ set_deserializers()
+ return cast(T, loads(data, cls))
+
+ @classmethod
+ def from_json_object(cls, data: object) -> T:
+ """
+ Deserialize the provided data object into the specified instance
+ :param data: JSON object
+ """
+ set_deserializers()
+ return cast(T, load(data, cls))
@classmethod
def from_json_file(cls, file_name: str, file_path: str = "") -> T:
"""
Deserialize the provided file into the specified instance
+ :param file_name: File name
+ :param file_path: File path
"""
json_file_path: str = path.join(file_path, file_name + JSON_FILE_EXTENSION)
with open(json_file_path, READ) as json_file:
@@ -68,22 +91,82 @@ class Serializable(Generic[T]):
target = cls.from_json(data)
return target
- @classmethod
- def from_json(cls, data: str) -> T:
- """
- Deserialize the provided data string into the specified instance
- """
- set_deserializers()
- return cast(T, loads(data, cls))
-
-def write_json_file(json_data: str, file_name: str, file_path: str = "") -> None:
+def _remove_key(obj: Any, key_to_remove: str) -> Any:
+ """
+ Remove key from object recursively
+ :param obj: Any object
+ :param key_to_remove: key to remove
+ """
+ if isinstance(obj, dict):
+ for key in list(obj.keys()):
+ if key == key_to_remove:
+ del obj[key]
+ else:
+ _remove_key(obj[key], key_to_remove)
+ elif isinstance(obj, list):
+ for i in reversed(range(len(obj))):
+ if obj[i] == key_to_remove:
+ del obj[i]
+ else:
+ _remove_key(obj[i], key_to_remove)
+
+
+def write_json(object_to_write: object, strip_privates: bool = True) -> str:
+ """
+ Serialize to json string
+ :param object_to_write: object to write to json
+ :param strip_privates: strip private variables
+ :return: the json string representation of this object
+ """
+ set_serializers()
+ suppress_warnings()
+ try:
+ json_object = write_json_object(object_to_write, strip_privates)
+ json_string = cast(
+ str, dumps(json_object, strip_privates=strip_privates, strip_nulls=True)
+ )
+ return json_string
+ except JsonsError:
+ return JSON_PARSE_ERROR
+
+
+def write_json_object(object_to_write: object, strip_privates: bool = True) -> object:
+ """
+ Serialize to json object
+ :param object_to_write: object to write to json
+ :param strip_privates: strip private variables
+ :return: the json representation of this object
+ """
+ set_serializers()
+ suppress_warnings()
+ try:
+ json_object = dump(
+ object_to_write, strip_privates=strip_privates, strip_nulls=True
+ )
+ for key in KEYS_TO_REMOVE:
+ _remove_key(json_object, key)
+ return json_object
+ except JsonsError:
+ return JSON_PARSE_ERROR
+
+
+def write_json_file(
+ object_to_write: object,
+ file_name: str,
+ file_path: str = "",
+ strip_privates: bool = True,
+) -> None:
"""
- Write json data string to json file
+ Serialize json data string to json file
+ :param object_to_write: object to write to json
+ :param file_name: File name
+ :param file_path: File path
+ :param strip_privates: strip private variables
"""
json_file_path: str = path.join(file_path, file_name + JSON_FILE_EXTENSION)
with open(json_file_path, WRITE) as json_file:
- json_file.write(json_data)
+ json_file.write(write_json(object_to_write, strip_privates))
def set_serializers() -> None:
|
microsoft/electionguard-python
|
a8de1884eddc460342bf7cfca22d8a85c77fa99b
|
diff --git a/tests/test_serializable.py b/tests/test_serializable.py
index 98ad413..e64ab95 100644
--- a/tests/test_serializable.py
+++ b/tests/test_serializable.py
@@ -5,13 +5,61 @@ from electionguard.serializable import (
set_deserializers,
set_serializers,
write_json_file,
+ write_json_object,
+ write_json,
)
class TestSerializable(TestCase):
+ def test_write_json(self) -> None:
+ # Arrange
+ json_data = {
+ "from_json_file": {},
+ "test": 1,
+ "nested": {"from_json_file": {}, "test": 1},
+ "array": [{"from_json_file": {}, "test": 1}],
+ }
+ expected_json_string = (
+ '{"test": 1, "nested": {"test": 1}, "array": [{"test": 1}]}'
+ )
+
+ # Act
+ json_string = write_json(json_data)
+
+ # Assert
+ self.assertEqual(json_string, expected_json_string)
+
+ def test_write_json_object(self) -> None:
+ # Arrange
+ json_data = {
+ "from_json_file": {},
+ "test": 1,
+ "nested": {"from_json_file": {}, "test": 1},
+ "array": [{"from_json_file": {}, "test": 1}],
+ }
+ expected_json_object = {
+ "test": 1,
+ "nested": {"test": 1},
+ "array": [{"test": 1}],
+ }
+
+ # Act
+ json_object = write_json_object(json_data)
+
+ # Assert
+ self.assertEqual(json_object, expected_json_object)
+
def test_write_json_file(self) -> None:
# Arrange
- json_data = '{ "test" : 1 }'
+ json_data = {
+ "from_json_file": {},
+ "test": 1,
+ "nested": {"from_json_file": {}, "test": 1},
+ "array": [{"from_json_file": {}, "test": 1}],
+ }
+ expected_json_data = (
+ '{"test": 1, "nested": {"test": 1}, "array": [{"test": 1}]}'
+ )
file_name = "json_write_test"
json_file = file_name + ".json"
@@ -20,7 +68,7 @@ class TestSerializable(TestCase):
# Assert
with open(json_file) as reader:
- self.assertEqual(reader.read(), json_data)
+ self.assertEqual(reader.read(), expected_json_data)
# Cleanup
remove(json_file)
|
Iterate to Release 1.1.3
Migrate to next version `1.1.3`
|
0.0
|
a8de1884eddc460342bf7cfca22d8a85c77fa99b
|
[
"tests/test_serializable.py::TestSerializable::test_setup_deserialization",
"tests/test_serializable.py::TestSerializable::test_setup_serialization",
"tests/test_serializable.py::TestSerializable::test_write_json",
"tests/test_serializable.py::TestSerializable::test_write_json_file",
"tests/test_serializable.py::TestSerializable::test_write_json_object"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-08-31 20:07:29+00:00
|
mit
| 3,897 |
|
microsoft__electionguard-python-163
|
diff --git a/src/electionguard/group.py b/src/electionguard/group.py
index 95d8fe5..adfd67f 100644
--- a/src/electionguard/group.py
+++ b/src/electionguard/group.py
@@ -23,6 +23,16 @@ class ElementModQ(NamedTuple):
elem: mpz
+ def to_hex(self) -> str:
+ """
+ Converts from the element to the hex representation of bytes. This is preferable to directly
+ accessing `elem`, whose representation might change.
+ """
+ h = format(self.elem, "02x")
+ if len(h) % 2:
+ h = "0" + h
+ return h
+
def to_int(self) -> int:
"""
Converts from the element to a regular integer. This is preferable to directly
@@ -65,6 +75,16 @@ class ElementModP(NamedTuple):
elem: mpz
+ def to_hex(self) -> str:
+ """
+ Converts from the element to the hex representation of bytes. This is preferable to directly
+ accessing `elem`, whose representation might change.
+ """
+ h = format(self.elem, "02x")
+ if len(h) % 2:
+ h = "0" + h
+ return h
+
def to_int(self) -> int:
"""
Converts from the element to a regular integer. This is preferable to directly
@@ -125,6 +145,19 @@ ElementModQorInt = Union[ElementModQ, int]
ElementModPorInt = Union[ElementModP, int]
+def hex_to_q(input: str) -> Optional[ElementModQ]:
+ """
+ Given a hex string representing bytes, returns an ElementModQ.
+ Returns `None` if the number is out of the allowed
+ [0,Q) range.
+ """
+ i = int(input, 16)
+ if 0 <= i < Q:
+ return ElementModQ(mpz(i))
+ else:
+ return None
+
+
def int_to_q(input: Union[str, int]) -> Optional[ElementModQ]:
"""
Given a Python integer, returns an ElementModQ.
diff --git a/src/electionguard/guardian.py b/src/electionguard/guardian.py
index 4c1f072..c2eed67 100644
--- a/src/electionguard/guardian.py
+++ b/src/electionguard/guardian.py
@@ -7,7 +7,7 @@ from .elgamal import ElGamalCiphertext
from .group import (
ElementModP,
ElementModQ,
- int_to_q,
+ hex_to_q,
mult_p,
pow_q,
pow_p,
@@ -498,7 +498,7 @@ class Guardian(ElectionObjectBase):
f"compensate decrypt guardian {self.object_id} failed decryption for {missing_guardian_id}"
)
return None
- partial_secret_key = get_optional(int_to_q(int(decrypted_value)))
+ partial_secret_key = get_optional(hex_to_q(decrypted_value))
# 𝑀_{𝑖,l} = 𝐴^P𝑖_{l}
partial_decryption = elgamal.partial_decrypt(partial_secret_key)
diff --git a/src/electionguard/hash.py b/src/electionguard/hash.py
index d244214..9d13192 100644
--- a/src/electionguard/hash.py
+++ b/src/electionguard/hash.py
@@ -80,15 +80,15 @@ def hash_elems(*a: CRYPTO_HASHABLE_ALL) -> ElementModQ:
hash_me = "null"
elif isinstance(x, ElementModP) or isinstance(x, ElementModQ):
- hash_me = str(x.to_int())
+ hash_me = x.to_hex()
elif isinstance(x, CryptoHashable):
- hash_me = str(x.crypto_hash().to_int())
+ hash_me = x.crypto_hash().to_hex()
elif isinstance(x, str):
# strings are iterable, so it's important to handle them before the following check
hash_me = x
elif isinstance(x, Sequence):
# The simplest way to deal with lists, tuples, and such are to crunch them recursively.
- hash_me = str(hash_elems(*x).to_int())
+ hash_me = hash_elems(*x).to_hex()
else:
hash_me = str(x)
h.update((hash_me + "|").encode("utf-8"))
diff --git a/src/electionguard/key_ceremony.py b/src/electionguard/key_ceremony.py
index 58ac9ff..a2d4b92 100644
--- a/src/electionguard/key_ceremony.py
+++ b/src/electionguard/key_ceremony.py
@@ -19,7 +19,7 @@ from .elgamal import (
elgamal_combine_public_keys,
elgamal_keypair_random,
)
-from .group import int_to_q, rand_q, ElementModP, ElementModQ
+from .group import hex_to_q, rand_q, ElementModP, ElementModQ
from .rsa import rsa_keypair, rsa_decrypt, rsa_encrypt
from .schnorr import SchnorrProof, make_schnorr_proof
from .serializable import Serializable
@@ -143,8 +143,7 @@ def generate_elgamal_auxiliary_key_pair() -> AuxiliaryKeyPair:
"""
elgamal_key_pair = elgamal_keypair_random()
return AuxiliaryKeyPair(
- str(elgamal_key_pair.secret_key.to_int()),
- str(elgamal_key_pair.public_key.to_int()),
+ elgamal_key_pair.secret_key.to_hex(), elgamal_key_pair.public_key.to_hex(),
)
@@ -190,7 +189,7 @@ def generate_election_partial_key_backup(
value = compute_polynomial_coordinate(
auxiliary_public_key.sequence_order, polynomial
)
- encrypted_value = encrypt(str(value.to_int()), auxiliary_public_key.key)
+ encrypted_value = encrypt(value.to_hex(), auxiliary_public_key.key)
if encrypted_value is None:
return None
return ElectionPartialKeyBackup(
@@ -240,7 +239,7 @@ def verify_election_partial_key_backup(
return ElectionPartialKeyVerification(
backup.owner_id, backup.designated_id, verifier_id, False
)
- value = get_optional(int_to_q(int(decrypted_value)))
+ value = get_optional(hex_to_q(decrypted_value))
return ElectionPartialKeyVerification(
backup.owner_id,
backup.designated_id,
diff --git a/src/electionguard/rsa.py b/src/electionguard/rsa.py
index f2e41df..265d2b8 100644
--- a/src/electionguard/rsa.py
+++ b/src/electionguard/rsa.py
@@ -68,11 +68,9 @@ def rsa_encrypt(message: str, public_key: str) -> Optional[str]:
"""
data = bytes(public_key, ISO_ENCODING)
rsa_public_key: RSAPublicKey = load_pem_public_key(data, backend=default_backend())
- integer = int(message)
- bits = count_set_bits(integer)
- if bits > MAX_BITS:
+ plaintext = bytes.fromhex(message)
+ if len(plaintext) > MAX_BITS:
return None
- plaintext = integer.to_bytes(bits, BYTE_ORDER)
ciphertext = rsa_public_key.encrypt(plaintext, PKCS1v15())
return str(ciphertext, ISO_ENCODING)
@@ -95,8 +93,8 @@ def rsa_decrypt(encrypted_message: str, private_key: str) -> Optional[str]:
plaintext = rsa_private_key.decrypt(ciphertext, PKCS1v15())
except ValueError:
return None
- integer = int.from_bytes(plaintext, BYTE_ORDER)
- return str(integer)
+ hex_str = plaintext.hex()
+ return hex_str
def count_set_bits(n: int) -> int:
|
microsoft/electionguard-python
|
682ac7f19dbeb3025380a06d93ae74810f9d5c78
|
diff --git a/tests/test_rsa.py b/tests/test_rsa.py
index da39b89..4f7a44a 100644
--- a/tests/test_rsa.py
+++ b/tests/test_rsa.py
@@ -6,7 +6,7 @@ from electionguard.rsa import rsa_decrypt, rsa_encrypt, rsa_keypair
class TestRSA(TestCase):
def test_rsa_encrypt(self) -> None:
# Arrange
- message = "1118632206964768372384343373859700232583178373031391293942056347262996938448167273037401292830794700541756937515976417908858473208686448118280677278101719098670646913045584007219899471676906742553167177135624664615778816843133781654175330682468454244343379"
+ message = "9893e1c926521dc595d501056d03c4387b87986089539349bed6eb1018229b2e0029dd38647bfc80746726b3710c8ac3f69187da2234b438370a4348a784791813b9857446eb14afc676eece5b789a207bcf633ba1676d3410913ae46dd247166c6a682cb0ccc5ecde53"
# Act
key_pair = rsa_keypair()
|
Better handling of JSON serialization of large integers
Right now, the JSON serialization output for bigints sometimes uses strings and sometimes uses ints. This isn't a problem when working in Python3, since all ints are bigints. The rest of the world isn't so pleasant. If we want our structures to play nicely in JavaScript and elsewhere, we should make sure that ElementModP and ElementModQ always serialize to strings.
|
0.0
|
682ac7f19dbeb3025380a06d93ae74810f9d5c78
|
[
"tests/test_rsa.py::TestRSA::test_rsa_encrypt"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-09-08 18:52:32+00:00
|
mit
| 3,898 |
|
microsoft__electionguard-python-169
|
diff --git a/src/electionguard/serializable.py b/src/electionguard/serializable.py
index ffd69ca..25ce0d5 100644
--- a/src/electionguard/serializable.py
+++ b/src/electionguard/serializable.py
@@ -1,7 +1,7 @@
from dataclasses import dataclass
from datetime import datetime
from os import path
-from typing import Any, cast, TypeVar, Generic
+from typing import Any, cast, Type, TypeVar
from jsons import (
dump,
@@ -17,6 +17,7 @@ from jsons import (
default_nonetype_deserializer,
)
+S = TypeVar("S", bound="Serializable")
T = TypeVar("T")
JSON_FILE_EXTENSION: str = ".json"
@@ -28,7 +29,7 @@ KEYS_TO_REMOVE = ["from_json", "from_json_file", "from_json_object"]
@dataclass
-class Serializable(Generic[T]):
+class Serializable:
"""
Serializable class with methods to convert to json
"""
@@ -61,35 +62,29 @@ class Serializable(Generic[T]):
write_json_file(self, file_name, file_path, strip_privates)
@classmethod
- def from_json(cls, data: str) -> T:
+ def from_json(cls: Type[S], data: str) -> S:
"""
Deserialize the provided data string into the specified instance
:param data: JSON string
"""
- set_deserializers()
- return cast(T, loads(data, cls))
+ return read_json(data, cls)
@classmethod
- def from_json_object(cls, data: object) -> T:
+ def from_json_object(cls: Type[S], data: object) -> S:
"""
Deserialize the provided data object into the specified instance
:param data: JSON object
"""
- set_deserializers()
- return cast(T, load(data, cls))
+ return read_json_object(data, cls)
@classmethod
- def from_json_file(cls, file_name: str, file_path: str = "") -> T:
+ def from_json_file(cls: Type[S], file_name: str, file_path: str = "") -> S:
"""
Deserialize the provided file into the specified instance
:param file_name: File name
:param file_path: File path
"""
- json_file_path: str = path.join(file_path, file_name + JSON_FILE_EXTENSION)
- with open(json_file_path, READ) as json_file:
- data = json_file.read()
- target = cls.from_json(data)
- return target
+ return read_json_file(cls, file_name, file_path)
def _remove_key(obj: Any, key_to_remove: str) -> Any:
@@ -169,6 +164,44 @@ def write_json_file(
json_file.write(write_json(object_to_write, strip_privates))
+def read_json(data: Any, class_out: Type[T]) -> T:
+ """
+ Deserialize json file to object
+ :param data: Json file data
+ :param class_out: Object type
+ :return: Deserialized object
+ """
+ set_deserializers()
+ return cast(T, loads(data, class_out))
+
+
+def read_json_object(data: Any, class_out: Type[T]) -> T:
+ """
+ Deserialize json file to object
+ :param data: Json file data
+ :param class_out: Object type
+ :return: Deserialized object
+ """
+ set_deserializers()
+ return cast(T, load(data, class_out))
+
+
+def read_json_file(class_out: Type[T], file_name: str, file_path: str = "") -> T:
+ """
+ Deserialize json file to object
+ :param class_out: Object type
+ :param file_name: File name
+ :param file_path: File path
+ :return: Deserialized object
+ """
+ set_deserializers()
+ json_file_path: str = path.join(file_path, file_name + JSON_FILE_EXTENSION)
+ with open(json_file_path, READ) as json_file:
+ data = json_file.read()
+ target: T = read_json(data, class_out)
+ return target
+
+
def set_serializers() -> None:
"""Set serializers for jsons to use to cast specific classes"""
|
microsoft/electionguard-python
|
2de8ec2656fbee60e332dadfba2a82e496dd98b4
|
diff --git a/tests/test_serializable.py b/tests/test_serializable.py
index e64ab95..c4a2682 100644
--- a/tests/test_serializable.py
+++ b/tests/test_serializable.py
@@ -1,74 +1,93 @@
+from dataclasses import dataclass
from unittest import TestCase
+from typing import Any, List, Optional
from os import remove
from electionguard.serializable import (
set_deserializers,
set_serializers,
+ read_json,
+ read_json_file,
+ read_json_object,
+ write_json,
write_json_file,
write_json_object,
- write_json,
)
+@dataclass
+class NestedModel:
+ """Nested model for testing"""
+
+ test: int
+ from_json_file: Optional[Any] = None
+
+
+@dataclass
+class DataModel:
+ """Data model for testing"""
+
+ test: int
+ nested: NestedModel
+ array: List[NestedModel]
+ from_json_file: Optional[Any] = None
+
+
+JSON_DATA: DataModel = DataModel(
+ test=1, nested=NestedModel(test=1), array=[NestedModel(test=1)]
+)
+EXPECTED_JSON_STRING = '{"array": [{"test": 1}], "nested": {"test": 1}, "test": 1}'
+EXPECTED_JSON_OBJECT = {
+ "test": 1,
+ "nested": {"test": 1},
+ "array": [{"test": 1}],
+}
+
+
class TestSerializable(TestCase):
- def test_write_json(self) -> None:
- # Arrange
- json_data = {
- "from_json_file": {},
- "test": 1,
- "nested": {"from_json_file": {}, "test": 1},
- "array": [{"from_json_file": {}, "test": 1}],
- }
- expected_json_string = (
- '{"test": 1, "nested": {"test": 1}, "array": [{"test": 1}]}'
- )
+ def test_read_and_write_json(self) -> None:
+ # Act
+ json_string = write_json(JSON_DATA)
+
+ # Assert
+ self.assertEqual(json_string, EXPECTED_JSON_STRING)
# Act
- json_string = write_json(json_data)
+ read_json_data = read_json(json_string, DataModel)
# Assert
- self.assertEqual(json_string, expected_json_string)
+ self.assertEqual(read_json_data, JSON_DATA)
- def test_write_json_object(self) -> None:
- # Arrange
- json_data = {
- "from_json_file": {},
- "test": 1,
- "nested": {"from_json_file": {}, "test": 1},
- "array": [{"from_json_file": {}, "test": 1}],
- }
- expected_json_object = {
- "test": 1,
- "nested": {"test": 1},
- "array": [{"test": 1}],
- }
+ def test_read_and_write_json_object(self) -> None:
+ # Act
+ json_object = write_json_object(JSON_DATA)
+
+ # Assert
+ self.assertEqual(json_object, EXPECTED_JSON_OBJECT)
# Act
- json_object = write_json_object(json_data)
+ read_json_data = read_json_object(json_object, DataModel)
# Assert
- self.assertEqual(json_object, expected_json_object)
+ self.assertEqual(read_json_data, JSON_DATA)
- def test_write_json_file(self) -> None:
+ def test_read_and_write_json_file(self) -> None:
# Arrange
- json_data = {
- "from_json_file": {},
- "test": 1,
- "nested": {"from_json_file": {}, "test": 1},
- "array": [{"from_json_file": {}, "test": 1}],
- }
- expected_json_data = (
- '{"test": 1, "nested": {"test": 1}, "array": [{"test": 1}]}'
- )
file_name = "json_write_test"
json_file = file_name + ".json"
# Act
- write_json_file(json_data, file_name)
+ write_json_file(JSON_DATA, file_name)
# Assert
with open(json_file) as reader:
- self.assertEqual(reader.read(), expected_json_data)
+ self.assertEqual(reader.read(), EXPECTED_JSON_STRING)
+
+ # Act
+ read_json_data = read_json_file(DataModel, file_name)
+
+ # Assert
+ self.assertEqual(read_json_data, JSON_DATA)
# Cleanup
remove(json_file)
|
Add functional methods for deserialization
Serialization has functional methods that allow for parsing out to objects but we are missing the reverse to parse into objects.
```python
def read_json_object(data: Any, generic_type: T) -> T:
set_deserializers()
return cast(T, load(data, generic_type))
```
|
0.0
|
2de8ec2656fbee60e332dadfba2a82e496dd98b4
|
[
"tests/test_serializable.py::TestSerializable::test_read_and_write_json",
"tests/test_serializable.py::TestSerializable::test_read_and_write_json_file",
"tests/test_serializable.py::TestSerializable::test_read_and_write_json_object",
"tests/test_serializable.py::TestSerializable::test_setup_deserialization",
"tests/test_serializable.py::TestSerializable::test_setup_serialization"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-09-10 12:54:54+00:00
|
mit
| 3,899 |
|
microsoft__electionguard-python-189
|
diff --git a/src/electionguard/ballot.py b/src/electionguard/ballot.py
index b45c4fe..cbe7fc4 100644
--- a/src/electionguard/ballot.py
+++ b/src/electionguard/ballot.py
@@ -850,7 +850,7 @@ def make_ciphertext_ballot(
contest_hashes = [contest.crypto_hash for contest in contests]
contest_hash = hash_elems(object_id, description_hash, *contest_hashes)
- timestamp = to_ticks(datetime.utcnow()) if timestamp is None else timestamp
+ timestamp = to_ticks(datetime.now()) if timestamp is None else timestamp
if previous_tracking_hash is None:
previous_tracking_hash = description_hash
if tracking_hash is None:
diff --git a/src/electionguard/utils.py b/src/electionguard/utils.py
index 20b12c8..11deea4 100644
--- a/src/electionguard/utils.py
+++ b/src/electionguard/utils.py
@@ -1,4 +1,4 @@
-from datetime import datetime
+from datetime import datetime, timezone
from os import mkdir, path
from re import sub
from typing import Callable, Optional, TypeVar
@@ -66,14 +66,18 @@ def flatmap_optional(optional: Optional[T], mapper: Callable[[T], U]) -> Optiona
def to_ticks(date_time: datetime) -> int:
"""
- Return the number of ticks for a date time
+ Return the number of ticks for a date time.
+ Ticks are defined here as number of seconds since the unix epoch (00:00:00 UTC on 1 January 1970)
:param date_time: Date time to convert
:return: number of ticks
"""
- t0 = datetime(1, 1, 1)
- seconds = int((date_time - t0).total_seconds())
- ticks = seconds * 10 ** 7
- return ticks
+
+ ticks = (
+ date_time.timestamp()
+ if date_time.tzinfo
+ else date_time.astimezone(timezone.utc).timestamp()
+ )
+ return int(ticks)
def space_between_capitals(base: str) -> str:
|
microsoft/electionguard-python
|
e5c66332dedf116e5f905dcb84648ceef83a61f4
|
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 1b284eb..9d978ca 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -1,13 +1,53 @@
from unittest import TestCase
-from datetime import datetime
+from datetime import datetime, timedelta, timezone
from electionguard.utils import to_ticks
class TestUtils(TestCase):
- def test_conversion_to_ticks(self):
+ def test_conversion_to_ticks_from_utc(self):
# Act
- ticks = to_ticks(datetime.utcnow())
+ ticks = to_ticks(datetime.now(timezone.utc))
self.assertIsNotNone(ticks)
self.assertGreater(ticks, 0)
+
+ def test_conversion_to_ticks_from_local(self):
+ # Act
+ ticks = to_ticks(datetime.now())
+
+ self.assertIsNotNone(ticks)
+ self.assertGreater(ticks, 0)
+
+ def test_conversion_to_ticks_with_tz(self):
+ # Arrange
+ now = datetime.now()
+ now_with_tz = (now).astimezone()
+ now_utc_with_tz = now_with_tz.astimezone(timezone.utc)
+
+ # Act
+ ticks_now = to_ticks(now)
+ ticks_local = to_ticks(now_with_tz)
+ ticks_utc = to_ticks(now_utc_with_tz)
+
+ # Assert
+ self.assertIsNotNone(ticks_now)
+ self.assertIsNotNone(ticks_local)
+ self.assertIsNotNone(ticks_utc)
+
+ # Ensure all three tick values are the same
+ unique_ticks = set([ticks_now, ticks_local, ticks_utc])
+ self.assertEqual(1, len(unique_ticks))
+ self.assertTrue(ticks_now in unique_ticks)
+
+ def test_conversion_to_ticks_produces_valid_epoch(self):
+ # Arrange
+ now = datetime.now()
+
+ # Act
+ ticks_now = to_ticks(now)
+ now_from_ticks = datetime.fromtimestamp(ticks_now)
+
+ # Assert
+ # Values below seconds are dropped from the epoch
+ self.assertEqual(now.replace(microsecond=0), now_from_ticks)
|
🐛 Datetime tick generation is inconsistent
When a ballot is encrypted using this library, and a `timestamp` is not provided, the tracking hash is computed with `utils.to_ticks(datetime.utcnow())`. Both `to_ticks` and `utcnow` have some problems:
- `to_ticks` only works on naive timezone-unaware datetimes (e.g. created with `now` or `utcnow`). Using it on deserialized ISO-8601 timestamps will fail
- `to_ticks` is based on the date `0001-01-01T00:00:00` in naive timezone-unaware form, which:
- creates very large tick values and risks int overflow
- is the absolute minimum that datetime can represent, and attempts to normalize to a timezone cause errors
- `to_ticks` returns the number of seconds * 10e7, which is one one hundred millionth of a second - in between microseconds and nanoseconds. This inflates the size of the tick number massively, and is a non-standard unit.
- `datetime.utcnow` produces the date/time in UTC, [but doesn't capture the timezone](https://docs.python.org/3/library/datetime.html#datetime.datetime.utcnow). Most other `datetime` logic (including the time math currently in `to_ticks`) assumes this is a local time, and performs the wrong calculations.
To address this, we've decided to switch to using the unix epoch (seconds since January 1, 1970 UTC). It's unambiguous and very common.
|
0.0
|
e5c66332dedf116e5f905dcb84648ceef83a61f4
|
[
"tests/test_utils.py::TestUtils::test_conversion_to_ticks_from_utc",
"tests/test_utils.py::TestUtils::test_conversion_to_ticks_produces_valid_epoch",
"tests/test_utils.py::TestUtils::test_conversion_to_ticks_with_tz"
] |
[
"tests/test_utils.py::TestUtils::test_conversion_to_ticks_from_local"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-09-28 19:05:25+00:00
|
mit
| 3,900 |
|
microsoft__electionguard-python-191
|
diff --git a/src/electionguard/serializable.py b/src/electionguard/serializable.py
index 25ce0d5..e22cedf 100644
--- a/src/electionguard/serializable.py
+++ b/src/electionguard/serializable.py
@@ -1,5 +1,6 @@
from dataclasses import dataclass
from datetime import datetime
+import re
from os import path
from typing import Any, cast, Type, TypeVar
@@ -236,4 +237,14 @@ def set_deserializers() -> None:
NoneType,
)
- set_deserializer(lambda dt, cls, **_: datetime.fromisoformat(dt), datetime)
+ set_deserializer(lambda dt, cls, **_: _deserialize_datetime(dt), datetime)
+
+
+def _deserialize_datetime(value: str) -> datetime:
+ """
+ The `fromisoformat` function doesn't recognize the Z (Zulu) suffix
+ to indicate UTC. For compatibility with more external clients, we
+ should allow it.
+ """
+ tz_corrected = re.sub("Z$", "+00:00", value)
+ return datetime.fromisoformat(tz_corrected)
|
microsoft/electionguard-python
|
35a76a43cbe08515f94fd69b9637555699555978
|
diff --git a/tests/test_serializable.py b/tests/test_serializable.py
index c4a2682..1c86bf0 100644
--- a/tests/test_serializable.py
+++ b/tests/test_serializable.py
@@ -1,4 +1,5 @@
from dataclasses import dataclass
+from datetime import datetime, timezone
from unittest import TestCase
from typing import Any, List, Optional
from os import remove
@@ -30,21 +31,48 @@ class DataModel:
test: int
nested: NestedModel
array: List[NestedModel]
+ datetime: datetime
from_json_file: Optional[Any] = None
JSON_DATA: DataModel = DataModel(
- test=1, nested=NestedModel(test=1), array=[NestedModel(test=1)]
+ test=1,
+ nested=NestedModel(test=1),
+ datetime=datetime(2020, 9, 28, 20, 11, 31, tzinfo=timezone.utc),
+ array=[NestedModel(test=1)],
)
-EXPECTED_JSON_STRING = '{"array": [{"test": 1}], "nested": {"test": 1}, "test": 1}'
+EXPECTED_JSON_STRING = '{"array": [{"test": 1}], "datetime": "2020-09-28T20:11:31+00:00", "nested": {"test": 1}, "test": 1}'
EXPECTED_JSON_OBJECT = {
"test": 1,
+ "datetime": "2020-09-28T20:11:31+00:00",
"nested": {"test": 1},
"array": [{"test": 1}],
}
class TestSerializable(TestCase):
+ def test_read_iso_date(self) -> None:
+ # Arrange
+ target_date = datetime(2020, 9, 28, 20, 11, 31, tzinfo=timezone.utc)
+ representations = [
+ # UTC
+ '"2020-09-28T20:11:31+00:00"',
+ '"2020-09-28T20:11:31.000+00:00"',
+ '"2020-09-28T20:11:31.000Z"',
+ '"2020-09-28T20:11:31Z"',
+ # Other time zone
+ '"2020-09-28T21:11:31+01:00"',
+ '"2020-09-28T21:11:31.000+01:00"',
+ ]
+
+ # Act
+ results = [read_json(value, datetime) for value in representations]
+
+ # Assert
+ # expected_timestamp = target_date.timestamp()
+ for result in results:
+ self.assertEqual(target_date, result)
+
def test_read_and_write_json(self) -> None:
# Act
json_string = write_json(JSON_DATA)
|
✨ Deserialize ISO-8601 dates with `Z`
We're using `isoformat` and `fromisoformat` to serialize/deserialize datetimes, [which unfortunately doesn't handle the `Z` formatting for UTC](https://stackoverflow.com/questions/19654578/python-utc-datetime-objects-iso-format-doesnt-include-z-zulu-or-zero-offset).
When deserializing datetimes, we would like **both** offsets (e.g. `-05:00`) and `Z` to be supported.
|
0.0
|
35a76a43cbe08515f94fd69b9637555699555978
|
[
"tests/test_serializable.py::TestSerializable::test_read_iso_date"
] |
[
"tests/test_serializable.py::TestSerializable::test_read_and_write_json",
"tests/test_serializable.py::TestSerializable::test_read_and_write_json_file",
"tests/test_serializable.py::TestSerializable::test_read_and_write_json_object",
"tests/test_serializable.py::TestSerializable::test_setup_deserialization",
"tests/test_serializable.py::TestSerializable::test_setup_serialization"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-09-28 20:31:30+00:00
|
mit
| 3,901 |
|
microsoft__electionguard-python-616
|
diff --git a/src/electionguard/__init__.py b/src/electionguard/__init__.py
index d3df433..81e0c42 100644
--- a/src/electionguard/__init__.py
+++ b/src/electionguard/__init__.py
@@ -211,6 +211,7 @@ from electionguard.elgamal import (
hashed_elgamal_encrypt,
)
from electionguard.encrypt import (
+ ContestData,
EncryptionDevice,
EncryptionMediator,
contest_from,
@@ -338,6 +339,9 @@ from electionguard.schnorr import (
make_schnorr_proof,
)
from electionguard.serialize import (
+ PAD_INDICATOR_SIZE,
+ PaddedDataSize,
+ TruncationError,
construct_path,
from_file,
from_file_wrapper,
@@ -345,6 +349,8 @@ from electionguard.serialize import (
from_list_in_file_wrapper,
from_raw,
get_schema,
+ padded_decode,
+ padded_encode,
to_file,
to_raw,
)
@@ -371,6 +377,9 @@ from electionguard.type import (
VerifierId,
)
from electionguard.utils import (
+ BYTE_ENCODING,
+ BYTE_ORDER,
+ ContestErrorType,
flatmap_optional,
get_optional,
get_or_else_optional,
@@ -384,6 +393,8 @@ from electionguard.utils import (
__all__ = [
"AnnotatedString",
+ "BYTE_ENCODING",
+ "BYTE_ORDER",
"BackupVerificationState",
"BallotBox",
"BallotBoxState",
@@ -415,8 +426,10 @@ __all__ = [
"Configuration",
"ConstantChaumPedersenProof",
"ContactInformation",
+ "ContestData",
"ContestDescription",
"ContestDescriptionWithPlaceholders",
+ "ContestErrorType",
"ContestId",
"CryptoHashCheckable",
"CryptoHashable",
@@ -476,6 +489,8 @@ __all__ = [
"NO_VOTE",
"Nonces",
"OrderedObjectBase",
+ "PAD_INDICATOR_SIZE",
+ "PaddedDataSize",
"Party",
"PlaintextBallot",
"PlaintextBallotContest",
@@ -503,6 +518,7 @@ __all__ = [
"SelectionId",
"Singleton",
"SubmittedBallot",
+ "TruncationError",
"VerifierId",
"VoteVariationType",
"YES_VOTE",
@@ -654,6 +670,8 @@ __all__ = [
"mult_q",
"negate_q",
"nonces",
+ "padded_decode",
+ "padded_encode",
"partially_decrypt",
"pow_p",
"pow_q",
diff --git a/src/electionguard/encrypt.py b/src/electionguard/encrypt.py
index 3177377..a42df32 100644
--- a/src/electionguard/encrypt.py
+++ b/src/electionguard/encrypt.py
@@ -1,8 +1,9 @@
from datetime import datetime
-from dataclasses import dataclass
-from typing import List, Optional
+from dataclasses import dataclass, field
+from typing import Dict, List, Optional, Type, TypeVar
from uuid import getnode
+
from .ballot import (
CiphertextBallot,
CiphertextBallotContest,
@@ -18,6 +19,7 @@ from .ballot import (
from .ballot_code import get_hash_for_device
from .election import CiphertextElectionContext
from .elgamal import ElGamalPublicKey, elgamal_encrypt
+from .serialize import PaddedDataSize, padded_decode, padded_encode
from .group import ElementModQ, rand_q
from .logs import log_info, log_warning
from .manifest import (
@@ -27,7 +29,28 @@ from .manifest import (
SelectionDescription,
)
from .nonces import Nonces
-from .utils import get_optional, get_or_else_optional_func
+from .type import SelectionId
+from .utils import get_optional, get_or_else_optional_func, ContestErrorType
+
+
+_T = TypeVar("_T", bound="ContestData")
+CONTEST_DATA_SIZE: PaddedDataSize = PaddedDataSize.Bytes_512
+
+
+@dataclass
+class ContestData:
+ """Contests errors and extended data from the selections on the contest."""
+
+ error: Optional[ContestErrorType] = field(default=None)
+ error_data: Optional[List[SelectionId]] = field(default=None)
+ write_ins: Optional[Dict[SelectionId, str]] = field(default=None)
+
+ @classmethod
+ def from_bytes(cls: Type[_T], data: bytes) -> _T:
+ return padded_decode(cls, data, CONTEST_DATA_SIZE)
+
+ def to_bytes(self) -> bytes:
+ return padded_encode(self, CONTEST_DATA_SIZE)
@dataclass
diff --git a/src/electionguard/hash.py b/src/electionguard/hash.py
index 9ecc52b..3b0daf6 100644
--- a/src/electionguard/hash.py
+++ b/src/electionguard/hash.py
@@ -12,6 +12,7 @@ from typing import (
)
from .constants import get_small_prime
+from .utils import BYTE_ENCODING, BYTE_ORDER
from .group import (
ElementModPOrQ,
ElementModQ,
@@ -68,7 +69,7 @@ def hash_elems(*a: CryptoHashableAll) -> ElementModQ:
:return: A cryptographic hash of these elements, concatenated.
"""
h = sha256()
- h.update("|".encode("utf-8"))
+ h.update("|".encode(BYTE_ENCODING))
for x in a:
# We could just use str(x) for everything, but then we'd have a resulting string
# that's a bit Python-specific, and we'd rather make it easier for other languages
@@ -95,6 +96,8 @@ def hash_elems(*a: CryptoHashableAll) -> ElementModQ:
else:
hash_me = str(x)
- h.update((hash_me + "|").encode("utf-8"))
+ h.update((hash_me + "|").encode(BYTE_ENCODING))
- return ElementModQ(int.from_bytes(h.digest(), byteorder="big") % get_small_prime())
+ return ElementModQ(
+ int.from_bytes(h.digest(), byteorder=BYTE_ORDER) % get_small_prime()
+ )
diff --git a/src/electionguard/serialize.py b/src/electionguard/serialize.py
index 91943bc..b9b3c4e 100644
--- a/src/electionguard/serialize.py
+++ b/src/electionguard/serialize.py
@@ -1,4 +1,5 @@
from datetime import datetime
+from enum import IntEnum
from io import TextIOWrapper
import json
import os
@@ -7,26 +8,32 @@ from typing import Any, List, Type, TypeVar, Union
from dacite import Config, from_dict
from pydantic.json import pydantic_encoder
-from pydantic.tools import parse_raw_as, schema_json_of
+from pydantic.tools import schema_json_of, parse_raw_as
from .big_integer import BigInteger
from .ballot_box import BallotBoxState
-from .manifest import ElectionType, ReportingUnitType, VoteVariationType
from .group import ElementModP, ElementModQ
+from .manifest import ElectionType, ReportingUnitType, VoteVariationType
from .proof import ProofUsage
+from .utils import BYTE_ENCODING, BYTE_ORDER, ContestErrorType
+
+
+_PAD_BYTE = b"\x00"
+PAD_INDICATOR_SIZE = 2
_T = TypeVar("_T")
_indent = 2
-_encoding = "utf-8"
_file_extension = "json"
_config = Config(
cast=[
datetime,
BigInteger,
+ ContestErrorType,
ElementModP,
ElementModQ,
+ ElectionType,
BallotBoxState,
ElectionType,
ReportingUnitType,
@@ -37,6 +44,50 @@ _config = Config(
)
+class PaddedDataSize(IntEnum):
+ """Define the sizes for padded data."""
+
+ Bytes_512 = 512 - PAD_INDICATOR_SIZE
+
+
+class TruncationError(ValueError):
+ """A specific truncation error to indicate when padded data is truncated."""
+
+
+def padded_encode(data: Any, size: PaddedDataSize) -> bytes:
+ return _add_padding(to_raw(data).encode(BYTE_ENCODING), size)
+
+
+def padded_decode(type_: Type[_T], padded_data: bytes, size: PaddedDataSize) -> _T:
+ return from_raw(type_, _remove_padding(padded_data, size))
+
+
+def _add_padding(
+ message: bytes, size: PaddedDataSize, allow_truncation: bool = False
+) -> bytes:
+ """Add padding to message in bytes."""
+ message_length = len(message)
+ if message_length > size:
+ if allow_truncation:
+ message_length = size
+ else:
+ raise TruncationError(
+ "Padded data exceeds allowed padded data size of {size}."
+ )
+ padding_length = size - message_length
+ leading_byte = padding_length.to_bytes(PAD_INDICATOR_SIZE, byteorder=BYTE_ORDER)
+ padded = leading_byte + message[:message_length] + _PAD_BYTE * padding_length
+ return padded
+
+
+def _remove_padding(padded: bytes, size: PaddedDataSize) -> bytes:
+ """Remove padding from padded message in bytes."""
+
+ padding_length = int.from_bytes(padded[:PAD_INDICATOR_SIZE], byteorder=BYTE_ORDER)
+ message_end = size + PAD_INDICATOR_SIZE - padding_length
+ return padded[PAD_INDICATOR_SIZE:message_end]
+
+
def construct_path(
target_file_name: str,
target_path: str = "",
@@ -48,13 +99,15 @@ def construct_path(
return os.path.join(target_path, target_file)
-def from_raw(type_: Type[_T], obj: Any) -> _T:
- """Deserialize raw as type."""
+def from_raw(type_: Type[_T], raw: Union[str, bytes]) -> _T:
+ """Deserialize raw json string as type."""
- return parse_raw_as(type_, obj)
+ if type_ is datetime:
+ return parse_raw_as(type_, raw)
+ return from_dict(type_, json.loads(raw), _config)
-def to_raw(data: Any) -> Any:
+def to_raw(data: Any) -> str:
"""Serialize data to raw json format."""
return json.dumps(data, indent=_indent, default=pydantic_encoder)
@@ -70,7 +123,7 @@ def from_file_wrapper(type_: Type[_T], file: TextIOWrapper) -> _T:
def from_file(type_: Type[_T], path: Union[str, Path]) -> _T:
"""Deserialize json file as type."""
- with open(path, "r", encoding=_encoding) as json_file:
+ with open(path, "r", encoding=BYTE_ENCODING) as json_file:
data = json.load(json_file)
return from_dict(type_, data, _config)
@@ -78,7 +131,7 @@ def from_file(type_: Type[_T], path: Union[str, Path]) -> _T:
def from_list_in_file(type_: Type[_T], path: Union[str, Path]) -> List[_T]:
"""Deserialize json file that has an array of certain type."""
- with open(path, "r", encoding=_encoding) as json_file:
+ with open(path, "r", encoding=BYTE_ENCODING) as json_file:
data = json.load(json_file)
ls: List[_T] = []
for item in data:
@@ -110,7 +163,7 @@ def to_file(
with open(
path,
"w",
- encoding=_encoding,
+ encoding=BYTE_ENCODING,
) as outfile:
json.dump(data, outfile, indent=_indent, default=pydantic_encoder)
return path
diff --git a/src/electionguard/utils.py b/src/electionguard/utils.py
index e2c9b8a..eb28ffa 100644
--- a/src/electionguard/utils.py
+++ b/src/electionguard/utils.py
@@ -1,4 +1,5 @@
from datetime import datetime, timezone
+from enum import Enum
from re import sub
from typing import Callable, Optional, TypeVar
from base64 import b16decode
@@ -6,6 +7,17 @@ from base64 import b16decode
_T = TypeVar("_T")
_U = TypeVar("_U")
+BYTE_ORDER = "big"
+BYTE_ENCODING = "utf-8"
+
+
+class ContestErrorType(Enum):
+ """Various errors that can occur on ballots contest after voting."""
+
+ NullVote = "nullvote"
+ UnderVote = "undervote"
+ OverVote = "overvote"
+
def get_optional(optional: Optional[_T]) -> _T:
"""
|
microsoft/electionguard-python
|
93efee11d2e80c15b15212aac37ec4d4a9f22f2b
|
diff --git a/tests/unit/test_encrypt.py b/tests/unit/test_encrypt.py
new file mode 100644
index 0000000..c163877
--- /dev/null
+++ b/tests/unit/test_encrypt.py
@@ -0,0 +1,51 @@
+from unittest import TestCase
+
+from electionguard.encrypt import ContestData, ContestErrorType
+from electionguard.serialize import TruncationError, to_raw
+
+
+class TestEncrypt(TestCase):
+ """Test encryption"""
+
+ def test_contest_data_conversion(self) -> None:
+ """Test contest data encoding to padded to bytes then decoding."""
+
+ # Arrange
+ error = ContestErrorType.OverVote
+ error_data = ["overvote-id-1", "overvote-id-2", "overvote-id-3"]
+ write_ins = {
+ "writein-id-1": "Teri Dactyl",
+ "writein-id-2": "Allie Grater",
+ "writein-id-3": "Anna Littlical",
+ "writein-id-4": "Polly Wannakrakouer",
+ }
+ overflow_error_data = ["overflow-id" * 50]
+
+ empty_contest_data = ContestData()
+ write_in_contest_data = ContestData(write_ins=write_ins)
+ overvote_contest_data = ContestData(error, error_data)
+ overvote_and_write_in_contest_data = ContestData(error, error_data, write_ins)
+ overflow_contest_data = ContestData(error, overflow_error_data, write_ins)
+
+ # Act & Assert
+ self._padding_cycle(empty_contest_data)
+ self._padding_cycle(write_in_contest_data)
+ self._padding_cycle(overvote_contest_data)
+ self._padding_cycle(overvote_and_write_in_contest_data)
+ self._padding_cycle(overflow_contest_data)
+
+ def _padding_cycle(self, data: ContestData) -> None:
+ """Run full cycle of padding and unpadding."""
+ EXPECTED_PADDED_LENGTH = 512
+
+ try:
+ padded = data.to_bytes()
+ unpadded = ContestData.from_bytes(padded)
+
+ self.assertEqual(EXPECTED_PADDED_LENGTH, len(padded))
+ self.assertEqual(data, unpadded)
+
+ except TruncationError:
+ # Validate JSON exceeds allowed length
+ json = to_raw(data)
+ self.assertLess(EXPECTED_PADDED_LENGTH, len(json))
|
✨ Contest Data and Encoding
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Suggestion
Contest Data and Padding are a prerequisites for issues regarding #385 and secret sharing. This also should help center some of the encoding information to a single source.
### Possible Implementation
_No response_
### Anything else?
_No response_
|
0.0
|
93efee11d2e80c15b15212aac37ec4d4a9f22f2b
|
[
"tests/unit/test_encrypt.py::TestEncrypt::test_contest_data_conversion"
] |
[] |
{
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-04-29 20:56:18+00:00
|
mit
| 3,902 |
|
microsoft__electionguard-python-654
|
diff --git a/src/electionguard/big_integer.py b/src/electionguard/big_integer.py
index efacaa5..80f72e9 100644
--- a/src/electionguard/big_integer.py
+++ b/src/electionguard/big_integer.py
@@ -9,7 +9,9 @@ from .utils import BYTE_ORDER
def _hex_to_int(input: str) -> int:
"""Given a hex string representing bytes, returns an int."""
- return int(input, 16)
+ valid_bytes = input[1:] if (len(input) % 2 != 0 and input[0] == "0") else input
+ hex_bytes = bytes.fromhex(valid_bytes)
+ return int.from_bytes(hex_bytes, BYTE_ORDER)
def _int_to_hex(input: int) -> str:
@@ -33,8 +35,8 @@ _zero = mpz(0)
def _convert_to_element(data: Union[int, str]) -> Tuple[str, int]:
"""Convert element to consistent types"""
if isinstance(data, str):
- hex = data
integer = _hex_to_int(data)
+ hex = _int_to_hex(integer)
else:
hex = _int_to_hex(data)
integer = data
|
microsoft/electionguard-python
|
f3c02b233c2d46d7d7b6e66e402f8c7a131e356d
|
diff --git a/tests/property/test_hash.py b/tests/property/test_hash.py
index 899dc79..540648e 100644
--- a/tests/property/test_hash.py
+++ b/tests/property/test_hash.py
@@ -1,10 +1,13 @@
from typing import List, Optional
from hypothesis import given
+from hypothesis.strategies import integers
+
from tests.base_test_case import BaseTestCase
-from electionguard.group import ElementModQ
+from electionguard.big_integer import BigInteger
+from electionguard.group import ElementModP, ElementModQ
from electionguard.hash import hash_elems
from electionguard_tools.strategies.group import elements_mod_p, elements_mod_q
@@ -28,6 +31,43 @@ class TestHash(BaseTestCase):
if ha != hb:
self.assertNotEqual(a, b)
+ @given(elements_mod_p(), integers(min_value=0, max_value=10))
+ def test_hash_of_big_integer_with_leading_zero_bytes(
+ self, input: ElementModP, multiplier: int
+ ) -> None:
+ """Test hashing of larger integers with leading zero bytes"""
+
+ # Arrange.
+ zero_byte = "00"
+ input_hash = hash_elems(input)
+ leading_zeroes = zero_byte * multiplier + input.to_hex()
+
+ # Act.
+ leading_zeroes_big_int = BigInteger(leading_zeroes)
+ leading_zeroes_hash = hash_elems(leading_zeroes_big_int)
+
+ # Assert.
+ self.assertEqual(input, leading_zeroes_big_int)
+ self.assertEqual(input_hash, leading_zeroes_hash)
+
+ @given(elements_mod_p())
+ def test_hash_of_big_integer_with_single_leading_zero(
+ self, input: ElementModP
+ ) -> None:
+ """Test hashing of big integer with a single leading zero creating an invalid hex byte reprsentation."""
+
+ # Arrange.
+ invalid_hex = "0" + input.to_hex()
+ input_hash = hash_elems(input)
+
+ # Act.
+ invalid_hex_big_int = BigInteger(invalid_hex)
+ invalid_hex_hash = hash_elems(invalid_hex_big_int)
+
+ # Assert.
+ self.assertEqual(input, invalid_hex_big_int)
+ self.assertEqual(input_hash, invalid_hex_hash)
+
def test_hash_for_zero_number_is_zero_string(self):
self.assertEqual(hash_elems(0), hash_elems("0"))
|
For ElementModQ, q1 == q2 does not imply that hash(q1) == hash(q2)
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Behavior
For ElementModQ, q1 == q2 does not imply that hash(q1) == hash(q2)
```
def testHexToQ(self) -> None :
s1q = hex_to_q("C49A1E8053FBA95F6B7CD3F3B30B101CDD595C435A46AECF2872F47F1C601206")
s2q = hex_to_q("0C49A1E8053FBA95F6B7CD3F3B30B101CDD595C435A46AECF2872F47F1C601206")
self.assertEqual(s1q, s2q)
self.assertNotEqual(hash_elems(s1q), hash_elems(s2q))
self.assertNotEqual(s1q.to_hex(), s2q.to_hex())
```
its suprising that leading zeroes are retained, even past the 64 char maximum:
```
def testHexString(self) -> None :
self.show("A1E8053FBA95F6B7CD3F3B30B101CDD595C435A46AECF2872F47F1C601206")
self.show("9A1E8053FBA95F6B7CD3F3B30B101CDD595C435A46AECF2872F47F1C601206")
self.show("49A1E8053FBA95F6B7CD3F3B30B101CDD595C435A46AECF2872F47F1C601206")
self.show("C49A1E8053FBA95F6B7CD3F3B30B101CDD595C435A46AECF2872F47F1C601206")
self.show("0C49A1E8053FBA95F6B7CD3F3B30B101CDD595C435A46AECF2872F47F1C601206")
self.show("00C49A1E8053FBA95F6B7CD3F3B30B101CDD595C435A46AECF2872F47F1C601206")
self.show("000C49A1E8053FBA95F6B7CD3F3B30B101CDD595C435A46AECF2872F47F1C601206")
self.show("0000C49A1E8053FBA95F6B7CD3F3B30B101CDD595C435A46AECF2872F47F1C601206")
def show(self, s1 : str) -> None :
s1q = hex_to_q(s1)
print(f" len = {len(s1)} hex = {s1q.to_hex()}")
```
gives:
```
len = 61 hex = A1E8053FBA95F6B7CD3F3B30B101CDD595C435A46AECF2872F47F1C601206
len = 62 hex = 9A1E8053FBA95F6B7CD3F3B30B101CDD595C435A46AECF2872F47F1C601206
len = 63 hex = 49A1E8053FBA95F6B7CD3F3B30B101CDD595C435A46AECF2872F47F1C601206
len = 64 hex = C49A1E8053FBA95F6B7CD3F3B30B101CDD595C435A46AECF2872F47F1C601206
len = 65 hex = 0C49A1E8053FBA95F6B7CD3F3B30B101CDD595C435A46AECF2872F47F1C601206
len = 66 hex = 00C49A1E8053FBA95F6B7CD3F3B30B101CDD595C435A46AECF2872F47F1C601206
len = 67 hex = 000C49A1E8053FBA95F6B7CD3F3B30B101CDD595C435A46AECF2872F47F1C601206
len = 68 hex = 0000C49A1E8053FBA95F6B7CD3F3B30B101CDD595C435A46AECF2872F47F1C601206
```
### Expected Behavior
Should be:
```
def testHexToQ(self) -> None :
s1q = hex_to_q("C49A1E8053FBA95F6B7CD3F3B30B101CDD595C435A46AECF2872F47F1C601206")
s2q = hex_to_q("0C49A1E8053FBA95F6B7CD3F3B30B101CDD595C435A46AECF2872F47F1C601206")
self.assertEqual(s1q, s2q)
self.assertEqual(hash_elems(s1q), hash_elems(s2q))
self.assertEqual(s1q.to_hex(), s2q.to_hex())
```
would recommend that the hex value be exactly 64 characters, with leading zeros truncated or padded as needed.
### Steps To Reproduce
see above
### Environment
```markdown
- OS: all
```
### Anything else?
The same is probably true for ElementModP
In general, the hash function is underspecified.
Porting it to other languages is problematic, since there is no specification other than the python code.
There is a small enough set of argument types to hash_elems() that specifying it should not be too onerous to do.
Then one can test to look for other possible edge cases and make sure there is no ambiguity.
|
0.0
|
f3c02b233c2d46d7d7b6e66e402f8c7a131e356d
|
[
"tests/property/test_hash.py::TestHash::test_hash_of_big_integer_with_single_leading_zero",
"tests/property/test_hash.py::TestHash::test_hash_of_big_integer_with_leading_zero_bytes"
] |
[
"tests/property/test_hash.py::TestHash::test_hash_value_from_nested_list_and_result_of_hashed_list_by_taking_the_hex",
"tests/property/test_hash.py::TestHash::test_hash_for_non_zero_number_string_same_as_explicit_number",
"tests/property/test_hash.py::TestHash::test_same_answer_twice_in_a_row",
"tests/property/test_hash.py::TestHash::test_hash_of_save_values_in_list_are_same_hash",
"tests/property/test_hash.py::TestHash::test_hash_for_none_same_as_null_string",
"tests/property/test_hash.py::TestHash::test_basic_hash_properties",
"tests/property/test_hash.py::TestHash::test_hash_for_zero_number_is_zero_string",
"tests/property/test_hash.py::TestHash::test_hash_not_null_equivalents",
"tests/property/test_hash.py::TestHash::test_hash_null_equivalents",
"tests/property/test_hash.py::TestHash::test_different_strings_casing_not_the_same_hash"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2022-06-07 13:46:16+00:00
|
mit
| 3,903 |
|
microsoft__electionguard-python-756
|
diff --git a/src/electionguard_gui/models/decryption_dto.py b/src/electionguard_gui/models/decryption_dto.py
index 9326cf7..2d12795 100644
--- a/src/electionguard_gui/models/decryption_dto.py
+++ b/src/electionguard_gui/models/decryption_dto.py
@@ -49,7 +49,7 @@ class DecryptionDto:
can_join: Optional[bool]
decryption_shares: list[Any]
plaintext_tally: Optional[str]
- plaintext_spoiled_ballots: Optional[dict[str, str]]
+ plaintext_spoiled_ballots: dict[str, str]
lagrange_coefficients: Optional[str]
ciphertext_tally: Optional[str]
completed_at_utc: Optional[datetime]
@@ -69,7 +69,9 @@ class DecryptionDto:
self.guardians_joined = _get_list(decryption, "guardians_joined")
self.decryption_shares = _get_list(decryption, "decryption_shares")
self.plaintext_tally = decryption.get("plaintext_tally")
- self.plaintext_spoiled_ballots = decryption.get("plaintext_spoiled_ballots")
+ self.plaintext_spoiled_ballots = _get_dict(
+ decryption, "plaintext_spoiled_ballots"
+ )
self.lagrange_coefficients = decryption.get("lagrange_coefficients")
self.ciphertext_tally = decryption.get("ciphertext_tally")
self.completed_at_utc = decryption.get("completed_at")
@@ -132,8 +134,6 @@ class DecryptionDto:
return from_raw(PlaintextTally, self.plaintext_tally)
def get_plaintext_spoiled_ballots(self) -> list[PlaintextTally]:
- if not self.plaintext_spoiled_ballots:
- raise ValueError("No plaintext spoiled ballots found")
return [
from_raw(PlaintextTally, tally)
for tally in self.plaintext_spoiled_ballots.values()
@@ -157,6 +157,13 @@ def _get_list(decryption: dict[str, Any], name: str) -> list:
return []
+def _get_dict(decryption: dict[str, Any], name: str) -> dict:
+ value = decryption.get(name)
+ if value:
+ return dict(value)
+ return {}
+
+
def _get_int(decryption: dict[str, Any], name: str, default: int) -> int:
value = decryption.get(name)
if value:
|
microsoft/electionguard-python
|
be31aa28c5a7fb2f707f9e1b280100055fa607ee
|
diff --git a/tests/unit/electionguard_gui/test_decryption_dto.py b/tests/unit/electionguard_gui/test_decryption_dto.py
index 6ff8786..190fb11 100644
--- a/tests/unit/electionguard_gui/test_decryption_dto.py
+++ b/tests/unit/electionguard_gui/test_decryption_dto.py
@@ -9,6 +9,16 @@ from tests.base_test_case import BaseTestCase
class TestDecryptionDto(BaseTestCase):
"""Test the DecryptionDto class"""
+ def test_no_spoiled_ballots(self) -> None:
+ # ARRANGE
+ decryption_dto = DecryptionDto({})
+
+ # ACT
+ spoiled_ballots = decryption_dto.get_plaintext_spoiled_ballots()
+
+ # ASSERT
+ self.assertEqual(0, len(spoiled_ballots))
+
def test_get_status_with_no_guardians(self) -> None:
# ARRANGE
decryption_dto = DecryptionDto(
|
🐞 Error Occurs Exporting Ballots if No Spoiled Ballots
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Behavior
If there are no spoiled ballots and you export an election record it will produce this error:
```
DEBUG:eel_log_service.py.debug:#L35: getting ballots for 62ff94aa397efaddb14ac8db
Traceback (most recent call last):
File "C:\dev\ElectionGuard\electionguard-python\.venv\lib\site-packages\eel\__init__.py", line 281, in _process_message
return_val = _exposed_functions[message['name']](*message['args'])
File "C:\dev\ElectionGuard\electionguard-python\src\electionguard_gui\components\export_election_record_component.py", line 64, in export_election_record
spoiled_ballots = decryption.get_plaintext_spoiled_ballots()
File "C:\dev\ElectionGuard\electionguard-python\src\electionguard_gui\models\decryption_dto.py", line 136, in get_plaintext_spoiled_ballots
raise ValueError("No plaintext spoiled ballots found")
ValueError: No plaintext spoiled ballots found
```
### Expected Behavior
_No response_
### Steps To Reproduce
_No response_
### Environment
```markdown
- OS:
```
### Anything else?
_No response_
|
0.0
|
be31aa28c5a7fb2f707f9e1b280100055fa607ee
|
[
"tests/unit/electionguard_gui/test_decryption_dto.py::TestDecryptionDto::test_no_spoiled_ballots"
] |
[
"tests/unit/electionguard_gui/test_decryption_dto.py::TestDecryptionDto::test_users_can_join_key_ceremony_if_not_already_joined",
"tests/unit/electionguard_gui/test_decryption_dto.py::TestDecryptionDto::test_admins_can_not_join_key_ceremony",
"tests/unit/electionguard_gui/test_decryption_dto.py::TestDecryptionDto::test_users_cant_join_twice",
"tests/unit/electionguard_gui/test_decryption_dto.py::TestDecryptionDto::test_get_status_with_all_guardians_joined_but_not_completed",
"tests/unit/electionguard_gui/test_decryption_dto.py::TestDecryptionDto::test_get_status_with_no_guardians",
"tests/unit/electionguard_gui/test_decryption_dto.py::TestDecryptionDto::test_get_status_with_all_guardians_joined_and_completed"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-08-19 14:04:51+00:00
|
mit
| 3,904 |
|
microsoft__electionguard-python-759
|
diff --git a/src/electionguard_gui/services/plaintext_ballot_service.py b/src/electionguard_gui/services/plaintext_ballot_service.py
index 6cdf70c..47c7122 100644
--- a/src/electionguard_gui/services/plaintext_ballot_service.py
+++ b/src/electionguard_gui/services/plaintext_ballot_service.py
@@ -14,36 +14,49 @@ def get_plaintext_ballot_report(
selection_write_ins = _get_candidate_write_ins(manifest)
parties = _get_selection_parties(manifest)
tally_report = {}
- for tally_contest in plaintext_ballot.contests.values():
- contest_name = contest_names.get(tally_contest.object_id, "n/a")
- # non-write-in selections
- non_write_in_selections = [
- selection
- for selection in tally_contest.selections.values()
- if not selection_write_ins[selection.object_id]
- ]
- non_write_in_total = sum(
- [selection.tally for selection in non_write_in_selections]
- )
- non_write_in_selections_report = _get_selections_report(
- non_write_in_selections, selection_names, parties, non_write_in_total
+ contests = plaintext_ballot.contests.values()
+ for tally_contest in contests:
+ selections = list(tally_contest.selections.values())
+ contest_details = _get_contest_details(
+ selections, selection_names, selection_write_ins, parties
)
+ contest_name = contest_names.get(tally_contest.object_id, "n/a")
+ tally_report[contest_name] = contest_details
+ return tally_report
- # write-in selections
- write_ins_total = sum(
- [
- selection.tally
- for selection in tally_contest.selections.values()
- if selection_write_ins[selection.object_id]
- ]
- )
- tally_report[contest_name] = {
- "selections": non_write_in_selections_report,
- "nonWriteInTotal": non_write_in_total,
- "writeInTotal": write_ins_total,
- }
- return tally_report
+def _get_contest_details(
+ selections: list[PlaintextTallySelection],
+ selection_names: dict[str, str],
+ selection_write_ins: dict[str, bool],
+ parties: dict[str, str],
+) -> dict[str, Any]:
+
+ # non-write-in selections
+ non_write_in_selections = [
+ selection
+ for selection in selections
+ if not selection_write_ins[selection.object_id]
+ ]
+ non_write_in_total = sum([selection.tally for selection in non_write_in_selections])
+ non_write_in_selections_report = _get_selections_report(
+ non_write_in_selections, selection_names, parties, non_write_in_total
+ )
+
+ # write-in selections
+ write_ins = [
+ selection.tally
+ for selection in selections
+ if selection_write_ins[selection.object_id]
+ ]
+ any_write_ins = len(write_ins) > 0
+ write_ins_total = sum(write_ins) if any_write_ins else None
+
+ return {
+ "selections": non_write_in_selections_report,
+ "nonWriteInTotal": non_write_in_total,
+ "writeInTotal": write_ins_total,
+ }
def _get_selection_parties(manifest: Manifest) -> dict[str, str]:
@@ -65,14 +78,18 @@ def _get_selection_parties(manifest: Manifest) -> dict[str, str]:
def _get_candidate_write_ins(manifest: Manifest) -> dict[str, bool]:
- candidates = {
+ """
+ Returns a dictionary where the key is a selection's object_id and the value is a boolean
+ indicating whether the selection's candidate is marked as a write-in.
+ """
+ write_in_candidates = {
candidate.object_id: candidate.is_write_in is True
for candidate in manifest.candidates
}
contest_write_ins = {}
for contest in manifest.contests:
for selection in contest.ballot_selections:
- candidate_is_write_in = candidates[selection.candidate_id]
+ candidate_is_write_in = write_in_candidates[selection.candidate_id]
contest_write_ins[selection.object_id] = candidate_is_write_in
return contest_write_ins
diff --git a/src/electionguard_gui/web/components/shared/view-plaintext-ballot-component.js b/src/electionguard_gui/web/components/shared/view-plaintext-ballot-component.js
index 8a33315..2cabe77 100644
--- a/src/electionguard_gui/web/components/shared/view-plaintext-ballot-component.js
+++ b/src/electionguard_gui/web/components/shared/view-plaintext-ballot-component.js
@@ -27,7 +27,7 @@ export default {
<td class="text-end"><strong>{{contestContents.nonWriteInTotal}}</strong></td>
<td class="text-end"><strong>100.00%</strong></td>
</tr>
- <tr v-if="contestContents.writeInTotal">
+ <tr v-if="contestContents.writeInTotal !== null">
<td></td>
<td class="text-end">Write-Ins</td>
<td class="text-end">{{contestContents.writeInTotal}}</td>
|
microsoft/electionguard-python
|
fc3ce34ad1143726917f3ee272c7167b7e7bde4c
|
diff --git a/tests/unit/electionguard_gui/test_decryption_dto.py b/tests/unit/electionguard_gui/test_decryption_dto.py
index 190fb11..56a6bbe 100644
--- a/tests/unit/electionguard_gui/test_decryption_dto.py
+++ b/tests/unit/electionguard_gui/test_decryption_dto.py
@@ -63,7 +63,7 @@ class TestDecryptionDto(BaseTestCase):
self.assertEqual(status, "decryption complete")
@patch("electionguard_gui.services.authorization_service.AuthorizationService")
- def test_admins_can_not_join_key_ceremony(self, auth_service: MagicMock):
+ def test_admins_can_not_join_key_ceremony(self, auth_service: MagicMock) -> None:
# ARRANGE
decryption_dto = DecryptionDto({"guardians_joined": []})
@@ -80,7 +80,7 @@ class TestDecryptionDto(BaseTestCase):
@patch("electionguard_gui.services.authorization_service.AuthorizationService")
def test_users_can_join_key_ceremony_if_not_already_joined(
self, auth_service: MagicMock
- ):
+ ) -> None:
# ARRANGE
decryption_dto = DecryptionDto({"guardians_joined": []})
@@ -95,7 +95,7 @@ class TestDecryptionDto(BaseTestCase):
self.assertTrue(decryption_dto.can_join)
@patch("electionguard_gui.services.authorization_service.AuthorizationService")
- def test_users_cant_join_twice(self, auth_service: MagicMock):
+ def test_users_cant_join_twice(self, auth_service: MagicMock) -> None:
# ARRANGE
decryption_dto = DecryptionDto({"guardians_joined": ["user1"]})
diff --git a/tests/unit/electionguard_gui/test_plaintext_ballot_service.py b/tests/unit/electionguard_gui/test_plaintext_ballot_service.py
new file mode 100644
index 0000000..82a27f3
--- /dev/null
+++ b/tests/unit/electionguard_gui/test_plaintext_ballot_service.py
@@ -0,0 +1,100 @@
+from unittest.mock import MagicMock, patch
+from electionguard.tally import PlaintextTallySelection
+from electionguard_gui.services.plaintext_ballot_service import _get_contest_details
+from tests.base_test_case import BaseTestCase
+
+
+class TestPlaintextBallotService(BaseTestCase):
+ """Test the ElectionDto class"""
+
+ def test_zero_sections(self) -> None:
+ # ARRANGE
+ selections: list[PlaintextTallySelection] = []
+ selection_names: dict[str, str] = {}
+ selection_write_ins: dict[str, bool] = {}
+ parties: dict[str, str] = {}
+
+ # ACT
+ result = _get_contest_details(
+ selections, selection_names, selection_write_ins, parties
+ )
+
+ # ASSERT
+ self.assertEqual(0, result["nonWriteInTotal"])
+ self.assertEqual(None, result["writeInTotal"])
+ self.assertEqual(0, len(result["selections"]))
+
+ @patch("electionguard.tally.PlaintextTallySelection")
+ def test_one_non_write_in(self, plaintext_tally_selection: MagicMock) -> None:
+ # ARRANGE
+ plaintext_tally_selection.object_id = "AL"
+ plaintext_tally_selection.tally = 2
+ selections: list[PlaintextTallySelection] = [plaintext_tally_selection]
+ selection_names: dict[str, str] = {
+ "AL": "Abraham Lincoln",
+ }
+ selection_write_ins: dict[str, bool] = {
+ "AL": False,
+ }
+ parties: dict[str, str] = {
+ "AL": "National Union Party",
+ }
+
+ # ACT
+ result = _get_contest_details(
+ selections, selection_names, selection_write_ins, parties
+ )
+
+ # ASSERT
+ self.assertEqual(2, result["nonWriteInTotal"])
+ self.assertEqual(None, result["writeInTotal"])
+ self.assertEqual(1, len(result["selections"]))
+ selection = result["selections"][0]
+ self.assertEqual("Abraham Lincoln", selection["name"])
+ self.assertEqual(2, selection["tally"])
+ self.assertEqual("National Union Party", selection["party"])
+ self.assertEqual(1, selection["percent"])
+
+ @patch("electionguard.tally.PlaintextTallySelection")
+ def test_one_write_in(self, plaintext_tally_selection: MagicMock) -> None:
+ # ARRANGE
+ plaintext_tally_selection.object_id = "ST"
+ plaintext_tally_selection.tally = 1
+ selections: list[PlaintextTallySelection] = [plaintext_tally_selection]
+ selection_names: dict[str, str] = {}
+ selection_write_ins: dict[str, bool] = {
+ "ST": True,
+ }
+ parties: dict[str, str] = {}
+
+ # ACT
+ result = _get_contest_details(
+ selections, selection_names, selection_write_ins, parties
+ )
+
+ # ASSERT
+ self.assertEqual(0, result["nonWriteInTotal"])
+ self.assertEqual(1, result["writeInTotal"])
+ self.assertEqual(0, len(result["selections"]))
+
+ @patch("electionguard.tally.PlaintextTallySelection")
+ def test_zero_write_in(self, plaintext_tally_selection: MagicMock) -> None:
+ # ARRANGE
+ plaintext_tally_selection.object_id = "ST"
+ plaintext_tally_selection.tally = 0
+ selections: list[PlaintextTallySelection] = [plaintext_tally_selection]
+ selection_names: dict[str, str] = {}
+ selection_write_ins: dict[str, bool] = {
+ "ST": True,
+ }
+ parties: dict[str, str] = {}
+
+ # ACT
+ result = _get_contest_details(
+ selections, selection_names, selection_write_ins, parties
+ )
+
+ # ASSERT
+ self.assertEqual(0, result["nonWriteInTotal"])
+ self.assertEqual(0, result["writeInTotal"])
+ self.assertEqual(0, len(result["selections"]))
|
🐞 Contests with zero write-in do not show zero write-ins
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Behavior
ElectionGuard tally does not report the number of Write-ins if write-ins received 0 votes
**Setup:**
1. Load an election on ElectionGuard Scan and open polls
2. Scan some ballots that include regular office contests that have write-in but the write-ins are never marked (0 votes received)
3. Close polls
4. Take the EGDrive to the EG Admin machine and upload ballots
5. Decrypt the ballots and tally
6. Compare the number of votes for write-ins on the screen to the Scan tally
**Expected Results:**
1. Write-ins will be listed as having 0 votes
**Actual Results:**
1. The write-in choice is not reported on the EG tally
**Further testing:**
1. The write-in choice is listed if the write-in recieves at least 1 vote
**Impact:**
1. Medium - Missing data, but at least it is reported if a vote is received by the write-in
**Defect Details**
Subject Saving Data
Reproducible Y
Severity 2-Medium
Priority 3-High
Detected on Date 2022-08-17
Defect Type Product Defect
Modified 2022-08-17 16:30:05
Detected in Release ElectionGuard
Detected in Cycle Drop 4
Target Release ElectionGuard
### Expected Behavior
Write-ins will be listed as having 0 votes
### Steps To Reproduce
_No response_
### Environment
```markdown
- OS:
```
### Anything else?
_No response_
|
0.0
|
fc3ce34ad1143726917f3ee272c7167b7e7bde4c
|
[
"tests/unit/electionguard_gui/test_plaintext_ballot_service.py::TestPlaintextBallotService::test_one_write_in",
"tests/unit/electionguard_gui/test_plaintext_ballot_service.py::TestPlaintextBallotService::test_zero_sections",
"tests/unit/electionguard_gui/test_plaintext_ballot_service.py::TestPlaintextBallotService::test_zero_write_in",
"tests/unit/electionguard_gui/test_plaintext_ballot_service.py::TestPlaintextBallotService::test_one_non_write_in",
"tests/unit/electionguard_gui/test_decryption_dto.py::TestDecryptionDto::test_no_spoiled_ballots",
"tests/unit/electionguard_gui/test_decryption_dto.py::TestDecryptionDto::test_users_can_join_key_ceremony_if_not_already_joined",
"tests/unit/electionguard_gui/test_decryption_dto.py::TestDecryptionDto::test_get_status_with_all_guardians_joined_but_not_completed",
"tests/unit/electionguard_gui/test_decryption_dto.py::TestDecryptionDto::test_get_status_with_all_guardians_joined_and_completed",
"tests/unit/electionguard_gui/test_decryption_dto.py::TestDecryptionDto::test_admins_can_not_join_key_ceremony",
"tests/unit/electionguard_gui/test_decryption_dto.py::TestDecryptionDto::test_get_status_with_no_guardians",
"tests/unit/electionguard_gui/test_decryption_dto.py::TestDecryptionDto::test_users_cant_join_twice"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-08-19 21:38:04+00:00
|
mit
| 3,905 |
|
microsoft__electionguard-python-83
|
diff --git a/src/electionguard/tracker.py b/src/electionguard/tracker.py
index 751b8f5..0560a9c 100644
--- a/src/electionguard/tracker.py
+++ b/src/electionguard/tracker.py
@@ -1,4 +1,4 @@
-from typing import List
+from typing import List, Optional
from .hash import hash_elems
from .group import ElementModQ, q_to_bytes, bytes_to_q
@@ -32,29 +32,39 @@ def get_rotating_tracker_hash(
def tracker_hash_to_words(
tracker_hash: ElementModQ, seperator: str = DEFAULT_SEPERATOR
-) -> str:
+) -> Optional[str]:
"""
Convert tracker has to human readable / friendly words
:param hash: Tracker hash
- :return: Human readable tracker string
+ :return: Human readable tracker string or None
"""
segments = q_to_bytes(tracker_hash)
- words = [get_word(value) for value in segments]
- # TODO Reduce length of segments
+ words: List[str] = []
+ for value in segments:
+ word = get_word(value)
+ if word is None:
+ return None
+ words.append(word)
+ # FIXME ISSUE #82 Minimize length of tracker
return seperator.join(words)
def tracker_words_to_hash(
tracker_words: str, seperator: str = DEFAULT_SEPERATOR
-) -> ElementModQ:
+) -> Optional[ElementModQ]:
"""
Convert tracker from human readable / friendly words to hash
:param tracker_words: Tracker words
:param seperator: Seperator used between words
- :return: Tracker hash
+ :return: Tracker hash or None
"""
words = tracker_words.split(seperator)
- int_values = [get_index_from_word(word) for word in words]
+ int_values: List[int] = []
+ for word in words:
+ index = get_index_from_word(word)
+ if index is None:
+ return None
+ int_values.append(index)
value = bytes(int_values)
return bytes_to_q(value)
diff --git a/src/electionguard/words.py b/src/electionguard/words.py
index 59d05cc..8dd1531 100644
--- a/src/electionguard/words.py
+++ b/src/electionguard/words.py
@@ -1,27 +1,29 @@
+from typing import Optional
+
MIN_INDEX = 0
MAX_INDEX = 4095
-def get_word(index: int) -> str:
+def get_word(index: int) -> Optional[str]:
"""
Get word (4096 options) based on 16 bit index for use with trackers.
- :param index: index of word
- :return: word
+ :param index: index of word between 0 and 4095
+ :return: word or None if index outside bounds
"""
if index < MIN_INDEX:
- return words[MIN_INDEX]
+ return None
if index > MAX_INDEX:
- return words[MAX_INDEX]
+ return None
return words[index]
-def get_index_from_word(word: str) -> int:
+def get_index_from_word(word: str) -> Optional[int]:
"""
Get the index of a word (4096 options) based on 16 bit index for use with trackers.
:param word: word
- :return: index of word
+ :return: index of word or None if not found
"""
- return words.index(word)
+ return words.index(word) if word in words else None
words = [
|
microsoft/electionguard-python
|
c73f6dc5912f7d8cf2af3dc879ea509822be2e9b
|
diff --git a/tests/test_words.py b/tests/test_words.py
index ed18d08..c4ad51c 100644
--- a/tests/test_words.py
+++ b/tests/test_words.py
@@ -30,8 +30,20 @@ class TestWord(TestCase):
INDEX_BELOW_MIN = -1
INDEX_ABOVE_MAX = 4096
- word_min = get_word(INDEX_BELOW_MIN)
- word_max = get_word(INDEX_ABOVE_MAX)
+ # Act
+ word_past_min = get_word(INDEX_BELOW_MIN)
+ word_past_max = get_word(INDEX_ABOVE_MAX)
- self.assertEqual(word_min, "aardvark")
- self.assertEqual(word_max, "prospect")
+ # Assert
+ self.assertIsNone(word_past_min)
+ self.assertIsNone(word_past_max)
+
+ def test_get_index_of_word_not_in_list(self):
+ # Arrange
+ FAILING_WORD = "thiswordshouldfail"
+
+ # Act
+ failed_index = get_index_from_word(FAILING_WORD)
+
+ # Assert
+ self.assertIsNone(failed_index)
|
error handling for words.py
`src/electionguard/words.py`: the methods here don't handle bad input, and will instead implicitly raise errors or have undesirable behavior. Perhaps these should return `Optional` results, and require error checking.
|
0.0
|
c73f6dc5912f7d8cf2af3dc879ea509822be2e9b
|
[
"tests/test_words.py::TestWord::test_get_index_of_word_not_in_list",
"tests/test_words.py::TestWord::test_get_word_when_out_of_range"
] |
[
"tests/test_words.py::TestWord::test_get_word"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-07-06 19:36:52+00:00
|
mit
| 3,906 |
|
microsoft__nutter-44
|
diff --git a/README.md b/README.md
index 9567b1e..2c92850 100644
--- a/README.md
+++ b/README.md
@@ -224,8 +224,8 @@ export DATABRICKS_TOKEN=<TOKEN>
Windows PowerShell
``` cmd
-$env DATABRICKS_HOST="HOST"
-$env DATABRICKS_TOKEN="TOKEN"
+$env:DATABRICKS_HOST="HOST"
+$env:DATABRICKS_TOKEN="TOKEN"
```
__Note:__ For more information about personal access tokens review [Databricks API Authentication](https://docs.azuredatabricks.net/dev-tools/api/latest/authentication.html).
diff --git a/runtime/nutterfixture.py b/runtime/nutterfixture.py
index f9a3cb2..6e2db0b 100644
--- a/runtime/nutterfixture.py
+++ b/runtime/nutterfixture.py
@@ -54,6 +54,10 @@ class NutterFixture(object):
return TestExecResults(self.test_results)
def __load_fixture(self):
+ if hasattr(self, 'data_loader') == False:
+ raise InitializationException("If you have an __init__ method in your test class, make sure you make a call to initialize the parent class. For example: super().__init__()")
+
+
test_case_dict = self.data_loader.load_fixture(self)
if test_case_dict is None:
logging.fatal("Invalid Test Fixture")
@@ -72,4 +76,9 @@ class NutterFixture(object):
class InvalidTestFixtureException(Exception):
- pass
+ def __init__(self, message):
+ super().__init__(message)
+
+class InitializationException(Exception):
+ def __init__(self, message):
+ super().__init__(message)
|
microsoft/nutter
|
3813d6dd071e8a6d3e76e3f9743ef2265164d08e
|
diff --git a/tests/runtime/test_nutterfixture.py b/tests/runtime/test_nutterfixture.py
index 0e19b93..5c81412 100644
--- a/tests/runtime/test_nutterfixture.py
+++ b/tests/runtime/test_nutterfixture.py
@@ -4,7 +4,7 @@ Licensed under the MIT license.
"""
import pytest
-from runtime.nutterfixture import NutterFixture, tag, InvalidTestFixtureException
+from runtime.nutterfixture import NutterFixture, tag, InvalidTestFixtureException, InitializationException
from runtime.testcase import TestCase
from runtime.fixtureloader import FixtureLoader
from common.testresult import TestResult, TestResults
@@ -239,6 +239,14 @@ def test__execute_tests__test_names_not_in_order_in_class__tests_executed_in_alp
# Assert
assert '1wxyz' == fix.get_method_order()
+def test__execute_tests__subclass_init_does_not_call_NutterFixture_init__throws_InitializationException():
+ # Arrange
+ fix = TestFixtureThatDoesNotCallBaseCtor()
+
+ # Act
+ with pytest.raises(InitializationException):
+ fix.execute_tests()
+
def test__run_test_method__has_list_tag_decorator__list_set_on_method():
# Arrange
class Wrapper(NutterFixture):
@@ -376,3 +384,10 @@ class OutOfOrderTestFixture(NutterFixture):
def get_method_order(self):
return self.__method_order
+
+class TestFixtureThatDoesNotCallBaseCtor(NutterFixture):
+ def __init__(self):
+ pass
+
+ def assertion_test_case(self):
+ assert 1 == 1
|
Documentation issue : Incorrect commands to set the environment variable
Below command wont run in **windows power shell** to set the environment variable
```
$env DATABRICKS_HOST="HOST"
$env DATABRICKS_TOKEN="TOKEN"
```
Instead it should be like below
```
$env:DATABRICKS_HOST="HOST"
$env:DATABRICKS_TOKEN="TOKEN"
```
|
0.0
|
3813d6dd071e8a6d3e76e3f9743ef2265164d08e
|
[
"tests/runtime/test_nutterfixture.py::test__ctor__creates_fixture_loader",
"tests/runtime/test_nutterfixture.py::test__execute_tests__calls_load_fixture_on_fixture_loader",
"tests/runtime/test_nutterfixture.py::test__execute_tests__data_loader_returns_none__throws_invalidfixtureexception",
"tests/runtime/test_nutterfixture.py::test__execute_tests__data_loader_returns_empty_dictionary__returns_empty_results",
"tests/runtime/test_nutterfixture.py::test__execute_tests__before_all_set_and_data_loader_returns_empty_dictionary__does_not_call_before_all",
"tests/runtime/test_nutterfixture.py::test__execute_tests__before_all_none_and_data_loader_returns_empty_dictionary__does_not_call_before_all",
"tests/runtime/test_nutterfixture.py::test__execute_tests__before_all_set_and_data_loader_returns_dictionary_with_testcases__calls_before_all",
"tests/runtime/test_nutterfixture.py::test__execute_tests__after_all_set_and_data_loader_returns_empty_dictionary__does_not_call_after_all",
"tests/runtime/test_nutterfixture.py::test__execute_tests__after_all_none_and_data_loader_returns_empty_dictionary__does_not_call_after_all",
"tests/runtime/test_nutterfixture.py::test__execute_tests__after_all_set_and_data_loader_returns_dictionary_with_testcases__calls_after_all",
"tests/runtime/test_nutterfixture.py::test__execute_tests__data_loader_returns_dictionary_with_testcases__iterates_over_dictionary_and_calls_execute",
"tests/runtime/test_nutterfixture.py::test__execute_tests__returns_test_result__calls_append_on_testresults",
"tests/runtime/test_nutterfixture.py::test__execute_tests__two_test_cases__returns_test_results_with_2_test_results",
"tests/runtime/test_nutterfixture.py::test__execute_tests__test_names_not_in_order_in_class__tests_executed_in_alphabetical_order",
"tests/runtime/test_nutterfixture.py::test__execute_tests__subclass_init_does_not_call_NutterFixture_init__throws_InitializationException",
"tests/runtime/test_nutterfixture.py::test__run_test_method__has_list_tag_decorator__list_set_on_method",
"tests/runtime/test_nutterfixture.py::test__run_test_method__has_str_tag_decorator__str_set_on_method",
"tests/runtime/test_nutterfixture.py::test__run_test_method__has_tag_decorator_not_list__raises_value_error",
"tests/runtime/test_nutterfixture.py::test__run_test_method__has_tag_decorator_not_listhas_invalid_tag_decorator_none__raises_value_error",
"tests/runtime/test_nutterfixture.py::test__non_run_test_method__valid_tag_on_non_run_method__raises_value_error",
"tests/runtime/test_nutterfixture.py::test__run_test_method__has_invalid_tag_decorator_not_list_or_str_using_class_not_builder__raises_value_error",
"tests/runtime/test_nutterfixture.py::test__run_test_method__has_valid_tag_decorator_in_class__tag_set_on_method"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-05-08 00:34:18+00:00
|
mit
| 3,907 |
|
microsoft__pybryt-149
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 82de47d..2eee6ed 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,6 +9,7 @@ All notable changes to this project will be documented in this file, and this pr
* Added `TimeComplexityChecker` for analyzing complexity without annotations per [#137](https://github.com/microsoft/pybryt/issues/137)
* Fixed annotation message filtering bug per [#145](https://github.com/microsoft/pybryt/issues/145)
* Added `dataclasses` backport to required modules for Python < 3.7
+* Added a `group` argument to the `check` context manager per [#146](https://github.com/microsoft/pybryt/issues/146)
## 0.3.1 - 2021-12-01
diff --git a/pybryt/student.py b/pybryt/student.py
index ba56688..b0067dd 100644
--- a/pybryt/student.py
+++ b/pybryt/student.py
@@ -310,9 +310,10 @@ class check:
Args:
ref (``Union[str, ReferenceImplementation, list[str], list[ReferenceImplementation]]``): the
reference(s) to check against or the path(s) to them
+ group (``str``, optional): the name of the group of annotations to run in each reference
report_on_error (``bool``, optional): whether to print the report when an error is thrown
by the block
- show_only (one of ``{'satisified', 'unsatisfied', None}``, optional): which types of
+ show_only (one of ``{'satisfied', 'unsatisfied', None}``, optional): which types of
reference results to include in the report; if ``None``, all are included
**kwargs: additional keyword arguments passed to ``pybryt.execution.create_collector``
"""
@@ -320,11 +321,14 @@ class check:
_ref: List[ReferenceImplementation]
"""the references being checked against"""
+ _group: Optional[str]
+ """the group of annotations in the references to run"""
+
_report_on_error: bool
"""whether to print the report when an error is thrown by the block"""
_show_only: Optional[str]
- """which types of eference results to include in the report"""
+ """which types of reference results to include in the report"""
_frame_tracer: Optional[FrameTracer]
"""the frame tracer being used to manage tracing"""
@@ -339,8 +343,13 @@ class check:
"""whether this check is disbaled (because PyBryt is already tracing)"""
def __init__(
- self, ref: Union[str, ReferenceImplementation, List[str], List[ReferenceImplementation]],
- report_on_error: bool = True, show_only: Optional[str] = None, cache: bool = True, **kwargs
+ self,
+ ref: Union[str, ReferenceImplementation, List[str], List[ReferenceImplementation]],
+ group: Optional[str] = None,
+ report_on_error: bool = True,
+ show_only: Optional[str] = None,
+ cache: bool = True,
+ **kwargs,
):
if isinstance(ref, str):
ref = ReferenceImplementation.load(ref)
@@ -357,6 +366,7 @@ class check:
raise TypeError("Invalid values provided for reference(s)")
self._ref = ref
+ self._group = group
self._kwargs = kwargs
self._show_only = show_only
self._report_on_error = report_on_error
@@ -396,7 +406,7 @@ class check:
if exc_type is None or self._report_on_error:
footprint = self._frame_tracer.get_footprint()
stu = StudentImplementation.from_footprint(footprint)
- res = stu.check(self._ref)
+ res = stu.check(self._ref, group=self._group)
report = generate_report(res, show_only=self._show_only)
if report:
print(report)
|
microsoft/pybryt
|
4d4b34a9f3ee89c9a825e6dd110d6938a763e59b
|
diff --git a/tests/test_student.py b/tests/test_student.py
index 56983cd..bf904e0 100644
--- a/tests/test_student.py
+++ b/tests/test_student.py
@@ -193,6 +193,17 @@ def test_check_cm(capsys):
with pytest.raises(TypeError, match="Invalid values in the reference list"):
check([ref, "path", 1])
+ # check by annotation group
+ with mock.patch.object(StudentImplementation, "from_footprint") as mocked_ff, \
+ mock.patch("pybryt.student.FrameTracer"), \
+ mock.patch("pybryt.student.generate_report"):
+ ref = ReferenceImplementation("groups", [])
+ for run_group in ["1", "2", None]:
+ with check(ref, group=run_group):
+ pass
+
+ mocked_ff.return_value.check.assert_called_with([ref], group=run_group)
+
# check caching
with mock.patch("pybryt.student.FrameTracer") as mocked_frame_tracer:
with mock.patch("pybryt.student.StudentImplementation") as mocked_stu, \
|
Make `check` context accept an annotation group parameter
`StudentImplementation`s and `ReferenceImplementation`s support checking specific groups of annotations in a single reference using a `group` parameter. This feature should be added to the `check` context.
|
0.0
|
4d4b34a9f3ee89c9a825e6dd110d6938a763e59b
|
[
"tests/test_student.py::test_check_cm"
] |
[
"tests/test_student.py::test_load_and_dump",
"tests/test_student.py::test_check",
"tests/test_student.py::test_errors",
"tests/test_student.py::test_from_cache"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-01-06 20:02:02+00:00
|
mit
| 3,908 |
|
microsoft__pybryt-150
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2eee6ed..0ae0743 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,6 +10,7 @@ All notable changes to this project will be documented in this file, and this pr
* Fixed annotation message filtering bug per [#145](https://github.com/microsoft/pybryt/issues/145)
* Added `dataclasses` backport to required modules for Python < 3.7
* Added a `group` argument to the `check` context manager per [#146](https://github.com/microsoft/pybryt/issues/146)
+* Moved named annotation filtering into `ReferenceImplementation` constructor per [#147](https://github.com/microsoft/pybryt/issues/147)
## 0.3.1 - 2021-12-01
diff --git a/pybryt/annotations/annotation.py b/pybryt/annotations/annotation.py
index f386995..5abfe34 100644
--- a/pybryt/annotations/annotation.py
+++ b/pybryt/annotations/annotation.py
@@ -4,13 +4,12 @@ __all__ = ["Annotation", "AnnotationResult"]
from abc import ABC, abstractmethod
from dataclasses import dataclass
-from typing import Any, Dict, List, Optional, Tuple
+from typing import Any, Dict, List, Optional
from ..execution import is_complexity_tracing_enabled, MemoryFootprint
_TRACKED_ANNOTATIONS = []
-_GROUP_INDICES = {}
_ANNOTATION_COUNTER = 0
@@ -71,23 +70,12 @@ class Annotation(ABC):
def _track(self) -> None:
"""
- Tracks this annotation in ``_TRACKED_ANNOTATIONS`` and updates ``_GROUP_INDICES`` with the
- index of the annotation if ``self.group`` is present. If the annotation has children
+ Tracks this annotation in ``_TRACKED_ANNOTATIONS``. If the annotation has children
(returned by ``self.children``), the children are removed from ``_TRACKED_ANNOTATIONS``.
"""
if is_complexity_tracing_enabled():
return
- global _GROUP_INDICES, _TRACKED_ANNOTATIONS
-
- idx = len(_TRACKED_ANNOTATIONS)
- if self.name not in _GROUP_INDICES:
- _GROUP_INDICES[self.name] = []
- if self.limit is not None and len(_GROUP_INDICES[self.name]) >= self.limit:
- return
- else:
- _GROUP_INDICES[self.name].append(idx)
-
for child in self.children:
try:
_TRACKED_ANNOTATIONS.remove(child)
@@ -112,9 +100,8 @@ class Annotation(ABC):
Resets the list of tracked annotations and the mapping of group names to indices in that
list.
"""
- global _ANNOTATION_COUNTER, _GROUP_INDICES, _TRACKED_ANNOTATIONS
+ global _ANNOTATION_COUNTER, _TRACKED_ANNOTATIONS
_TRACKED_ANNOTATIONS.clear()
- _GROUP_INDICES.clear()
_ANNOTATION_COUNTER = 0
@property
diff --git a/pybryt/reference.py b/pybryt/reference.py
index a635c10..4e0772b 100644
--- a/pybryt/reference.py
+++ b/pybryt/reference.py
@@ -38,7 +38,19 @@ class ReferenceImplementation(Serializable):
if not all(isinstance(ann, Annotation) for ann in annotations):
raise TypeError("Found non-annotation in annotations")
- self.annotations = annotations
+ self.annotations, name_counts = [], {}
+ for ann in annotations:
+ track = True
+ if ann.name is not None:
+ count = name_counts.get(ann.name, 0)
+ if ann.limit is None or ann.limit > count:
+ name_counts[ann.name] = count + 1
+ else:
+ track = False
+
+ if track:
+ self.annotations.append(ann)
+
self.name = name
def __eq__(self, other: Any) -> bool:
|
microsoft/pybryt
|
45a89cc8b0e1bbca74d05b6941524457513768b3
|
diff --git a/tests/annotations/test_annotation.py b/tests/annotations/test_annotation.py
index 96f0b14..6653d56 100644
--- a/tests/annotations/test_annotation.py
+++ b/tests/annotations/test_annotation.py
@@ -18,9 +18,8 @@ def test_name_group_limit():
vs.append(pybryt.Value(val, name="foo", limit=11))
tracked = pybryt.Annotation.get_tracked_annotations()
- assert len(tracked) == 11, "Too many tracked annotations"
- assert tracked == vs[:11], "Wrong tracked annotations"
- assert all(v.name == "foo" and v.limit == 11 for v in vs)
+ assert len(tracked) == 100
+ assert tracked == vs, "Wrong tracked annotations"
res = vs[-1].check(footprint)
assert_object_attrs(res, {
diff --git a/tests/test_reference.py b/tests/test_reference.py
index e77644b..d8745cc 100644
--- a/tests/test_reference.py
+++ b/tests/test_reference.py
@@ -125,6 +125,21 @@ def test_reference_construction():
ref2 = more_refs[0]
assert ref2 == expected_ref2
+ # check filtering named annotations (#147)
+ annots = [
+ Value(0),
+ Value(1, name="1"),
+ Value(2, name="1"),
+ Value(3, name="1"),
+ Value(4, name="2", limit=2),
+ Value(5, name="2", limit=2),
+ Value(6, name="2", limit=2),
+ Value(7, name="3", limit=2),
+ ]
+ ref = ReferenceImplementation("named-annotations", annots)
+ assert len(ref.annotations) == 7
+ assert annots[-2] not in ref.annotations
+
def test_construction_errors():
"""
|
Limiting the number of annotations in a reference only works when compiling a notebook, not when constructing the object manually
Consider the example:
```python
max_ref = []
def maximum(l, track=False):
m = max(l)
if track:
max_ref.append(pybryt.Value(
m,
name="list-maximum",
limit=5,
success_message="Found the max!",
failure_message="Did not find the max",
))
return m
for _ in range(1000):
test_list = np.random.normal(size=100)
maximum(test_list, track=True)
max_ref = pybryt.ReferenceImplementation("maximum", max_ref)
max_ref.annotations
```
`max_ref.annotations` here has length 1000 even though the limit is set of 5 for all annotations in the reference. The filtering behavior should be moved into the reference implementation constructor instead of being in `Annotation._track`.
|
0.0
|
45a89cc8b0e1bbca74d05b6941524457513768b3
|
[
"tests/annotations/test_annotation.py::test_name_group_limit",
"tests/test_reference.py::test_reference_construction"
] |
[
"tests/annotations/test_annotation.py::test_get_reset_tracked_annotations",
"tests/annotations/test_annotation.py::test_messages",
"tests/annotations/test_annotation.py::test_bitwise_ops",
"tests/test_reference.py::test_construction_errors",
"tests/test_reference.py::test_run_and_results",
"tests/test_reference.py::test_generate_report"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-01-06 21:30:48+00:00
|
mit
| 3,909 |
|
microsoft__pybryt-57
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8065050..797b2cd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,10 @@
All notable changes to this project will be documented in this file, and this project adheres to
[Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+## Unreleased
+
+* Added the context manager `pybryt.no_tracing`
+
## 0.1.0 - 2021-05-27
* Added time complexity annotations and checking
diff --git a/docs/student_implementations.rst b/docs/student_implementations.rst
index 453d92c..9f58be4 100644
--- a/docs/student_implementations.rst
+++ b/docs/student_implementations.rst
@@ -44,6 +44,16 @@ statements that show the values at various stages of execution.
stu = pybryt.StudentImplementation("subm.ipynb", output="executed-subm.ipynb")
+If there is code in a student notebook that should not be traced by PyBryt, wrap it PyBryt's
+:py:class:`no_tracing<pybryt.execution.no_tracing>` context manager. Any code inside this context
+will not be traced (if PyBryt is tracing the call stack). If no tracing is occurring, no action is
+taken.
+
+.. code-block:: python
+
+ with pybryt.no_tracing():
+ foo(1)
+
Checking Implementations
------------------------
diff --git a/pybryt/execution/__init__.py b/pybryt/execution/__init__.py
index 89ce8a2..64be0d7 100644
--- a/pybryt/execution/__init__.py
+++ b/pybryt/execution/__init__.py
@@ -1,6 +1,6 @@
"""Submission execution internals for PyBryt"""
-__all__ = ["check_time_complexity", "tracing_off", "tracing_on"]
+__all__ = ["check_time_complexity", "no_tracing"]
import os
import dill
@@ -13,7 +13,9 @@ from typing import Any, List, Tuple, Optional
from textwrap import dedent
from .complexity import check_time_complexity, TimeComplexityResult
-from .tracing import create_collector, _get_tracing_frame, tracing_off, tracing_on, TRACING_VARNAME
+from .tracing import (
+ create_collector, _get_tracing_frame, no_tracing, tracing_off, tracing_on, TRACING_VARNAME
+)
from ..preprocessors import IntermediateVariablePreprocessor
from ..utils import make_secret
diff --git a/pybryt/execution/tracing.py b/pybryt/execution/tracing.py
index c733705..a45f047 100644
--- a/pybryt/execution/tracing.py
+++ b/pybryt/execution/tracing.py
@@ -216,3 +216,28 @@ def tracing_on(frame=None, tracing_func=None):
vn2 = f"sys_{make_secret()}"
frame.f_globals[vn] = tracing_func
exec(f"import sys as {vn2}\n{vn2}.settrace({vn})", frame.f_globals, frame.f_locals)
+
+
+class no_tracing:
+ """
+ A context manager for turning tracing off for a block of code in a submission.
+
+ If PyBryt is tracing code, any code inside this context will not be traced for values in memory.
+ If PyBryt is not tracing, no action is taken.
+
+ .. code-block:: python
+
+ with pybryt.no_tracing():
+ # this code is not traced
+ foo(1)
+
+ # this code is traced
+ foo(2)
+ """
+
+ def __enter__(self):
+ tracing_off()
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ tracing_on()
+ return False
|
microsoft/pybryt
|
40580d2dbc0fc6bb2b5697cba14e21fda01c82c5
|
diff --git a/tests/execution/test_tracing.py b/tests/execution/test_tracing.py
index cd61c2a..46e722c 100644
--- a/tests/execution/test_tracing.py
+++ b/tests/execution/test_tracing.py
@@ -7,7 +7,7 @@ import numpy as np
from unittest import mock
from pybryt import *
-from pybryt.execution import create_collector, TRACING_VARNAME
+from pybryt.execution import create_collector, tracing_off, tracing_on, TRACING_VARNAME
from .utils import generate_mocked_frame
@@ -144,4 +144,13 @@ def test_tracing_control():
tracing_on()
mocked_settrace.assert_not_called()
- # assert inspect.currentframe().f_trace is not trace
+
+def test_tracing_context_manager():
+ """
+ """
+ with mock.patch("pybryt.execution.tracing.tracing_off") as mocked_off, \
+ mock.patch("pybryt.execution.tracing.tracing_on") as mocked_on:
+ with no_tracing():
+ mocked_off.assert_called()
+ mocked_on.assert_not_called()
+ mocked_on.assert_called()
|
Change the use of `tracing_off` + `tracing_on` to be a context manager
Basically, the idea is to make calling `tracing_off` and `tracing_on` directly an anti-pattern and use context managers to control tracing. Currently, the time complexity check (#33) and individual question checks (#38) use context managers, and a unified approach is probably the best idea.
For example, to stop code from being traced during grading, something like
```python
with pybryt.no_tracing():
# some code
```
|
0.0
|
40580d2dbc0fc6bb2b5697cba14e21fda01c82c5
|
[
"tests/execution/test_tracing.py::test_tracing_context_manager"
] |
[
"tests/execution/test_tracing.py::test_trace_function",
"tests/execution/test_tracing.py::test_tracing_control"
] |
{
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-06-03 05:35:04+00:00
|
mit
| 3,910 |
|
microsoft__pybryt-92
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8a45ca0..4ddb41b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,11 @@
All notable changes to this project will be documented in this file, and this project adheres to
[Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+## 0.1.6 - 2021-06-23
+
+* Added function call tracking to trace function and student implementations
+* Fixed bug for Markdown cells in `pybryt.StudentImplementation.errors`
+
## 0.1.5 - 2021-06-22
* Added tracking and warnings for errors in student notebook execution
diff --git a/docs/student_implementations.rst b/docs/student_implementations.rst
index 0266b7d..4352852 100644
--- a/docs/student_implementations.rst
+++ b/docs/student_implementations.rst
@@ -23,7 +23,7 @@ The constructor reads the notebook file and stores the student's code. It then p
the student's notebook using ``nbformat``'s ``ExecutePreprocessor``. The memory footprint of the
student's code is constructed by executing the notebook with a trace function that tracks every
value created and accessed **by the student's code** and the timestamps at which those values were
-observed.
+observed. PyBryt also tracks all of the function calls that occur during execution.
To trace into code written in specific files, use the ``addl_filenames`` argument of the constructor
to pass a list of absolute paths of files to trace inside of. This can be useful for cases in which
diff --git a/pybryt/execution/__init__.py b/pybryt/execution/__init__.py
index 1c70931..4ab61aa 100644
--- a/pybryt/execution/__init__.py
+++ b/pybryt/execution/__init__.py
@@ -24,7 +24,8 @@ NBFORMAT_VERSION = 4
def execute_notebook(nb: nbformat.NotebookNode, nb_path: str, addl_filenames: List[str] = [],
- output: Optional[str] = None) -> Tuple[int, List[Tuple[Any, int]], nbformat.NotebookNode]:
+ output: Optional[str] = None) -> Tuple[int, List[Tuple[Any, int]], List[Tuple[str, str]], \
+ nbformat.NotebookNode]:
"""
Executes a submission using ``nbconvert`` and returns the memory footprint.
@@ -41,8 +42,9 @@ def execute_notebook(nb: nbformat.NotebookNode, nb_path: str, addl_filenames: Li
output (``str``, optional): a file path at which to write the executed notebook
Returns:
- ``tuple[int, list[tuple[object, int]], nbformat.NotebookNode]``: the number of execution steps,
- the memory footprint, and the executed notebook
+ ``tuple[int, list[tuple[object, int]], list[tuple[str, str]], nbformat.NotebookNode]``: the
+ number of execution steps, the memory footprint, the list of function calls, and the
+ executed notebook
"""
nb = deepcopy(nb)
preprocessor = IntermediateVariablePreprocessor()
@@ -55,7 +57,7 @@ def execute_notebook(nb: nbformat.NotebookNode, nb_path: str, addl_filenames: Li
first_cell = nbformat.v4.new_code_cell(dedent(f"""\
import sys
from pybryt.execution import create_collector
- observed_{secret}, cir = create_collector(addl_filenames={addl_filenames})
+ cir_results_{secret}, cir = create_collector(addl_filenames={addl_filenames})
sys.settrace(cir)
{TRACING_VARNAME} = True
%cd {nb_dir}
@@ -65,9 +67,9 @@ def execute_notebook(nb: nbformat.NotebookNode, nb_path: str, addl_filenames: Li
sys.settrace(None)
import dill
from pybryt.utils import filter_picklable_list
- filter_picklable_list(observed_{secret})
+ filter_picklable_list(cir_results_{secret}[0])
with open("{observed_fp}", "wb+") as f:
- dill.dump(observed_{secret}, f)
+ dill.dump(cir_results_{secret}, f)
"""))
nb['cells'].insert(0, first_cell)
@@ -82,10 +84,10 @@ def execute_notebook(nb: nbformat.NotebookNode, nb_path: str, addl_filenames: Li
nbformat.write(nb, f)
with open(observed_fp, "rb") as f:
- observed = dill.load(f)
+ observed, calls = dill.load(f)
os.remove(observed_fp)
n_steps = max([t[1] for t in observed])
- return n_steps, observed, nb
+ return n_steps, observed, calls, nb
diff --git a/pybryt/execution/tracing.py b/pybryt/execution/tracing.py
index 7aab9f4..03b44f3 100644
--- a/pybryt/execution/tracing.py
+++ b/pybryt/execution/tracing.py
@@ -17,7 +17,7 @@ TRACING_FUNC = None
def create_collector(skip_types: List[type] = [type, type(len), ModuleType, FunctionType], addl_filenames: List[str] = []) -> \
- Tuple[List[Tuple[Any, int]], Callable[[FrameType, str, Any], Callable]]:
+ Tuple[Tuple[List[Tuple[Any, int]], List[Tuple[str, str]]], Callable[[FrameType, str, Any], Callable]]:
"""
Creates a list to collect observed values and a trace function.
@@ -32,11 +32,14 @@ def create_collector(skip_types: List[type] = [type, type(len), ModuleType, Func
IPython
Returns:
- ``tuple[list[tuple[object, int]], callable[[frame, str, object], callable]]``: the list
- of tuples of observed objects and their timestamps, and the trace function
+ ``tuple[tuple[list[tuple[object, int]], list[tuple[str, str]]], callable[[frame, str,
+ object], callable]]``: a 2-tuple containing a 2-tuple with the list of tuples of observed
+ objects and their timestamps and the list of call filenames andfunction names, and the trace
+ function
"""
global _COLLECTOR_RET
observed = []
+ calls = []
vars_not_found = {}
hashes = set()
counter = [0]
@@ -68,6 +71,15 @@ def create_collector(skip_types: List[type] = [type, type(len), ModuleType, Func
except:
return
+ def track_call(frame):
+ """
+ Tracks a call in ``calls`` as a tuple of ``(filename, function name)``.
+
+ Args:
+ frame (``types.FrameType``): the frame of the call
+ """
+ calls.append((frame.f_code.co_filename, frame.f_code.co_name))
+
# TODO: a way to track the cell of execution
def collect_intermidiate_results(frame: FrameType, event: str, arg: Any):
"""
@@ -76,6 +88,10 @@ def create_collector(skip_types: List[type] = [type, type(len), ModuleType, Func
if frame.f_code.co_filename.startswith("<ipython") or frame.f_code.co_filename in addl_filenames:
counter[0] += 1 # increment student code step counter
+ if event == "call":
+ track_call(frame)
+ return collect_intermidiate_results
+
# return if tracking is disabled by a compelxity check
from .complexity import _TRACKING_DISABLED
if _TRACKING_DISABLED:
@@ -137,7 +153,7 @@ def create_collector(skip_types: List[type] = [type, type(len), ModuleType, Func
return collect_intermidiate_results
_COLLECTOR_RET = (observed, counter, collect_intermidiate_results)
- return observed, collect_intermidiate_results
+ return (observed, calls), collect_intermidiate_results
def _get_tracing_frame():
diff --git a/pybryt/student.py b/pybryt/student.py
index 996dbd9..456a839 100644
--- a/pybryt/student.py
+++ b/pybryt/student.py
@@ -49,6 +49,9 @@ class StudentImplementation(Serializable):
values: List[Tuple[Any, int]]
"""the memory footprint (a list of tuples of objects and their timestamps)"""
+ calls: List[Tuple[str, str]]
+ """the list of all function calls from the student code"""
+
steps: int
"""number of execution steps"""
@@ -59,7 +62,7 @@ class StudentImplementation(Serializable):
self, path_or_nb: Optional[Union[str, nbformat.NotebookNode]], addl_filenames: List[str] = [],
output: Optional[str] = None
):
- self .executed_nb = None
+ self.executed_nb = None
if path_or_nb is None:
self.nb = None
self.nb_path = None
@@ -84,7 +87,7 @@ class StudentImplementation(Serializable):
execution
output (``str``, optional): a path at which to write executed notebook
"""
- self.steps, self.values, self.executed_nb = execute_notebook(
+ self.steps, self.values, self.calls, self.executed_nb = execute_notebook(
self.nb, self.nb_path, addl_filenames=addl_filenames, output=output
)
@@ -105,14 +108,17 @@ class StudentImplementation(Serializable):
errors = []
for cell in self.executed_nb['cells']:
- for out in cell['outputs']:
- if out['output_type'] == "error":
- errors.append(out)
+ if cell['cell_type'] == "code":
+ for out in cell['outputs']:
+ if out['output_type'] == "error":
+ errors.append(out)
return errors
@classmethod
- def from_footprint(cls, footprint: List[Tuple[Any, int]], steps: int) -> 'StudentImplementation':
+ def from_footprint(
+ cls, footprint: List[Tuple[Any, int]], calls: List[Tuple[str, str]], steps: int
+ ) -> 'StudentImplementation':
"""
Create a student implementation object from a memory footprint directly, rather than by
executing a notebook. Leaves the ``nb`` and ``nb_path`` instance variables of the resulting
@@ -120,11 +126,13 @@ class StudentImplementation(Serializable):
Args:
footprint (``list[tuple[object, int]]``): the memory footprint
+ calls (``list[tuple[str, str]]``): the list of function calls
steps (``int``): the number of execution steps
"""
stu = cls(None)
stu.steps = steps
stu.values = footprint
+ stu.calls = calls
return stu
@classmethod
@@ -145,10 +153,12 @@ class StudentImplementation(Serializable):
Returns:
``StudentImplementation``: the combined implementation
"""
- new_mfp = [] # the new memory footprint
- seen = set() # set to track which values we've seen
+ new_mfp = [] # the new memory footprint
+ new_calls = [] # the new list of calls
+ seen = set() # set to track which values we've seen
timestamp_offset = 0 # offset for timestamps in the new memory footprint
for impl in impls:
+ new_calls.extend(impl.calls)
for obj, ts in impl.values:
h = pickle_and_hash(obj)
if h not in seen:
@@ -156,7 +166,7 @@ class StudentImplementation(Serializable):
new_mfp.append((obj, ts))
seen.add(h)
timestamp_offset += impl.steps
- return cls.from_footprint(new_mfp, timestamp_offset)
+ return cls.from_footprint(new_mfp, new_calls, timestamp_offset)
@classmethod
def from_cache(cls, cache_dir=CACHE_DIR_NAME, combine=True) -> \
@@ -191,7 +201,7 @@ class StudentImplementation(Serializable):
footprint, the same number of steps, and the same source notebook.
"""
return isinstance(other, type(self)) and self.values == other.values and \
- self.steps == other.steps and self.nb == other.nb
+ self.steps == other.steps and self.nb == other.nb and self.calls == other.calls
@property
def _default_dump_dest(self) -> str:
@@ -337,6 +347,9 @@ class check:
_observed: Optional[List[Tuple[Any, int]]]
"""the memory footprint"""
+ _calls: Optional[List[Tuple[str, str]]]
+ """the list of calls from tracing"""
+
_cache: bool
"""whether to cache the memory footprint and results"""
@@ -366,6 +379,7 @@ class check:
self._show_only = show_only
self._frame = None
self._observed = None
+ self._calls = None
self._report_on_error = report_on_error
self._cache = cache
@@ -396,7 +410,7 @@ class check:
return # if already tracing, no action required
else:
- self._observed, cir = create_collector(**self._kwargs)
+ (self._observed, self._calls), cir = create_collector(**self._kwargs)
self._frame = inspect.currentframe().f_back
self._frame.f_globals[TRACING_VARNAME] = True
@@ -409,7 +423,7 @@ class check:
self._frame.f_globals[TRACING_VARNAME] = False
if exc_type is None or self._report_on_error:
- stu = StudentImplementation.from_footprint(self._observed, max(t[1] for t in self._observed))
+ stu = StudentImplementation.from_footprint(self._observed, self._calls, max(t[1] for t in self._observed))
res = stu.check(self._ref)
report = generate_report(res, show_only=self._show_only)
if report:
|
microsoft/pybryt
|
87c4deff9ab0f0a021b7d49749321b48025ff3ac
|
diff --git a/tests/execution/test_complexity.py b/tests/execution/test_complexity.py
index 90ab541..1074838 100644
--- a/tests/execution/test_complexity.py
+++ b/tests/execution/test_complexity.py
@@ -16,7 +16,7 @@ def test_time_complexity():
2 * 2
return n
- observed, cir = create_collector()
+ (observed, _), cir = create_collector()
for e in range(1, 9):
n = 10 ** e
diff --git a/tests/execution/test_notebook_execution.py b/tests/execution/test_notebook_execution.py
index 7ce895a..2584552 100644
--- a/tests/execution/test_notebook_execution.py
+++ b/tests/execution/test_notebook_execution.py
@@ -42,6 +42,8 @@ def test_notebook_execution():
with mock.patch("pybryt.execution.mkstemp") as mocked_tempfile:
mocked_tempfile.return_value = (None, observed_ntf.name)
- n_steps, observed, _ = execute_notebook(nb, "", output=ntf.name)
+ n_steps, observed, calls, _ = execute_notebook(nb, "", output=ntf.name)
assert len(ntf.read()) > 0
assert n_steps == max(t[1] for t in observed)
+ assert isinstance(calls, list) and isinstance(calls[0], tuple) and \
+ isinstance(calls[0][0], str) and isinstance(calls[0][1], str)
diff --git a/tests/execution/test_tracing.py b/tests/execution/test_tracing.py
index 46e722c..a8d26ba 100644
--- a/tests/execution/test_tracing.py
+++ b/tests/execution/test_tracing.py
@@ -23,7 +23,7 @@ def test_trace_function():
tracked_filepath = "/path/to/tracked/file.py"
frame = generate_mocked_frame("<ipython-abc123>", "foo", 3)
- observed, cir = create_collector(addl_filenames=[tracked_filepath])
+ (observed, calls), cir = create_collector(addl_filenames=[tracked_filepath])
arr = np.random.uniform(-100, 100, size=(100, 100))
cir(frame, "return", arr)
@@ -42,6 +42,12 @@ def test_trace_function():
assert len(observed) == 1
+ # test call event
+ assert len(calls) == 0
+ cir(frame, "call", None)
+ assert len(calls) == 1
+ assert calls[0] == ("<ipython-abc123>", "foo")
+
frame = generate_mocked_frame(
"<ipython-abc123>", "foo", 3, {"data": arr}
)
@@ -54,7 +60,7 @@ def test_trace_function():
cir(frame, "line", None)
assert len(observed) == 2
assert np.allclose(observed[1][0], arr.T)
- assert observed[1][1] == 4
+ assert observed[1][1] == 5
# check failed eval call for attributes
mocked_linecache.return_value = "data.doesnt_exist"
@@ -68,7 +74,7 @@ def test_trace_function():
cir(frame, "line", None)
assert len(observed) == 3
assert np.allclose(observed[2][0], frame.f_locals["more_data"])
- assert observed[2][1] == 6
+ assert observed[2][1] == 7
# check that we track assignment statements on function return
mocked_linecache.return_value = "even_more_data = more_data ** 2"
@@ -85,11 +91,11 @@ def test_trace_function():
cir(frame, "return", None)
assert len(observed) == 6
assert observed[3][0] is None
- assert observed[3][1] == 9
+ assert observed[3][1] == 10
assert np.allclose(observed[4][0], frame.f_locals["more_data"] ** 2)
- assert observed[4][1] == 7
+ assert observed[4][1] == 8
assert np.allclose(observed[5][0], frame.f_locals["more_data"] ** 3)
- assert observed[5][1] == 8
+ assert observed[5][1] == 9
# check that skip_types respected
frame.f_locals["none_type"] = type(None)
@@ -106,7 +112,7 @@ def test_trace_function():
cir(frame, "return", None) # run a return since arr shows up in vars_not_found
assert len(observed) == 7
assert np.allclose(observed[6][0], -1 * arr)
- assert observed[6][1] == 12
+ assert observed[6][1] == 13
# check that IPython child frame return values are tracked
frame = generate_mocked_frame("/path/to/file.py", "bar", 100, f_back=frame)
@@ -116,7 +122,14 @@ def test_trace_function():
cir(frame, "return", np.exp(arr))
assert len(observed) == 8
assert np.allclose(observed[7][0], np.exp(arr))
- assert observed[7][1] == 12
+ assert observed[7][1] == 13
+
+ assert len(calls) == 1
+ frame = generate_mocked_frame("/path/to/foo.py", "bar", 100)
+ cir(frame, "call", None)
+ assert len(calls) == 2
+ assert calls[0] == ("<ipython-abc123>", "foo")
+ assert calls[1] == ("/path/to/foo.py", "bar")
def test_tracing_control():
diff --git a/tests/test_student.py b/tests/test_student.py
index c94cd0f..9582f33 100644
--- a/tests/test_student.py
+++ b/tests/test_student.py
@@ -63,9 +63,14 @@ def test_constructor():
assert stu.nb is nb
assert stu.steps == max(t[1] for t in stu.values)
assert len(stu.values) == 993
+ assert isinstance(stu.calls, list)
+ assert all(isinstance(c, tuple) for c in stu.calls)
+ assert all(len(c) == 2 for c in stu.calls)
+ assert all(isinstance(c[0], str) for c in stu.calls)
+ assert all(isinstance(c[1], str) for c in stu.calls)
with mock.patch("pybryt.student.execute_notebook") as mocked_exec:
- mocked_exec.return_value = (0, [], None)
+ mocked_exec.return_value = (0, [], [], None)
with tempfile.NamedTemporaryFile(mode="w+", suffix=".ipynb") as ntf:
nbformat.write(nb, ntf.name)
@@ -73,6 +78,7 @@ def test_constructor():
stu = StudentImplementation(ntf.name)
assert stu.steps == 0
assert stu.values == []
+ assert stu.calls == []
assert stu.nb == nb
with pytest.raises(TypeError, match="path_or_nb is of unsupported type <class 'int'>"):
@@ -257,7 +263,7 @@ def test_generate_student_impls():
nbs = [nb] * num_notebooks
with mock.patch("pybryt.student.execute_notebook") as mocked_execute:
- mocked_execute.return_value = (stu.steps, stu.values, None)
+ mocked_execute.return_value = (stu.steps, stu.values, stu.calls, None)
stus = generate_student_impls(nbs)
assert all(s == stu for s in stus)
|
Annotation for requiring/forbidding the use of specific functions
Instructors should be able to write annotations for asserting the use or non-use of specific functions.
This change is somewhat complicated, as it will involve editing the trace function to track information that it is not currently tracking (namely, which function is being traced and to what package it belongs).
|
0.0
|
87c4deff9ab0f0a021b7d49749321b48025ff3ac
|
[
"tests/execution/test_complexity.py::test_time_complexity",
"tests/execution/test_tracing.py::test_trace_function"
] |
[
"tests/execution/test_tracing.py::test_tracing_control",
"tests/execution/test_tracing.py::test_tracing_context_manager",
"tests/test_student.py::test_from_cache"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-06-22 20:39:28+00:00
|
mit
| 3,911 |
|
microsoft__pybryt-98
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e620d0f..bcf4d22 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,11 @@
All notable changes to this project will be documented in this file, and this project adheres to
[Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+## Unreleased
+
+* Added customizable timeout to notebook execution
+* Changed the notebook execution template to use `pybryt.tracing_on` and `pybryt.tracing_off`
+
## 0.1.7 - 2021-06-28
* Fixed `pybryt execute` output per [#94](https://github.com/microsoft/pybryt/issues/94)
diff --git a/docs/student_implementations.rst b/docs/student_implementations.rst
index 4352852..c1a8096 100644
--- a/docs/student_implementations.rst
+++ b/docs/student_implementations.rst
@@ -34,6 +34,17 @@ written in a Python script (which PyBryt would by default not trace).
stu = pybryt.StudentImplementation("harness.ipynb", addl_filenames=["subm.py"])
+To prevent notebooks from getting stuck in a loop or from taking up too many resources, PyBryt
+automatically sets a timeout of 1200 seconds for each notebook to execute. This cap can be changed
+using the `timeout` argument to the constructor, and can be removed by setting that value to ``None``:
+
+.. code-block:: python
+
+ stu = pybryt.StudentImplementation("subm.ipynb", timeout=2000)
+
+ # no timeout
+ stu = pybryt.StudentImplementation("subm.ipynb", timeout=None)
+
PyBryt also employs various custom notebook preprocessors for handling special cases that occur in
the code to allow different types of values to be checked. To see the exact version of the code that
PyBryt executes, set ``output`` to a path to a notebook that PyBryt will write with the executed
diff --git a/pybryt/cli.py b/pybryt/cli.py
index 88981fe..2920198 100644
--- a/pybryt/cli.py
+++ b/pybryt/cli.py
@@ -101,8 +101,10 @@ def check(ref, stu, name, output_nb, output, output_type):
help="Execute notebooks in parallel using the multiprocessing library")
@click.option("-o", "--output", default=None, type=click.Path(),
help="Path at which to write the pickled student implementation")
[email protected]("--timeout", default=1200, type=click.INT,
+ help="Timeout for notebook execution in seconds")
@click.argument("subm", nargs=-1, type=click.Path(exists=True, dir_okay=False))
-def execute(subm, parallel, output):
+def execute(subm, parallel, output, timeout):
"""
Execute student submissions to generate memory footprints.
@@ -113,7 +115,7 @@ def execute(subm, parallel, output):
if len(subm) == 0:
raise ValueError("You must specify at least one notebook to execute")
- stus = generate_student_impls(subm, parallel=parallel)
+ stus = generate_student_impls(subm, parallel=parallel, timeout=timeout)
if output is None:
output = "./"
diff --git a/pybryt/execution/__init__.py b/pybryt/execution/__init__.py
index 4ab61aa..1dbb26d 100644
--- a/pybryt/execution/__init__.py
+++ b/pybryt/execution/__init__.py
@@ -24,8 +24,8 @@ NBFORMAT_VERSION = 4
def execute_notebook(nb: nbformat.NotebookNode, nb_path: str, addl_filenames: List[str] = [],
- output: Optional[str] = None) -> Tuple[int, List[Tuple[Any, int]], List[Tuple[str, str]], \
- nbformat.NotebookNode]:
+ output: Optional[str] = None, timeout: Optional[int] = 1200) -> Tuple[int, List[Tuple[Any, \
+ int]], List[Tuple[str, str]], nbformat.NotebookNode]:
"""
Executes a submission using ``nbconvert`` and returns the memory footprint.
@@ -40,6 +40,8 @@ def execute_notebook(nb: nbformat.NotebookNode, nb_path: str, addl_filenames: Li
nb_path (``str``): path to the notebook ``nb``
addl_filenames (``list[str]``, optional): a list of additional files to trace inside
output (``str``, optional): a file path at which to write the executed notebook
+ timeout (``int``, optional): number of seconds to allow for notebook execution; set to
+ ``None`` for no time limit
Returns:
``tuple[int, list[tuple[object, int]], list[tuple[str, str]], nbformat.NotebookNode]``: the
@@ -56,15 +58,16 @@ def execute_notebook(nb: nbformat.NotebookNode, nb_path: str, addl_filenames: Li
first_cell = nbformat.v4.new_code_cell(dedent(f"""\
import sys
- from pybryt.execution import create_collector
+ from pybryt.execution import create_collector, tracing_on
cir_results_{secret}, cir = create_collector(addl_filenames={addl_filenames})
- sys.settrace(cir)
{TRACING_VARNAME} = True
+ tracing_on(tracing_func=cir)
%cd {nb_dir}
"""))
last_cell = nbformat.v4.new_code_cell(dedent(f"""\
- sys.settrace(None)
+ from pybryt.execution import tracing_off
+ tracing_off()
import dill
from pybryt.utils import filter_picklable_list
filter_picklable_list(cir_results_{secret}[0])
@@ -75,7 +78,7 @@ def execute_notebook(nb: nbformat.NotebookNode, nb_path: str, addl_filenames: Li
nb['cells'].insert(0, first_cell)
nb['cells'].append(last_cell)
- ep = ExecutePreprocessor(timeout=1200, allow_errors=True)
+ ep = ExecutePreprocessor(timeout=timeout, allow_errors=True)
ep.preprocess(nb)
diff --git a/pybryt/student.py b/pybryt/student.py
index 456a839..4c5748d 100644
--- a/pybryt/student.py
+++ b/pybryt/student.py
@@ -38,6 +38,8 @@ class StudentImplementation(Serializable):
addl_filenames (``list[str]``, optional): additional filenames to trace inside during
execution
output (``str``, optional): a path at which to write executed notebook
+ timeout (``int``, optional): number of seconds to allow for notebook execution; set to
+ ``None`` for no time limit
"""
nb: Optional[nbformat.NotebookNode]
@@ -60,7 +62,7 @@ class StudentImplementation(Serializable):
def __init__(
self, path_or_nb: Optional[Union[str, nbformat.NotebookNode]], addl_filenames: List[str] = [],
- output: Optional[str] = None
+ output: Optional[str] = None, timeout: Optional[int] = 1200,
):
self.executed_nb = None
if path_or_nb is None:
@@ -76,19 +78,21 @@ class StudentImplementation(Serializable):
else:
raise TypeError(f"path_or_nb is of unsupported type {type(path_or_nb)}")
- self._execute(addl_filenames=addl_filenames, output=output)
+ self._execute(timeout, addl_filenames=addl_filenames, output=output)
- def _execute(self, addl_filenames: List[str] = [], output: Optional[str] = None) -> NoReturn:
+ def _execute(self, timeout: Optional[int], addl_filenames: List[str] = [], output: Optional[str] = None) -> NoReturn:
"""
Executes the notebook ``self.nb``.
Args:
+ timeout (``int``): number of seconds to allow for notebook execution; set to
+ ``None`` for no time limit
addl_filenames (``list[str]``, optional): additional filenames to trace inside during
execution
output (``str``, optional): a path at which to write executed notebook
"""
self.steps, self.values, self.calls, self.executed_nb = execute_notebook(
- self.nb, self.nb_path, addl_filenames=addl_filenames, output=output
+ self.nb, self.nb_path, addl_filenames=addl_filenames, output=output, timeout=timeout,
)
if self.errors:
|
microsoft/pybryt
|
de78a8d99925aed6b3e98f1d82780dc6d6de835c
|
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 21a4450..69528d8 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -128,15 +128,19 @@ def test_execute():
result = runner.invoke(click_cli, ["execute", *fns])
assert result.exit_code == 0
- mocked_generate.assert_called_with(fns, parallel=False)
+ mocked_generate.assert_called_with(fns, parallel=False, timeout=1200)
result = runner.invoke(click_cli, ["execute", fns[0]])
assert result.exit_code == 0
- mocked_generate.assert_called_with((fns[0], ), parallel=False)
+ mocked_generate.assert_called_with((fns[0], ), parallel=False, timeout=1200)
result = runner.invoke(click_cli, ["execute", "-p", *fns])
assert result.exit_code == 0
- mocked_generate.assert_called_with(fns, parallel=True)
+ mocked_generate.assert_called_with(fns, parallel=True, timeout=1200)
+
+ result = runner.invoke(click_cli, ["execute", *fns, "--timeout", "100"])
+ assert result.exit_code == 0
+ mocked_generate.assert_called_with(fns, parallel=False, timeout=100)
# check for error on nonexistance output dir
result = runner.invoke(click_cli, ["execute", *fns, "-o", "/some/fake/path"])
|
Allow timeouts for submission execution
Allow instructors to specify a timeout for executing submissions, programmatically and via `pybryt execute`.
|
0.0
|
de78a8d99925aed6b3e98f1d82780dc6d6de835c
|
[
"tests/test_cli.py::test_execute"
] |
[
"tests/test_cli.py::test_check",
"tests/test_cli.py::test_compile",
"tests/test_cli.py::test_cli_func"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-06-28 18:51:47+00:00
|
mit
| 3,912 |
|
microsoft__torchgeo-675
|
diff --git a/requirements/required.txt b/requirements/required.txt
index 8dadad11..49c7199c 100644
--- a/requirements/required.txt
+++ b/requirements/required.txt
@@ -1,10 +1,10 @@
# setup
-setuptools==63.1.0
+setuptools==63.2.0
# install
einops==0.4.1
fiona==1.9a2
-kornia==0.6.5
+kornia==0.6.6
matplotlib==3.5.2
numpy==1.23.1;python_version>='3.8'
omegaconf==2.2.2
diff --git a/torchgeo/datasets/geo.py b/torchgeo/datasets/geo.py
index c74833d8..07900368 100644
--- a/torchgeo/datasets/geo.py
+++ b/torchgeo/datasets/geo.py
@@ -611,12 +611,12 @@ class VectorDataset(GeoDataset):
)
if shapes:
masks = rasterio.features.rasterize(
- shapes, out_shape=(int(height), int(width)), transform=transform
+ shapes, out_shape=(round(height), round(width)), transform=transform
)
else:
# If no features are found in this query, return an empty mask
# with the default fill value and dtype used by rasterize
- masks = np.zeros((int(height), int(width)), dtype=np.uint8)
+ masks = np.zeros((round(height), round(width)), dtype=np.uint8)
sample = {"mask": torch.tensor(masks), "crs": self.crs, "bbox": query}
|
microsoft/torchgeo
|
b41b6d68f99edfbe4d82b31f6f9b16cbd33f485e
|
diff --git a/tests/datasets/test_geo.py b/tests/datasets/test_geo.py
index 5742134d..5abb7766 100644
--- a/tests/datasets/test_geo.py
+++ b/tests/datasets/test_geo.py
@@ -223,7 +223,7 @@ class TestVectorDataset:
def test_empty_shapes(self, dataset: CustomVectorDataset) -> None:
query = BoundingBox(1.1, 1.9, 1.1, 1.9, 0, 0)
x = dataset[query]
- assert torch.equal(x["mask"], torch.zeros(7, 7, dtype=torch.uint8))
+ assert torch.equal(x["mask"], torch.zeros(8, 8, dtype=torch.uint8))
def test_invalid_query(self, dataset: CustomVectorDataset) -> None:
query = BoundingBox(3, 3, 3, 3, 0, 0)
|
VectorDataset incorrect sample shape
### Description
Querying a sample from a VectorDataset object returns (sometimes?) a mask with incorrect shape.
This is caused by not rounding the calculated width and height pixel values to the nearest integer, and just taking the `int(width)` and `int(height)` values instead for `masks = rasterio.features.rasterize` in [torchgeo.datasets.geo.py:614](https://github.com/microsoft/torchgeo/blob/b41b6d68f99edfbe4d82b31f6f9b16cbd33f485e/torchgeo/datasets/geo.py#L614).
### Steps to reproduce
Run the following code to reproduce the error:
```
from torchgeo.datasets import CanadianBuildingFootprints
from torchgeo.samplers import RandomGeoSampler
cbf_dataset = CanadianBuildingFootprints(download=True) # or any other VectorDataset-based dataset
size = 512
sampler = RandomGeoSampler(cbf_dataset, size=size, length=100) # or any other sampler
query = next(iter(sampler))
sample = cbf_dataset[query]
print(sample["mask"].shape)
assert sample["mask"].shape[0] == size, sample["mask"].shape
assert sample["mask"].shape[1] == size, sample["mask"].shape
```
### Version
0.3.0
|
0.0
|
b41b6d68f99edfbe4d82b31f6f9b16cbd33f485e
|
[
"tests/datasets/test_geo.py::TestVectorDataset::test_empty_shapes"
] |
[
"tests/datasets/test_geo.py::TestGeoDataset::test_getitem",
"tests/datasets/test_geo.py::TestGeoDataset::test_len",
"tests/datasets/test_geo.py::TestGeoDataset::test_crs[crs0]",
"tests/datasets/test_geo.py::TestGeoDataset::test_crs[crs1]",
"tests/datasets/test_geo.py::TestGeoDataset::test_and_two",
"tests/datasets/test_geo.py::TestGeoDataset::test_and_three",
"tests/datasets/test_geo.py::TestGeoDataset::test_and_four",
"tests/datasets/test_geo.py::TestGeoDataset::test_or_two",
"tests/datasets/test_geo.py::TestGeoDataset::test_or_three",
"tests/datasets/test_geo.py::TestGeoDataset::test_or_four",
"tests/datasets/test_geo.py::TestGeoDataset::test_str",
"tests/datasets/test_geo.py::TestGeoDataset::test_picklable",
"tests/datasets/test_geo.py::TestGeoDataset::test_abstract",
"tests/datasets/test_geo.py::TestGeoDataset::test_and_nongeo",
"tests/datasets/test_geo.py::TestRasterDataset::test_getitem_single_file[True]",
"tests/datasets/test_geo.py::TestRasterDataset::test_getitem_single_file[False]",
"tests/datasets/test_geo.py::TestRasterDataset::test_getitem_separate_files[True]",
"tests/datasets/test_geo.py::TestRasterDataset::test_getitem_separate_files[False]",
"tests/datasets/test_geo.py::TestRasterDataset::test_getitem_uint_dtype",
"tests/datasets/test_geo.py::TestRasterDataset::test_invalid_query[True]",
"tests/datasets/test_geo.py::TestRasterDataset::test_invalid_query[False]",
"tests/datasets/test_geo.py::TestRasterDataset::test_no_data",
"tests/datasets/test_geo.py::TestVectorDataset::test_getitem",
"tests/datasets/test_geo.py::TestVectorDataset::test_invalid_query",
"tests/datasets/test_geo.py::TestVectorDataset::test_no_data",
"tests/datasets/test_geo.py::TestNonGeoDataset::test_getitem",
"tests/datasets/test_geo.py::TestNonGeoDataset::test_len",
"tests/datasets/test_geo.py::TestNonGeoDataset::test_add_two",
"tests/datasets/test_geo.py::TestNonGeoDataset::test_add_three",
"tests/datasets/test_geo.py::TestNonGeoDataset::test_add_four",
"tests/datasets/test_geo.py::TestNonGeoDataset::test_str",
"tests/datasets/test_geo.py::TestNonGeoDataset::test_abstract",
"tests/datasets/test_geo.py::TestVisionDataset::test_deprecation",
"tests/datasets/test_geo.py::TestNonGeoClassificationDataset::test_getitem",
"tests/datasets/test_geo.py::TestNonGeoClassificationDataset::test_len",
"tests/datasets/test_geo.py::TestNonGeoClassificationDataset::test_add_two",
"tests/datasets/test_geo.py::TestNonGeoClassificationDataset::test_add_three",
"tests/datasets/test_geo.py::TestNonGeoClassificationDataset::test_add_four",
"tests/datasets/test_geo.py::TestNonGeoClassificationDataset::test_str",
"tests/datasets/test_geo.py::TestVisionClassificationDataset::test_deprecation",
"tests/datasets/test_geo.py::TestIntersectionDataset::test_getitem",
"tests/datasets/test_geo.py::TestIntersectionDataset::test_len",
"tests/datasets/test_geo.py::TestIntersectionDataset::test_str",
"tests/datasets/test_geo.py::TestIntersectionDataset::test_nongeo_dataset",
"tests/datasets/test_geo.py::TestIntersectionDataset::test_different_crs",
"tests/datasets/test_geo.py::TestIntersectionDataset::test_different_res",
"tests/datasets/test_geo.py::TestIntersectionDataset::test_no_overlap",
"tests/datasets/test_geo.py::TestIntersectionDataset::test_invalid_query",
"tests/datasets/test_geo.py::TestUnionDataset::test_getitem",
"tests/datasets/test_geo.py::TestUnionDataset::test_len",
"tests/datasets/test_geo.py::TestUnionDataset::test_str",
"tests/datasets/test_geo.py::TestUnionDataset::test_nongeo_dataset",
"tests/datasets/test_geo.py::TestUnionDataset::test_different_crs",
"tests/datasets/test_geo.py::TestUnionDataset::test_different_res",
"tests/datasets/test_geo.py::TestUnionDataset::test_no_overlap",
"tests/datasets/test_geo.py::TestUnionDataset::test_invalid_query"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-07-14 11:50:33+00:00
|
mit
| 3,913 |
|
mido__mido-474
|
diff --git a/mido/messages/messages.py b/mido/messages/messages.py
index ff82681..9f760a4 100644
--- a/mido/messages/messages.py
+++ b/mido/messages/messages.py
@@ -111,19 +111,25 @@ class SysexData(tuple):
class Message(BaseMessage):
- def __init__(self, type, **args):
+ def __init__(self, type, skip_checks=False, **args):
msgdict = make_msgdict(type, args)
if type == 'sysex':
msgdict['data'] = SysexData(msgdict['data'])
- check_msgdict(msgdict)
+
+ if not skip_checks:
+ check_msgdict(msgdict)
+
vars(self).update(msgdict)
- def copy(self, **overrides):
+ def copy(self, skip_checks=False, **overrides):
"""Return a copy of the message.
Attributes will be overridden by the passed keyword arguments.
Only message specific attributes can be overridden. The message
type can not be changed.
+
+ The skip_checks arg can be used to bypass validation of message
+ attributes and should be used cautiously.
"""
if not overrides:
# Bypass all checks.
@@ -139,8 +145,11 @@ class Message(BaseMessage):
msgdict = vars(self).copy()
msgdict.update(overrides)
- check_msgdict(msgdict)
- return self.__class__(**msgdict)
+
+ if not skip_checks:
+ check_msgdict(msgdict)
+
+ return self.__class__(skip_checks=skip_checks, **msgdict)
@classmethod
def from_bytes(cl, data, time=0):
diff --git a/mido/midifiles/midifiles.py b/mido/midifiles/midifiles.py
index 3ee2847..cd3be20 100644
--- a/mido/midifiles/midifiles.py
+++ b/mido/midifiles/midifiles.py
@@ -327,7 +327,7 @@ class MidiFile:
raise TypeError("can't merge tracks in type 2 (asynchronous) file")
if self._merged_track is None:
- self._merged_track = merge_tracks(self.tracks)
+ self._merged_track = merge_tracks(self.tracks, skip_checks=True)
return self._merged_track
@merged_track.deleter
@@ -396,7 +396,7 @@ class MidiFile:
else:
delta = 0
- yield msg.copy(time=delta)
+ yield msg.copy(skip_checks=True, time=delta)
if msg.type == 'set_tempo':
tempo = msg.tempo
diff --git a/mido/midifiles/tracks.py b/mido/midifiles/tracks.py
index 17caccc..871f9bd 100644
--- a/mido/midifiles/tracks.py
+++ b/mido/midifiles/tracks.py
@@ -64,24 +64,24 @@ class MidiTrack(list):
return f'{self.__class__.__name__}({messages})'
-def _to_abstime(messages):
+def _to_abstime(messages, skip_checks=False):
"""Convert messages to absolute time."""
now = 0
for msg in messages:
now += msg.time
- yield msg.copy(time=now)
+ yield msg.copy(skip_checks=skip_checks, time=now)
-def _to_reltime(messages):
+def _to_reltime(messages, skip_checks=False):
"""Convert messages to relative time."""
now = 0
for msg in messages:
delta = msg.time - now
- yield msg.copy(time=delta)
+ yield msg.copy(skip_checks=skip_checks, time=delta)
now = msg.time
-def fix_end_of_track(messages):
+def fix_end_of_track(messages, skip_checks=False):
"""Remove all end_of_track messages and add one at the end.
This is used by merge_tracks() and MidiFile.save()."""
@@ -95,7 +95,7 @@ def fix_end_of_track(messages):
else:
if accum:
delta = accum + msg.time
- yield msg.copy(time=delta)
+ yield msg.copy(skip_checks=skip_checks, time=delta)
accum = 0
else:
yield msg
@@ -103,16 +103,25 @@ def fix_end_of_track(messages):
yield MetaMessage('end_of_track', time=accum)
-def merge_tracks(tracks):
+def merge_tracks(tracks, skip_checks=False):
"""Returns a MidiTrack object with all messages from all tracks.
The messages are returned in playback order with delta times
as if they were all in one track.
+
+ Pass skip_checks=True to skip validation of messages before merging.
+ This should ONLY be used when the messages in tracks have already
+ been validated by mido.checks.
"""
messages = []
for track in tracks:
- messages.extend(_to_abstime(track))
+ messages.extend(_to_abstime(track, skip_checks=skip_checks))
messages.sort(key=lambda msg: msg.time)
- return MidiTrack(fix_end_of_track(_to_reltime(messages)))
+ return MidiTrack(
+ fix_end_of_track(
+ _to_reltime(messages, skip_checks=skip_checks),
+ skip_checks=skip_checks,
+ )
+ )
|
mido/mido
|
4de3e3cb12580ee304d849c1a0f4630035f7b4a0
|
diff --git a/tests/midifiles/test_tracks.py b/tests/midifiles/test_tracks.py
index b28e88b..f48f2ed 100644
--- a/tests/midifiles/test_tracks.py
+++ b/tests/midifiles/test_tracks.py
@@ -3,7 +3,9 @@
# SPDX-License-Identifier: MIT
import itertools
+import time
+import mido
from mido.messages import Message
from mido.midifiles.meta import MetaMessage
from mido.midifiles.tracks import MidiTrack
@@ -35,3 +37,23 @@ def test_track_repr():
track_eval = eval(repr(track))
for m1, m2 in zip(track, track_eval):
assert m1 == m2
+
+
+def test_merge_large_midifile():
+ mid = mido.MidiFile()
+ for k in range(5):
+ t = mido.MidiTrack()
+ for _ in range(10000):
+ t.append(mido.Message("note_on", note=72, time=1000 + 100 * k))
+ t.append(mido.Message("note_off", note=72, time=500 + 100 * k))
+ mid.tracks.append(t)
+
+ start = time.time()
+ merged = list(mido.merge_tracks(mid.tracks, skip_checks=True))
+ finish = time.time()
+
+ merged_duration_ticks = sum(msg.time for msg in merged)
+ max_track_duration_ticks = max(
+ sum(msg.time for msg in t) for t in mid.tracks)
+ assert merged_duration_ticks == max_track_duration_ticks
+ assert (finish - start) < 2.0
|
merge_tracks is slow
`mido.merge_tracks` is slow. This is because we are doing a lot of data checks when we copy messages. Since the messages that are being flattened into a single track have already been checked, I think these additional checks are unnecessary (and inefficient).
```python
import mido
import cProfile, pstats, io, time
from pstats import SortKey
mf = mido.MidiFile()
for k in range(5):
t = mido.MidiTrack()
for _ in range(10000):
msg = mido.Message("note_on", note=72, time=1000 + 100 * k)
msg = mido.Message("note_off", note=72, time=500 + 100 * k)
t.append(msg)
mf.tracks.append(t)
with cProfile.Profile() as pr:
now = time.time()
i = 0
for _ in mido.merge_tracks(mf.tracks):
i += 1
print(i)
print(time.time() - now)
s = io.StringIO()
sortby = SortKey.TIME
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print(s.getvalue())
```
Profiled on the current implementation:
```
(mido) ➜ mido git:(faster-merge-tracks) ✗ python -O examples/benchmark.py
50001
0.9691178798675537
7300036 function calls in 0.965 seconds
Ordered by: internal time
ncalls tottime percall cumtime percall filename:lineno(function)
200000 0.146 0.000 0.735 0.000 /Users/intern/workspace/forks/mido/mido/messages/checks.py:88(check_msgdict)
1000000 0.125 0.000 0.572 0.000 /Users/intern/workspace/forks/mido/mido/messages/checks.py:84(check_value)
800001 0.089 0.000 0.215 0.000 {built-in method _abc._abc_instancecheck}
800002 0.083 0.000 0.354 0.000 {built-in method builtins.isinstance}
600000 0.081 0.000 0.081 0.000 {built-in method _abc._abc_subclasscheck}
100000 0.073 0.000 0.923 0.000 /Users/intern/workspace/forks/mido/mido/messages/messages.py:117(copy)
800001 0.057 0.000 0.272 0.000 /Users/intern/anaconda3/envs/mido/lib/python3.9/abc.py:117(__instancecheck__)
600000 0.044 0.000 0.125 0.000 /Users/intern/anaconda3/envs/mido/lib/python3.9/abc.py:121(__subclasscheck__)
400000 0.040 0.000 0.224 0.000 /Users/intern/workspace/forks/mido/mido/messages/checks.py:53(check_data_byte)
100000 0.040 0.000 0.478 0.000 /Users/intern/workspace/forks/mido/mido/messages/messages.py:110(__init__)
100000 0.032 0.000 0.041 0.000 /Users/intern/workspace/forks/mido/mido/messages/specs.py:110(make_msgdict)
300000 0.025 0.000 0.025 0.000 {method 'update' of 'dict' objects}
200000 0.023 0.000 0.145 0.000 /Users/intern/workspace/forks/mido/mido/messages/checks.py:11(check_channel)
200001 0.018 0.000 0.066 0.000 /Users/intern/workspace/forks/mido/mido/messages/checks.py:60(check_time)
50001 0.014 0.000 0.479 0.000 /Users/intern/workspace/forks/mido/mido/midifiles/tracks.py:71(_to_reltime)
200002 0.013 0.000 0.013 0.000 {built-in method builtins.vars}
200000 0.012 0.000 0.012 0.000 /Users/intern/workspace/forks/mido/mido/messages/checks.py:6(check_type)
50005 0.012 0.000 0.470 0.000 /Users/intern/workspace/forks/mido/mido/midifiles/tracks.py:63(_to_abstime)
200001 0.010 0.000 0.010 0.000 {method 'items' of 'dict' objects}
200000 0.008 0.000 0.008 0.000 {method 'get' of 'dict' objects}
50002 0.007 0.000 0.486 0.000 /Users/intern/workspace/forks/mido/mido/midifiles/tracks.py:80(fix_end_of_track)
...
```
If we change the `copy` calls in `_to_abstime`, `_to_reltime`, and `fix_end_of_track` to exclude the `overrides` (and thus bypass all checks), the speedup is around 5x.
```
(mido) ➜ mido git:(faster-merge-tracks) ✗ python -O examples/benchmark.py
50001
0.17537927627563477
1500036 function calls in 0.171 seconds
Ordered by: internal time
ncalls tottime percall cumtime percall filename:lineno(function)
100000 0.040 0.000 0.063 0.000 /Users/intern/workspace/forks/mido/mido/messages/messages.py:117(copy)
100000 0.023 0.000 0.072 0.000 /Users/intern/workspace/forks/mido/mido/messages/messages.py:191(_setattr)
400002 0.018 0.000 0.018 0.000 {built-in method builtins.vars}
50001 0.012 0.000 0.081 0.000 /Users/intern/workspace/forks/mido/mido/midifiles/tracks.py:75(_to_reltime)
100002 0.011 0.000 0.024 0.000 {built-in method builtins.isinstance}
50005 0.011 0.000 0.077 0.000 /Users/intern/workspace/forks/mido/mido/midifiles/tracks.py:65(_to_abstime)
100000 0.010 0.000 0.041 0.000 /Users/intern/workspace/forks/mido/mido/messages/checks.py:84(check_value)
100000 0.008 0.000 0.008 0.000 {method 'update' of 'dict' objects}
100001 0.007 0.000 0.031 0.000 /Users/intern/workspace/forks/mido/mido/messages/checks.py:60(check_time)
100001 0.007 0.000 0.012 0.000 /Users/intern/anaconda3/envs/mido/lib/python3.9/abc.py:117(__instancecheck__)
100001 0.006 0.000 0.006 0.000 {built-in method _abc._abc_instancecheck}
100000 0.006 0.000 0.006 0.000 {built-in method __new__ of type object at 0x1027ab388}
50002 0.005 0.000 0.086 0.000 /Users/intern/workspace/forks/mido/mido/midifiles/tracks.py:86(fix_end_of_track)
```
The three isolated changes in `tracks.py` each look like
```diff
class MidiTrack(list):
@property
@@ -65,7 +67,9 @@ def _to_abstime(messages):
now = 0
for msg in messages:
now += msg.time
- yield msg.copy(time=now)
+ new_msg = msg.copy()
+ new_msg.time = now
+ yield new_msg
```
I think the project prefers to treat `Message` objects as immutable ... So if this is a problem, maybe it would be better for an `_unsafe_copy` method or `skip_checks` boolean argument for `copy`.
|
0.0
|
4de3e3cb12580ee304d849c1a0f4630035f7b4a0
|
[
"tests/midifiles/test_tracks.py::test_merge_large_midifile"
] |
[
"tests/midifiles/test_tracks.py::test_track_slice",
"tests/midifiles/test_tracks.py::test_track_name",
"tests/midifiles/test_tracks.py::test_track_repr"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-04-05 08:28:08+00:00
|
mit
| 3,914 |
|
mie-lab__trackintel-217
|
diff --git a/trackintel/preprocessing/positionfixes.py b/trackintel/preprocessing/positionfixes.py
index 650c5a3..46afbf9 100644
--- a/trackintel/preprocessing/positionfixes.py
+++ b/trackintel/preprocessing/positionfixes.py
@@ -85,6 +85,10 @@ def generate_staypoints(
# copy the original pfs for adding 'staypoint_id' column
pfs = pfs_input.copy()
+ # if the positionfixes already have a column "staypoint_id", we drop it
+ if "staypoint_id" in pfs:
+ pfs.drop(columns="staypoint_id", inplace=True)
+
elevation_flag = "elevation" in pfs.columns # if there is elevation data
geo_col = pfs.geometry.name
@@ -214,6 +218,10 @@ def generate_triplegs(pfs_input, stps_input, method="between_staypoints", gap_th
# copy the original pfs for adding 'staypoint_id' column
pfs = pfs_input.copy()
+ # if the positionfixes already have a column "tripleg_id", we drop it
+ if "tripleg_id" in pfs:
+ pfs.drop(columns="tripleg_id", inplace=True)
+
if method == "between_staypoints":
# get case:
diff --git a/trackintel/preprocessing/triplegs.py b/trackintel/preprocessing/triplegs.py
index 066c95b..f599a38 100644
--- a/trackintel/preprocessing/triplegs.py
+++ b/trackintel/preprocessing/triplegs.py
@@ -91,6 +91,15 @@ def generate_trips(stps_input, tpls_input, gap_threshold=15, print_progress=Fals
tpls = tpls_input.copy()
stps = stps_input.copy()
+ # if the triplegs already have a column "trip_id", we drop it
+ if "trip_id" in tpls:
+ tpls.drop(columns="trip_id", inplace=True)
+
+ # if the staypoints already have any of the columns "trip_id", "prev_trip_id", "next_trip_id", we drop them
+ for col in ["trip_id", "prev_trip_id", "next_trip_id"]:
+ if col in stps:
+ stps.drop(columns=col, inplace=True)
+
tpls["type"] = "tripleg"
stps["type"] = "staypoint"
|
mie-lab/trackintel
|
ddd0a9df13d5e4e0ffe962ccf261655e1c814aa4
|
diff --git a/tests/preprocessing/test_positionfixes.py b/tests/preprocessing/test_positionfixes.py
index b4664ca..e16b9a9 100644
--- a/tests/preprocessing/test_positionfixes.py
+++ b/tests/preprocessing/test_positionfixes.py
@@ -23,6 +23,20 @@ def geolife_pfs_stps_long():
class TestGenerate_staypoints:
"""Tests for generate_staypoints() method."""
+ def test_duplicate_columns(self):
+ """Test if running the function twice, the generated column does not yield exception in join statement"""
+
+ # we run generate_staypoints twice in order to check that the extra column(tripleg_id) does
+ # not cause any problems in the second run
+ pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join("tests", "data", "geolife"))
+ pfs_run_1, _ = pfs.as_positionfixes.generate_staypoints(
+ method="sliding", dist_threshold=100, time_threshold=5.0, include_last=True
+ )
+ pfs_run_2, _ = pfs_run_1.as_positionfixes.generate_staypoints(
+ method="sliding", dist_threshold=100, time_threshold=5.0, include_last=True
+ )
+ assert set(pfs_run_1.columns) == set(pfs_run_2.columns)
+
def test_sliding_min(self):
"""Test if using small thresholds, stp extraction yields each pfs."""
pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join("tests", "data", "geolife"))
@@ -123,6 +137,17 @@ class TestGenerate_staypoints:
class TestGenerate_triplegs:
"""Tests for generate_triplegs() method."""
+ def test_duplicate_columns(self, geolife_pfs_stps_long):
+ """Test if running the function twice, the generated column does not yield exception in join statement"""
+
+ # we run generate_triplegs twice in order to check that the extra column (tripleg_id) does
+ # not cause any problems in the second run
+ pfs, stps = geolife_pfs_stps_long
+
+ pfs_run_1, _ = pfs.as_positionfixes.generate_triplegs(stps, method="between_staypoints")
+ pfs_run_2, _ = pfs_run_1.as_positionfixes.generate_triplegs(stps, method="between_staypoints")
+ assert set(pfs_run_1.columns) == set(pfs_run_2.columns)
+
def test_user_without_stps(self, geolife_pfs_stps_long):
"""Check if it is safe to have users that have pfs but no stps."""
pfs, stps = geolife_pfs_stps_long
diff --git a/tests/preprocessing/test_triplegs.py b/tests/preprocessing/test_triplegs.py
index 76d793a..40e3388 100644
--- a/tests/preprocessing/test_triplegs.py
+++ b/tests/preprocessing/test_triplegs.py
@@ -29,6 +29,24 @@ class TestSmoothen_triplegs:
class TestGenerate_trips:
"""Tests for generate_trips() method."""
+ def test_duplicate_columns(self):
+ """Test if running the function twice, the generated column does not yield exception in join statement"""
+ # load pregenerated trips
+ trips_loaded = ti.read_trips_csv(os.path.join("tests", "data", "geolife_long", "trips.csv"), index_col="id")
+
+ # create trips from geolife (based on positionfixes)
+ pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join("tests", "data", "geolife_long"))
+ pfs, stps = pfs.as_positionfixes.generate_staypoints(method="sliding", dist_threshold=25, time_threshold=5)
+ stps = stps.as_staypoints.create_activity_flag(time_threshold=15)
+ pfs, tpls = pfs.as_positionfixes.generate_triplegs(stps)
+
+ # generate trips and a joint staypoint/triplegs dataframe
+ stps_run_1, tpls_run_1, _ = ti.preprocessing.triplegs.generate_trips(stps, tpls, gap_threshold=15)
+ stps_run_2, tpls_run_2, _ = ti.preprocessing.triplegs.generate_trips(stps_run_1, tpls_run_1, gap_threshold=15)
+
+ assert set(tpls_run_1.columns) == set(tpls_run_2.columns)
+ assert set(stps_run_1.columns) == set(stps_run_2.columns)
+
def test_generate_trips(self):
"""Test if we can generate the example trips based on example data."""
# load pregenerated trips
|
BUG: generate_staypoints and generate_trips
if the input pfs already has a column `staypoint_id`, the following error will occur on Line 144
`ValueError: columns overlap but no suffix specified: Index(['staypoint_id'], dtype='object')`
|
0.0
|
ddd0a9df13d5e4e0ffe962ccf261655e1c814aa4
|
[
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_duplicate_columns"
] |
[
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_sliding_min",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_sliding_max",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_missing_link",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_dtype_consistent",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_index_start",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_include_last",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_print_progress",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_temporal",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_duplicate_columns",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_user_without_stps",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_pfs_without_stps",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_stability",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_random_order",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_pfs_index",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_dtype_consistent",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_missing_link",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_index_start",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_invalid_inputs",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_temporal",
"tests/preprocessing/test_triplegs.py::TestSmoothen_triplegs::test_smoothen_triplegs"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-05-19 09:49:17+00:00
|
mit
| 3,915 |
|
mie-lab__trackintel-265
|
diff --git a/examples/trackintel_basic_tutorial.ipynb b/examples/trackintel_basic_tutorial.ipynb
index 03e9363..279dcb9 100644
--- a/examples/trackintel_basic_tutorial.ipynb
+++ b/examples/trackintel_basic_tutorial.ipynb
@@ -97,7 +97,7 @@
"metadata": {},
"outputs": [],
"source": [
- "pfs = ti.io.from_geopandas.read_positionfixes_gpd(gdf, tracked_at=\"time\", user_id=\"User\", geom=\"geometry\", tz='UTC')\n",
+ "pfs = ti.io.from_geopandas.read_positionfixes_gpd(gdf, tracked_at=\"time\", user_id=\"User\", geom_col=\"geometry\", tz='UTC')\n",
"# now you can safely call .as_positionfixes or use any trackintel functions\n",
"pfs.as_positionfixes.plot()"
]
diff --git a/trackintel/io/from_geopandas.py b/trackintel/io/from_geopandas.py
index d0b38b3..adb7636 100644
--- a/trackintel/io/from_geopandas.py
+++ b/trackintel/io/from_geopandas.py
@@ -3,7 +3,7 @@ import pandas as pd
from trackintel.io.file import _localize_timestamp
-def read_positionfixes_gpd(gdf, tracked_at="tracked_at", user_id="user_id", geom="geom", tz=None, mapper={}):
+def read_positionfixes_gpd(gdf, tracked_at="tracked_at", user_id="user_id", geom_col="geom", tz=None, mapper={}):
"""
Read positionfixes from GeoDataFrames.
@@ -20,7 +20,7 @@ def read_positionfixes_gpd(gdf, tracked_at="tracked_at", user_id="user_id", geom
user_id : str, default 'user_id'
name of the column storing the user_id.
- geom : str, default 'geom'
+ geom_col : str, default 'geom'
name of the column storing the geometry.
tz : str, optional
@@ -36,13 +36,13 @@ def read_positionfixes_gpd(gdf, tracked_at="tracked_at", user_id="user_id", geom
Examples
--------
- >>> trackintel.read_positionfixes_gpd(gdf, user_id='User', geom='geometry', tz='utc')
+ >>> trackintel.read_positionfixes_gpd(gdf, user_id='User', geom_col='geom', tz='utc')
"""
- columns = {tracked_at: "tracked_at", user_id: "user_id", geom: "geom"}
+ columns = {tracked_at: "tracked_at", user_id: "user_id"}
columns.update(mapper)
pfs = gdf.rename(columns=columns)
- pfs = pfs.set_geometry("geom")
+ pfs = pfs.set_geometry(geom_col)
# check and/or set timezone
for col in ["tracked_at"]:
@@ -55,7 +55,7 @@ def read_positionfixes_gpd(gdf, tracked_at="tracked_at", user_id="user_id", geom
def read_staypoints_gpd(
- gdf, started_at="started_at", finished_at="finished_at", user_id="user_id", geom="geom", tz=None, mapper={}
+ gdf, started_at="started_at", finished_at="finished_at", user_id="user_id", geom_col="geom", tz=None, mapper={}
):
"""
Read staypoints from GeoDataFrames.
@@ -76,7 +76,7 @@ def read_staypoints_gpd(
user_id : str, default 'user_id'
name of the column storing the user_id.
- geom : str, default 'geom'
+ geom_col : str, default 'geom'
name of the column storing the geometry.
tz : str, optional
@@ -94,11 +94,11 @@ def read_staypoints_gpd(
--------
>>> trackintel.read_staypoints_gpd(gdf, started_at='start_time', finished_at='end_time', tz='utc')
"""
- columns = {started_at: "started_at", finished_at: "finished_at", user_id: "user_id", geom: "geom"}
+ columns = {started_at: "started_at", finished_at: "finished_at", user_id: "user_id"}
columns.update(mapper)
stps = gdf.rename(columns=columns)
- stps = stps.set_geometry("geom")
+ stps = stps.set_geometry(geom_col)
# check and/or set timezone
for col in ["started_at", "finished_at"]:
@@ -111,7 +111,7 @@ def read_staypoints_gpd(
def read_triplegs_gpd(
- gdf, started_at="started_at", finished_at="finished_at", user_id="user_id", geom="geometry", tz=None, mapper={}
+ gdf, started_at="started_at", finished_at="finished_at", user_id="user_id", geom_col="geom", tz=None, mapper={}
):
"""
Read triplegs from GeoDataFrames.
@@ -132,7 +132,7 @@ def read_triplegs_gpd(
user_id : str, default 'user_id'
name of the column storing the user_id.
- geom : str, default 'geom'
+ geom_col : str, default 'geom'
name of the column storing the geometry.
tz : str, optional
@@ -148,13 +148,13 @@ def read_triplegs_gpd(
Examples
--------
- >>> trackintel.read_triplegs_gpd(gdf, user_id='User', geom='geometry', tz='utc')
+ >>> trackintel.read_triplegs_gpd(gdf, user_id='User', geom_col='geom', tz='utc')
"""
- columns = {started_at: "started_at", finished_at: "finished_at", user_id: "user_id", geom: "geom"}
+ columns = {started_at: "started_at", finished_at: "finished_at", user_id: "user_id"}
columns.update(mapper)
tpls = gdf.rename(columns=columns)
- tpls = tpls.set_geometry("geom")
+ tpls = tpls.set_geometry(geom_col)
# check and/or set timezone
for col in ["started_at", "finished_at"]:
|
mie-lab/trackintel
|
a877097c1fedb2ea6e500bd6b47a157bdccbb08b
|
diff --git a/tests/io/test_from_geopandas.py b/tests/io/test_from_geopandas.py
index 558ee1d..a6a96c3 100644
--- a/tests/io/test_from_geopandas.py
+++ b/tests/io/test_from_geopandas.py
@@ -13,10 +13,11 @@ class TestFromGeopandas:
"""Test if the results of reading from gpd and csv agrees."""
gdf = gpd.read_file(os.path.join("tests", "data", "positionfixes.geojson"))
gdf.set_index("id", inplace=True)
- pfs_from_gpd = ti.io.from_geopandas.read_positionfixes_gpd(gdf, user_id="User", geom="geometry", tz="utc")
+ pfs_from_gpd = ti.io.from_geopandas.read_positionfixes_gpd(gdf, user_id="User", geom_col="geometry", tz="utc")
pfs_file = os.path.join("tests", "data", "positionfixes.csv")
pfs_from_csv = ti.read_positionfixes_csv(pfs_file, sep=";", tz="utc", index_col="id")
+ pfs_from_csv = pfs_from_csv.rename(columns={"geom": "geometry"})
pd.testing.assert_frame_equal(pfs_from_gpd, pfs_from_csv, check_exact=False)
@@ -24,10 +25,11 @@ class TestFromGeopandas:
"""Test if the results of reading from gpd and csv agrees."""
gdf = gpd.read_file(os.path.join("tests", "data", "triplegs.geojson"))
gdf.set_index("id", inplace=True)
- tpls_from_gpd = ti.io.from_geopandas.read_triplegs_gpd(gdf, user_id="User", geom="geometry", tz="utc")
+ tpls_from_gpd = ti.io.from_geopandas.read_triplegs_gpd(gdf, user_id="User", geom_col="geometry", tz="utc")
tpls_file = os.path.join("tests", "data", "triplegs.csv")
tpls_from_csv = ti.read_triplegs_csv(tpls_file, sep=";", tz="utc", index_col="id")
+ tpls_from_csv = tpls_from_csv.rename(columns={"geom": "geometry"})
pd.testing.assert_frame_equal(tpls_from_gpd, tpls_from_csv, check_exact=False)
@@ -36,11 +38,12 @@ class TestFromGeopandas:
gdf = gpd.read_file(os.path.join("tests", "data", "staypoints.geojson"))
gdf.set_index("id", inplace=True)
stps_from_gpd = ti.io.from_geopandas.read_staypoints_gpd(
- gdf, "start_time", "end_time", geom="geometry", tz="utc"
+ gdf, "start_time", "end_time", geom_col="geometry", tz="utc"
)
stps_file = os.path.join("tests", "data", "staypoints.csv")
stps_from_csv = ti.read_staypoints_csv(stps_file, sep=";", tz="utc", index_col="id")
+ stps_from_csv = stps_from_csv.rename(columns={"geom": "geometry"})
pd.testing.assert_frame_equal(stps_from_gpd, stps_from_csv, check_exact=False)
|
Allow for flexible geometry names in `read_*_gpd` functions.
For an old model of trackintel all geometry columns had to have the same name "geom". Later we dropped this prerequisite and now we access the name directly via the geometry attribute (see [contributing.md](https://github.com/mie-lab/trackintel/blob/master/CONTRIBUTING.md#adressing-geometry-columns)). This old model still persists in the `read_*_gpd` functions.
The proposed change would be to not rename the geometry column anymore and just add it directly.
Like:
```diff
- columns = {tracked_at: "tracked_at", user_id: "user_id", geom: "geom"}
+ columns = {tracked_at: "tracked_at", user_id: "user_id"}
columns.update(mapper)
pfs = gdf.rename(columns=columns)
- pfs = pfs.set_geometry("geom")
+ pfs = pfs.set_geometry(geom)
```
There are may some side effects that must be handled.
Additionally may rename argument to `geom_col` to be more consistent with the rest of the io module.
|
0.0
|
a877097c1fedb2ea6e500bd6b47a157bdccbb08b
|
[
"tests/io/test_from_geopandas.py::TestFromGeopandas::test_read_positionfixes_gpd",
"tests/io/test_from_geopandas.py::TestFromGeopandas::test_read_triplegs_gpd",
"tests/io/test_from_geopandas.py::TestFromGeopandas::test_read_staypoints_gpd"
] |
[
"tests/io/test_from_geopandas.py::TestFromGeopandas::test_read_locations_gpd",
"tests/io/test_from_geopandas.py::TestFromGeopandas::test_read_trips_gpd",
"tests/io/test_from_geopandas.py::TestFromGeopandas::test_read_tours_gpd"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-07-07 12:18:47+00:00
|
mit
| 3,916 |
|
mie-lab__trackintel-274
|
diff --git a/docs/modules/model.rst b/docs/modules/model.rst
index 9638bff..0b779b9 100644
--- a/docs/modules/model.rst
+++ b/docs/modules/model.rst
@@ -57,24 +57,45 @@ Available Accessors
The following accessors are available within *trackintel*.
+UsersAccessor
+-------------
+
.. autoclass:: trackintel.model.users.UsersAccessor
:members:
+PositionfixesAccessor
+---------------------
+
.. autoclass:: trackintel.model.positionfixes.PositionfixesAccessor
:members:
+StaypointsAccessor
+------------------
+
.. autoclass:: trackintel.model.staypoints.StaypointsAccessor
:members:
+TriplegsAccessor
+----------------
+
.. autoclass:: trackintel.model.triplegs.TriplegsAccessor
:members:
+LocationsAccessor
+-----------------
+
.. autoclass:: trackintel.model.locations.LocationsAccessor
:members:
+TripsAccessor
+-------------
+
.. autoclass:: trackintel.model.trips.TripsAccessor
:members:
+ToursAccessor
+-------------
+
.. autoclass:: trackintel.model.tours.ToursAccessor
:members:
@@ -87,7 +108,7 @@ Data Model (SQL)
For a general description of the data model, please refer to the
:doc:`/modules/model`. You can download the
-complete SQL script `here <https://github.com/mie-lab/trackintel/blob/master/sql/create_tables_pg.sql>`_
+complete SQL script `here <https://github.com/mie-lab/trackintel/blob/master/sql/create_tables_pg.sql>`__
in case you want to quickly set up a database. Also take a look at the `example on github
<https://github.com/mie-lab/trackintel/blob/master/examples/setup_example_database.py>`_.
diff --git a/trackintel/model/locations.py b/trackintel/model/locations.py
index a62969e..74daac8 100644
--- a/trackintel/model/locations.py
+++ b/trackintel/model/locations.py
@@ -1,9 +1,11 @@
import pandas as pd
-
import trackintel as ti
-import trackintel.preprocessing.filter
-import trackintel.visualization.locations
-import trackintel.visualization.staypoints
+import trackintel.io
+from trackintel.io.file import write_locations_csv
+from trackintel.io.postgis import write_locations_postgis
+from trackintel.model.util import copy_docstring
+from trackintel.preprocessing.filter import spatial_filter
+from trackintel.visualization.locations import plot_locations
@pd.api.extensions.register_dataframe_accessor("as_locations")
@@ -48,6 +50,7 @@ class LocationsAccessor(object):
# One for extend and one for the center
raise AttributeError("The center geometry must be a Point (only first checked).")
+ @copy_docstring(plot_locations)
def plot(self, *args, **kwargs):
"""
Plot this collection of locations.
@@ -56,6 +59,7 @@ class LocationsAccessor(object):
"""
ti.visualization.locations.plot_locations(self._obj, *args, **kwargs)
+ @copy_docstring(write_locations_csv)
def to_csv(self, filename, *args, **kwargs):
"""
Store this collection of locations as a CSV file.
@@ -64,6 +68,7 @@ class LocationsAccessor(object):
"""
ti.io.file.write_locations_csv(self._obj, filename, *args, **kwargs)
+ @copy_docstring(write_locations_postgis)
def to_postgis(
self, name, con, schema=None, if_exists="fail", index=True, index_label=None, chunksize=None, dtype=None
):
@@ -76,6 +81,7 @@ class LocationsAccessor(object):
self._obj, name, con, schema, if_exists, index, index_label, chunksize, dtype
)
+ @copy_docstring(spatial_filter)
def spatial_filter(self, *args, **kwargs):
"""
Filter locations with a geo extent.
diff --git a/trackintel/model/positionfixes.py b/trackintel/model/positionfixes.py
index eb1e098..db048c8 100644
--- a/trackintel/model/positionfixes.py
+++ b/trackintel/model/positionfixes.py
@@ -1,6 +1,11 @@
import pandas as pd
-
import trackintel as ti
+from trackintel.geogr.distances import calculate_distance_matrix
+from trackintel.io.file import write_positionfixes_csv
+from trackintel.io.postgis import write_positionfixes_postgis
+from trackintel.model.util import copy_docstring
+from trackintel.preprocessing.positionfixes import generate_staypoints, generate_triplegs
+from trackintel.visualization.positionfixes import plot_positionfixes
@pd.api.extensions.register_dataframe_accessor("as_positionfixes")
@@ -68,6 +73,7 @@ class PositionfixesAccessor(object):
lon = self._obj.geometry.x
return (float(lon.mean()), float(lat.mean()))
+ @copy_docstring(generate_staypoints)
def generate_staypoints(self, *args, **kwargs):
"""
Generate staypoints from this collection of positionfixes.
@@ -76,6 +82,7 @@ class PositionfixesAccessor(object):
"""
return ti.preprocessing.positionfixes.generate_staypoints(self._obj, *args, **kwargs)
+ @copy_docstring(generate_triplegs)
def generate_triplegs(self, stps_input=None, *args, **kwargs):
"""
Generate triplegs from this collection of positionfixes.
@@ -84,6 +91,7 @@ class PositionfixesAccessor(object):
"""
return ti.preprocessing.positionfixes.generate_triplegs(self._obj, stps_input, *args, **kwargs)
+ @copy_docstring(plot_positionfixes)
def plot(self, *args, **kwargs):
"""
Plot this collection of positionfixes.
@@ -92,6 +100,7 @@ class PositionfixesAccessor(object):
"""
ti.visualization.positionfixes.plot_positionfixes(self._obj, *args, **kwargs)
+ @copy_docstring(write_positionfixes_csv)
def to_csv(self, filename, *args, **kwargs):
"""
Store this collection of trackpoints as a CSV file.
@@ -100,6 +109,7 @@ class PositionfixesAccessor(object):
"""
ti.io.file.write_positionfixes_csv(self._obj, filename, *args, **kwargs)
+ @copy_docstring(write_positionfixes_postgis)
def to_postgis(
self, name, con, schema=None, if_exists="fail", index=True, index_label=None, chunksize=None, dtype=None
):
@@ -112,6 +122,7 @@ class PositionfixesAccessor(object):
self._obj, name, con, schema, if_exists, index, index_label, chunksize, dtype
)
+ @copy_docstring(calculate_distance_matrix)
def calculate_distance_matrix(self, *args, **kwargs):
"""
Calculate pair-wise distance among positionfixes or to other positionfixes.
diff --git a/trackintel/model/staypoints.py b/trackintel/model/staypoints.py
index 272636d..1a7ebb1 100644
--- a/trackintel/model/staypoints.py
+++ b/trackintel/model/staypoints.py
@@ -1,6 +1,13 @@
import pandas as pd
-
import trackintel as ti
+from trackintel.analysis.labelling import create_activity_flag
+from trackintel.analysis.tracking_quality import temporal_tracking_quality
+from trackintel.io.file import write_staypoints_csv
+from trackintel.io.postgis import write_staypoints_postgis
+from trackintel.model.util import copy_docstring
+from trackintel.preprocessing.filter import spatial_filter
+from trackintel.preprocessing.staypoints import generate_locations
+from trackintel.visualization.staypoints import plot_staypoints
@pd.api.extensions.register_dataframe_accessor("as_staypoints")
@@ -71,6 +78,7 @@ class StaypointsAccessor(object):
lon = self._obj.geometry.x
return (float(lon.mean()), float(lat.mean()))
+ @copy_docstring(generate_locations)
def generate_locations(self, *args, **kwargs):
"""
Generate locations from this collection of staypoints.
@@ -79,6 +87,7 @@ class StaypointsAccessor(object):
"""
return ti.preprocessing.staypoints.generate_locations(self._obj, *args, **kwargs)
+ @copy_docstring(create_activity_flag)
def create_activity_flag(self, *args, **kwargs):
"""
Set a flag if a staypoint is also an activity.
@@ -87,6 +96,7 @@ class StaypointsAccessor(object):
"""
return ti.analysis.labelling.create_activity_flag(self._obj, *args, **kwargs)
+ @copy_docstring(spatial_filter)
def spatial_filter(self, *args, **kwargs):
"""
Filter staypoints with a geo extent.
@@ -95,6 +105,7 @@ class StaypointsAccessor(object):
"""
return ti.preprocessing.filter.spatial_filter(self._obj, *args, **kwargs)
+ @copy_docstring(plot_staypoints)
def plot(self, *args, **kwargs):
"""
Plot this collection of staypoints.
@@ -103,6 +114,7 @@ class StaypointsAccessor(object):
"""
ti.visualization.staypoints.plot_staypoints(self._obj, *args, **kwargs)
+ @copy_docstring(write_staypoints_csv)
def to_csv(self, filename, *args, **kwargs):
"""
Store this collection of staypoints as a CSV file.
@@ -111,6 +123,7 @@ class StaypointsAccessor(object):
"""
ti.io.file.write_staypoints_csv(self._obj, filename, *args, **kwargs)
+ @copy_docstring(write_staypoints_postgis)
def to_postgis(
self, name, con, schema=None, if_exists="fail", index=True, index_label=None, chunksize=None, dtype=None
):
@@ -123,6 +136,7 @@ class StaypointsAccessor(object):
self._obj, name, con, schema, if_exists, index, index_label, chunksize, dtype
)
+ @copy_docstring(temporal_tracking_quality)
def temporal_tracking_quality(self, *args, **kwargs):
"""
Calculate per-user temporal tracking quality (temporal coverage).
diff --git a/trackintel/model/triplegs.py b/trackintel/model/triplegs.py
index 1d4ce62..dbfb855 100644
--- a/trackintel/model/triplegs.py
+++ b/trackintel/model/triplegs.py
@@ -1,8 +1,15 @@
import pandas as pd
-
import trackintel as ti
-import trackintel.preprocessing.filter
-import trackintel.visualization.triplegs
+from trackintel.analysis.labelling import predict_transport_mode
+from trackintel.analysis.modal_split import calculate_modal_split
+from trackintel.analysis.tracking_quality import temporal_tracking_quality
+from trackintel.geogr.distances import calculate_distance_matrix
+from trackintel.io.file import write_triplegs_csv
+from trackintel.io.postgis import write_triplegs_postgis
+from trackintel.model.util import copy_docstring
+from trackintel.preprocessing.filter import spatial_filter
+from trackintel.preprocessing.triplegs import generate_trips
+from trackintel.visualization.triplegs import plot_triplegs
@pd.api.extensions.register_dataframe_accessor("as_triplegs")
@@ -63,6 +70,7 @@ class TriplegsAccessor(object):
obj["finished_at"]
), "dtype of finished_at is {} but has to be datetime64 and timezone aware".format(obj["finished_at"].dtype)
+ @copy_docstring(plot_triplegs)
def plot(self, *args, **kwargs):
"""
Plot this collection of triplegs.
@@ -71,6 +79,7 @@ class TriplegsAccessor(object):
"""
ti.visualization.triplegs.plot_triplegs(self._obj, *args, **kwargs)
+ @copy_docstring(write_triplegs_csv)
def to_csv(self, filename, *args, **kwargs):
"""
Store this collection of triplegs as a CSV file.
@@ -79,6 +88,7 @@ class TriplegsAccessor(object):
"""
ti.io.file.write_triplegs_csv(self._obj, filename, *args, **kwargs)
+ @copy_docstring(write_triplegs_postgis)
def to_postgis(
self, name, con, schema=None, if_exists="fail", index=True, index_label=None, chunksize=None, dtype=None
):
@@ -91,6 +101,7 @@ class TriplegsAccessor(object):
self._obj, name, con, schema, if_exists, index, index_label, chunksize, dtype
)
+ @copy_docstring(calculate_distance_matrix)
def calculate_distance_matrix(self, *args, **kwargs):
"""
Calculate pair-wise distance among triplegs or to other triplegs.
@@ -99,6 +110,7 @@ class TriplegsAccessor(object):
"""
return ti.geogr.distances.calculate_distance_matrix(self._obj, *args, **kwargs)
+ @copy_docstring(spatial_filter)
def spatial_filter(self, *args, **kwargs):
"""
Filter triplegs with a geo extent.
@@ -107,6 +119,7 @@ class TriplegsAccessor(object):
"""
return ti.preprocessing.filter.spatial_filter(self._obj, *args, **kwargs)
+ @copy_docstring(generate_trips)
def generate_trips(self, *args, **kwargs):
"""
Generate trips based on staypoints and triplegs.
@@ -124,6 +137,7 @@ class TriplegsAccessor(object):
)
return ti.preprocessing.triplegs.generate_trips(stps_input=args[0], tpls_input=self._obj, **kwargs)
+ @copy_docstring(predict_transport_mode)
def predict_transport_mode(self, *args, **kwargs):
"""
Predict/impute the transport mode with which each tripleg was likely covered.
@@ -132,6 +146,7 @@ class TriplegsAccessor(object):
"""
return ti.analysis.labelling.predict_transport_mode(self._obj, *args, **kwargs)
+ @copy_docstring(calculate_modal_split)
def calculate_modal_split(self, *args, **kwargs):
"""
Calculate the modal split of the triplegs.
@@ -140,6 +155,7 @@ class TriplegsAccessor(object):
"""
return ti.analysis.modal_split.calculate_modal_split(self._obj, *args, **kwargs)
+ @copy_docstring(temporal_tracking_quality)
def temporal_tracking_quality(self, *args, **kwargs):
"""
Calculate per-user temporal tracking quality (temporal coverage).
diff --git a/trackintel/model/trips.py b/trackintel/model/trips.py
index a29441d..73abb1c 100644
--- a/trackintel/model/trips.py
+++ b/trackintel/model/trips.py
@@ -1,3 +1,7 @@
+from trackintel.analysis.tracking_quality import temporal_tracking_quality
+from trackintel.io.postgis import write_trips_postgis
+from trackintel.io.file import write_trips_csv
+from trackintel.model.util import copy_docstring
import pandas as pd
import trackintel as ti
@@ -69,6 +73,7 @@ class TripsAccessor(object):
"""
raise NotImplementedError
+ @copy_docstring(write_trips_csv)
def to_csv(self, filename, *args, **kwargs):
"""
Store this collection of trips as a CSV file.
@@ -77,6 +82,7 @@ class TripsAccessor(object):
"""
ti.io.file.write_trips_csv(self._obj, filename, *args, **kwargs)
+ @copy_docstring(write_trips_postgis)
def to_postgis(
self, name, con, schema=None, if_exists="fail", index=True, index_label=None, chunksize=None, dtype=None
):
@@ -87,6 +93,7 @@ class TripsAccessor(object):
"""
ti.io.postgis.write_trips_postgis(self._obj, name, con, schema, if_exists, index, index_label, chunksize, dtype)
+ @copy_docstring(temporal_tracking_quality)
def temporal_tracking_quality(self, *args, **kwargs):
"""
Calculate per-user temporal tracking quality (temporal coverage).
diff --git a/trackintel/model/util.py b/trackintel/model/util.py
new file mode 100644
index 0000000..1dc9cd1
--- /dev/null
+++ b/trackintel/model/util.py
@@ -0,0 +1,5 @@
+from functools import partial, update_wrapper
+
+
+def copy_docstring(wrapped, assigned=("__doc__",), updated=[]):
+ return partial(update_wrapper, wrapped=wrapped, assigned=assigned, updated=updated)
|
mie-lab/trackintel
|
e0c0cdd0d8472ba7b113b3819d062ea8abcd8168
|
diff --git a/tests/model/test_util.py b/tests/model/test_util.py
new file mode 100644
index 0000000..bb4db52
--- /dev/null
+++ b/tests/model/test_util.py
@@ -0,0 +1,23 @@
+from trackintel.model.util import copy_docstring
+from functools import WRAPPER_ASSIGNMENTS
+from trackintel.io.postgis import read_trips_postgis
+
+
+class TestCopy_Docstring:
+ def test_default(self):
+ @copy_docstring(read_trips_postgis)
+ def bar(b: int) -> int:
+ """Old docstring."""
+ pass
+
+ old_docs = """Old docstring."""
+ print(type(old_docs))
+
+ for wa in WRAPPER_ASSIGNMENTS:
+ attr_foo = getattr(read_trips_postgis, wa)
+ attr_bar = getattr(bar, wa)
+ if wa == "__doc__":
+ assert attr_foo == attr_bar
+ assert attr_bar != old_docs
+ else:
+ assert attr_foo != attr_bar
|
Add documentation to accessor methods.
The accessor functions with the pattern `as_triplegs.xyz` do not have a complete documentation, but rather refer to the wrapped functions in a different modules. This is inconvenient for working in an editor, as one cannot look up the functionality of the various parameters in the docstring. Instead one has to look up the referenced docstring seperatly.
Two ways to change that would be:
1. Just copy the docstring over at every change.
2. Use a decorator to copy the docstring directly.
The first way is rather error-prone as one has to remember to change both docstrings at any change. Here it would be good to write a test that checks the equality of the `__doc__` attribute of the functions. The second way can be unclear in the documentation as it replaces silently the existing docstring. Also other problem arises in terms of circular imports etc. .
|
0.0
|
e0c0cdd0d8472ba7b113b3819d062ea8abcd8168
|
[
"tests/model/test_util.py::TestCopy_Docstring::test_default"
] |
[] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-07-14 11:07:07+00:00
|
mit
| 3,917 |
|
mie-lab__trackintel-275
|
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index f6bf977..9cf8e55 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -38,13 +38,20 @@ This is a place to collect conventions we agreed upon until we find the right pl
### Time stamps
All timestamps are timezone aware pandas `datetime64[ns, UTC]` objects. The default timezone should be `UTC` but the user should be free to use a different one if he wants. See [Issue 101](https://github.com/mie-lab/trackintel/issues/18).
-### Organization of tests
+### Tests
+#### Organization of tests
See [Issue 23](https://github.com/mie-lab/trackintel/issues/23)
- The test folder copies the folder structure that the trackintel.trackintel folder has.
- Every python module has a single test file
- Every function has 1 test class
- Every method of this function should test a single property
+#### Test data
+If possible test data should be
+- independent of unrelated preprocessing steps (e.g., avoid starting with positionfixes if you write tests for trips)
+- simple and easy to understand (e.g., try to have a short example with an isolated special case rather than a large dataset that contains a lot of special cases)
+- defined directly in the code itself (e.g, [this example](https://github.com/mie-lab/trackintel/blob/e0c0cdd0d8472ba7b113b3819d062ea8abcd8168/tests/io/test_postgis_gpd.py#L50)
+
### Integrety of input data
Functions should never change the input dataframe but rather return an altered copy.
diff --git a/trackintel/analysis/location_identification.py b/trackintel/analysis/location_identification.py
index 399c212..7b6b73b 100644
--- a/trackintel/analysis/location_identification.py
+++ b/trackintel/analysis/location_identification.py
@@ -175,7 +175,7 @@ def freq_method(spts, *labels):
Assigning the most visited location the label "home" and the second most visited location the label "work".
The remaining locations get no label.
- Labels can also be passes as an arguement.
+ Labels can also be given as arguments.
Parameters
----------
@@ -241,6 +241,7 @@ def _freq_assign(duration, *labels):
"""
kth = (-duration).argsort()[: len(labels)] # if inefficient use partial sort.
label_array = np.full(len(duration), fill_value=None)
+ labels = labels[: len(kth)] # if provided with more labels than entries.
label_array[kth] = labels
return label_array
@@ -307,15 +308,18 @@ def osna_method(spts):
spts_pivot = spts_agg.unstack()
# get index of maximum for columns "work" and "home"
spts_idxmax = spts_pivot.groupby(["user_id"]).idxmax()
- # first assign "home" label
- spts_pivot.loc[spts_idxmax["home"], "activity_label"] = "home"
+ # first assign labels
+ for col in spts_idxmax.columns:
+ spts_pivot.loc[spts_idxmax[col].dropna(), "activity_label"] = col
# The "home" label could overlap with the "work" label
# we set the rows where "home" is maximum to zero (pd.NaT) and recalculate index of work maximum.
- redo_work = spts_idxmax[spts_idxmax["home"] == spts_idxmax["work"]]
- spts_pivot.loc[redo_work["work"], "work"] = pd.NaT
- spts_idxmax_work = spts_pivot.groupby(["user_id"])["work"].idxmax()
- spts_pivot.loc[spts_idxmax_work, "activity_label"] = "work"
+ if all(col in spts_idxmax.columns for col in ["work", "home"]):
+ redo_work = spts_idxmax[spts_idxmax["home"] == spts_idxmax["work"]]
+ spts_pivot.loc[redo_work["work"], "activity_label"] = "home"
+ spts_pivot.loc[redo_work["work"], "work"] = pd.NaT
+ spts_idxmax_work = spts_pivot.groupby(["user_id"])["work"].idxmax()
+ spts_pivot.loc[spts_idxmax_work.dropna(), "activity_label"] = "work"
# now join it back together
sel = spts_in.columns != "activity_label" # no overlap with older "activity_label"
|
mie-lab/trackintel
|
563473b885a91858a8463da9db66353d5adb1089
|
diff --git a/tests/analysis/test_location_identification.py b/tests/analysis/test_location_identification.py
index 9788293..a60ce42 100644
--- a/tests/analysis/test_location_identification.py
+++ b/tests/analysis/test_location_identification.py
@@ -6,16 +6,16 @@ import pandas as pd
import pytest
import trackintel as ti
from geopandas.testing import assert_geodataframe_equal
-from pandas.testing import assert_index_equal
+from pandas.testing import assert_frame_equal, assert_index_equal
from shapely.geometry import Point
from trackintel.analysis.location_identification import (
_freq_assign,
_freq_transform,
+ _osna_label_timeframes,
freq_method,
location_identifier,
- pre_filter_locations,
osna_method,
- _osna_label_timeframes,
+ pre_filter_locations,
)
@@ -210,6 +210,13 @@ class Test_Freq_Assign:
freq = _freq_assign(dur, *labels)
assert all(freq == freq_sol)
+ def test_more_labels_than_entries(self):
+ dur = pd.Series([9, 0])
+ labels = ("label1", "label2", "label3")
+ freq_sol = np.array([labels[0], labels[1]])
+ freq = _freq_assign(dur, *labels)
+ assert all(freq == freq_sol)
+
class TestLocation_Identifier:
"""Test function `location_identifier`"""
@@ -366,6 +373,42 @@ class TestOsna_Method:
spts.loc[spts["location_id"] == 2, "activity_label"] = "work"
assert_geodataframe_equal(spts, result)
+ def test_only_one_work_location(self):
+ """Test if only one work location of a user can be handled."""
+ t_work = pd.Timestamp("2021-07-14 18:00:00", tz="utc")
+ h = pd.Timedelta("1h")
+ p = Point(0.0, 0.0) # not used
+ list_dict = [{"user_id": 0, "location_id": 0, "started_at": t_work, "finished_at": t_work + h, "g": p}]
+ spts = gpd.GeoDataFrame(data=list_dict, geometry="g")
+ spts.index.name = "id"
+ result = osna_method(spts)
+ spts["activity_label"] = "work"
+ assert_geodataframe_equal(result, spts)
+
+ def test_only_one_rest_location(self):
+ """Test if only one rest location of a user can be handled."""
+ t_rest = pd.Timestamp("2021-07-14 07:00:00", tz="utc")
+ h = pd.Timedelta("1h")
+ p = Point(0.0, 0.0) # not used
+ list_dict = [{"user_id": 0, "location_id": 0, "started_at": t_rest, "finished_at": t_rest + h, "g": p}]
+ spts = gpd.GeoDataFrame(data=list_dict, geometry="g")
+ spts.index.name = "id"
+ result = osna_method(spts)
+ spts["activity_label"] = "home"
+ assert_geodataframe_equal(result, spts)
+
+ def test_only_one_leisure_location(self):
+ """Test if only one leisure location of a user can be handled."""
+ t_leis = pd.Timestamp("2021-07-14 01:00:00", tz="utc")
+ h = pd.Timedelta("1h")
+ p = Point(0.0, 0.0) # not used
+ list_dict = [{"user_id": 0, "location_id": 0, "started_at": t_leis, "finished_at": t_leis + h, "g": p}]
+ spts = gpd.GeoDataFrame(data=list_dict, geometry="g")
+ spts.index.name = "id"
+ result = osna_method(spts)
+ spts["activity_label"] = "home"
+ assert_geodataframe_equal(result, spts)
+
def test_prior_activity_label(self, example_osna):
"""Test that prior activity_label column does not corrupt output."""
example_osna["activity_label"] = np.arange(len(example_osna))
@@ -375,6 +418,23 @@ class TestOsna_Method:
example_osna.loc[example_osna["location_id"] == 1, "activity_label"] = "work"
assert_geodataframe_equal(example_osna, result)
+ def test_multiple_users_with_only_one_location(self):
+ """Test that function can handle multiple users with only one location."""
+ t_leis = pd.Timestamp("2021-07-14 01:00:00", tz="utc")
+ t_work = pd.Timestamp("2021-07-14 18:00:00", tz="utc")
+ h = pd.Timedelta("1h")
+ list_dict = [
+ {"user_id": 0, "location_id": 0, "started_at": t_leis, "finished_at": t_leis + h},
+ {"user_id": 0, "location_id": 1, "started_at": t_work, "finished_at": t_work + h},
+ {"user_id": 1, "location_id": 0, "started_at": t_leis, "finished_at": t_leis + h},
+ {"user_id": 2, "location_id": 0, "started_at": t_work, "finished_at": t_work + h},
+ ]
+ spts = pd.DataFrame(list_dict)
+ spts.index.name = "id"
+ result = osna_method(spts)
+ spts["activity_label"] = ["home", "work", "home", "work"]
+ assert_frame_equal(spts, result)
+
class Test_osna_label_timeframes:
"""Test for the _osna_label_timeframes() function."""
|
Location identification fails for user with a single significant location
There are edge cases where the function `pre_filter_locations` can result in filtering all but 1 location of a user. This leads to the following error in `_freq_assign` in [line 230](https://github.com/mie-lab/trackintel/blob/master/trackintel/analysis/location_identification.py#L230):
`ValueError: could not broadcast input array from shape (2,) into shape (1,)`
because it tries to assign 2 labels to 1 location.
Unfortunately, the filter is relatively complex and therefore I failed creating an independent script using artificial data but I have some test data available for download [here](https://1drv.ms/u/s!Ar5qZtQfLW_Mmq95kMzADK9ZhmWQdg?e=OhpPju) that can be used to reproduce the error.
```python
import pandas as pd
import geopandas as gpd
import trackintel as ti
path_to_testdata = r"D:\Geolife\geolife_all_spts.geojson"
spts = gpd.read_file(path_to_testdata)
spts["started_at"] = pd.to_datetime(spts["started_at"], utc=True)
spts["finished_at"] = pd.to_datetime(spts["finished_at"], utc=True)
print("extract locations")
spts, _ = spts.as_staypoints.generate_locations(
method="dbscan", epsilon=50, num_samples=1, distance_metric="haversine", agg_level="user"
)
spts = ti.analysis.location_identifier(spts, method="FREQ", pre_filter=True)
```
|
0.0
|
563473b885a91858a8463da9db66353d5adb1089
|
[
"tests/analysis/test_location_identification.py::Test_Freq_Assign::test_more_labels_than_entries",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_only_one_work_location",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_only_one_rest_location",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_only_one_leisure_location",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_multiple_users_with_only_one_location"
] |
[
"tests/analysis/test_location_identification.py::TestPre_Filter::test_no_kw",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_thresh_sp",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_thresh_loc",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_thresh_sp_at_loc",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_tresh_loc_time",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_loc_period",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_agg_level",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_agg_level_error",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_non_continous_index",
"tests/analysis/test_location_identification.py::TestFreq_method::test_default_labels",
"tests/analysis/test_location_identification.py::TestFreq_method::test_custom_labels",
"tests/analysis/test_location_identification.py::TestFreq_method::test_duration",
"tests/analysis/test_location_identification.py::Test_Freq_Transform::test_function",
"tests/analysis/test_location_identification.py::Test_Freq_Assign::test_function",
"tests/analysis/test_location_identification.py::TestLocation_Identifier::test_unkown_method",
"tests/analysis/test_location_identification.py::TestLocation_Identifier::test_no_location_column",
"tests/analysis/test_location_identification.py::TestLocation_Identifier::test_pre_filter",
"tests/analysis/test_location_identification.py::TestLocation_Identifier::test_freq_method",
"tests/analysis/test_location_identification.py::TestLocation_Identifier::test_osna_method",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_default",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_only_weekends",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_leisure_weighting",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_prior_activity_label",
"tests/analysis/test_location_identification.py::Test_osna_label_timeframes::test_weekend",
"tests/analysis/test_location_identification.py::Test_osna_label_timeframes::test_weekday"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-07-14 19:10:55+00:00
|
mit
| 3,918 |
|
mie-lab__trackintel-290
|
diff --git a/trackintel/io/postgis.py b/trackintel/io/postgis.py
index c898fd5..b0cba39 100644
--- a/trackintel/io/postgis.py
+++ b/trackintel/io/postgis.py
@@ -2,6 +2,8 @@ from functools import wraps
from inspect import signature
import geopandas as gpd
+from geopandas.io.sql import _get_srid_from_crs
+from shapely.wkb import dumps
import pandas as pd
from geoalchemy2 import Geometry, WKTElement
from sqlalchemy import create_engine
@@ -224,10 +226,7 @@ def write_locations_postgis(
# May build additional check for that.
if "extent" in locations.columns:
# geopandas.to_postgis can only handle one geometry column -> do it manually
- if locations.crs is not None:
- srid = locations.crs.to_epsg()
- else:
- srid = -1
+ srid = _get_srid_from_crs(locations)
extent_schema = Geometry("POLYGON", srid)
if dtype is None:
@@ -235,7 +234,7 @@ def write_locations_postgis(
else:
dtype["extent"] = extent_schema
locations = locations.copy()
- locations["extent"] = locations["extent"].apply(lambda x: WKTElement(x.wkt, srid=srid))
+ locations["extent"] = locations["extent"].apply(lambda x: dumps(x, srid=srid, hex=True))
locations.to_postgis(
name,
diff --git a/trackintel/preprocessing/positionfixes.py b/trackintel/preprocessing/positionfixes.py
index d604382..1ff8cf9 100644
--- a/trackintel/preprocessing/positionfixes.py
+++ b/trackintel/preprocessing/positionfixes.py
@@ -5,9 +5,9 @@ import geopandas as gpd
import numpy as np
import pandas as pd
from shapely.geometry import LineString, Point
-from tqdm import tqdm
from trackintel.geogr.distances import haversine_dist
+from trackintel.preprocessing.util import applyParallel
def generate_staypoints(
@@ -20,6 +20,7 @@ def generate_staypoints(
include_last=False,
print_progress=False,
exclude_duplicate_pfs=True,
+ n_jobs=1,
):
"""
Generate staypoints from positionfixes.
@@ -48,18 +49,24 @@ def generate_staypoints(
temporal gaps larger than 'gap_threshold' will be excluded from staypoints generation.
Only valid in 'sliding' method.
- include_last: boolen, default False
+ include_last: boolean, default False
The algorithm in Li et al. (2008) only detects staypoint if the user steps out
of that staypoint. This will omit the last staypoint (if any). Set 'include_last'
to True to include this last staypoint.
- print_progress: boolen, default False
+ print_progress: boolean, default False
Show per-user progress if set to True.
exclude_duplicate_pfs: boolean, default True
Filters duplicate positionfixes before generating staypoints. Duplicates can lead to problems in later
processing steps (e.g., when generating triplegs). It is not recommended to set this to False.
+ n_jobs: int, default 1
+ The maximum number of concurrently running jobs. If -1 all CPUs are used. If 1 is given, no parallel
+ computing code is used at all, which is useful for debugging. See
+ https://joblib.readthedocs.io/en/latest/parallel.html#parallel-reference-documentation
+ for a detailed description
+
Returns
-------
pfs: GeoDataFrame (as trackintel positionfixes)
@@ -118,37 +125,20 @@ def generate_staypoints(
# TODO: tests using a different distance function, e.g., L2 distance
if method == "sliding":
# Algorithm from Li et al. (2008). For details, please refer to the paper.
- if print_progress:
- tqdm.pandas(desc="User staypoint generation")
- stps = (
- pfs.groupby("user_id", as_index=False)
- .progress_apply(
- _generate_staypoints_sliding_user,
- geo_col=geo_col,
- elevation_flag=elevation_flag,
- dist_threshold=dist_threshold,
- time_threshold=time_threshold,
- gap_threshold=gap_threshold,
- distance_metric=distance_metric,
- include_last=include_last,
- )
- .reset_index(drop=True)
- )
- else:
- stps = (
- pfs.groupby("user_id", as_index=False)
- .apply(
- _generate_staypoints_sliding_user,
- geo_col=geo_col,
- elevation_flag=elevation_flag,
- dist_threshold=dist_threshold,
- time_threshold=time_threshold,
- gap_threshold=gap_threshold,
- distance_metric=distance_metric,
- include_last=include_last,
- )
- .reset_index(drop=True)
- )
+ stps = applyParallel(
+ pfs.groupby("user_id", as_index=False),
+ _generate_staypoints_sliding_user,
+ n_jobs=n_jobs,
+ print_progress=print_progress,
+ geo_col=geo_col,
+ elevation_flag=elevation_flag,
+ dist_threshold=dist_threshold,
+ time_threshold=time_threshold,
+ gap_threshold=gap_threshold,
+ distance_metric=distance_metric,
+ include_last=include_last,
+ ).reset_index(drop=True)
+
# index management
stps["id"] = np.arange(len(stps))
stps.set_index("id", inplace=True)
@@ -328,7 +318,7 @@ def generate_triplegs(
tpls_diff = np.diff(tpls_starts)
# get the start position of stps
- # pd.NA causes error in boolen comparision, replace to -1
+ # pd.NA causes error in boolean comparision, replace to -1
stps_id = pfs["staypoint_id"].copy().fillna(-1)
unique_stps, stps_starts = np.unique(stps_id, return_index=True)
# get the index of where the tpls_starts belong in stps_starts
diff --git a/trackintel/preprocessing/staypoints.py b/trackintel/preprocessing/staypoints.py
index 1a520b7..00040e2 100644
--- a/trackintel/preprocessing/staypoints.py
+++ b/trackintel/preprocessing/staypoints.py
@@ -5,9 +5,9 @@ import geopandas as gpd
import pandas as pd
from shapely.geometry import Point
from sklearn.cluster import DBSCAN
-from tqdm import tqdm
from trackintel.geogr.distances import meters_to_decimal_degrees
+from trackintel.preprocessing.util import applyParallel
def generate_locations(
@@ -18,6 +18,7 @@ def generate_locations(
distance_metric="haversine",
agg_level="user",
print_progress=False,
+ n_jobs=1,
):
"""
Generate locations from the staypoints.
@@ -51,6 +52,12 @@ def generate_locations(
print_progress : bool, default False
If print_progress is True, the progress bar is displayed
+ n_jobs: int, default 1
+ The maximum number of concurrently running jobs. If -1 all CPUs are used. If 1 is given, no parallel
+ computing code is used at all, which is useful for debugging. See
+ https://joblib.readthedocs.io/en/latest/parallel.html#parallel-reference-documentation
+ for a detailed description
+
Returns
-------
ret_sp: GeoDataFrame (as trackintel staypoints)
@@ -85,21 +92,15 @@ def generate_locations(
db = DBSCAN(eps=epsilon, min_samples=num_samples, algorithm="ball_tree", metric=distance_metric)
if agg_level == "user":
- if print_progress:
- tqdm.pandas(desc="User location generation")
- ret_stps = ret_stps.groupby("user_id", as_index=False).progress_apply(
- _generate_locations_per_user,
- geo_col=geo_col,
- distance_metric=distance_metric,
- db=db,
- )
- else:
- ret_stps = ret_stps.groupby("user_id", as_index=False).apply(
- _generate_locations_per_user,
- geo_col=geo_col,
- distance_metric=distance_metric,
- db=db,
- )
+ ret_stps = applyParallel(
+ ret_stps.groupby("user_id", as_index=False),
+ _generate_locations_per_user,
+ n_jobs=n_jobs,
+ print_progress=print_progress,
+ geo_col=geo_col,
+ distance_metric=distance_metric,
+ db=db,
+ )
# keeping track of noise labels
ret_stps_non_noise_labels = ret_stps[ret_stps["location_id"] != -1]
diff --git a/trackintel/preprocessing/util.py b/trackintel/preprocessing/util.py
index e14485b..04c6874 100644
--- a/trackintel/preprocessing/util.py
+++ b/trackintel/preprocessing/util.py
@@ -1,3 +1,8 @@
+import pandas as pd
+from joblib import Parallel, delayed
+from tqdm import tqdm
+
+
def calc_temp_overlap(start_1, end_1, start_2, end_2):
"""
Calculate the portion of the first time span that overlaps with the second
@@ -59,3 +64,38 @@ def calc_temp_overlap(start_1, end_1, start_2, end_2):
overlap_ratio = temp_overlap / dur.total_seconds()
return overlap_ratio
+
+
+def applyParallel(dfGrouped, func, n_jobs, print_progress, **kwargs):
+ """
+ Funtion warpper to parallelize funtions after .groupby().
+
+ Parameters
+ ----------
+ dfGrouped: pd.DataFrameGroupBy
+ The groupby object after calling df.groupby(COLUMN).
+
+ func: function
+ Function to apply to the dfGrouped object, i.e., dfGrouped.apply(func).
+
+ n_jobs: int
+ The maximum number of concurrently running jobs. If -1 all CPUs are used. If 1 is given, no parallel
+ computing code is used at all, which is useful for debugging. See
+ https://joblib.readthedocs.io/en/latest/parallel.html#parallel-reference-documentation
+ for a detailed description
+
+ print_progress: boolean
+ If set to True print the progress of apply.
+
+ **kwargs:
+ Other arguments passed to func.
+
+ Returns
+ -------
+ pd.DataFrame:
+ The result of dfGrouped.apply(func)
+ """
+ df_ls = Parallel(n_jobs=n_jobs)(
+ delayed(func)(group, **kwargs) for _, group in tqdm(dfGrouped, disable=not print_progress)
+ )
+ return pd.concat(df_ls)
|
mie-lab/trackintel
|
33d6205475ad8c4a859a9a5883241fe1098997b2
|
diff --git a/.github/workflows/CI-tests.yml b/.github/workflows/CI-tests.yml
index 2e2e170..3c267ac 100644
--- a/.github/workflows/CI-tests.yml
+++ b/.github/workflows/CI-tests.yml
@@ -60,5 +60,5 @@ jobs:
source activate test
conda install postgis -c conda-forge
source ci/envs/setup_postgres.sh
- pytest -v -r s --color=yes --cov=trackintel --cov-append --cov-report term-missing --cov-report xml tests/io/test_postgis_gpd.py | tee /dev/stderr | if grep SKIPPED >/dev/null;then echo "TESTS SKIPPED, FAILING" && exit 1;fi
+ pytest -v -r s --color=yes --cov=trackintel --cov-append --cov-report term-missing --cov-report xml tests/io/test_postgis.py | tee /dev/stderr | if grep SKIPPED >/dev/null;then echo "TESTS SKIPPED, FAILING" && exit 1;fi
- uses: codecov/codecov-action@v1
\ No newline at end of file
diff --git a/tests/io/test_postgis_gpd.py b/tests/io/test_postgis.py
similarity index 100%
rename from tests/io/test_postgis_gpd.py
rename to tests/io/test_postgis.py
diff --git a/tests/preprocessing/test_positionfixes.py b/tests/preprocessing/test_positionfixes.py
index c1ec21d..2e84a7a 100644
--- a/tests/preprocessing/test_positionfixes.py
+++ b/tests/preprocessing/test_positionfixes.py
@@ -86,6 +86,18 @@ def example_positionfixes_isolated():
class TestGenerate_staypoints:
"""Tests for generate_staypoints() method."""
+ def test_parallel_computing(self):
+ """The result obtained with parallel computing should be identical."""
+ pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join("tests", "data", "geolife_long"))
+ # without parallel computing code
+ pfs_ori, stps_ori = pfs.as_positionfixes.generate_staypoints(n_jobs=1)
+ # using two cores
+ pfs_para, stps_para = pfs.as_positionfixes.generate_staypoints(n_jobs=2)
+
+ # the result of parallel computing should be identical
+ assert_geodataframe_equal(pfs_ori, pfs_para)
+ assert_geodataframe_equal(stps_ori, stps_para)
+
def test_duplicate_pfs_warning(self, example_positionfixes):
"""Calling generate_staypoints with duplicate positionfixes should raise a warning."""
pfs_duplicate_loc = example_positionfixes.copy()
diff --git a/tests/preprocessing/test_staypoints.py b/tests/preprocessing/test_staypoints.py
index 463cf5a..5ba08c3 100644
--- a/tests/preprocessing/test_staypoints.py
+++ b/tests/preprocessing/test_staypoints.py
@@ -7,6 +7,7 @@ import pandas as pd
import pytest
from shapely.geometry import Point
from sklearn.cluster import DBSCAN
+from geopandas.testing import assert_geodataframe_equal
import trackintel as ti
from trackintel.geogr.distances import calculate_distance_matrix
@@ -57,6 +58,23 @@ def example_staypoints():
class TestGenerate_locations:
"""Tests for generate_locations() method."""
+ def test_parallel_computing(self, example_staypoints):
+ """The result obtained with parallel computing should be identical."""
+ stps = example_staypoints
+
+ # without parallel computing code
+ stps_ori, locs_ori = stps.as_staypoints.generate_locations(
+ method="dbscan", epsilon=10, num_samples=2, distance_metric="haversine", agg_level="user", n_jobs=1
+ )
+ # using two cores
+ stps_para, locs_para = stps.as_staypoints.generate_locations(
+ method="dbscan", epsilon=10, num_samples=2, distance_metric="haversine", agg_level="user", n_jobs=2
+ )
+
+ # the result of parallel computing should be identical
+ assert_geodataframe_equal(locs_ori, locs_para)
+ assert_geodataframe_equal(stps_ori, stps_para)
+
def test_dbscan_hav_euc(self):
"""Test if using haversine and euclidean distances will generate the same location result."""
stps_file = os.path.join("tests", "data", "geolife", "geolife_staypoints.csv")
diff --git a/tests/preprocessing/test_triplegs.py b/tests/preprocessing/test_triplegs.py
index 58f5c55..84f22b9 100644
--- a/tests/preprocessing/test_triplegs.py
+++ b/tests/preprocessing/test_triplegs.py
@@ -275,8 +275,6 @@ class TestGenerate_trips:
stps_in = gpd.GeoDataFrame(stps_in, geometry="geom")
stps_in = ti.io.read_staypoints_gpd(stps_in, tz="utc")
- assert stps_in.as_staypoints
-
tpls_in = pd.read_csv(
os.path.join("tests", "data", "trips", "triplegs_gaps.csv"),
sep=";",
@@ -289,12 +287,10 @@ class TestGenerate_trips:
tpls_in = gpd.GeoDataFrame(tpls_in, geometry="geom")
tpls_in = ti.io.read_triplegs_gpd(tpls_in, tz="utc")
- assert tpls_in.as_triplegs
-
# load ground truth data
- trips_loaded = ti.read_trips_csv(os.path.join("tests", "data", "trips", "trips_gaps.csv"), index_col="id")
- trips_loaded["started_at"] = pd.to_datetime(trips_loaded["started_at"], utc=True)
- trips_loaded["finished_at"] = pd.to_datetime(trips_loaded["finished_at"], utc=True)
+ trips_loaded = ti.read_trips_csv(
+ os.path.join("tests", "data", "trips", "trips_gaps.csv"), index_col="id", tz="utc"
+ )
stps_tpls_loaded = pd.read_csv(os.path.join("tests", "data", "trips", "stps_tpls_gaps.csv"), index_col="id")
stps_tpls_loaded["started_at"] = pd.to_datetime(stps_tpls_loaded["started_at"], utc=True)
|
Write locations with extent to postgis throws error
Hi,
I have tried to write locations with extent to a postgis database but I received an error. Here is an example that is very similar to the existing test:
```python
from shapely.geometry import Point, Polygon
import geopandas as gpd
from sqlalchemy import create_engine
import trackintel as ti
from db_login import DSN # database login information
import numpy as np
engine = create_engine(
"postgresql://{db_user}:{db_password}@{db_host}:{db_port}/{db_database}".format(
**DSN
)
)
p1 = Point(8.5067847, 47.4)
p2 = Point(8.5067847, 47.5)
p3 = Point(8.5067847, 47.6)
list_dict = [
{"user_id": 0, "center": p1},
{"user_id": 0, "center": p2},
{"user_id": 1, "center": p3},
]
locs = gpd.GeoDataFrame(data=list_dict, geometry="center", crs="EPSG:4326")
locs.index.name = "id"
coords = [[8.45, 47.6], [8.45, 47.4], [8.55, 47.4], [8.55, 47.6], [8.45, 47.6]]
extent = Polygon(coords)
locs["extent"] = extent # broadcasting
locs["extent"] = gpd.GeoSeries(locs["extent"]) # dtype
locs.as_locations.to_postgis(name='locations', con=engine, schema="yumuv_graph_rep", if_exists='replace')
```
This throws the following error:
```
psycopg2.errors.InvalidParameterValue: Geometry SRID (0) does not match column SRID (4326)
CONTEXT: COPY locations, line 1, column extent: "POLYGON ((8.449999999999999 47.6, 8.449999999999999 47.4, 8.550000000000001 47.4, 8.550000000000001 ..."
```
I am honestly not really sure why as it is very similar to the existing test for locations. I could solve this by replacing [line 238](https://github.com/mie-lab/trackintel/blob/master/trackintel/io/postgis.py#L238)
`locations["extent"] = locations["extent"].apply(lambda x: WKTElement(x.wkt, srid=srid))` with the following:
`locations = gpd.GeoDataFrame(_convert_to_ewkb(locations, 'extent', srid), geometry='center', crs=srid)`
`_convert_to_ewkb` is a function from geopandas and can be imported `from geopandas.io.sql import _convert_to_ewkb`. This function is used in Geopandas to prepare the main geometry [link](https://github.com/geopandas/geopandas/blob/master/geopandas/io/sql.py#L402)
|
0.0
|
33d6205475ad8c4a859a9a5883241fe1098997b2
|
[
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_parallel_computing",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_parallel_computing",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_index_stability",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_location_ids_and_noise"
] |
[
"tests/io/test_postgis.py::TestGetSrid::test_srid",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_duplicate_pfs_filtering",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_duplicate_columns",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_sliding_min",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_sliding_max",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_missing_link",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_dtype_consistent",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_index_start",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_include_last",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_print_progress",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_temporal",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_noncontinuous_unordered_index",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_invalid_isolates",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_duplicate_columns",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_user_without_stps",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_pfs_without_stps",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_stability",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_random_order",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_pfs_index",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_dtype_consistent",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_missing_link",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_index_start",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_invalid_inputs",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_temporal",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_missing_link",
"tests/preprocessing/test_triplegs.py::TestSmoothen_triplegs::test_smoothen_triplegs",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_only_staypoints_in_trip"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-07-28 17:53:44+00:00
|
mit
| 3,919 |
|
mie-lab__trackintel-291
|
diff --git a/trackintel/preprocessing/positionfixes.py b/trackintel/preprocessing/positionfixes.py
index d604382..1ff8cf9 100644
--- a/trackintel/preprocessing/positionfixes.py
+++ b/trackintel/preprocessing/positionfixes.py
@@ -5,9 +5,9 @@ import geopandas as gpd
import numpy as np
import pandas as pd
from shapely.geometry import LineString, Point
-from tqdm import tqdm
from trackintel.geogr.distances import haversine_dist
+from trackintel.preprocessing.util import applyParallel
def generate_staypoints(
@@ -20,6 +20,7 @@ def generate_staypoints(
include_last=False,
print_progress=False,
exclude_duplicate_pfs=True,
+ n_jobs=1,
):
"""
Generate staypoints from positionfixes.
@@ -48,18 +49,24 @@ def generate_staypoints(
temporal gaps larger than 'gap_threshold' will be excluded from staypoints generation.
Only valid in 'sliding' method.
- include_last: boolen, default False
+ include_last: boolean, default False
The algorithm in Li et al. (2008) only detects staypoint if the user steps out
of that staypoint. This will omit the last staypoint (if any). Set 'include_last'
to True to include this last staypoint.
- print_progress: boolen, default False
+ print_progress: boolean, default False
Show per-user progress if set to True.
exclude_duplicate_pfs: boolean, default True
Filters duplicate positionfixes before generating staypoints. Duplicates can lead to problems in later
processing steps (e.g., when generating triplegs). It is not recommended to set this to False.
+ n_jobs: int, default 1
+ The maximum number of concurrently running jobs. If -1 all CPUs are used. If 1 is given, no parallel
+ computing code is used at all, which is useful for debugging. See
+ https://joblib.readthedocs.io/en/latest/parallel.html#parallel-reference-documentation
+ for a detailed description
+
Returns
-------
pfs: GeoDataFrame (as trackintel positionfixes)
@@ -118,37 +125,20 @@ def generate_staypoints(
# TODO: tests using a different distance function, e.g., L2 distance
if method == "sliding":
# Algorithm from Li et al. (2008). For details, please refer to the paper.
- if print_progress:
- tqdm.pandas(desc="User staypoint generation")
- stps = (
- pfs.groupby("user_id", as_index=False)
- .progress_apply(
- _generate_staypoints_sliding_user,
- geo_col=geo_col,
- elevation_flag=elevation_flag,
- dist_threshold=dist_threshold,
- time_threshold=time_threshold,
- gap_threshold=gap_threshold,
- distance_metric=distance_metric,
- include_last=include_last,
- )
- .reset_index(drop=True)
- )
- else:
- stps = (
- pfs.groupby("user_id", as_index=False)
- .apply(
- _generate_staypoints_sliding_user,
- geo_col=geo_col,
- elevation_flag=elevation_flag,
- dist_threshold=dist_threshold,
- time_threshold=time_threshold,
- gap_threshold=gap_threshold,
- distance_metric=distance_metric,
- include_last=include_last,
- )
- .reset_index(drop=True)
- )
+ stps = applyParallel(
+ pfs.groupby("user_id", as_index=False),
+ _generate_staypoints_sliding_user,
+ n_jobs=n_jobs,
+ print_progress=print_progress,
+ geo_col=geo_col,
+ elevation_flag=elevation_flag,
+ dist_threshold=dist_threshold,
+ time_threshold=time_threshold,
+ gap_threshold=gap_threshold,
+ distance_metric=distance_metric,
+ include_last=include_last,
+ ).reset_index(drop=True)
+
# index management
stps["id"] = np.arange(len(stps))
stps.set_index("id", inplace=True)
@@ -328,7 +318,7 @@ def generate_triplegs(
tpls_diff = np.diff(tpls_starts)
# get the start position of stps
- # pd.NA causes error in boolen comparision, replace to -1
+ # pd.NA causes error in boolean comparision, replace to -1
stps_id = pfs["staypoint_id"].copy().fillna(-1)
unique_stps, stps_starts = np.unique(stps_id, return_index=True)
# get the index of where the tpls_starts belong in stps_starts
diff --git a/trackintel/preprocessing/staypoints.py b/trackintel/preprocessing/staypoints.py
index 1a520b7..00040e2 100644
--- a/trackintel/preprocessing/staypoints.py
+++ b/trackintel/preprocessing/staypoints.py
@@ -5,9 +5,9 @@ import geopandas as gpd
import pandas as pd
from shapely.geometry import Point
from sklearn.cluster import DBSCAN
-from tqdm import tqdm
from trackintel.geogr.distances import meters_to_decimal_degrees
+from trackintel.preprocessing.util import applyParallel
def generate_locations(
@@ -18,6 +18,7 @@ def generate_locations(
distance_metric="haversine",
agg_level="user",
print_progress=False,
+ n_jobs=1,
):
"""
Generate locations from the staypoints.
@@ -51,6 +52,12 @@ def generate_locations(
print_progress : bool, default False
If print_progress is True, the progress bar is displayed
+ n_jobs: int, default 1
+ The maximum number of concurrently running jobs. If -1 all CPUs are used. If 1 is given, no parallel
+ computing code is used at all, which is useful for debugging. See
+ https://joblib.readthedocs.io/en/latest/parallel.html#parallel-reference-documentation
+ for a detailed description
+
Returns
-------
ret_sp: GeoDataFrame (as trackintel staypoints)
@@ -85,21 +92,15 @@ def generate_locations(
db = DBSCAN(eps=epsilon, min_samples=num_samples, algorithm="ball_tree", metric=distance_metric)
if agg_level == "user":
- if print_progress:
- tqdm.pandas(desc="User location generation")
- ret_stps = ret_stps.groupby("user_id", as_index=False).progress_apply(
- _generate_locations_per_user,
- geo_col=geo_col,
- distance_metric=distance_metric,
- db=db,
- )
- else:
- ret_stps = ret_stps.groupby("user_id", as_index=False).apply(
- _generate_locations_per_user,
- geo_col=geo_col,
- distance_metric=distance_metric,
- db=db,
- )
+ ret_stps = applyParallel(
+ ret_stps.groupby("user_id", as_index=False),
+ _generate_locations_per_user,
+ n_jobs=n_jobs,
+ print_progress=print_progress,
+ geo_col=geo_col,
+ distance_metric=distance_metric,
+ db=db,
+ )
# keeping track of noise labels
ret_stps_non_noise_labels = ret_stps[ret_stps["location_id"] != -1]
diff --git a/trackintel/preprocessing/util.py b/trackintel/preprocessing/util.py
index e14485b..04c6874 100644
--- a/trackintel/preprocessing/util.py
+++ b/trackintel/preprocessing/util.py
@@ -1,3 +1,8 @@
+import pandas as pd
+from joblib import Parallel, delayed
+from tqdm import tqdm
+
+
def calc_temp_overlap(start_1, end_1, start_2, end_2):
"""
Calculate the portion of the first time span that overlaps with the second
@@ -59,3 +64,38 @@ def calc_temp_overlap(start_1, end_1, start_2, end_2):
overlap_ratio = temp_overlap / dur.total_seconds()
return overlap_ratio
+
+
+def applyParallel(dfGrouped, func, n_jobs, print_progress, **kwargs):
+ """
+ Funtion warpper to parallelize funtions after .groupby().
+
+ Parameters
+ ----------
+ dfGrouped: pd.DataFrameGroupBy
+ The groupby object after calling df.groupby(COLUMN).
+
+ func: function
+ Function to apply to the dfGrouped object, i.e., dfGrouped.apply(func).
+
+ n_jobs: int
+ The maximum number of concurrently running jobs. If -1 all CPUs are used. If 1 is given, no parallel
+ computing code is used at all, which is useful for debugging. See
+ https://joblib.readthedocs.io/en/latest/parallel.html#parallel-reference-documentation
+ for a detailed description
+
+ print_progress: boolean
+ If set to True print the progress of apply.
+
+ **kwargs:
+ Other arguments passed to func.
+
+ Returns
+ -------
+ pd.DataFrame:
+ The result of dfGrouped.apply(func)
+ """
+ df_ls = Parallel(n_jobs=n_jobs)(
+ delayed(func)(group, **kwargs) for _, group in tqdm(dfGrouped, disable=not print_progress)
+ )
+ return pd.concat(df_ls)
|
mie-lab/trackintel
|
33d6205475ad8c4a859a9a5883241fe1098997b2
|
diff --git a/tests/preprocessing/test_positionfixes.py b/tests/preprocessing/test_positionfixes.py
index c1ec21d..2e84a7a 100644
--- a/tests/preprocessing/test_positionfixes.py
+++ b/tests/preprocessing/test_positionfixes.py
@@ -86,6 +86,18 @@ def example_positionfixes_isolated():
class TestGenerate_staypoints:
"""Tests for generate_staypoints() method."""
+ def test_parallel_computing(self):
+ """The result obtained with parallel computing should be identical."""
+ pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join("tests", "data", "geolife_long"))
+ # without parallel computing code
+ pfs_ori, stps_ori = pfs.as_positionfixes.generate_staypoints(n_jobs=1)
+ # using two cores
+ pfs_para, stps_para = pfs.as_positionfixes.generate_staypoints(n_jobs=2)
+
+ # the result of parallel computing should be identical
+ assert_geodataframe_equal(pfs_ori, pfs_para)
+ assert_geodataframe_equal(stps_ori, stps_para)
+
def test_duplicate_pfs_warning(self, example_positionfixes):
"""Calling generate_staypoints with duplicate positionfixes should raise a warning."""
pfs_duplicate_loc = example_positionfixes.copy()
diff --git a/tests/preprocessing/test_staypoints.py b/tests/preprocessing/test_staypoints.py
index 463cf5a..5ba08c3 100644
--- a/tests/preprocessing/test_staypoints.py
+++ b/tests/preprocessing/test_staypoints.py
@@ -7,6 +7,7 @@ import pandas as pd
import pytest
from shapely.geometry import Point
from sklearn.cluster import DBSCAN
+from geopandas.testing import assert_geodataframe_equal
import trackintel as ti
from trackintel.geogr.distances import calculate_distance_matrix
@@ -57,6 +58,23 @@ def example_staypoints():
class TestGenerate_locations:
"""Tests for generate_locations() method."""
+ def test_parallel_computing(self, example_staypoints):
+ """The result obtained with parallel computing should be identical."""
+ stps = example_staypoints
+
+ # without parallel computing code
+ stps_ori, locs_ori = stps.as_staypoints.generate_locations(
+ method="dbscan", epsilon=10, num_samples=2, distance_metric="haversine", agg_level="user", n_jobs=1
+ )
+ # using two cores
+ stps_para, locs_para = stps.as_staypoints.generate_locations(
+ method="dbscan", epsilon=10, num_samples=2, distance_metric="haversine", agg_level="user", n_jobs=2
+ )
+
+ # the result of parallel computing should be identical
+ assert_geodataframe_equal(locs_ori, locs_para)
+ assert_geodataframe_equal(stps_ori, stps_para)
+
def test_dbscan_hav_euc(self):
"""Test if using haversine and euclidean distances will generate the same location result."""
stps_file = os.path.join("tests", "data", "geolife", "geolife_staypoints.csv")
diff --git a/tests/preprocessing/test_triplegs.py b/tests/preprocessing/test_triplegs.py
index 58f5c55..84f22b9 100644
--- a/tests/preprocessing/test_triplegs.py
+++ b/tests/preprocessing/test_triplegs.py
@@ -275,8 +275,6 @@ class TestGenerate_trips:
stps_in = gpd.GeoDataFrame(stps_in, geometry="geom")
stps_in = ti.io.read_staypoints_gpd(stps_in, tz="utc")
- assert stps_in.as_staypoints
-
tpls_in = pd.read_csv(
os.path.join("tests", "data", "trips", "triplegs_gaps.csv"),
sep=";",
@@ -289,12 +287,10 @@ class TestGenerate_trips:
tpls_in = gpd.GeoDataFrame(tpls_in, geometry="geom")
tpls_in = ti.io.read_triplegs_gpd(tpls_in, tz="utc")
- assert tpls_in.as_triplegs
-
# load ground truth data
- trips_loaded = ti.read_trips_csv(os.path.join("tests", "data", "trips", "trips_gaps.csv"), index_col="id")
- trips_loaded["started_at"] = pd.to_datetime(trips_loaded["started_at"], utc=True)
- trips_loaded["finished_at"] = pd.to_datetime(trips_loaded["finished_at"], utc=True)
+ trips_loaded = ti.read_trips_csv(
+ os.path.join("tests", "data", "trips", "trips_gaps.csv"), index_col="id", tz="utc"
+ )
stps_tpls_loaded = pd.read_csv(os.path.join("tests", "data", "trips", "stps_tpls_gaps.csv"), index_col="id")
stps_tpls_loaded["started_at"] = pd.to_datetime(stps_tpls_loaded["started_at"], utc=True)
|
Parallelization of generate() function
The current generate() function can be speeded up by parallelization. This can be achieved by passing an optional argument like `n_jobs`, and wrap the operations after `groupby` using a generic parallelized function, for example:
````python
from joblib import Parallel, delayed
def applyParallel(dfGrouped, func, n_jobs, **kwargs):
retLst = Parallel(n_jobs=n_jobs)(delayed(func)(group, **kwargs) for _, group in dfGrouped)
return pd.concat(retLst)
# example to use the function
result = applyParallel(pfs.groupby("userid"), _generate_staypoints_sliding_user, n_jobs = 1)
````
This mainly relates to `trackintel.preprocessing.positionfixes.generate_staypoints()` (currently the bottleneck function) and `trackintel.preprocessing.staypoints.generate_locations()` functions.
|
0.0
|
33d6205475ad8c4a859a9a5883241fe1098997b2
|
[
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_parallel_computing",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_parallel_computing",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_index_stability",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_location_ids_and_noise"
] |
[
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_duplicate_pfs_filtering",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_duplicate_columns",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_sliding_min",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_sliding_max",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_missing_link",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_dtype_consistent",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_index_start",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_include_last",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_print_progress",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_temporal",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_noncontinuous_unordered_index",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_invalid_isolates",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_duplicate_columns",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_user_without_stps",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_pfs_without_stps",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_stability",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_random_order",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_pfs_index",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_dtype_consistent",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_missing_link",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_index_start",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_invalid_inputs",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_temporal",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_missing_link",
"tests/preprocessing/test_triplegs.py::TestSmoothen_triplegs::test_smoothen_triplegs",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_only_staypoints_in_trip"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-07-28 19:00:50+00:00
|
mit
| 3,920 |
|
mie-lab__trackintel-296
|
diff --git a/trackintel/io/postgis.py b/trackintel/io/postgis.py
index c898fd5..b0cba39 100644
--- a/trackintel/io/postgis.py
+++ b/trackintel/io/postgis.py
@@ -2,6 +2,8 @@ from functools import wraps
from inspect import signature
import geopandas as gpd
+from geopandas.io.sql import _get_srid_from_crs
+from shapely.wkb import dumps
import pandas as pd
from geoalchemy2 import Geometry, WKTElement
from sqlalchemy import create_engine
@@ -224,10 +226,7 @@ def write_locations_postgis(
# May build additional check for that.
if "extent" in locations.columns:
# geopandas.to_postgis can only handle one geometry column -> do it manually
- if locations.crs is not None:
- srid = locations.crs.to_epsg()
- else:
- srid = -1
+ srid = _get_srid_from_crs(locations)
extent_schema = Geometry("POLYGON", srid)
if dtype is None:
@@ -235,7 +234,7 @@ def write_locations_postgis(
else:
dtype["extent"] = extent_schema
locations = locations.copy()
- locations["extent"] = locations["extent"].apply(lambda x: WKTElement(x.wkt, srid=srid))
+ locations["extent"] = locations["extent"].apply(lambda x: dumps(x, srid=srid, hex=True))
locations.to_postgis(
name,
diff --git a/trackintel/preprocessing/positionfixes.py b/trackintel/preprocessing/positionfixes.py
index d604382..1ff8cf9 100644
--- a/trackintel/preprocessing/positionfixes.py
+++ b/trackintel/preprocessing/positionfixes.py
@@ -5,9 +5,9 @@ import geopandas as gpd
import numpy as np
import pandas as pd
from shapely.geometry import LineString, Point
-from tqdm import tqdm
from trackintel.geogr.distances import haversine_dist
+from trackintel.preprocessing.util import applyParallel
def generate_staypoints(
@@ -20,6 +20,7 @@ def generate_staypoints(
include_last=False,
print_progress=False,
exclude_duplicate_pfs=True,
+ n_jobs=1,
):
"""
Generate staypoints from positionfixes.
@@ -48,18 +49,24 @@ def generate_staypoints(
temporal gaps larger than 'gap_threshold' will be excluded from staypoints generation.
Only valid in 'sliding' method.
- include_last: boolen, default False
+ include_last: boolean, default False
The algorithm in Li et al. (2008) only detects staypoint if the user steps out
of that staypoint. This will omit the last staypoint (if any). Set 'include_last'
to True to include this last staypoint.
- print_progress: boolen, default False
+ print_progress: boolean, default False
Show per-user progress if set to True.
exclude_duplicate_pfs: boolean, default True
Filters duplicate positionfixes before generating staypoints. Duplicates can lead to problems in later
processing steps (e.g., when generating triplegs). It is not recommended to set this to False.
+ n_jobs: int, default 1
+ The maximum number of concurrently running jobs. If -1 all CPUs are used. If 1 is given, no parallel
+ computing code is used at all, which is useful for debugging. See
+ https://joblib.readthedocs.io/en/latest/parallel.html#parallel-reference-documentation
+ for a detailed description
+
Returns
-------
pfs: GeoDataFrame (as trackintel positionfixes)
@@ -118,37 +125,20 @@ def generate_staypoints(
# TODO: tests using a different distance function, e.g., L2 distance
if method == "sliding":
# Algorithm from Li et al. (2008). For details, please refer to the paper.
- if print_progress:
- tqdm.pandas(desc="User staypoint generation")
- stps = (
- pfs.groupby("user_id", as_index=False)
- .progress_apply(
- _generate_staypoints_sliding_user,
- geo_col=geo_col,
- elevation_flag=elevation_flag,
- dist_threshold=dist_threshold,
- time_threshold=time_threshold,
- gap_threshold=gap_threshold,
- distance_metric=distance_metric,
- include_last=include_last,
- )
- .reset_index(drop=True)
- )
- else:
- stps = (
- pfs.groupby("user_id", as_index=False)
- .apply(
- _generate_staypoints_sliding_user,
- geo_col=geo_col,
- elevation_flag=elevation_flag,
- dist_threshold=dist_threshold,
- time_threshold=time_threshold,
- gap_threshold=gap_threshold,
- distance_metric=distance_metric,
- include_last=include_last,
- )
- .reset_index(drop=True)
- )
+ stps = applyParallel(
+ pfs.groupby("user_id", as_index=False),
+ _generate_staypoints_sliding_user,
+ n_jobs=n_jobs,
+ print_progress=print_progress,
+ geo_col=geo_col,
+ elevation_flag=elevation_flag,
+ dist_threshold=dist_threshold,
+ time_threshold=time_threshold,
+ gap_threshold=gap_threshold,
+ distance_metric=distance_metric,
+ include_last=include_last,
+ ).reset_index(drop=True)
+
# index management
stps["id"] = np.arange(len(stps))
stps.set_index("id", inplace=True)
@@ -328,7 +318,7 @@ def generate_triplegs(
tpls_diff = np.diff(tpls_starts)
# get the start position of stps
- # pd.NA causes error in boolen comparision, replace to -1
+ # pd.NA causes error in boolean comparision, replace to -1
stps_id = pfs["staypoint_id"].copy().fillna(-1)
unique_stps, stps_starts = np.unique(stps_id, return_index=True)
# get the index of where the tpls_starts belong in stps_starts
diff --git a/trackintel/preprocessing/staypoints.py b/trackintel/preprocessing/staypoints.py
index 1a520b7..00040e2 100644
--- a/trackintel/preprocessing/staypoints.py
+++ b/trackintel/preprocessing/staypoints.py
@@ -5,9 +5,9 @@ import geopandas as gpd
import pandas as pd
from shapely.geometry import Point
from sklearn.cluster import DBSCAN
-from tqdm import tqdm
from trackintel.geogr.distances import meters_to_decimal_degrees
+from trackintel.preprocessing.util import applyParallel
def generate_locations(
@@ -18,6 +18,7 @@ def generate_locations(
distance_metric="haversine",
agg_level="user",
print_progress=False,
+ n_jobs=1,
):
"""
Generate locations from the staypoints.
@@ -51,6 +52,12 @@ def generate_locations(
print_progress : bool, default False
If print_progress is True, the progress bar is displayed
+ n_jobs: int, default 1
+ The maximum number of concurrently running jobs. If -1 all CPUs are used. If 1 is given, no parallel
+ computing code is used at all, which is useful for debugging. See
+ https://joblib.readthedocs.io/en/latest/parallel.html#parallel-reference-documentation
+ for a detailed description
+
Returns
-------
ret_sp: GeoDataFrame (as trackintel staypoints)
@@ -85,21 +92,15 @@ def generate_locations(
db = DBSCAN(eps=epsilon, min_samples=num_samples, algorithm="ball_tree", metric=distance_metric)
if agg_level == "user":
- if print_progress:
- tqdm.pandas(desc="User location generation")
- ret_stps = ret_stps.groupby("user_id", as_index=False).progress_apply(
- _generate_locations_per_user,
- geo_col=geo_col,
- distance_metric=distance_metric,
- db=db,
- )
- else:
- ret_stps = ret_stps.groupby("user_id", as_index=False).apply(
- _generate_locations_per_user,
- geo_col=geo_col,
- distance_metric=distance_metric,
- db=db,
- )
+ ret_stps = applyParallel(
+ ret_stps.groupby("user_id", as_index=False),
+ _generate_locations_per_user,
+ n_jobs=n_jobs,
+ print_progress=print_progress,
+ geo_col=geo_col,
+ distance_metric=distance_metric,
+ db=db,
+ )
# keeping track of noise labels
ret_stps_non_noise_labels = ret_stps[ret_stps["location_id"] != -1]
diff --git a/trackintel/preprocessing/triplegs.py b/trackintel/preprocessing/triplegs.py
index 776c3b3..a27ad58 100644
--- a/trackintel/preprocessing/triplegs.py
+++ b/trackintel/preprocessing/triplegs.py
@@ -1,11 +1,9 @@
import warnings
+import geopandas as gpd
import numpy as np
import pandas as pd
-from shapely import geometry
-from tqdm import tqdm
-from shapely.geometry import MultiPoint, Point
-import geopandas as gpd
+from shapely.geometry import MultiPoint
def smoothen_triplegs(triplegs, tolerance=1.0, preserve_topology=True):
@@ -164,7 +162,7 @@ def generate_trips(spts, tpls, gap_threshold=15, add_geometry=True):
trips_grouper = spts_tpls_no_act.groupby("temp_trip_id")
trips = trips_grouper.agg(
- {"user_id": "mean", "started_at": min, "finished_at": max, "type": list, "spts_tpls_id": list}
+ {"user_id": "first", "started_at": min, "finished_at": max, "type": list, "spts_tpls_id": list}
)
def _seperate_ids(row):
diff --git a/trackintel/preprocessing/util.py b/trackintel/preprocessing/util.py
index e14485b..04c6874 100644
--- a/trackintel/preprocessing/util.py
+++ b/trackintel/preprocessing/util.py
@@ -1,3 +1,8 @@
+import pandas as pd
+from joblib import Parallel, delayed
+from tqdm import tqdm
+
+
def calc_temp_overlap(start_1, end_1, start_2, end_2):
"""
Calculate the portion of the first time span that overlaps with the second
@@ -59,3 +64,38 @@ def calc_temp_overlap(start_1, end_1, start_2, end_2):
overlap_ratio = temp_overlap / dur.total_seconds()
return overlap_ratio
+
+
+def applyParallel(dfGrouped, func, n_jobs, print_progress, **kwargs):
+ """
+ Funtion warpper to parallelize funtions after .groupby().
+
+ Parameters
+ ----------
+ dfGrouped: pd.DataFrameGroupBy
+ The groupby object after calling df.groupby(COLUMN).
+
+ func: function
+ Function to apply to the dfGrouped object, i.e., dfGrouped.apply(func).
+
+ n_jobs: int
+ The maximum number of concurrently running jobs. If -1 all CPUs are used. If 1 is given, no parallel
+ computing code is used at all, which is useful for debugging. See
+ https://joblib.readthedocs.io/en/latest/parallel.html#parallel-reference-documentation
+ for a detailed description
+
+ print_progress: boolean
+ If set to True print the progress of apply.
+
+ **kwargs:
+ Other arguments passed to func.
+
+ Returns
+ -------
+ pd.DataFrame:
+ The result of dfGrouped.apply(func)
+ """
+ df_ls = Parallel(n_jobs=n_jobs)(
+ delayed(func)(group, **kwargs) for _, group in tqdm(dfGrouped, disable=not print_progress)
+ )
+ return pd.concat(df_ls)
|
mie-lab/trackintel
|
33d6205475ad8c4a859a9a5883241fe1098997b2
|
diff --git a/.github/workflows/CI-tests.yml b/.github/workflows/CI-tests.yml
index 2e2e170..3c267ac 100644
--- a/.github/workflows/CI-tests.yml
+++ b/.github/workflows/CI-tests.yml
@@ -60,5 +60,5 @@ jobs:
source activate test
conda install postgis -c conda-forge
source ci/envs/setup_postgres.sh
- pytest -v -r s --color=yes --cov=trackintel --cov-append --cov-report term-missing --cov-report xml tests/io/test_postgis_gpd.py | tee /dev/stderr | if grep SKIPPED >/dev/null;then echo "TESTS SKIPPED, FAILING" && exit 1;fi
+ pytest -v -r s --color=yes --cov=trackintel --cov-append --cov-report term-missing --cov-report xml tests/io/test_postgis.py | tee /dev/stderr | if grep SKIPPED >/dev/null;then echo "TESTS SKIPPED, FAILING" && exit 1;fi
- uses: codecov/codecov-action@v1
\ No newline at end of file
diff --git a/tests/io/test_postgis_gpd.py b/tests/io/test_postgis.py
similarity index 100%
rename from tests/io/test_postgis_gpd.py
rename to tests/io/test_postgis.py
diff --git a/tests/preprocessing/test_positionfixes.py b/tests/preprocessing/test_positionfixes.py
index c1ec21d..2e84a7a 100644
--- a/tests/preprocessing/test_positionfixes.py
+++ b/tests/preprocessing/test_positionfixes.py
@@ -86,6 +86,18 @@ def example_positionfixes_isolated():
class TestGenerate_staypoints:
"""Tests for generate_staypoints() method."""
+ def test_parallel_computing(self):
+ """The result obtained with parallel computing should be identical."""
+ pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join("tests", "data", "geolife_long"))
+ # without parallel computing code
+ pfs_ori, stps_ori = pfs.as_positionfixes.generate_staypoints(n_jobs=1)
+ # using two cores
+ pfs_para, stps_para = pfs.as_positionfixes.generate_staypoints(n_jobs=2)
+
+ # the result of parallel computing should be identical
+ assert_geodataframe_equal(pfs_ori, pfs_para)
+ assert_geodataframe_equal(stps_ori, stps_para)
+
def test_duplicate_pfs_warning(self, example_positionfixes):
"""Calling generate_staypoints with duplicate positionfixes should raise a warning."""
pfs_duplicate_loc = example_positionfixes.copy()
diff --git a/tests/preprocessing/test_staypoints.py b/tests/preprocessing/test_staypoints.py
index 463cf5a..5ba08c3 100644
--- a/tests/preprocessing/test_staypoints.py
+++ b/tests/preprocessing/test_staypoints.py
@@ -7,6 +7,7 @@ import pandas as pd
import pytest
from shapely.geometry import Point
from sklearn.cluster import DBSCAN
+from geopandas.testing import assert_geodataframe_equal
import trackintel as ti
from trackintel.geogr.distances import calculate_distance_matrix
@@ -57,6 +58,23 @@ def example_staypoints():
class TestGenerate_locations:
"""Tests for generate_locations() method."""
+ def test_parallel_computing(self, example_staypoints):
+ """The result obtained with parallel computing should be identical."""
+ stps = example_staypoints
+
+ # without parallel computing code
+ stps_ori, locs_ori = stps.as_staypoints.generate_locations(
+ method="dbscan", epsilon=10, num_samples=2, distance_metric="haversine", agg_level="user", n_jobs=1
+ )
+ # using two cores
+ stps_para, locs_para = stps.as_staypoints.generate_locations(
+ method="dbscan", epsilon=10, num_samples=2, distance_metric="haversine", agg_level="user", n_jobs=2
+ )
+
+ # the result of parallel computing should be identical
+ assert_geodataframe_equal(locs_ori, locs_para)
+ assert_geodataframe_equal(stps_ori, stps_para)
+
def test_dbscan_hav_euc(self):
"""Test if using haversine and euclidean distances will generate the same location result."""
stps_file = os.path.join("tests", "data", "geolife", "geolife_staypoints.csv")
diff --git a/tests/preprocessing/test_triplegs.py b/tests/preprocessing/test_triplegs.py
index 58f5c55..84f22b9 100644
--- a/tests/preprocessing/test_triplegs.py
+++ b/tests/preprocessing/test_triplegs.py
@@ -275,8 +275,6 @@ class TestGenerate_trips:
stps_in = gpd.GeoDataFrame(stps_in, geometry="geom")
stps_in = ti.io.read_staypoints_gpd(stps_in, tz="utc")
- assert stps_in.as_staypoints
-
tpls_in = pd.read_csv(
os.path.join("tests", "data", "trips", "triplegs_gaps.csv"),
sep=";",
@@ -289,12 +287,10 @@ class TestGenerate_trips:
tpls_in = gpd.GeoDataFrame(tpls_in, geometry="geom")
tpls_in = ti.io.read_triplegs_gpd(tpls_in, tz="utc")
- assert tpls_in.as_triplegs
-
# load ground truth data
- trips_loaded = ti.read_trips_csv(os.path.join("tests", "data", "trips", "trips_gaps.csv"), index_col="id")
- trips_loaded["started_at"] = pd.to_datetime(trips_loaded["started_at"], utc=True)
- trips_loaded["finished_at"] = pd.to_datetime(trips_loaded["finished_at"], utc=True)
+ trips_loaded = ti.read_trips_csv(
+ os.path.join("tests", "data", "trips", "trips_gaps.csv"), index_col="id", tz="utc"
+ )
stps_tpls_loaded = pd.read_csv(os.path.join("tests", "data", "trips", "stps_tpls_gaps.csv"), index_col="id")
stps_tpls_loaded["started_at"] = pd.to_datetime(stps_tpls_loaded["started_at"], utc=True)
|
generate_trips only accepts numeric user_ids
I think it would be enough to change the aggregation function [here]( https://github.com/mie-lab/trackintel/blob/33d6205475ad8c4a859a9a5883241fe1098997b2/trackintel/preprocessing/triplegs.py#L167) for `user_id` to `first` or something similar that accepts strings.
|
0.0
|
33d6205475ad8c4a859a9a5883241fe1098997b2
|
[
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_parallel_computing",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_parallel_computing",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_index_stability",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_location_ids_and_noise"
] |
[
"tests/io/test_postgis.py::TestGetSrid::test_srid",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_duplicate_pfs_filtering",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_duplicate_columns",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_sliding_min",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_sliding_max",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_missing_link",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_dtype_consistent",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_index_start",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_include_last",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_print_progress",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_temporal",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_noncontinuous_unordered_index",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_invalid_isolates",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_duplicate_columns",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_user_without_stps",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_pfs_without_stps",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_stability",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_random_order",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_pfs_index",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_dtype_consistent",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_missing_link",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_index_start",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_invalid_inputs",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_temporal",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_missing_link",
"tests/preprocessing/test_triplegs.py::TestSmoothen_triplegs::test_smoothen_triplegs",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_only_staypoints_in_trip"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-07-30 09:24:50+00:00
|
mit
| 3,921 |
|
mie-lab__trackintel-311
|
diff --git a/docs/environment.yml b/docs/environment.yml
index 4750c75..1392606 100644
--- a/docs/environment.yml
+++ b/docs/environment.yml
@@ -3,19 +3,19 @@ channels:
- conda-forge
- defaults
dependencies:
-- python=3.8
-- numpy
-- matplotlib
-- pandas
+# - python=3.8
- geopandas
-- scikit-learn
-- networkx
-- pint
-- pip
-- geoalchemy2
-- osmnx
-- psycopg2
-- sphinx
-- sphinx_rtd_theme
-- tqdm
-- similaritymeasures
\ No newline at end of file
+- pip:
+ - numpy
+ - matplotlib
+ - pandas
+ - scikit-learn
+ - networkx
+ - pint
+ - geoalchemy2
+ - osmnx
+ - psycopg2
+ - sphinx
+ - sphinx_rtd_theme
+ - tqdm
+ - similaritymeasures
diff --git a/trackintel/io/postgis.py b/trackintel/io/postgis.py
index 16c59dd..2a9c0d2 100644
--- a/trackintel/io/postgis.py
+++ b/trackintel/io/postgis.py
@@ -620,7 +620,7 @@ __doc = """Stores {long} to PostGIS. Usually, this is directly called on a {long
Examples
--------
>>> {short}.as_{long}.to_postgis(conn_string, table_name)
- >>> ti.io.postgis.write_{long}_postgis(pfs, conn_string, table_name)
+ >>> ti.io.postgis.write_{long}_postgis({short}, conn_string, table_name)
"""
write_positionfixes_postgis.__doc__ = __doc.format(long="positionfixes", short="pfs")
diff --git a/trackintel/preprocessing/triplegs.py b/trackintel/preprocessing/triplegs.py
index a27ad58..12dfb90 100644
--- a/trackintel/preprocessing/triplegs.py
+++ b/trackintel/preprocessing/triplegs.py
@@ -3,7 +3,7 @@ import warnings
import geopandas as gpd
import numpy as np
import pandas as pd
-from shapely.geometry import MultiPoint
+from shapely.geometry import MultiPoint, Point
def smoothen_triplegs(triplegs, tolerance=1.0, preserve_topology=True):
@@ -237,6 +237,19 @@ def generate_trips(spts, tpls, gap_threshold=15, add_geometry=True):
axis=1,
)
+ # now handle the data that is aggregated in the trips
+ # assign trip_id to tpls
+ temp = trips.explode("tpls")
+ temp.index = temp["tpls"]
+ temp = temp[temp["tpls"].notna()]
+ tpls = tpls.join(temp["trip_id"], how="left")
+
+ # assign trip_id to spts, for non-activity spts
+ temp = trips.explode("spts")
+ temp.index = temp["spts"]
+ temp = temp[temp["spts"].notna()]
+ spts = spts.join(temp["trip_id"], how="left")
+
# fill missing points and convert to MultiPoint
# for all trips with missing 'origin_staypoint_id' we now assign the startpoint of the first tripleg of the trip.
# for all tripls with missing 'destination_staypoint_id' we now assign the endpoint of the last tripleg of the trip.
@@ -245,13 +258,13 @@ def generate_trips(spts, tpls, gap_threshold=15, add_geometry=True):
origin_nan_rows = trips[pd.isna(trips["origin_staypoint_id"])].copy()
trips.loc[pd.isna(trips["origin_staypoint_id"]), "origin_geom"] = origin_nan_rows.tpls.map(
# from tpls table, get the first point of the first tripleg for the trip
- lambda x: tpls.loc[x[0], tpls.geometry.name].boundary[0]
+ lambda x: Point(tpls.loc[x[0], tpls.geometry.name].coords[0])
)
# fill geometry for destionations staypoints that are NaN
destination_nan_rows = trips[pd.isna(trips["destination_staypoint_id"])].copy()
trips.loc[pd.isna(trips["destination_staypoint_id"]), "destination_geom"] = destination_nan_rows.tpls.map(
# from tpls table, get the last point of the last tripleg on the trip
- lambda x: tpls.loc[x[-1], tpls.geometry.name].boundary[1]
+ lambda x: Point(tpls.loc[x[-1], tpls.geometry.name].coords[-1])
)
# convert to GeoDataFrame with MultiPoint column
trips["geom"] = [MultiPoint([x, y]) for x, y in zip(trips.origin_geom, trips.destination_geom)]
@@ -259,19 +272,6 @@ def generate_trips(spts, tpls, gap_threshold=15, add_geometry=True):
# cleanup
trips.drop(["origin_geom", "destination_geom"], inplace=True, axis=1)
- # now handle the data that is aggregated in the trips
- # assign trip_id to tpls
- temp = trips.explode("tpls")
- temp.index = temp["tpls"]
- temp = temp[temp["tpls"].notna()]
- tpls = tpls.join(temp["trip_id"], how="left")
-
- # assign trip_id to spts, for non-activity spts
- temp = trips.explode("spts")
- temp.index = temp["spts"]
- temp = temp[temp["spts"].notna()]
- spts = spts.join(temp["trip_id"], how="left")
-
# final cleaning
tpls.drop(columns=["type"], inplace=True)
spts.drop(columns=["type"], inplace=True)
|
mie-lab/trackintel
|
3ba405ba80a6da491bee13ed25aa416aa02e3428
|
diff --git a/tests/preprocessing/test_triplegs.py b/tests/preprocessing/test_triplegs.py
index 84f22b9..5ac1537 100644
--- a/tests/preprocessing/test_triplegs.py
+++ b/tests/preprocessing/test_triplegs.py
@@ -14,6 +14,29 @@ import trackintel as ti
from trackintel.preprocessing.triplegs import generate_trips
[email protected]
+def example_triplegs():
+ """Generate input data for trip generation from geolife positionfixes"""
+ pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join("tests", "data", "geolife_long"))
+ pfs, stps = pfs.as_positionfixes.generate_staypoints(method="sliding", dist_threshold=25, time_threshold=5)
+ stps = stps.as_staypoints.create_activity_flag(time_threshold=15)
+ pfs, tpls = pfs.as_positionfixes.generate_triplegs(stps)
+ return stps, tpls
+
+
[email protected]
+def example_triplegs_higher_gap_threshold():
+ """Generate input data for trip generation, but with a higher gap threshold in stp generation"""
+ # create trips from geolife (based on positionfixes)
+ pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join("tests", "data", "geolife_long"))
+ pfs, stps = pfs.as_positionfixes.generate_staypoints(
+ method="sliding", dist_threshold=25, time_threshold=5, gap_threshold=1e6
+ )
+ stps = stps.as_staypoints.create_activity_flag(time_threshold=15)
+ pfs, tpls = pfs.as_positionfixes.generate_triplegs(stps)
+ return stps, tpls
+
+
class TestSmoothen_triplegs:
def test_smoothen_triplegs(self):
tpls_file = os.path.join("tests", "data", "triplegs_with_too_many_points_test.csv")
@@ -35,13 +58,10 @@ class TestSmoothen_triplegs:
class TestGenerate_trips:
"""Tests for generate_trips() method."""
- def test_duplicate_columns(self):
+ def test_duplicate_columns(self, example_triplegs):
"""Test if running the function twice, the generated column does not yield exception in join statement"""
# create trips from geolife (based on positionfixes)
- pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join("tests", "data", "geolife_long"))
- pfs, stps = pfs.as_positionfixes.generate_staypoints(method="sliding", dist_threshold=25, time_threshold=5)
- stps = stps.as_staypoints.create_activity_flag(time_threshold=15)
- pfs, tpls = pfs.as_positionfixes.generate_triplegs(stps)
+ stps, tpls = example_triplegs
# generate trips and a joint staypoint/triplegs dataframe
stps_run_1, tpls_run_1, _ = generate_trips(stps, tpls, gap_threshold=15)
@@ -51,18 +71,13 @@ class TestGenerate_trips:
assert set(tpls_run_1.columns) == set(tpls_run_2.columns)
assert set(stps_run_1.columns) == set(stps_run_2.columns)
- def test_generate_trips(self):
+ def test_generate_trips(self, example_triplegs_higher_gap_threshold):
"""Test if we can generate the example trips based on example data."""
# load pregenerated trips
trips_loaded = ti.read_trips_csv(os.path.join("tests", "data", "geolife_long", "trips.csv"), index_col="id")
- # create trips from geolife (based on positionfixes)
- pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join("tests", "data", "geolife_long"))
- pfs, stps = pfs.as_positionfixes.generate_staypoints(
- method="sliding", dist_threshold=25, time_threshold=5, gap_threshold=1e6
- )
- stps = stps.as_staypoints.create_activity_flag(time_threshold=15)
- pfs, tpls = pfs.as_positionfixes.generate_triplegs(stps)
+ # create trips from geolife (based on positionfixes) - with gap_threshold 1e6
+ stps, tpls = example_triplegs_higher_gap_threshold
# generate trips and a joint staypoint/triplegs dataframe
stps, tpls, trips = generate_trips(stps, tpls, gap_threshold=15)
@@ -72,15 +87,10 @@ class TestGenerate_trips:
# test if generated trips are equal
assert_geodataframe_equal(trips_loaded, trips)
- def test_trip_wo_geom(self):
+ def test_trip_wo_geom(self, example_triplegs_higher_gap_threshold):
"""Test if the add_geometry parameter shows correct behavior"""
- # create trips from geolife (based on positionfixes)
- pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join("tests", "data", "geolife_long"))
- pfs, stps = pfs.as_positionfixes.generate_staypoints(
- method="sliding", dist_threshold=25, time_threshold=5, gap_threshold=1e6
- )
- stps = stps.as_staypoints.create_activity_flag(time_threshold=15)
- pfs, tpls = pfs.as_positionfixes.generate_triplegs(stps)
+ # create trips from geolife (based on positionfixes) - with gap_threshold 1e6
+ stps, tpls = example_triplegs_higher_gap_threshold
# generate trips dataframe with geometry
_, _, trips = generate_trips(stps, tpls, gap_threshold=15)
@@ -92,15 +102,10 @@ class TestGenerate_trips:
# test if generated trips are equal
assert_frame_equal(trips_wo_geom, trips)
- def test_trip_coordinates(self):
+ def test_trip_coordinates(self, example_triplegs_higher_gap_threshold):
"""Test if coordinates of start and destination are correct"""
- # create trips from geolife (based on positionfixes)
- pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join("tests", "data", "geolife_long"))
- pfs, stps = pfs.as_positionfixes.generate_staypoints(
- method="sliding", dist_threshold=25, time_threshold=5, gap_threshold=1e6
- )
- stps = stps.as_staypoints.create_activity_flag(time_threshold=15)
- pfs, tpls = pfs.as_positionfixes.generate_triplegs(stps)
+ # create trips from geolife (based on positionfixes) - with gap_threshold 1e6
+ stps, tpls = example_triplegs_higher_gap_threshold
# generate trips and a joint staypoint/triplegs dataframe
stps, tpls, trips = ti.preprocessing.triplegs.generate_trips(stps, tpls, gap_threshold=15)
@@ -133,16 +138,11 @@ class TestGenerate_trips:
assert correct_dest_point == dest_point_trips
- def test_accessor(self):
+ def test_accessor(self, example_triplegs):
"""Test if the accessor leads to the same results as the explicit function."""
- # load pregenerated trips
- trips_loaded = ti.read_trips_csv(os.path.join("tests", "data", "geolife_long", "trips.csv"), index_col="id")
- # prepare data
- pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join("tests", "data", "geolife_long"))
- pfs, stps = pfs.as_positionfixes.generate_staypoints(method="sliding", dist_threshold=25, time_threshold=5)
- stps = stps.as_staypoints.create_activity_flag(time_threshold=15)
- pfs, tpls = pfs.as_positionfixes.generate_triplegs(stps)
+ # get geolife test data (based on positionfixes)
+ stps, tpls = example_triplegs
# generate trips using the explicit function import
stps_expl, tpls_expl, trips_expl = ti.preprocessing.triplegs.generate_trips(stps, tpls, gap_threshold=15)
@@ -155,16 +155,11 @@ class TestGenerate_trips:
assert_geodataframe_equal(stps_expl, stps_acc)
assert_geodataframe_equal(tpls_expl, tpls_acc)
- def test_accessor_arguments(self):
+ def test_accessor_arguments(self, example_triplegs):
"""Test if the accessor is robust to different ways to receive arguments"""
- # load pregenerated trips
- trips_loaded = ti.read_trips_csv(os.path.join("tests", "data", "geolife_long", "trips.csv"), index_col="id")
- # prepare data
- pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join("tests", "data", "geolife_long"))
- pfs, spts = pfs.as_positionfixes.generate_staypoints(method="sliding", dist_threshold=25, time_threshold=5)
- spts = spts.as_staypoints.create_activity_flag(time_threshold=15)
- pfs, tpls = pfs.as_positionfixes.generate_triplegs(spts)
+ # get geolife test data (based on positionfixes)
+ spts, tpls = example_triplegs
# accessor with only arguments (not allowed)
with pytest.raises(AssertionError):
@@ -181,13 +176,10 @@ class TestGenerate_trips:
assert_geodataframe_equal(tpls_1, tpls_2)
assert_geodataframe_equal(trips_1, trips_2)
- def test_generate_trips_missing_link(self):
+ def test_generate_trips_missing_link(self, example_triplegs):
"""Test nan is assigned for missing link between stps and trips, and tpls and trips."""
- # create trips from geolife (based on positionfixes)
- pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join("tests", "data", "geolife_long"))
- pfs, stps = pfs.as_positionfixes.generate_staypoints(method="sliding", dist_threshold=25, time_threshold=5)
- stps = stps.as_staypoints.create_activity_flag(time_threshold=15)
- pfs, tpls = pfs.as_positionfixes.generate_triplegs(stps)
+ # get geolife test data (based on positionfixes)
+ stps, tpls = example_triplegs
# generate trips and a joint staypoint/triplegs dataframe
stps, tpls, _ = generate_trips(stps, tpls, gap_threshold=15)
@@ -195,13 +187,10 @@ class TestGenerate_trips:
assert pd.isna(stps["prev_trip_id"]).any()
assert pd.isna(stps["next_trip_id"]).any()
- def test_generate_trips_dtype_consistent(self):
+ def test_generate_trips_dtype_consistent(self, example_triplegs):
"""Test the dtypes for the generated columns."""
- # create trips from geolife (based on positionfixes)
- pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join("tests", "data", "geolife_long"))
- pfs, stps = pfs.as_positionfixes.generate_staypoints(method="sliding", dist_threshold=25, time_threshold=5)
- stps = stps.as_staypoints.create_activity_flag(time_threshold=15)
- pfs, tpls = pfs.as_positionfixes.generate_triplegs(stps)
+ # get geolife test data (based on positionfixes)
+ stps, tpls = example_triplegs
# generate trips and a joint staypoint/triplegs dataframe
stps, tpls, trips = generate_trips(stps, tpls, gap_threshold=15)
@@ -214,15 +203,12 @@ class TestGenerate_trips:
assert stps["next_trip_id"].dtype == "Int64"
assert tpls["trip_id"].dtype == "Int64"
- def test_compare_to_old_trip_function(self):
+ def test_compare_to_old_trip_function(self, example_triplegs):
"""Test if we can generate the example trips based on example data."""
# load pregenerated trips
- # create trips from geolife (based on positionfixes)
- pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join("tests", "data", "geolife_long"))
- pfs, stps = pfs.as_positionfixes.generate_staypoints(method="sliding", dist_threshold=25, time_threshold=5)
- stps = stps.as_staypoints.create_activity_flag(time_threshold=15)
- pfs, tpls = pfs.as_positionfixes.generate_triplegs(stps)
+ # get geolife test data (based on positionfixes)
+ stps, tpls = example_triplegs
# generate trips and a joint staypoint/triplegs dataframe
stps, tpls, trips = generate_trips(stps, tpls, gap_threshold=15)
@@ -235,12 +221,10 @@ class TestGenerate_trips:
assert_frame_equal(stps, stps_, check_like=True, check_index_type=False)
assert_frame_equal(tpls, tpls_, check_like=True, check_index_type=False)
- def test_generate_trips_index_start(self):
+ def test_generate_trips_index_start(self, example_triplegs):
"""Test the generated index start from 0 for different methods."""
- pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join("tests", "data", "geolife_long"))
- pfs, stps = pfs.as_positionfixes.generate_staypoints(method="sliding", dist_threshold=25, time_threshold=5)
- stps = stps.as_staypoints.create_activity_flag(time_threshold=15)
- pfs, tpls = pfs.as_positionfixes.generate_triplegs(stps)
+ # get geolife test data (based on positionfixes)
+ stps, tpls = example_triplegs
# generate trips and a joint staypoint/triplegs dataframe
_, _, trips = generate_trips(stps, tpls, gap_threshold=15)
@@ -307,20 +291,14 @@ class TestGenerate_trips:
# test if generated staypoints/triplegs are equal (especially important for trip ids)
assert_frame_equal(stps_tpls_loaded, stps_tpls, check_dtype=False)
- def test_generate_trips_id_management(self):
+ def test_generate_trips_id_management(self, example_triplegs_higher_gap_threshold):
"""Test if we can generate the example trips based on example data."""
stps_tpls_loaded = pd.read_csv(os.path.join("tests", "data", "geolife_long", "stps_tpls.csv"), index_col="id")
stps_tpls_loaded["started_at"] = pd.to_datetime(stps_tpls_loaded["started_at"])
stps_tpls_loaded["started_at_next"] = pd.to_datetime(stps_tpls_loaded["started_at_next"])
stps_tpls_loaded["finished_at"] = pd.to_datetime(stps_tpls_loaded["finished_at"])
- # create trips from geolife (based on positionfixes)
- pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join("tests", "data", "geolife_long"))
- pfs, stps = pfs.as_positionfixes.generate_staypoints(
- method="sliding", dist_threshold=25, time_threshold=5, gap_threshold=1e6
- )
- stps = stps.as_staypoints.create_activity_flag(time_threshold=15)
- pfs, tpls = pfs.as_positionfixes.generate_triplegs(stps)
+ stps, tpls = example_triplegs_higher_gap_threshold
# generate trips and a joint staypoint/triplegs dataframe
gap_threshold = 15
@@ -356,6 +334,20 @@ class TestGenerate_trips:
assert (tpls_["trip_id"] == 0).all()
assert len(trips) == 1
+ def test_loop_linestring_case(self, example_triplegs):
+ """Test corner case where a tripleg starts and ends at the same point"""
+ # input data: preprocessed stps and tpls
+ stps, tpls = example_triplegs
+
+ # add a tripleg with same start as end, by modifying the first tripleg
+ tpls.loc[0, "geom"] = LineString([(0, 0), (1, 1), (0, 0)])
+
+ # generate trips and a joint staypoint/triplegs dataframe
+ stps, tpls, trips = ti.preprocessing.triplegs.generate_trips(stps, tpls, gap_threshold=15)
+
+ # test if start of first trip is (0,0)
+ assert trips.loc[0, "geom"][0] == Point(0, 0)
+
def _create_debug_stps_tpls_data(stps, tpls, gap_threshold):
"""Preprocess stps and tpls for "test_generate_trips_*."""
|
trips with geometries cause error in id assignment
The problem is that geopandas overrides the explode method of pandas (see issue [here](https://github.com/geopandas/geopandas/issues/2061)). This causes a problem [here](https://github.com/geopandas/geopandas/issues/2061) and [here](https://github.com/mie-lab/trackintel/blob/master/trackintel/preprocessing/triplegs.py#L270) as we want to access the original pandas explode method.
I think the simplest fix would be to call the dataframe constructor before using explode. It is not too expensive and then we don't have to wait until the geopandas issue is fixed and we are not limited to only new geopandas versions.
E.g.:
`temp = pd.DataFrame(trips).explode("tpls")`
`temp = pd.DataFrame(trips).explode("spts")`
|
0.0
|
3ba405ba80a6da491bee13ed25aa416aa02e3428
|
[
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_duplicate_columns",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_generate_trips",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_trip_wo_geom",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_accessor",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_accessor_arguments",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_generate_trips_missing_link",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_generate_trips_dtype_consistent",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_compare_to_old_trip_function",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_generate_trips_index_start"
] |
[
"tests/preprocessing/test_triplegs.py::TestSmoothen_triplegs::test_smoothen_triplegs",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_only_staypoints_in_trip"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-08-18 16:24:13+00:00
|
mit
| 3,922 |
|
mie-lab__trackintel-344
|
diff --git a/docs/modules/preprocessing.rst b/docs/modules/preprocessing.rst
index cbf76da..707b33c 100644
--- a/docs/modules/preprocessing.rst
+++ b/docs/modules/preprocessing.rst
@@ -31,6 +31,11 @@ frequently visits and/or infer if they correspond to activities.
.. autofunction:: trackintel.preprocessing.staypoints.generate_locations
+Due to tracking artifacts, it can occur that one activity is split into several staypoints.
+We can aggregate the staypoints horizontally that are close in time and at the same location.
+
+.. autofunction:: trackintel.preprocessing.staypoints.merge_staypoints
+
Triplegs
========
diff --git a/trackintel/io/file.py b/trackintel/io/file.py
index f444527..879b322 100644
--- a/trackintel/io/file.py
+++ b/trackintel/io/file.py
@@ -1,15 +1,11 @@
import warnings
-from geopandas.geodataframe import GeoDataFrame
-import numpy as np
-import dateutil
-import dateutil.parser
import geopandas as gpd
+import numpy as np
import pandas as pd
import pytz
-import warnings
+from geopandas.geodataframe import GeoDataFrame
from shapely import wkt
-from shapely import geometry
from shapely.geometry import Point
@@ -24,6 +20,9 @@ def read_positionfixes_csv(*args, columns=None, tz=None, index_col=object(), crs
Parameters
----------
+ args
+ Arguments as passed to pd.read_csv().
+
columns : dict, optional
The column names to rename in the format {'old_name':'trackintel_standard_name'}.
The required columns for this function include: "user_id", "tracked_at", "latitude"
@@ -41,6 +40,9 @@ def read_positionfixes_csv(*args, columns=None, tz=None, index_col=object(), crs
by pyproj.CRS.from_user_input(), such as an authority string
(eg 'EPSG:4326') or a WKT string.
+ kwargs
+ Additional keyword arguments passed to pd.read_csv().
+
Returns
-------
pfs : GeoDataFrame (as trackintel positionfixes)
@@ -116,10 +118,20 @@ def write_positionfixes_csv(positionfixes, filename, *args, **kwargs):
filename : str
The file to write to.
+ args
+ Additional arguments passed to pd.DataFrame.to_csv().
+
+ kwargs
+ Additional keyword arguments passed to pd.DataFrame.to_csv().
+
Notes
-----
"longitude" and "latitude" is extracted from the geometry column and the orignal
geometry column is dropped.
+
+ Examples
+ ---------
+ >>> ps.as_positionfixes.to_csv("export_pfs.csv")
"""
gdf = positionfixes.copy()
gdf["longitude"] = positionfixes.geometry.apply(lambda p: p.coords[0][0])
@@ -139,6 +151,9 @@ def read_triplegs_csv(*args, columns=None, tz=None, index_col=object(), crs=None
Parameters
----------
+ args
+ Arguments as passed to pd.read_csv().
+
columns : dict, optional
The column names to rename in the format {'old_name':'trackintel_standard_name'}.
The required columns for this function include: "user_id", "started_at", "finished_at"
@@ -156,6 +171,9 @@ def read_triplegs_csv(*args, columns=None, tz=None, index_col=object(), crs=None
by pyproj.CRS.from_user_input(), such as an authority string
(eg “EPSG:4326”) or a WKT string.
+ kwargs
+ Additional keyword arguments passed to pd.read_csv().
+
Returns
-------
tpls : GeoDataFrame (as trackintel triplegs)
@@ -209,7 +227,7 @@ def write_triplegs_csv(triplegs, filename, *args, **kwargs):
"""
Write triplegs to csv file.
- Wraps the pandas to_csv function, but transforms the geom into WKT
+ Wraps the pandas to_csv function, but transforms the geometry into WKT
before writing.
Parameters
@@ -219,6 +237,16 @@ def write_triplegs_csv(triplegs, filename, *args, **kwargs):
filename : str
The file to write to.
+
+ args
+ Additional arguments passed to pd.DataFrame.to_csv().
+
+ kwargs
+ Additional keyword arguments passed to pd.DataFrame.to_csv().
+
+ Examples
+ --------
+ >>> tpls.as_triplegs.to_csv("export_tpls.csv")
"""
geo_col_name = triplegs.geometry.name
df = pd.DataFrame(triplegs, copy=True)
@@ -237,6 +265,9 @@ def read_staypoints_csv(*args, columns=None, tz=None, index_col=object(), crs=No
Parameters
----------
+ args
+ Arguments as passed to pd.read_csv().
+
columns : dict, optional
The column names to rename in the format {'old_name':'trackintel_standard_name'}.
The required columns for this function include: "user_id", "started_at", "finished_at"
@@ -254,6 +285,9 @@ def read_staypoints_csv(*args, columns=None, tz=None, index_col=object(), crs=No
by pyproj.CRS.from_user_input(), such as an authority string
(eg “EPSG:4326”) or a WKT string.
+ kwargs
+ Additional keyword arguments passed to pd.read_csv().
+
Returns
-------
sp : GeoDataFrame (as trackintel staypoints)
@@ -307,7 +341,7 @@ def write_staypoints_csv(staypoints, filename, *args, **kwargs):
"""
Write staypoints to csv file.
- Wraps the pandas to_csv function, but transforms the geom into WKT
+ Wraps the pandas to_csv function, but transforms the geometry into WKT
before writing.
Parameters
@@ -317,6 +351,16 @@ def write_staypoints_csv(staypoints, filename, *args, **kwargs):
filename : str
The file to write to.
+
+ args
+ Additional arguments passed to pd.DataFrame.to_csv().
+
+ kwargs
+ Additional keyword arguments passed to pd.DataFrame.to_csv().
+
+ Examples
+ --------
+ >>> tpls.as_triplegs.to_csv("export_tpls.csv")
"""
geo_col_name = staypoints.geometry.name
df = pd.DataFrame(staypoints, copy=True)
@@ -335,6 +379,9 @@ def read_locations_csv(*args, columns=None, index_col=object(), crs=None, **kwar
Parameters
----------
+ args
+ Arguments as passed to pd.read_csv().
+
columns : dict, optional
The column names to rename in the format {'old_name':'trackintel_standard_name'}.
The required columns for this function include: "user_id" and "center".
@@ -348,6 +395,9 @@ def read_locations_csv(*args, columns=None, index_col=object(), crs=None, **kwar
by pyproj.CRS.from_user_input(), such as an authority string
(eg “EPSG:4326”) or a WKT string.
+ kwargs
+ Additional keyword arguments passed to pd.read_csv().
+
Returns
-------
locs : GeoDataFrame (as trackintel locations)
@@ -404,6 +454,16 @@ def write_locations_csv(locations, filename, *args, **kwargs):
filename : str
The file to write to.
+
+ args
+ Additional arguments passed to pd.DataFrame.to_csv().
+
+ kwargs
+ Additional keyword arguments passed to pd.DataFrame.to_csv().
+
+ Examples
+ --------
+ >>> locs.as_locations.to_csv("export_locs.csv")
"""
df = pd.DataFrame(locations, copy=True)
df["center"] = locations["center"].apply(wkt.dumps)
@@ -422,6 +482,9 @@ def read_trips_csv(*args, columns=None, tz=None, index_col=object(), **kwargs):
Parameters
----------
+ args
+ Arguments as passed to pd.read_csv().
+
columns : dict, optional
The column names to rename in the format {'old_name':'trackintel_standard_name'}.
The required columns for this function include: "user_id", "started_at",
@@ -435,6 +498,9 @@ def read_trips_csv(*args, columns=None, tz=None, index_col=object(), **kwargs):
column name to be used as index. If None the default index is assumed
as unique identifier.
+ kwargs
+ Additional keyword arguments passed to pd.read_csv().
+
Returns
-------
trips : (Geo)DataFrame (as trackintel trips)
@@ -494,6 +560,7 @@ def write_trips_csv(trips, filename, *args, **kwargs):
Write trips to csv file.
Wraps the pandas to_csv function.
+ Geometry get transformed to WKT before writing.
Parameters
----------
@@ -502,6 +569,16 @@ def write_trips_csv(trips, filename, *args, **kwargs):
filename : str
The file to write to.
+
+ args
+ Additional arguments passed to pd.DataFrame.to_csv().
+
+ kwargs
+ Additional keyword arguments passed to pd.DataFrame.to_csv().
+
+ Examples
+ --------
+ >>> trips.as_trips.to_csv("export_trips.csv")
"""
df = trips.copy()
if isinstance(df, GeoDataFrame):
@@ -520,16 +597,26 @@ def read_tours_csv(*args, columns=None, index_col=object(), tz=None, **kwargs):
Parameters
----------
+ args
+ Arguments as passed to pd.read_csv().
+
columns : dict, optional
The column names to rename in the format {'old_name':'trackintel_standard_name'}.
tz : str, optional
pytz compatible timezone string. If None UTC is assumed.
+ kwargs
+ Additional keyword arguments passed to pd.read_csv().
+
Returns
-------
tours : DataFrame (as trackintel tours)
A DataFrame containing the tours.
+
+ Examples
+ --------
+ >>> trackintel.read_tours_csv('data.csv', columns={'uuid':'user_id'})
"""
columns = {} if columns is None else columns
@@ -571,6 +658,16 @@ def write_tours_csv(tours, filename, *args, **kwargs):
filename : str
The file to write to.
+
+ args
+ Additional arguments passed to pd.DataFrame.to_csv().
+
+ kwargs
+ Additional keyword arguments passed to pd.DataFrame.to_csv().
+
+ Examples
+ --------
+ >>> tours.as_tours.to_csv("export_tours.csv")
"""
tours.to_csv(filename, index=True, *args, **kwargs)
diff --git a/trackintel/preprocessing/staypoints.py b/trackintel/preprocessing/staypoints.py
index 679c565..ed8837e 100644
--- a/trackintel/preprocessing/staypoints.py
+++ b/trackintel/preprocessing/staypoints.py
@@ -233,7 +233,8 @@ def merge_staypoints(staypoints, triplegs, max_time_gap="10min", agg={}):
Parameters
----------
staypoints : GeoDataFrame (as trackintel staypoints)
- The staypoints have to follow the standard definition for staypoints DataFrames.
+ The staypoints must contain a column `location_id` (see `generate_locations` function) and have to follow the
+ standard trackintel definition for staypoints DataFrames.
triplegs: GeoDataFrame (as trackintel triplegs)
The triplegs have to follow the standard definition for triplegs DataFrames.
@@ -277,6 +278,7 @@ def merge_staypoints(staypoints, triplegs, max_time_gap="10min", agg={}):
# otherwise check if it's a Timedelta already, and raise error if not
elif not isinstance(max_time_gap, pd.Timedelta):
raise TypeError("Parameter max_time_gap must be either of type String or pd.Timedelta!")
+ assert "location_id" in staypoints.columns, "Staypoints must contain column location_id"
sp_merge = staypoints.copy()
index_name = staypoints.index.name
@@ -285,6 +287,9 @@ def merge_staypoints(staypoints, triplegs, max_time_gap="10min", agg={}):
tpls_merge = triplegs.copy()
tpls_merge["type"] = "tripleg"
sp_merge["type"] = "staypoint"
+ # convert datatypes in order to preserve the datatypes (especially ints) despite of NaNs during concat
+ sp_merge = sp_merge.convert_dtypes()
+
# a joined dataframe sp_tpls is constructed to add the columns 'type' and 'next_type' to the 'sp_merge' table
# concat and sort by time
sp_tpls = pd.concat([sp_merge, tpls_merge]).sort_values(by=["user_id", "started_at"])
@@ -327,7 +332,13 @@ def merge_staypoints(staypoints, triplegs, max_time_gap="10min", agg={}):
cond_old = cond.copy()
# Staypoint-required columnsare aggregated in the following manner:
- agg_dict = {index_name: "first", "user_id": "first", "started_at": "first", "finished_at": "last"}
+ agg_dict = {
+ index_name: "first",
+ "user_id": "first",
+ "started_at": "first",
+ "finished_at": "last",
+ "location_id": "first",
+ }
# User-defined further aggregation
agg_dict.update(agg)
|
mie-lab/trackintel
|
5cd2b9a492984969dd6c8d8a6ca6dbd4b31e45ad
|
diff --git a/tests/preprocessing/test_staypoints.py b/tests/preprocessing/test_staypoints.py
index 79a8745..10c4425 100644
--- a/tests/preprocessing/test_staypoints.py
+++ b/tests/preprocessing/test_staypoints.py
@@ -399,7 +399,8 @@ class TestMergeStaypoints:
"""Test staypoint merging."""
sp, tpls = example_staypoints_merge
# first test with empty tpls
- merged_sp = sp.as_staypoints.merge_staypoints(tpls, agg={"geom": "first", "location_id": "first"})
+ merged_sp = sp.as_staypoints.merge_staypoints(tpls, agg={"geom": "first"})
+ merged_sp = merged_sp.reindex(columns=sp.columns)
assert len(merged_sp) == len(sp) - 3
# some staypoints stay the same (not merged)
assert (merged_sp.loc[1] == sp.loc[1]).all()
@@ -458,3 +459,11 @@ class TestMergeStaypoints:
# the geom should correspond to the first one
assert sp.loc[7, "geom"] == merged_sp.loc[7, "geom"]
assert sp.loc[2, "geom"] == merged_sp.loc[2, "geom"]
+
+ def test_merge_staypoints_error(self, example_staypoints_merge):
+ sp, tpls = example_staypoints_merge
+ sp.drop(columns=["location_id"], inplace=True)
+ with pytest.raises(AssertionError) as excinfo:
+ _ = sp.as_staypoints.merge_staypoints(tpls)
+
+ assert "Staypoints must contain column location_id" in str(excinfo.value)
|
Update docstrings in io.file
All functions in io.file use `*args` and `**kwargs`, but provide no documentation for them, the usage must be looked up in the code itself. They should be included in the docstrings :)
Examples are missing for write functions (see #335).
|
0.0
|
5cd2b9a492984969dd6c8d8a6ca6dbd4b31e45ad
|
[
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints",
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints_error"
] |
[
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_empty_generation",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_parallel_computing",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_missing_link",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_index_stability",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_location_ids_and_noise",
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints_time",
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints_max_time_gap",
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints_time_gap_error",
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints_agg"
] |
{
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-09-15 15:01:22+00:00
|
mit
| 3,923 |
|
mie-lab__trackintel-345
|
diff --git a/docs/modules/preprocessing.rst b/docs/modules/preprocessing.rst
index cbf76da..707b33c 100644
--- a/docs/modules/preprocessing.rst
+++ b/docs/modules/preprocessing.rst
@@ -31,6 +31,11 @@ frequently visits and/or infer if they correspond to activities.
.. autofunction:: trackintel.preprocessing.staypoints.generate_locations
+Due to tracking artifacts, it can occur that one activity is split into several staypoints.
+We can aggregate the staypoints horizontally that are close in time and at the same location.
+
+.. autofunction:: trackintel.preprocessing.staypoints.merge_staypoints
+
Triplegs
========
diff --git a/trackintel/geogr/distances.py b/trackintel/geogr/distances.py
index 58ee163..2c428a7 100644
--- a/trackintel/geogr/distances.py
+++ b/trackintel/geogr/distances.py
@@ -55,6 +55,10 @@ def calculate_distance_matrix(X, Y=None, dist_metric="haversine", n_jobs=0, **kw
D: np.array
matrix of shape (len(X), len(X)) or of shape (len(X), len(Y)) if Y is provided.
+ Examples
+ --------
+ >>> calculate_distance_matrix(staypoints, dist_metric="haversine")
+ >>> calculate_distance_matrix(triplegs_1, triplegs_2, dist_metric="dtw")
"""
geom_type = X.geometry.iat[0].geom_type
if Y is None:
@@ -179,6 +183,10 @@ def meters_to_decimal_degrees(meters, latitude):
-------
float
An approximation of a distance (given in meters) in degrees.
+
+ Examples
+ --------
+ >>> meters_to_decimal_degrees(500.0, 47.410)
"""
return meters / (111.32 * 1000.0 * cos(latitude * (pi / 180.0)))
@@ -205,7 +213,6 @@ def check_gdf_planar(gdf, transform=False):
gdf : GeoDataFrame
if transform is True, return the re-projected gdf.
-
Examples
--------
>>> from trackintel.geogr.distances import check_gdf_planar
diff --git a/trackintel/io/file.py b/trackintel/io/file.py
index f444527..879b322 100644
--- a/trackintel/io/file.py
+++ b/trackintel/io/file.py
@@ -1,15 +1,11 @@
import warnings
-from geopandas.geodataframe import GeoDataFrame
-import numpy as np
-import dateutil
-import dateutil.parser
import geopandas as gpd
+import numpy as np
import pandas as pd
import pytz
-import warnings
+from geopandas.geodataframe import GeoDataFrame
from shapely import wkt
-from shapely import geometry
from shapely.geometry import Point
@@ -24,6 +20,9 @@ def read_positionfixes_csv(*args, columns=None, tz=None, index_col=object(), crs
Parameters
----------
+ args
+ Arguments as passed to pd.read_csv().
+
columns : dict, optional
The column names to rename in the format {'old_name':'trackintel_standard_name'}.
The required columns for this function include: "user_id", "tracked_at", "latitude"
@@ -41,6 +40,9 @@ def read_positionfixes_csv(*args, columns=None, tz=None, index_col=object(), crs
by pyproj.CRS.from_user_input(), such as an authority string
(eg 'EPSG:4326') or a WKT string.
+ kwargs
+ Additional keyword arguments passed to pd.read_csv().
+
Returns
-------
pfs : GeoDataFrame (as trackintel positionfixes)
@@ -116,10 +118,20 @@ def write_positionfixes_csv(positionfixes, filename, *args, **kwargs):
filename : str
The file to write to.
+ args
+ Additional arguments passed to pd.DataFrame.to_csv().
+
+ kwargs
+ Additional keyword arguments passed to pd.DataFrame.to_csv().
+
Notes
-----
"longitude" and "latitude" is extracted from the geometry column and the orignal
geometry column is dropped.
+
+ Examples
+ ---------
+ >>> ps.as_positionfixes.to_csv("export_pfs.csv")
"""
gdf = positionfixes.copy()
gdf["longitude"] = positionfixes.geometry.apply(lambda p: p.coords[0][0])
@@ -139,6 +151,9 @@ def read_triplegs_csv(*args, columns=None, tz=None, index_col=object(), crs=None
Parameters
----------
+ args
+ Arguments as passed to pd.read_csv().
+
columns : dict, optional
The column names to rename in the format {'old_name':'trackintel_standard_name'}.
The required columns for this function include: "user_id", "started_at", "finished_at"
@@ -156,6 +171,9 @@ def read_triplegs_csv(*args, columns=None, tz=None, index_col=object(), crs=None
by pyproj.CRS.from_user_input(), such as an authority string
(eg “EPSG:4326”) or a WKT string.
+ kwargs
+ Additional keyword arguments passed to pd.read_csv().
+
Returns
-------
tpls : GeoDataFrame (as trackintel triplegs)
@@ -209,7 +227,7 @@ def write_triplegs_csv(triplegs, filename, *args, **kwargs):
"""
Write triplegs to csv file.
- Wraps the pandas to_csv function, but transforms the geom into WKT
+ Wraps the pandas to_csv function, but transforms the geometry into WKT
before writing.
Parameters
@@ -219,6 +237,16 @@ def write_triplegs_csv(triplegs, filename, *args, **kwargs):
filename : str
The file to write to.
+
+ args
+ Additional arguments passed to pd.DataFrame.to_csv().
+
+ kwargs
+ Additional keyword arguments passed to pd.DataFrame.to_csv().
+
+ Examples
+ --------
+ >>> tpls.as_triplegs.to_csv("export_tpls.csv")
"""
geo_col_name = triplegs.geometry.name
df = pd.DataFrame(triplegs, copy=True)
@@ -237,6 +265,9 @@ def read_staypoints_csv(*args, columns=None, tz=None, index_col=object(), crs=No
Parameters
----------
+ args
+ Arguments as passed to pd.read_csv().
+
columns : dict, optional
The column names to rename in the format {'old_name':'trackintel_standard_name'}.
The required columns for this function include: "user_id", "started_at", "finished_at"
@@ -254,6 +285,9 @@ def read_staypoints_csv(*args, columns=None, tz=None, index_col=object(), crs=No
by pyproj.CRS.from_user_input(), such as an authority string
(eg “EPSG:4326”) or a WKT string.
+ kwargs
+ Additional keyword arguments passed to pd.read_csv().
+
Returns
-------
sp : GeoDataFrame (as trackintel staypoints)
@@ -307,7 +341,7 @@ def write_staypoints_csv(staypoints, filename, *args, **kwargs):
"""
Write staypoints to csv file.
- Wraps the pandas to_csv function, but transforms the geom into WKT
+ Wraps the pandas to_csv function, but transforms the geometry into WKT
before writing.
Parameters
@@ -317,6 +351,16 @@ def write_staypoints_csv(staypoints, filename, *args, **kwargs):
filename : str
The file to write to.
+
+ args
+ Additional arguments passed to pd.DataFrame.to_csv().
+
+ kwargs
+ Additional keyword arguments passed to pd.DataFrame.to_csv().
+
+ Examples
+ --------
+ >>> tpls.as_triplegs.to_csv("export_tpls.csv")
"""
geo_col_name = staypoints.geometry.name
df = pd.DataFrame(staypoints, copy=True)
@@ -335,6 +379,9 @@ def read_locations_csv(*args, columns=None, index_col=object(), crs=None, **kwar
Parameters
----------
+ args
+ Arguments as passed to pd.read_csv().
+
columns : dict, optional
The column names to rename in the format {'old_name':'trackintel_standard_name'}.
The required columns for this function include: "user_id" and "center".
@@ -348,6 +395,9 @@ def read_locations_csv(*args, columns=None, index_col=object(), crs=None, **kwar
by pyproj.CRS.from_user_input(), such as an authority string
(eg “EPSG:4326”) or a WKT string.
+ kwargs
+ Additional keyword arguments passed to pd.read_csv().
+
Returns
-------
locs : GeoDataFrame (as trackintel locations)
@@ -404,6 +454,16 @@ def write_locations_csv(locations, filename, *args, **kwargs):
filename : str
The file to write to.
+
+ args
+ Additional arguments passed to pd.DataFrame.to_csv().
+
+ kwargs
+ Additional keyword arguments passed to pd.DataFrame.to_csv().
+
+ Examples
+ --------
+ >>> locs.as_locations.to_csv("export_locs.csv")
"""
df = pd.DataFrame(locations, copy=True)
df["center"] = locations["center"].apply(wkt.dumps)
@@ -422,6 +482,9 @@ def read_trips_csv(*args, columns=None, tz=None, index_col=object(), **kwargs):
Parameters
----------
+ args
+ Arguments as passed to pd.read_csv().
+
columns : dict, optional
The column names to rename in the format {'old_name':'trackintel_standard_name'}.
The required columns for this function include: "user_id", "started_at",
@@ -435,6 +498,9 @@ def read_trips_csv(*args, columns=None, tz=None, index_col=object(), **kwargs):
column name to be used as index. If None the default index is assumed
as unique identifier.
+ kwargs
+ Additional keyword arguments passed to pd.read_csv().
+
Returns
-------
trips : (Geo)DataFrame (as trackintel trips)
@@ -494,6 +560,7 @@ def write_trips_csv(trips, filename, *args, **kwargs):
Write trips to csv file.
Wraps the pandas to_csv function.
+ Geometry get transformed to WKT before writing.
Parameters
----------
@@ -502,6 +569,16 @@ def write_trips_csv(trips, filename, *args, **kwargs):
filename : str
The file to write to.
+
+ args
+ Additional arguments passed to pd.DataFrame.to_csv().
+
+ kwargs
+ Additional keyword arguments passed to pd.DataFrame.to_csv().
+
+ Examples
+ --------
+ >>> trips.as_trips.to_csv("export_trips.csv")
"""
df = trips.copy()
if isinstance(df, GeoDataFrame):
@@ -520,16 +597,26 @@ def read_tours_csv(*args, columns=None, index_col=object(), tz=None, **kwargs):
Parameters
----------
+ args
+ Arguments as passed to pd.read_csv().
+
columns : dict, optional
The column names to rename in the format {'old_name':'trackintel_standard_name'}.
tz : str, optional
pytz compatible timezone string. If None UTC is assumed.
+ kwargs
+ Additional keyword arguments passed to pd.read_csv().
+
Returns
-------
tours : DataFrame (as trackintel tours)
A DataFrame containing the tours.
+
+ Examples
+ --------
+ >>> trackintel.read_tours_csv('data.csv', columns={'uuid':'user_id'})
"""
columns = {} if columns is None else columns
@@ -571,6 +658,16 @@ def write_tours_csv(tours, filename, *args, **kwargs):
filename : str
The file to write to.
+
+ args
+ Additional arguments passed to pd.DataFrame.to_csv().
+
+ kwargs
+ Additional keyword arguments passed to pd.DataFrame.to_csv().
+
+ Examples
+ --------
+ >>> tours.as_tours.to_csv("export_tours.csv")
"""
tours.to_csv(filename, index=True, *args, **kwargs)
diff --git a/trackintel/preprocessing/staypoints.py b/trackintel/preprocessing/staypoints.py
index 679c565..ed8837e 100644
--- a/trackintel/preprocessing/staypoints.py
+++ b/trackintel/preprocessing/staypoints.py
@@ -233,7 +233,8 @@ def merge_staypoints(staypoints, triplegs, max_time_gap="10min", agg={}):
Parameters
----------
staypoints : GeoDataFrame (as trackintel staypoints)
- The staypoints have to follow the standard definition for staypoints DataFrames.
+ The staypoints must contain a column `location_id` (see `generate_locations` function) and have to follow the
+ standard trackintel definition for staypoints DataFrames.
triplegs: GeoDataFrame (as trackintel triplegs)
The triplegs have to follow the standard definition for triplegs DataFrames.
@@ -277,6 +278,7 @@ def merge_staypoints(staypoints, triplegs, max_time_gap="10min", agg={}):
# otherwise check if it's a Timedelta already, and raise error if not
elif not isinstance(max_time_gap, pd.Timedelta):
raise TypeError("Parameter max_time_gap must be either of type String or pd.Timedelta!")
+ assert "location_id" in staypoints.columns, "Staypoints must contain column location_id"
sp_merge = staypoints.copy()
index_name = staypoints.index.name
@@ -285,6 +287,9 @@ def merge_staypoints(staypoints, triplegs, max_time_gap="10min", agg={}):
tpls_merge = triplegs.copy()
tpls_merge["type"] = "tripleg"
sp_merge["type"] = "staypoint"
+ # convert datatypes in order to preserve the datatypes (especially ints) despite of NaNs during concat
+ sp_merge = sp_merge.convert_dtypes()
+
# a joined dataframe sp_tpls is constructed to add the columns 'type' and 'next_type' to the 'sp_merge' table
# concat and sort by time
sp_tpls = pd.concat([sp_merge, tpls_merge]).sort_values(by=["user_id", "started_at"])
@@ -327,7 +332,13 @@ def merge_staypoints(staypoints, triplegs, max_time_gap="10min", agg={}):
cond_old = cond.copy()
# Staypoint-required columnsare aggregated in the following manner:
- agg_dict = {index_name: "first", "user_id": "first", "started_at": "first", "finished_at": "last"}
+ agg_dict = {
+ index_name: "first",
+ "user_id": "first",
+ "started_at": "first",
+ "finished_at": "last",
+ "location_id": "first",
+ }
# User-defined further aggregation
agg_dict.update(agg)
|
mie-lab/trackintel
|
5cd2b9a492984969dd6c8d8a6ca6dbd4b31e45ad
|
diff --git a/tests/preprocessing/test_staypoints.py b/tests/preprocessing/test_staypoints.py
index 79a8745..10c4425 100644
--- a/tests/preprocessing/test_staypoints.py
+++ b/tests/preprocessing/test_staypoints.py
@@ -399,7 +399,8 @@ class TestMergeStaypoints:
"""Test staypoint merging."""
sp, tpls = example_staypoints_merge
# first test with empty tpls
- merged_sp = sp.as_staypoints.merge_staypoints(tpls, agg={"geom": "first", "location_id": "first"})
+ merged_sp = sp.as_staypoints.merge_staypoints(tpls, agg={"geom": "first"})
+ merged_sp = merged_sp.reindex(columns=sp.columns)
assert len(merged_sp) == len(sp) - 3
# some staypoints stay the same (not merged)
assert (merged_sp.loc[1] == sp.loc[1]).all()
@@ -458,3 +459,11 @@ class TestMergeStaypoints:
# the geom should correspond to the first one
assert sp.loc[7, "geom"] == merged_sp.loc[7, "geom"]
assert sp.loc[2, "geom"] == merged_sp.loc[2, "geom"]
+
+ def test_merge_staypoints_error(self, example_staypoints_merge):
+ sp, tpls = example_staypoints_merge
+ sp.drop(columns=["location_id"], inplace=True)
+ with pytest.raises(AssertionError) as excinfo:
+ _ = sp.as_staypoints.merge_staypoints(tpls)
+
+ assert "Staypoints must contain column location_id" in str(excinfo.value)
|
Examples for functions in geogr.distances
For #86 we should provide examples for all external visible functions.
There are missing examples for
- `calculate_distance_matrix`
- `meters_to_decimal_degrees`
they can be found [here](https://github.com/mie-lab/trackintel/blob/b89f0c8c35cc1adcc858a8bdc60a32d32b50a2b9/trackintel/geogr/distances.py#L15)
|
0.0
|
5cd2b9a492984969dd6c8d8a6ca6dbd4b31e45ad
|
[
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints",
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints_error"
] |
[
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_empty_generation",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_parallel_computing",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_missing_link",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_index_stability",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_location_ids_and_noise",
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints_time",
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints_max_time_gap",
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints_time_gap_error",
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints_agg"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-09-15 15:17:43+00:00
|
mit
| 3,924 |
|
mie-lab__trackintel-347
|
diff --git a/docs/modules/preprocessing.rst b/docs/modules/preprocessing.rst
index cbf76da..707b33c 100644
--- a/docs/modules/preprocessing.rst
+++ b/docs/modules/preprocessing.rst
@@ -31,6 +31,11 @@ frequently visits and/or infer if they correspond to activities.
.. autofunction:: trackintel.preprocessing.staypoints.generate_locations
+Due to tracking artifacts, it can occur that one activity is split into several staypoints.
+We can aggregate the staypoints horizontally that are close in time and at the same location.
+
+.. autofunction:: trackintel.preprocessing.staypoints.merge_staypoints
+
Triplegs
========
diff --git a/trackintel/preprocessing/staypoints.py b/trackintel/preprocessing/staypoints.py
index 679c565..ed8837e 100644
--- a/trackintel/preprocessing/staypoints.py
+++ b/trackintel/preprocessing/staypoints.py
@@ -233,7 +233,8 @@ def merge_staypoints(staypoints, triplegs, max_time_gap="10min", agg={}):
Parameters
----------
staypoints : GeoDataFrame (as trackintel staypoints)
- The staypoints have to follow the standard definition for staypoints DataFrames.
+ The staypoints must contain a column `location_id` (see `generate_locations` function) and have to follow the
+ standard trackintel definition for staypoints DataFrames.
triplegs: GeoDataFrame (as trackintel triplegs)
The triplegs have to follow the standard definition for triplegs DataFrames.
@@ -277,6 +278,7 @@ def merge_staypoints(staypoints, triplegs, max_time_gap="10min", agg={}):
# otherwise check if it's a Timedelta already, and raise error if not
elif not isinstance(max_time_gap, pd.Timedelta):
raise TypeError("Parameter max_time_gap must be either of type String or pd.Timedelta!")
+ assert "location_id" in staypoints.columns, "Staypoints must contain column location_id"
sp_merge = staypoints.copy()
index_name = staypoints.index.name
@@ -285,6 +287,9 @@ def merge_staypoints(staypoints, triplegs, max_time_gap="10min", agg={}):
tpls_merge = triplegs.copy()
tpls_merge["type"] = "tripleg"
sp_merge["type"] = "staypoint"
+ # convert datatypes in order to preserve the datatypes (especially ints) despite of NaNs during concat
+ sp_merge = sp_merge.convert_dtypes()
+
# a joined dataframe sp_tpls is constructed to add the columns 'type' and 'next_type' to the 'sp_merge' table
# concat and sort by time
sp_tpls = pd.concat([sp_merge, tpls_merge]).sort_values(by=["user_id", "started_at"])
@@ -327,7 +332,13 @@ def merge_staypoints(staypoints, triplegs, max_time_gap="10min", agg={}):
cond_old = cond.copy()
# Staypoint-required columnsare aggregated in the following manner:
- agg_dict = {index_name: "first", "user_id": "first", "started_at": "first", "finished_at": "last"}
+ agg_dict = {
+ index_name: "first",
+ "user_id": "first",
+ "started_at": "first",
+ "finished_at": "last",
+ "location_id": "first",
+ }
# User-defined further aggregation
agg_dict.update(agg)
|
mie-lab/trackintel
|
5cd2b9a492984969dd6c8d8a6ca6dbd4b31e45ad
|
diff --git a/tests/preprocessing/test_staypoints.py b/tests/preprocessing/test_staypoints.py
index 79a8745..10c4425 100644
--- a/tests/preprocessing/test_staypoints.py
+++ b/tests/preprocessing/test_staypoints.py
@@ -399,7 +399,8 @@ class TestMergeStaypoints:
"""Test staypoint merging."""
sp, tpls = example_staypoints_merge
# first test with empty tpls
- merged_sp = sp.as_staypoints.merge_staypoints(tpls, agg={"geom": "first", "location_id": "first"})
+ merged_sp = sp.as_staypoints.merge_staypoints(tpls, agg={"geom": "first"})
+ merged_sp = merged_sp.reindex(columns=sp.columns)
assert len(merged_sp) == len(sp) - 3
# some staypoints stay the same (not merged)
assert (merged_sp.loc[1] == sp.loc[1]).all()
@@ -458,3 +459,11 @@ class TestMergeStaypoints:
# the geom should correspond to the first one
assert sp.loc[7, "geom"] == merged_sp.loc[7, "geom"]
assert sp.loc[2, "geom"] == merged_sp.loc[2, "geom"]
+
+ def test_merge_staypoints_error(self, example_staypoints_merge):
+ sp, tpls = example_staypoints_merge
+ sp.drop(columns=["location_id"], inplace=True)
+ with pytest.raises(AssertionError) as excinfo:
+ _ = sp.as_staypoints.merge_staypoints(tpls)
+
+ assert "Staypoints must contain column location_id" in str(excinfo.value)
|
ENH for merge_staypoints() function
The `merge_staypoints()` function in `trackintel/preprocessing/staypoints.py` would need a location_id column. I think this is not yet reflected in the docstring.
Also, the function is not yet added to the documentation.
|
0.0
|
5cd2b9a492984969dd6c8d8a6ca6dbd4b31e45ad
|
[
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints",
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints_error"
] |
[
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_empty_generation",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_parallel_computing",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_missing_link",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_index_stability",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_location_ids_and_noise",
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints_time",
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints_max_time_gap",
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints_time_gap_error",
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints_agg"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-09-15 18:14:46+00:00
|
mit
| 3,925 |
|
mie-lab__trackintel-360
|
diff --git a/docs/modules/io.rst b/docs/modules/io.rst
index 09083d7..0c2d291 100644
--- a/docs/modules/io.rst
+++ b/docs/modules/io.rst
@@ -61,6 +61,8 @@ PostGIS Import
.. autofunction:: trackintel.io.postgis.read_trips_postgis
+.. autofunction:: trackintel.io.postgis.read_tours_postgis
+
CSV File Export
===============
@@ -89,6 +91,8 @@ PostGIS Export
.. autofunction:: trackintel.io.postgis.write_trips_postgis
+.. autofunction:: trackintel.io.postgis.write_tours_postgis
+
Predefined dataset readers
==========================
We also provide functionality to parse well-known datasets directly into the trackintel framework.
diff --git a/trackintel/io/__init__.py b/trackintel/io/__init__.py
index bc8260f..540233e 100644
--- a/trackintel/io/__init__.py
+++ b/trackintel/io/__init__.py
@@ -30,6 +30,9 @@ from .from_geopandas import read_trips_gpd
from .file import read_tours_csv
from .file import write_tours_csv
+from .postgis import read_tours_postgis
+from .postgis import write_tours_postgis
+from .from_geopandas import read_tours_gpd
from .dataset_reader import read_geolife
from .dataset_reader import geolife_add_modes_to_triplegs
diff --git a/trackintel/io/dataset_reader.py b/trackintel/io/dataset_reader.py
index ebb3f2e..5311452 100644
--- a/trackintel/io/dataset_reader.py
+++ b/trackintel/io/dataset_reader.py
@@ -39,6 +39,7 @@ def read_geolife(geolife_path, print_progress=False):
labels: dict
Dictionary with the available mode labels.
+ Keys are user ids of users that have a "labels.txt" in their folder.
Notes
-----
@@ -137,10 +138,7 @@ def _get_labels(geolife_path, uids):
No further checks are done on user ids, they must be convertable to ints.
"""
labels_rename = {"Start Time": "started_at", "End Time": "finished_at", "Transportation Mode": "mode"}
- label_dict = defaultdict(
- partial(pd.DataFrame, columns=["started_at", "finished_at", "mode"])
- ) # output dict for the labels
- # TODO: change geolife_add_modes_to_triplegs so that we can use a dict instead.
+ label_dict = {} # output dict for the labels
# get paths to all "labels.txt" files.
possible_label_paths = ((os.path.join(geolife_path, user_id, "labels.txt"), user_id) for user_id in uids)
@@ -242,16 +240,13 @@ def geolife_add_modes_to_triplegs(
# temp time fields for nn query
tpls["started_at_s"] = (tpls["started_at"] - pd.Timestamp("1970-01-01", tz="utc")) // pd.Timedelta("1s")
tpls["finished_at_s"] = (tpls["finished_at"] - pd.Timestamp("1970-01-01", tz="utc")) // pd.Timedelta("1s")
- all_users = tpls["user_id"].unique()
# tpls_id_mode_list is used to collect tripleg-mode matches. It will be filled with dictionaries with the
# following keys: [id', 'label_id', 'mode']
tpls_id_mode_list = list()
- for user_this in all_users:
+ for user_this in labels.keys():
tpls_this = tpls[tpls["user_id"] == user_this]
labels_this = labels[user_this]
- if labels_this.empty:
- continue
labels_this["started_at_s"] = (
labels_this["started_at"] - pd.Timestamp("1970-01-01", tz="utc")
@@ -292,10 +287,7 @@ def geolife_add_modes_to_triplegs(
tpls = tpls.join(tpls_id_mode)
tpls = tpls.astype({"label_id": "Int64"})
- try:
- tpls.drop(["started_at_s", "finished_at_s"], axis=1, inplace=True)
- except KeyError:
- pass
+ tpls.drop(["started_at_s", "finished_at_s"], axis=1, inplace=True)
try:
tpls.drop(["ratio"], axis=1, inplace=True)
diff --git a/trackintel/io/from_geopandas.py b/trackintel/io/from_geopandas.py
index 5e101c1..1bb34a1 100644
--- a/trackintel/io/from_geopandas.py
+++ b/trackintel/io/from_geopandas.py
@@ -401,7 +401,12 @@ def _trackintel_model(gdf, set_names=None, geom_col=None, crs=None, tz_cols=None
if tz_cols is not None:
for col in tz_cols:
if not pd.api.types.is_datetime64tz_dtype(gdf[col]):
- gdf[col] = _localize_timestamp(dt_series=gdf[col], pytz_tzinfo=tz, col_name=col)
+ try:
+ gdf[col] = _localize_timestamp(dt_series=gdf[col], pytz_tzinfo=tz, col_name=col)
+ except ValueError:
+ # Taken if column contains datetimes with different timezone informations.
+ # Cast them to UTC in this case.
+ gdf[col] = pd.to_datetime(gdf[col], utc=True)
# If is not GeoDataFrame and no geom_col is set end early.
# That allows us to handle DataFrames and GeoDataFrames in one function.
diff --git a/trackintel/io/postgis.py b/trackintel/io/postgis.py
index 7e2a9de..0746889 100644
--- a/trackintel/io/postgis.py
+++ b/trackintel/io/postgis.py
@@ -579,6 +579,118 @@ def write_trips_postgis(
)
+@_handle_con_string
+def read_tours_postgis(
+ sql,
+ con,
+ geom_col=None,
+ crs=None,
+ index_col=None,
+ coerce_float=True,
+ parse_dates=None,
+ params=None,
+ chunksize=None,
+ **kwargs
+):
+ """Read tours from a PostGIS database.
+
+ Parameters
+ ----------
+ sql : str
+ SQL query e.g. "SELECT * FROM tours"
+
+ con : str, sqlalchemy.engine.Connection or sqlalchemy.engine.Engine
+ Connection string or active connection to PostGIS database.
+
+ geom_col : str, optional
+ The geometry column of the table (if exists).
+
+ crs : optional
+ Coordinate reference system if table has geometry.
+
+ index_col : string or list of strings, optional
+ Column(s) to set as index(MultiIndex)
+
+ coerce_float : boolean, default True
+ Attempt to convert values of non-string, non-numeric objects (like
+ decimal.Decimal) to floating point, useful for SQL result sets
+
+ parse_dates : list or dict, default None
+ - List of column names to parse as dates.
+ - Dict of ``{column_name: format string}`` where format string is
+ strftime compatible in case of parsing string times, or is one of
+ (D, s, ns, ms, us) in case of parsing integer timestamps.
+ - Dict of ``{column_name: arg dict}``, where the arg dict
+ corresponds to the keyword arguments of
+ :func:`pandas.to_datetime`. Especially useful with databases
+ without native Datetime support, such as SQLite.
+
+ params : list, tuple or dict, optional, default None
+ List of parameters to pass to execute method.
+
+ chunksize : int, default None
+ If specified, return an iterator where chunksize is the number
+ of rows to include in each chunk.
+
+ **kwargs
+ Further keyword arguments as available in trackintels trackintel.io.read_tours_gpd().
+ Especially useful to rename column names from the SQL table to trackintel conform column names.
+ See second example how to use it in code.
+
+ Returns
+ -------
+ GeoDataFrame (as trackintel tours)
+ A GeoDataFrame containing the tours.
+
+ Examples
+ --------
+ >>> tours = ti.io.read_tours_postgis("SELECT * FROM tours", con)
+ >>> tours = ti.io.read_tours_postgis("SELECT * FROM tours", con, index_col="id", started_at="start_time",
+ ... finished_at="end_time", user_id="USER")
+ """
+ if geom_col is None:
+ tours = pd.read_sql(
+ sql,
+ con,
+ index_col=index_col,
+ coerce_float=coerce_float,
+ params=params,
+ parse_dates=parse_dates,
+ chunksize=chunksize,
+ )
+ else:
+ tours = gpd.GeoDataFrame.from_postgis(
+ sql,
+ con,
+ geom_col=geom_col,
+ crs=crs,
+ index_col=index_col,
+ coerce_float=coerce_float,
+ parse_dates=parse_dates,
+ params=params,
+ chunksize=chunksize,
+ )
+
+ return ti.io.read_tours_gpd(tours, **kwargs)
+
+
+@_handle_con_string
+def write_tours_postgis(
+ tours, name, con, schema=None, if_exists="fail", index=True, index_label=None, chunksize=None, dtype=None
+):
+ write_trips_postgis(
+ tours,
+ name=name,
+ con=con,
+ schema=schema,
+ if_exists=if_exists,
+ index=index,
+ index_label=index_label,
+ chunksize=chunksize,
+ dtype=dtype,
+ )
+
+
# helper docstring to change __doc__ of all write functions conveniently in one place
__doc = """Stores {long} to PostGIS. Usually, this is directly called on a {long}
DataFrame (see example below).
@@ -628,3 +740,4 @@ write_triplegs_postgis.__doc__ = __doc.format(long="triplegs", short="tpls")
write_staypoints_postgis.__doc__ = __doc.format(long="staypoints", short="sp")
write_locations_postgis.__doc__ = __doc.format(long="locations", short="locs")
write_trips_postgis.__doc__ = __doc.format(long="trips", short="trips")
+write_tours_postgis.__doc__ = __doc.format(long="tours", short="tours")
diff --git a/trackintel/model/tours.py b/trackintel/model/tours.py
index bc11abd..f36eabc 100644
--- a/trackintel/model/tours.py
+++ b/trackintel/model/tours.py
@@ -57,6 +57,16 @@ class ToursAccessor(object):
"""
ti.io.file.write_tours_csv(self._obj, filename, *args, **kwargs)
+ def to_postgis(
+ self, name, con, schema=None, if_exists="fail", index=True, index_label=None, chunksize=None, dtype=None
+ ):
+ """
+ Store this collection of tours to PostGIS.
+
+ See :func:`trackintel.io.postgis.write_tours_postgis`.
+ """
+ ti.io.postgis.write_tours_postgis(self._obj, name, con, schema, if_exists, index, index_label, chunksize, dtype)
+
def plot(self, *args, **kwargs):
"""
Plot this collection of tours.
|
mie-lab/trackintel
|
39c3b7383b9c3396601fb21422b11338495fa772
|
diff --git a/tests/io/test_dataset_reader.py b/tests/io/test_dataset_reader.py
index 165fdce..3e9f0f9 100644
--- a/tests/io/test_dataset_reader.py
+++ b/tests/io/test_dataset_reader.py
@@ -35,8 +35,8 @@ def matching_data():
Tripleg_2 overlaps and extents to the right but is almost not covered by label_0
Tripleg_3 overlaps label_1 to the right and the left but is almost fully covered by it.
"""
- one_hour = datetime.timedelta(hours=1)
- one_min = datetime.timedelta(minutes=1)
+ one_hour = pd.Timedelta("1h")
+ one_min = pd.Timedelta("1min")
time_1 = pd.Timestamp("1970-01-01", tz="utc")
triplegs = [
@@ -146,14 +146,6 @@ class Test_GetLabels:
assert len(labels[20]) == 223
assert all(df.columns.tolist() == ["started_at", "finished_at", "mode"] for df in labels.values())
- def test_defaultdict(self):
- """Test if non existing entries return a correct empty pd.DataFrame"""
- geolife_path = os.path.join("tests", "data", "geolife_modes")
- uids = ["010", "020", "178"]
- labels = _get_labels(geolife_path, uids)
- empty_df = pd.DataFrame(columns=["started_at", "finished_at", "mode"])
- assert_frame_equal(labels[-1], empty_df)
-
class Test_GetDf:
def test_example_data(self):
@@ -242,8 +234,7 @@ class TestGeolife_add_modes_to_triplegs:
def test_mode_matching_multi_user(self, matching_data):
tpls, labels_raw = matching_data
- # we add an empty DataFrame with labels in the end
- labels = {0: labels_raw, 1: pd.DataFrame(columns=labels_raw.columns)}
+ labels = {0: labels_raw}
# explicitly change the user_id of the second record
tpls.loc[1, "user_id"] = 1
diff --git a/tests/io/test_from_geopandas.py b/tests/io/test_from_geopandas.py
index c04a724..3dc0edd 100644
--- a/tests/io/test_from_geopandas.py
+++ b/tests/io/test_from_geopandas.py
@@ -91,6 +91,17 @@ class Test_Trackintel_Model:
pfs = _trackintel_model(pfs, tz_cols=["tracked_at"], tz="UTC")
assert_geodataframe_equal(pfs, example_positionfixes)
+ def test_multiple_timezones_in_col(self, example_positionfixes):
+ """Test if datetimes in column don't have the same timezone get casted to UTC."""
+ example_positionfixes["tracked_at"] = [
+ pd.Timestamp("2021-08-01 16:00:00", tz="Europe/Amsterdam"),
+ pd.Timestamp("2021-08-01 16:00:00", tz="Asia/Muscat"),
+ pd.Timestamp("2021-08-01 16:00:00", tz="Pacific/Niue"),
+ ]
+ pfs = _trackintel_model(example_positionfixes, tz_cols=["tracked_at"])
+ example_positionfixes["tracked_at"] = pd.to_datetime(example_positionfixes["tracked_at"], utc=True)
+ assert_geodataframe_equal(pfs, example_positionfixes)
+
class TestRead_Positionfixes_Gpd:
"""Test `read_positionfixes_gpd()` function."""
@@ -296,6 +307,17 @@ class TestRead_Trips_Gpd:
example_trips.rename(columns=mapper, inplace=True)
assert_geodataframe_equal(trips, example_trips)
+ def test_multiple_timezones_in_col(self, example_trips):
+ """Test if datetimes in column don't have the same timezone get casted to UTC."""
+ example_trips["started_at"] = [
+ pd.Timestamp("2021-08-01 16:00:00", tz="Europe/Amsterdam"),
+ pd.Timestamp("2021-08-01 16:00:00", tz="Asia/Muscat"),
+ pd.Timestamp("2021-08-01 16:00:00", tz="Pacific/Niue"),
+ ]
+ trips = read_trips_gpd(example_trips)
+ example_trips["started_at"] = pd.to_datetime(example_trips["started_at"], utc=True)
+ assert_geodataframe_equal(example_trips, trips)
+
@pytest.fixture
def example_tours():
diff --git a/tests/io/test_postgis.py b/tests/io/test_postgis.py
index 6b9f03a..3791d46 100644
--- a/tests/io/test_postgis.py
+++ b/tests/io/test_postgis.py
@@ -160,6 +160,25 @@ def example_trips():
return trips
[email protected]
+def example_tours():
+ """Tours to load into the database."""
+ t1 = pd.Timestamp("1971-01-01 00:00:00", tz="utc")
+ t2 = pd.Timestamp("1971-01-01 05:00:00", tz="utc")
+ t3 = pd.Timestamp("1971-01-02 07:00:00", tz="utc")
+ h = datetime.timedelta(hours=1)
+
+ list_dict = [
+ {"user_id": 0, "started_at": t1, "finished_at": t1 + h},
+ {"user_id": 0, "started_at": t2, "finished_at": t2 + h},
+ {"user_id": 1, "started_at": t3, "finished_at": t3 + h},
+ ]
+ tours = pd.DataFrame(data=list_dict)
+ tours.index.name = "id"
+ assert tours.as_tours
+ return tours
+
+
def del_table(con, table):
"""Delete table in con."""
try:
@@ -291,6 +310,28 @@ class TestPositionfixes:
finally:
del_table(conn, table)
+ def test_daylight_saving_tz(self, example_positionfixes, conn_postgis):
+ """Test if function can handle different tz informations in one column.
+
+ PostgreSQL saves all its datetimes in UTC and then on exports them to the local timezone.
+ That all works fine except when the local timezone changed in the past for example with daylight saving.
+ """
+ pfs = example_positionfixes.copy()
+ conn_string, conn = conn_postgis
+ table = "positionfixes"
+ sql = f"SELECT * FROM {table}"
+ t1 = pd.Timestamp("2021-08-01 16:00:00", tz="utc") # summer time
+ t2 = pd.Timestamp("2021-08-01 15:00:00", tz="utc") # summer time
+ t3 = pd.Timestamp("2021-02-01 14:00:00", tz="utc") # winter time
+ pfs["tracked_at"] = [t1, t2, t3]
+ geom_col = pfs.geometry.name
+ try:
+ pfs.as_positionfixes.to_postgis(table, conn_string)
+ pfs_db = ti.io.read_positionfixes_postgis(sql, conn_string, geom_col, index_col="id")
+ assert_geodataframe_equal(pfs, pfs_db)
+ finally:
+ del_table(conn, table)
+
class TestTriplegs:
def test_write(self, example_triplegs, conn_postgis):
@@ -582,6 +623,51 @@ class TestTrips:
del_table(conn, table)
+class TestTours:
+ """Test of postgis functions for tours."""
+
+ def test_write(self, example_tours, conn_postgis):
+ """Test if write of tours create correct schema in database."""
+ tours = example_tours
+ conn_string, conn = conn_postgis
+ table = "tours"
+ try:
+ tours.as_tours.to_postgis(table, conn_string)
+ columns_db, dtypes = get_table_schema(conn, table)
+ columns = tours.columns.tolist() + [tours.index.name]
+ assert len(columns_db) == len(columns)
+ assert set(columns_db) == set(columns)
+ finally:
+ del_table(conn, table)
+
+ def test_read(self, example_tours, conn_postgis):
+ """Test if tours written to and read back from database are the same."""
+ tours = example_tours
+ conn_string, conn = conn_postgis
+ table = "tours"
+ sql = f"SELECT * FROM {table}"
+
+ try:
+ tours.as_tours.to_postgis(table, conn_string)
+ tours_db = ti.io.read_tours_postgis(sql, conn_string, index_col="id")
+ assert_frame_equal(tours, tours_db)
+ finally:
+ del_table(conn, table)
+
+ def test_no_crs(self, example_tours, conn_postgis):
+ """Test if writing reading to postgis also works correctly without CRS."""
+ tours = example_tours
+ conn_string, conn = conn_postgis
+ table = "tours"
+ sql = f"SELECT * FROM {table}"
+ try:
+ tours.as_tours.to_postgis(table, conn_string)
+ tours_db = ti.io.read_tours_postgis(sql, conn_string, index_col="id")
+ assert_frame_equal(tours, tours_db)
+ finally:
+ del_table(conn, table)
+
+
class TestGetSrid:
def test_srid(self, example_positionfixes):
"""Test if `_get_srid` returns the correct srid."""
|
CLN: Iterate over label keys in `geolife_add_modes_to_triplegs`
In #354 the type of the labels dict changed to defaultdict. Now if we iterate over all users we create a lot of empty dataframes in the defaultdict. Two proposed changes to change that:
1. change type back to dict
2. iterate [here](https://github.com/mie-lab/trackintel/blob/feaf7f37b38c1a5659cd8f6ef6982c3b505110dd/trackintel/io/dataset_reader.py#L226) over labels key instead of unique user_ids
|
0.0
|
39c3b7383b9c3396601fb21422b11338495fa772
|
[
"tests/io/test_from_geopandas.py::Test_Trackintel_Model::test_multiple_timezones_in_col",
"tests/io/test_from_geopandas.py::TestRead_Trips_Gpd::test_multiple_timezones_in_col"
] |
[
"tests/io/test_dataset_reader.py::TestReadGeolife::test_loop_read",
"tests/io/test_dataset_reader.py::TestReadGeolife::test_print_progress_flag",
"tests/io/test_dataset_reader.py::TestReadGeolife::test_label_reading",
"tests/io/test_dataset_reader.py::TestReadGeolife::test_wrong_folder_name",
"tests/io/test_dataset_reader.py::TestReadGeolife::test_no_user_folders",
"tests/io/test_dataset_reader.py::Test_GetLabels::test_example_data",
"tests/io/test_dataset_reader.py::Test_GetDf::test_example_data",
"tests/io/test_dataset_reader.py::TestGeolife_add_modes_to_triplegs::test_impossible_matching",
"tests/io/test_from_geopandas.py::Test_Trackintel_Model::test_renaming",
"tests/io/test_from_geopandas.py::Test_Trackintel_Model::test_setting_geometry",
"tests/io/test_from_geopandas.py::Test_Trackintel_Model::test_set_crs",
"tests/io/test_from_geopandas.py::Test_Trackintel_Model::test_already_set_geometry",
"tests/io/test_from_geopandas.py::Test_Trackintel_Model::test_error_no_set_geometry",
"tests/io/test_from_geopandas.py::Test_Trackintel_Model::test_tz_cols",
"tests/io/test_from_geopandas.py::TestRead_Positionfixes_Gpd::test_csv",
"tests/io/test_from_geopandas.py::TestRead_Positionfixes_Gpd::test_mapper",
"tests/io/test_from_geopandas.py::TestRead_Triplegs_Gpd::test_csv",
"tests/io/test_from_geopandas.py::TestRead_Triplegs_Gpd::test_mapper",
"tests/io/test_from_geopandas.py::TestRead_Staypoints_Gpd::test_csv",
"tests/io/test_from_geopandas.py::TestRead_Staypoints_Gpd::test_mapper",
"tests/io/test_from_geopandas.py::TestRead_Locations_Gpd::test_csv",
"tests/io/test_from_geopandas.py::TestRead_Locations_Gpd::test_extent_col",
"tests/io/test_from_geopandas.py::TestRead_Locations_Gpd::test_mapper",
"tests/io/test_from_geopandas.py::TestRead_Trips_Gpd::test_csv",
"tests/io/test_from_geopandas.py::TestRead_Trips_Gpd::test_with_geometry",
"tests/io/test_from_geopandas.py::TestRead_Trips_Gpd::test_without_geometry",
"tests/io/test_from_geopandas.py::TestRead_Trips_Gpd::test_mapper",
"tests/io/test_from_geopandas.py::TestRead_Tours_Gpd::test_csv",
"tests/io/test_from_geopandas.py::TestRead_Tours_Gpd::test_mapper",
"tests/io/test_postgis.py::TestGetSrid::test_srid"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-10-02 12:35:26+00:00
|
mit
| 3,926 |
|
mie-lab__trackintel-384
|
diff --git a/trackintel/analysis/tracking_quality.py b/trackintel/analysis/tracking_quality.py
index 254b9c6..20e3e3a 100644
--- a/trackintel/analysis/tracking_quality.py
+++ b/trackintel/analysis/tracking_quality.py
@@ -1,8 +1,8 @@
-import datetime
+import warnings
-import pandas as pd
+import geopandas as gpd
import numpy as np
-import warnings
+import pandas as pd
def temporal_tracking_quality(source, granularity="all", max_iter=60):
@@ -238,35 +238,31 @@ def _split_overlaps(source, granularity="day", max_iter=60):
GeoDataFrame (as trackintel datamodels)
The GeoDataFrame object after the splitting
"""
- df = source.copy()
- change_flag = __get_split_index(df, granularity=granularity)
+ if granularity == "hour":
+ # every split over hour splits also over day
+ # this way to split of an entry over a month takes 30+24 iterations instead of 30*24.
+ df = _split_overlaps(source, granularity="day", max_iter=max_iter)
+ else:
+ df = source.copy()
+ change_flag = _get_split_index(df, granularity=granularity)
iter_count = 0
+ freq = "D" if granularity == "day" else "H"
# Iteratively split one day/hour from multi day/hour entries until no entry spans over multiple days/hours
while change_flag.sum() > 0:
-
# calculate new finished_at timestamp (00:00 midnight)
- finished_at_temp = df.loc[change_flag, "finished_at"].copy()
- if granularity == "day":
- df.loc[change_flag, "finished_at"] = df.loc[change_flag, "started_at"].apply(
- lambda x: x.replace(hour=23, minute=59, second=59) + datetime.timedelta(seconds=1)
- )
- elif granularity == "hour":
- df.loc[change_flag, "finished_at"] = df.loc[change_flag, "started_at"].apply(
- lambda x: x.replace(minute=59, second=59) + datetime.timedelta(seconds=1)
- )
+ new_df = df.loc[change_flag].copy()
+ df.loc[change_flag, "finished_at"] = (df.loc[change_flag, "started_at"] + pd.Timestamp.resolution).dt.ceil(freq)
# create new entries with remaining timestamp
- new_df = df.loc[change_flag].copy()
new_df.loc[change_flag, "started_at"] = df.loc[change_flag, "finished_at"]
- new_df.loc[change_flag, "finished_at"] = finished_at_temp
df = pd.concat((df, new_df), ignore_index=True, sort=True)
- change_flag = __get_split_index(df, granularity=granularity)
+ change_flag = _get_split_index(df, granularity=granularity)
iter_count += 1
- if iter_count > max_iter:
+ if iter_count >= max_iter:
warnings.warn(
f"Maximum iteration {max_iter} reached when splitting the input dataframe by {granularity}. "
"Consider checking the timeframe of the input or parsing a higher 'max_iter' parameter."
@@ -275,11 +271,10 @@ def _split_overlaps(source, granularity="day", max_iter=60):
if "duration" in df.columns:
df["duration"] = df["finished_at"] - df["started_at"]
-
return df
-def __get_split_index(df, granularity="day"):
+def _get_split_index(df, granularity="day"):
"""
Get the index that needs to be splitted.
@@ -297,10 +292,8 @@ def __get_split_index(df, granularity="day"):
change_flag: pd.Series
Boolean index indicating which records needs to be splitted
"""
- change_flag = df["started_at"].dt.date != (df["finished_at"] - pd.to_timedelta("1s")).dt.date
- if granularity == "hour":
- hour_flag = df["started_at"].dt.hour != (df["finished_at"] - pd.to_timedelta("1s")).dt.hour
- # union of day and hour change flag
- change_flag = change_flag | hour_flag
-
- return change_flag
+ freq = "D" if granularity == "day" else "H"
+ cond1 = df["started_at"].dt.floor(freq) != (df["finished_at"] - pd.Timedelta.resolution).dt.floor(freq)
+ # catch corner case where both on same border and subtracting would lead to error
+ cond2 = df["started_at"] != df["finished_at"]
+ return cond1 & cond2
|
mie-lab/trackintel
|
edfa84bc8183fdb75531102621eff5ffd945f601
|
diff --git a/tests/analysis/test_tracking_quality.py b/tests/analysis/test_tracking_quality.py
index c22f42b..4511ec4 100644
--- a/tests/analysis/test_tracking_quality.py
+++ b/tests/analysis/test_tracking_quality.py
@@ -7,6 +7,7 @@ import geopandas as gpd
from shapely.geometry import Point
import trackintel as ti
+from trackintel.analysis.tracking_quality import _get_split_index
@pytest.fixture
@@ -279,7 +280,7 @@ class TestSplit_overlaps:
splitted = ti.analysis.tracking_quality._split_overlaps(sp_tpls, granularity="day")
# no record spans several days after the split
- multi_day_records = (splitted["finished_at"] - pd.to_timedelta("1s")).dt.day - splitted["started_at"].dt.day
+ multi_day_records = (splitted["finished_at"] - pd.Timestamp.resolution).dt.day - splitted["started_at"].dt.day
assert (multi_day_records == 0).all()
def test_split_overlaps_hours(self, testdata_sp_tpls_geolife_long):
@@ -295,23 +296,22 @@ class TestSplit_overlaps:
splitted = ti.analysis.tracking_quality._split_overlaps(sp_tpls, granularity="hour")
# no record spans several hours after the split
- hour_diff = (splitted["finished_at"] - pd.to_timedelta("1s")).dt.hour - splitted["started_at"].dt.hour
+ hour_diff = (splitted["finished_at"] - pd.Timestamp.resolution).dt.hour - splitted["started_at"].dt.hour
assert (hour_diff == 0).all()
def test_split_overlaps_hours_case2(self, testdata_sp_tpls_geolife_long):
"""Test if _split_overlaps() function can split record that have the same hour but different days."""
- sp_tpls = testdata_sp_tpls_geolife_long
-
# get the first two records
- head2 = sp_tpls.head(2).copy()
+ head2 = testdata_sp_tpls_geolife_long.head(2)
+
# construct the finished_at exactly one day after started_at
- head2["finished_at"] = head2.apply(lambda x: x["started_at"].replace(day=x["started_at"].day + 1), axis=1)
+ head2["finished_at"] = head2["started_at"] + pd.Timedelta("1d")
# the records have the same hour
- hour_diff = (head2["finished_at"] - pd.to_timedelta("1s")).dt.hour - head2["started_at"].dt.hour
+ hour_diff = (head2["finished_at"] - pd.Timestamp.resolution).dt.hour - head2["started_at"].dt.hour
assert (hour_diff == 0).all()
# but have different days
- day_diff = (head2["finished_at"] - pd.to_timedelta("1s")).dt.day - head2["started_at"].dt.day
+ day_diff = (head2["finished_at"] - pd.Timestamp.resolution).dt.day - head2["started_at"].dt.day
assert (day_diff > 0).all()
# split the records according to hour
@@ -319,7 +319,7 @@ class TestSplit_overlaps:
splitted = ti.analysis.tracking_quality._split_overlaps(head2, granularity="hour")
# no record has different days after the split
- day_diff = (splitted["finished_at"] - pd.to_timedelta("1s")).dt.day - splitted["started_at"].dt.day
+ day_diff = (splitted["finished_at"] - pd.Timestamp.resolution).dt.day - splitted["started_at"].dt.day
assert (day_diff == 0).all()
def test_split_overlaps_duration(self, testdata_sp_tpls_geolife_long):
@@ -354,3 +354,46 @@ class TestSplit_overlaps:
with pytest.warns(UserWarning):
ti.analysis.tracking_quality._split_overlaps(sp, granularity="day")
+
+ def test_exact_midnight_split(self):
+ """Test if split finishes and starts on midnight on the ns (pandas resolution)."""
+ midnight = pd.Timestamp("2022-03-18 00:00:00", tz="utc")
+ start = midnight - pd.Timestamp.resolution
+ end = midnight + pd.Timestamp.resolution
+ data = [
+ {"user_id": 0, "started_at": start, "finished_at": end, "geom": Point(0.0, 0.0)},
+ {"user_id": 1, "started_at": start, "finished_at": midnight, "geom": Point(0.0, 0.0)},
+ {"user_id": 2, "started_at": midnight, "finished_at": end, "geom": Point(0.0, 0.0)},
+ {"user_id": 3, "started_at": midnight, "finished_at": midnight, "geom": Point(0.0, 0.0)},
+ ]
+ sp = gpd.GeoDataFrame(data=data, geometry="geom", crs="EPSG:4326")
+ sp_res = ti.analysis.tracking_quality._split_overlaps(sp, granularity="hour")
+ sp_res.sort_values(by="user_id", inplace=True, kind="stable")
+ assert (sp_res["started_at"] == [start, midnight, start, midnight, midnight]).all()
+ assert (sp_res["finished_at"] == [midnight, end, midnight, end, midnight]).all()
+
+
+class TestGet_split_index:
+ """Test if __get_split_index splits correctly"""
+
+ def test_midnight_ns(self):
+ """Test datetimes 1 ns around midnight."""
+ # 9 possibilities, 3 per starts before, on and after an hour point
+ # h1 h2
+ # -- | -- ... -- | --
+ # s1 s2 s3 ... e1 e2 e3
+ midnight = pd.Timestamp("2022-03-18 00:00:00", tz="utc")
+ start1 = midnight - pd.Timestamp.resolution
+ start2 = midnight
+ start3 = midnight + pd.Timestamp.resolution
+ starts = [start1, start2, start3]
+ ends = [s + pd.Timedelta("1h") for s in starts]
+
+ data = [
+ {"started_at": start, "finished_at": end, "res": (start == starts[0]) or (end == ends[2])}
+ for start in starts
+ for end in ends
+ ]
+ df = pd.DataFrame(data=data)
+ calculated_result = _get_split_index(df, "hour")
+ assert (calculated_result == df["res"]).all()
|
ENH: _split_overlap() function does not deal with miliseconds
The splitting of records implemented in the `_split_overlap()` function only replaces the time larger than 1 second, causing the newly created records not to start exactly at 12 midnight. The codes [here](https://github.com/mie-lab/trackintel/blob/1ff2323c868cfb82c0d0497cf57281c821e044d1/trackintel/analysis/tracking_quality.py#L253) and [here](https://github.com/mie-lab/trackintel/blob/1ff2323c868cfb82c0d0497cf57281c821e044d1/trackintel/analysis/tracking_quality.py#L257) need to be changed.

|
0.0
|
edfa84bc8183fdb75531102621eff5ffd945f601
|
[
"tests/analysis/test_tracking_quality.py::TestTemporal_tracking_quality::test_tracking_quality_all",
"tests/analysis/test_tracking_quality.py::TestTemporal_tracking_quality::test_tracking_quality_day",
"tests/analysis/test_tracking_quality.py::TestTemporal_tracking_quality::test_tracking_quality_week",
"tests/analysis/test_tracking_quality.py::TestTemporal_tracking_quality::test_tracking_quality_weekday",
"tests/analysis/test_tracking_quality.py::TestTemporal_tracking_quality::test_tracking_quality_hour",
"tests/analysis/test_tracking_quality.py::TestTemporal_tracking_quality::test_tracking_quality_error",
"tests/analysis/test_tracking_quality.py::TestTemporal_tracking_quality::test_tracking_quality_user_error",
"tests/analysis/test_tracking_quality.py::TestTemporal_tracking_quality::test_staypoints_accessors",
"tests/analysis/test_tracking_quality.py::TestTemporal_tracking_quality::test_triplegs_accessors",
"tests/analysis/test_tracking_quality.py::TestTemporal_tracking_quality::test_trips_accessors",
"tests/analysis/test_tracking_quality.py::TestTemporal_tracking_quality::test_non_positive_duration_warning",
"tests/analysis/test_tracking_quality.py::TestTemporal_tracking_quality::test_absolute_extent",
"tests/analysis/test_tracking_quality.py::TestTemporal_tracking_quality::test_non_positive_duration_filtered",
"tests/analysis/test_tracking_quality.py::TestSplit_overlaps::test_split_overlaps_days",
"tests/analysis/test_tracking_quality.py::TestSplit_overlaps::test_split_overlaps_hours",
"tests/analysis/test_tracking_quality.py::TestSplit_overlaps::test_split_overlaps_hours_case2",
"tests/analysis/test_tracking_quality.py::TestSplit_overlaps::test_split_overlaps_duration",
"tests/analysis/test_tracking_quality.py::TestSplit_overlaps::test_max_iter_warning",
"tests/analysis/test_tracking_quality.py::TestSplit_overlaps::test_exact_midnight_split",
"tests/analysis/test_tracking_quality.py::TestGet_split_index::test_midnight_ns"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-03-18 23:14:19+00:00
|
mit
| 3,927 |
|
mie-lab__trackintel-408
|
diff --git a/trackintel/analysis/location_identification.py b/trackintel/analysis/location_identification.py
index 33e691c..6682e4a 100644
--- a/trackintel/analysis/location_identification.py
+++ b/trackintel/analysis/location_identification.py
@@ -62,7 +62,7 @@ def location_identifier(staypoints, method="FREQ", pre_filter=True, **pre_filter
if pre_filter:
f = pre_filter_locations(sp, **pre_filter_kwargs)
else:
- f = pd.Series(np.full(len(sp.index), True))
+ f = pd.Series(np.full(len(sp.index), True), index=sp.index)
if method == "FREQ":
method_val = freq_method(sp[f], "home", "work")
|
mie-lab/trackintel
|
c585d0f49f78b2bf1f3a1cba41c979caa4ca9cb2
|
diff --git a/tests/analysis/test_location_identification.py b/tests/analysis/test_location_identification.py
index a256ca5..af0917c 100644
--- a/tests/analysis/test_location_identification.py
+++ b/tests/analysis/test_location_identification.py
@@ -253,6 +253,15 @@ class TestLocation_Identifier:
osna = osna_method(example_osna)
assert_geodataframe_equal(li, osna)
+ def test_pre_filter_index(self, example_freq):
+ """Test if pre_filter=False works with non-serial index"""
+ # issue-#403
+ example_freq.index = *reversed(example_freq.index[1:]), example_freq.index[0]
+ example_freq.index += 100 # move it so that there is no overlap to a range index
+ li = location_identifier(example_freq, method="FREQ", pre_filter=False)
+ fr = freq_method(example_freq)
+ assert_geodataframe_equal(li, fr)
+
@pytest.fixture
def example_osna():
|
Bug in location_identifier for pre_filter=False and non-serial index
The function [location_identifier](https://github.com/mie-lab/trackintel/blob/master/trackintel/analysis/location_identification.py#L7) assumes that an index is serial e.g., incremental from 0 to n. It throws an error in fthe index is not serial and sorts the dataframe (may not be intended) if the index is serial but not in order.
The problem is that for `pre_filter=False` [here](https://github.com/mie-lab/trackintel/blob/master/trackintel/analysis/location_identification.py#L65) a boolean series with the same length as the staypoint dataframe and serial index is created.
From the geopandas documentation for what can be input to `loc` for indexing:
_An alignable boolean Series. The index of the key will be aligned before masking._ [source](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.loc.html)
A fix could be to either use the original index when creating `f` in line [65](https://github.com/mie-lab/trackintel/blob/master/trackintel/analysis/location_identification.py#L65), to use a boolean array without index or to use iloc instead of loc.
|
0.0
|
c585d0f49f78b2bf1f3a1cba41c979caa4ca9cb2
|
[
"tests/analysis/test_location_identification.py::TestLocation_Identifier::test_pre_filter_index"
] |
[
"tests/analysis/test_location_identification.py::TestPre_Filter::test_no_kw",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_thresh_sp",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_thresh_loc",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_thresh_sp_at_loc",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_tresh_loc_time",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_loc_period",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_agg_level",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_agg_level_error",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_non_continous_index",
"tests/analysis/test_location_identification.py::TestFreq_method::test_default_labels",
"tests/analysis/test_location_identification.py::TestFreq_method::test_custom_labels",
"tests/analysis/test_location_identification.py::TestFreq_method::test_duration",
"tests/analysis/test_location_identification.py::Test_Freq_Transform::test_function",
"tests/analysis/test_location_identification.py::Test_Freq_Assign::test_function",
"tests/analysis/test_location_identification.py::Test_Freq_Assign::test_more_labels_than_entries",
"tests/analysis/test_location_identification.py::TestLocation_Identifier::test_unkown_method",
"tests/analysis/test_location_identification.py::TestLocation_Identifier::test_no_location_column",
"tests/analysis/test_location_identification.py::TestLocation_Identifier::test_pre_filter",
"tests/analysis/test_location_identification.py::TestLocation_Identifier::test_freq_method",
"tests/analysis/test_location_identification.py::TestLocation_Identifier::test_osna_method",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_default",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_overlap",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_only_weekends",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_two_users",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_leisure_weighting",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_only_one_work_location",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_only_one_rest_location",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_only_one_leisure_location",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_prior_activity_label",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_multiple_users_with_only_one_location",
"tests/analysis/test_location_identification.py::Test_osna_label_timeframes::test_weekend",
"tests/analysis/test_location_identification.py::Test_osna_label_timeframes::test_weekday"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-05-20 19:04:34+00:00
|
mit
| 3,928 |
|
mie-lab__trackintel-411
|
diff --git a/trackintel/analysis/location_identification.py b/trackintel/analysis/location_identification.py
index 33e691c..6682e4a 100644
--- a/trackintel/analysis/location_identification.py
+++ b/trackintel/analysis/location_identification.py
@@ -62,7 +62,7 @@ def location_identifier(staypoints, method="FREQ", pre_filter=True, **pre_filter
if pre_filter:
f = pre_filter_locations(sp, **pre_filter_kwargs)
else:
- f = pd.Series(np.full(len(sp.index), True))
+ f = pd.Series(np.full(len(sp.index), True), index=sp.index)
if method == "FREQ":
method_val = freq_method(sp[f], "home", "work")
diff --git a/trackintel/visualization/modal_split.py b/trackintel/visualization/modal_split.py
index 83937a1..0448212 100644
--- a/trackintel/visualization/modal_split.py
+++ b/trackintel/visualization/modal_split.py
@@ -8,6 +8,7 @@ def plot_modal_split(
df_modal_split_in,
out_path=None,
date_fmt_x_axis="%W",
+ fig=None,
axis=None,
title=None,
x_label=None,
@@ -33,19 +34,21 @@ def plot_modal_split(
title : str, optional
x_label : str, optional
y_label : str, optional
+ fig : matplotlib.figure
+ Only used if axis is provided as well.
axis : matplotlib axes
x_pad : float, default: 10
- used to set ax.xaxis.labelpad
+ Used to set ax.xaxis.labelpad
y_pad : float, default: 10
- used to set ax.yaxis.labelpad
+ Used to set ax.yaxis.labelpad
title_pad : float, default: 1.02
- passed on to `matplotlib.pyplot.title`
- skip_xticks : int, default: 0
- if larger than 0, every nth x-tick label is skipped.
+ Passed on to `matplotlib.pyplot.title`
+ skip_xticks : int, default: 1
+ Every nth x-tick label is kept.
n_col_legend : int
- passed on as `ncol` to matplotlib.pyplot.legend()
+ Passed on as `ncol` to matplotlib.pyplot.legend()
borderaxespad : float
- passed on to matplotlib.pyplot.legend()
+ Passed on to matplotlib.pyplot.legend()
Returns
-------
@@ -86,7 +89,7 @@ def plot_modal_split(
# skip ticks for X axis
if skip_xticks > 0:
for i, tick in enumerate(ax.xaxis.get_major_ticks()):
- if i % skip_xticks == 0:
+ if i % skip_xticks != 0:
tick.set_visible(False)
# We use a nice trick to put the legend out of the plot and to scale it automatically
@@ -105,11 +108,13 @@ def plot_modal_split(
)
if title is not None:
- plt.title(title, y=title_pad)
+ ax.set_title(title, y=title_pad)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
- fig.autofmt_xdate()
+
+ if fig is not None:
+ fig.autofmt_xdate()
plt.tight_layout()
|
mie-lab/trackintel
|
c585d0f49f78b2bf1f3a1cba41c979caa4ca9cb2
|
diff --git a/tests/analysis/test_location_identification.py b/tests/analysis/test_location_identification.py
index a256ca5..af0917c 100644
--- a/tests/analysis/test_location_identification.py
+++ b/tests/analysis/test_location_identification.py
@@ -253,6 +253,15 @@ class TestLocation_Identifier:
osna = osna_method(example_osna)
assert_geodataframe_equal(li, osna)
+ def test_pre_filter_index(self, example_freq):
+ """Test if pre_filter=False works with non-serial index"""
+ # issue-#403
+ example_freq.index = *reversed(example_freq.index[1:]), example_freq.index[0]
+ example_freq.index += 100 # move it so that there is no overlap to a range index
+ li = location_identifier(example_freq, method="FREQ", pre_filter=False)
+ fr = freq_method(example_freq)
+ assert_geodataframe_equal(li, fr)
+
@pytest.fixture
def example_osna():
diff --git a/tests/visualization/test_modal_split.py b/tests/visualization/test_modal_split.py
index 306a0bd..4df63b7 100644
--- a/tests/visualization/test_modal_split.py
+++ b/tests/visualization/test_modal_split.py
@@ -8,10 +8,11 @@ import pytest
from trackintel.analysis.modal_split import calculate_modal_split
from trackintel.io.dataset_reader import read_geolife, geolife_add_modes_to_triplegs
from trackintel.visualization.modal_split import plot_modal_split
+from trackintel.visualization.util import regular_figure
@pytest.fixture
-def get_geolife_triplegs_with_modes():
+def geolife_triplegs_with_modes():
"""Get modal split for a small part of the geolife dataset."""
pfs, labels = read_geolife(os.path.join("tests", "data", "geolife_modes"))
pfs, sp = pfs.as_positionfixes.generate_staypoints(method="sliding", dist_threshold=25, time_threshold=5)
@@ -22,7 +23,7 @@ def get_geolife_triplegs_with_modes():
@pytest.fixture
-def get_test_triplegs_with_modes():
+def triplegs_with_modes():
"""Get modal split for randomly generated data."""
n = 200
day_1_h1 = pd.Timestamp("1970-01-01 00:00:00", tz="utc")
@@ -33,44 +34,39 @@ def get_test_triplegs_with_modes():
df["user_id"] = np.random.randint(1, 5, size=n)
df["started_at"] = np.random.randint(1, 30, size=n) * one_day
df["started_at"] = df["started_at"] + day_1_h1
-
return df
class TestPlot_modal_split:
- def test_create_plot_geolife(self, get_geolife_triplegs_with_modes):
+ def test_create_plot_geolife(self, geolife_triplegs_with_modes):
"""Check if we can run the plot function with geolife data without error"""
- modal_split = calculate_modal_split(get_geolife_triplegs_with_modes, freq="d", per_user=False)
+ modal_split = calculate_modal_split(geolife_triplegs_with_modes, freq="d", per_user=False)
plot_modal_split(modal_split)
- assert True
- def test_check_dtype_error(self, get_geolife_triplegs_with_modes):
+ def test_check_dtype_error(self, geolife_triplegs_with_modes):
"""Check if error is thrown correctly when index is not datetime
freq=None calculates the modal split over the whole period
"""
- modal_split = calculate_modal_split(get_geolife_triplegs_with_modes, freq=None, per_user=False)
+ modal_split = calculate_modal_split(geolife_triplegs_with_modes, freq=None, per_user=False)
with pytest.raises(ValueError):
plot_modal_split(modal_split)
- assert True
- def test_multi_user_error(self, get_test_triplegs_with_modes):
+ def test_multi_user_error(self, triplegs_with_modes):
"""Create a modal split plot based on randomly generated test data"""
- modal_split = calculate_modal_split(get_test_triplegs_with_modes, freq="d", per_user=True, norm=True)
+ modal_split = calculate_modal_split(triplegs_with_modes, freq="d", per_user=True, norm=True)
with pytest.raises(ValueError):
plot_modal_split(modal_split)
# make sure that there is no error if the data was correctly created
- modal_split = calculate_modal_split(get_test_triplegs_with_modes, freq="d", per_user=False, norm=True)
+ modal_split = calculate_modal_split(triplegs_with_modes, freq="d", per_user=False, norm=True)
plot_modal_split(modal_split)
- assert True
-
- def test_create_plot_testdata(self, get_test_triplegs_with_modes):
+ def test_create_plot_testdata(self, triplegs_with_modes):
"""Create a modal split plot based on randomly generated test data"""
tmp_file = os.path.join("tests", "data", "modal_split_plot.png")
- modal_split = calculate_modal_split(get_test_triplegs_with_modes, freq="d", per_user=False, norm=True)
+ modal_split = calculate_modal_split(triplegs_with_modes, freq="d", per_user=False, norm=True)
modal_split = modal_split[["walk", "bike", "train", "car", "bus"]] # change order for the looks of the plot
plot_modal_split(
@@ -80,3 +76,28 @@ class TestPlot_modal_split:
assert os.path.exists(tmp_file)
os.remove(tmp_file)
os.remove(tmp_file.replace("png", "pdf"))
+
+ def test_ax_arg(self, triplegs_with_modes):
+ """Test if ax is augmented if passed to function."""
+ _, axis = regular_figure()
+ modal_split = calculate_modal_split(triplegs_with_modes, freq="d", norm=True)
+ xlabel, ylabel, title = "xlabel", "ylabel", "title"
+ dateformat = "%d"
+ _, ax = plot_modal_split(
+ modal_split, date_fmt_x_axis=dateformat, x_label=xlabel, y_label=ylabel, title=title, axis=axis
+ )
+ assert axis is ax
+ assert ax.get_xlabel() == xlabel
+ assert ax.get_ylabel() == ylabel
+ assert ax.get_title() == title
+
+ def test_skip_xticks(self, triplegs_with_modes):
+ """Test if function set right ticks invisible."""
+ modal_split = calculate_modal_split(triplegs_with_modes, freq="d", norm=True)
+ mod = 4 # remove all but the mod 4 ticks
+ _, ax = regular_figure()
+ _, ax = plot_modal_split(modal_split)
+ assert all(t.get_visible() for _, t in enumerate(ax.xaxis.get_major_ticks()))
+ _, ax = regular_figure()
+ _, ax = plot_modal_split(modal_split, skip_xticks=mod)
+ assert all(t.get_visible() == (i % mod == 0) for i, t in enumerate(ax.xaxis.get_major_ticks()))
|
BUG: error in modal split plot when passing axis
The modal split plotting function throws an error if the `axis` argument is not None. The error occurs in [this line](https://github.com/mie-lab/trackintel/blob/ca4300e6056b8500a27907faca515478ca268524/trackintel/visualization/modal_split.py#L112)
because if an axis object is passed as an argument, the `fig` variable does not exist. A simple fix is probably to put that line into an if clause as well.
|
0.0
|
c585d0f49f78b2bf1f3a1cba41c979caa4ca9cb2
|
[
"tests/analysis/test_location_identification.py::TestLocation_Identifier::test_pre_filter_index",
"tests/visualization/test_modal_split.py::TestPlot_modal_split::test_ax_arg",
"tests/visualization/test_modal_split.py::TestPlot_modal_split::test_skip_xticks"
] |
[
"tests/analysis/test_location_identification.py::TestPre_Filter::test_no_kw",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_thresh_sp",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_thresh_loc",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_thresh_sp_at_loc",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_tresh_loc_time",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_loc_period",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_agg_level",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_agg_level_error",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_non_continous_index",
"tests/analysis/test_location_identification.py::TestFreq_method::test_default_labels",
"tests/analysis/test_location_identification.py::TestFreq_method::test_custom_labels",
"tests/analysis/test_location_identification.py::TestFreq_method::test_duration",
"tests/analysis/test_location_identification.py::Test_Freq_Transform::test_function",
"tests/analysis/test_location_identification.py::Test_Freq_Assign::test_function",
"tests/analysis/test_location_identification.py::Test_Freq_Assign::test_more_labels_than_entries",
"tests/analysis/test_location_identification.py::TestLocation_Identifier::test_unkown_method",
"tests/analysis/test_location_identification.py::TestLocation_Identifier::test_no_location_column",
"tests/analysis/test_location_identification.py::TestLocation_Identifier::test_pre_filter",
"tests/analysis/test_location_identification.py::TestLocation_Identifier::test_freq_method",
"tests/analysis/test_location_identification.py::TestLocation_Identifier::test_osna_method",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_default",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_overlap",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_only_weekends",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_two_users",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_leisure_weighting",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_only_one_work_location",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_only_one_rest_location",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_only_one_leisure_location",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_prior_activity_label",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_multiple_users_with_only_one_location",
"tests/analysis/test_location_identification.py::Test_osna_label_timeframes::test_weekend",
"tests/analysis/test_location_identification.py::Test_osna_label_timeframes::test_weekday",
"tests/visualization/test_modal_split.py::TestPlot_modal_split::test_multi_user_error",
"tests/visualization/test_modal_split.py::TestPlot_modal_split::test_create_plot_testdata"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-05-22 19:30:53+00:00
|
mit
| 3,929 |
|
mie-lab__trackintel-446
|
diff --git a/trackintel/preprocessing/triplegs.py b/trackintel/preprocessing/triplegs.py
index d87343f..495d4a5 100644
--- a/trackintel/preprocessing/triplegs.py
+++ b/trackintel/preprocessing/triplegs.py
@@ -198,9 +198,10 @@ def generate_trips(staypoints, triplegs, gap_threshold=15, add_geometry=True):
# from tpls table, get the last point of the last tripleg on the trip
lambda x: Point(tpls.loc[x[-1], tpls.geometry.name].coords[-1])
)
- # convert to GeoDataFrame with MultiPoint column
+ # convert to GeoDataFrame with MultiPoint column and crs (not-None if possible)
trips["geom"] = [MultiPoint([x, y]) for x, y in zip(trips.origin_geom, trips.destination_geom)]
- trips = gpd.GeoDataFrame(trips, geometry="geom")
+ crs_trips = sp.crs if sp.crs else tpls.crs
+ trips = gpd.GeoDataFrame(trips, geometry="geom", crs=crs_trips)
# cleanup
trips.drop(["origin_geom", "destination_geom"], inplace=True, axis=1)
@@ -269,6 +270,14 @@ def _concat_staypoints_triplegs(staypoints, triplegs, add_geometry):
sp_tpls["is_activity"].fillna(False, inplace=True)
sp_tpls["sp_tpls_id"] = sp_tpls.index # store id for later reassignment
if add_geometry:
+ # Check if crs is set. Warn if None
+ if sp.crs is None:
+ warnings.warn("Staypoint crs is not set. Assuming same as for triplegs.")
+ if tpls.crs is None:
+ warnings.warn("Tripleg crs is not set. Assuming same as for staypoints.")
+ assert (
+ sp.crs == tpls.crs or sp.crs is None or tpls.crs is None
+ ), "CRS of staypoints and triplegs differ. Geometry cannot be joined safely."
sp_tpls["geom"] = pd.concat([sp.geometry, tpls.geometry])
sp_tpls.sort_values(by=["user_id", "started_at"], inplace=True)
|
mie-lab/trackintel
|
fe84a4f8b94a3105d0366e92377cb5e38c025b56
|
diff --git a/tests/preprocessing/test_staypoints.py b/tests/preprocessing/test_staypoints.py
index e0cdce0..5f50f4a 100644
--- a/tests/preprocessing/test_staypoints.py
+++ b/tests/preprocessing/test_staypoints.py
@@ -245,6 +245,12 @@ class TestGenerate_locations:
assert loc_dataset_num == 1
assert loc_user_num == 2
+ def test_crs(self, example_staypoints):
+ """Test whether the crs of the output locations is set correctly."""
+ sp = example_staypoints
+ sp, locs = sp.as_staypoints.generate_locations(method="dbscan", epsilon=20, num_samples=1)
+ assert locs.crs == sp.crs
+
def test_dbscan_min(self):
"""Test with small epsilon parameter."""
pfs_file = os.path.join("tests", "data", "positionfixes.csv")
diff --git a/tests/preprocessing/test_triplegs.py b/tests/preprocessing/test_triplegs.py
index 534b72b..341eda2 100644
--- a/tests/preprocessing/test_triplegs.py
+++ b/tests/preprocessing/test_triplegs.py
@@ -60,7 +60,7 @@ class TestGenerate_trips:
"""Test if we can generate the example trips based on example data."""
# load pregenerated trips
path = os.path.join("tests", "data", "geolife_long", "trips.csv")
- trips_loaded = ti.read_trips_csv(path, index_col="id", geom_col="geom", crs=None)
+ trips_loaded = ti.read_trips_csv(path, index_col="id", geom_col="geom", crs="EPSG:4326")
# create trips from geolife (based on positionfixes) - with gap_threshold 1e6
sp, tpls = example_triplegs_higher_gap_threshold
@@ -373,6 +373,35 @@ class TestGenerate_trips:
with pytest.raises(AttributeError, match=error_msg):
generate_trips(sp, tpls)
+ def test_crs(self, example_triplegs):
+ """Test that the resulting GeoDataFrame has the correct crs or a warning or error is thrown if not set"""
+ sp, tpls = example_triplegs
+ # Case 1: sp crs None --> throw warning and set to tpls crs
+ sp.crs = None
+ with pytest.warns(UserWarning):
+ _, _, trips = generate_trips(sp, tpls)
+ assert trips.crs == tpls.crs
+ # Case 2: Both crs None --> warn and set to None
+ tpls.crs = None
+ with pytest.warns(UserWarning):
+ _, _, trips = generate_trips(sp, tpls)
+ assert trips.crs is None
+ # Case 3: tpls crs is None --> throw warning and set to sp crs
+ sp.crs = "EPSG:4326"
+ with pytest.warns(UserWarning):
+ _, _, trips = generate_trips(sp, tpls)
+ assert trips.crs == "EPSG:4326"
+ # Case 4: Both crs set and correspond
+ tpls.crs = "EPSG:2056"
+ sp.crs = "EPSG:2056"
+ _, _, trips = generate_trips(sp, tpls)
+ assert trips.crs == "EPSG:2056"
+ # Case 5: Both crs set but differ --> throw error
+ sp.crs = "EPSG:4326"
+ error_msg = "CRS of staypoints and triplegs differ. Geometry cannot be joined safely."
+ with pytest.raises(AssertionError, match=error_msg):
+ generate_trips(sp, tpls)
+
def _create_debug_sp_tpls_data(sp, tpls, gap_threshold):
"""Preprocess sp and tpls for "test_generate_trips_*."""
|
BUG: crs not set for trips or locations
When I generate trips from staypoints and triplegs, the resulting Geodataframe has no crs defined, even though the crs was set for both staypoints and triplegs.
e.g.:
```
>> print(sp.crs, legs.crs)
EPSG:2056 EPSG:2056
>> sp, legs, trips = ti.preprocessing.triplegs.generate_trips(act_sp, legs, gap_threshold=15, add_geometry=True)
>> print(trips.crs)
None
```
The same happens for generating locations. This is not intended, is it? For further processing of the data, I'd expect that the crs is adopted.
|
0.0
|
fe84a4f8b94a3105d0366e92377cb5e38c025b56
|
[
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_generate_trips",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_crs"
] |
[
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_empty_generation",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_parallel_computing",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_dbscan_hav_euc",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_dbscan_haversine",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_dbscan_loc",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_dbscan_user_dataset",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_crs",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_dbscan_min",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_dbscan_max",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_missing_link",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_num_samples_high",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_num_samples_3",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_dtype_consistent",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_index_start",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_print_progress_flag",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_index_stability",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_location_ids_and_noise",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_location_ids_and_noise_dataset",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_agg_level_error",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_method_error",
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints",
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints_triplegs",
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints_time",
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints_max_time_gap",
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints_time_gap_error",
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints_agg",
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints_error",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_duplicate_columns",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_trip_wo_geom",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_trip_coordinates",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_accessor",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_accessor_arguments",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_generate_trips_missing_link",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_generate_trips_dtype_consistent",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_compare_to_old_trip_function",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_generate_trips_index_start",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_generate_trips_gap_detection",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_generate_trips_id_management",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_only_staypoints_in_trip",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_loop_linestring_case",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_keeping_all_columns_sp_tpls",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_missing_is_activity_column"
] |
{
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-08-10 11:35:44+00:00
|
mit
| 3,930 |
|
mie-lab__trackintel-453
|
diff --git a/trackintel/preprocessing/positionfixes.py b/trackintel/preprocessing/positionfixes.py
index 9adcb64..4a1bdae 100644
--- a/trackintel/preprocessing/positionfixes.py
+++ b/trackintel/preprocessing/positionfixes.py
@@ -4,10 +4,10 @@ import warnings
import geopandas as gpd
import numpy as np
import pandas as pd
-from shapely.geometry import LineString, Point
+from shapely.geometry import LineString
-from trackintel.geogr.distances import haversine_dist
-from trackintel.preprocessing.util import applyParallel, _explode_agg
+from trackintel.geogr.distances import check_gdf_planar, haversine_dist
+from trackintel.preprocessing.util import _explode_agg, angle_centroid_multipoints, applyParallel
def generate_staypoints(
@@ -411,7 +411,7 @@ def _generate_staypoints_sliding_user(
y = df[geo_col].y.to_numpy()
ret_sp = []
- start = 0
+ curr = start = 0
for curr in range(1, len(df)):
# the gap of two consecutive positionfixes should not be too long
@@ -451,8 +451,12 @@ def __create_new_staypoints(start, end, pfs, elevation_flag, geo_col, last_flag=
# if end is the last pfs, we want to include the info from it as well
if last_flag:
end = len(pfs)
+ points = pfs[geo_col].iloc[start:end].unary_union
+ if check_gdf_planar(pfs):
+ new_sp[geo_col] = points.centroid
+ else:
+ new_sp[geo_col] = angle_centroid_multipoints(points)[0]
- new_sp[geo_col] = Point(pfs[geo_col].iloc[start:end].x.median(), pfs[geo_col].iloc[start:end].y.median())
if elevation_flag:
new_sp["elevation"] = pfs["elevation"].iloc[start:end].median()
new_sp["pfs_id"] = pfs.index[start:end].to_list()
diff --git a/trackintel/preprocessing/staypoints.py b/trackintel/preprocessing/staypoints.py
index 36b0bbd..9b62793 100644
--- a/trackintel/preprocessing/staypoints.py
+++ b/trackintel/preprocessing/staypoints.py
@@ -4,8 +4,8 @@ import pandas as pd
from sklearn.cluster import DBSCAN
import warnings
-from trackintel.geogr.distances import meters_to_decimal_degrees
-from trackintel.preprocessing.util import applyParallel
+from trackintel.geogr.distances import meters_to_decimal_degrees, check_gdf_planar
+from trackintel.preprocessing.util import applyParallel, angle_centroid_multipoints
def generate_locations(
@@ -136,9 +136,11 @@ def generate_locations(
# filter staypoints not belonging to locations
locs = locs.loc[locs["location_id"] != -1]
- with warnings.catch_warnings(): # TODO: fix bug for geographic crs #437
- warnings.simplefilter("ignore", category=UserWarning)
- locs["center"] = locs.geometry.centroid # creates warning for geographic crs
+ if check_gdf_planar(locs):
+ locs["center"] = locs.geometry.centroid
+ else:
+ # error of wrapping e.g. mean([-180, +180]) -> own function needed
+ locs["center"] = angle_centroid_multipoints(locs.geometry)
# extent is the convex hull of the geometry
locs["extent"] = locs.geometry.convex_hull
@@ -154,7 +156,7 @@ def generate_locations(
else:
locs.loc[pointLine_idx, "extent"] = locs.loc[pointLine_idx, "extent"].buffer(epsilon)
- locs = locs.set_geometry("center")
+ locs = locs.set_geometry("center", crs=sp.crs)
locs = locs[["user_id", "location_id", "center", "extent"]]
# index management
diff --git a/trackintel/preprocessing/util.py b/trackintel/preprocessing/util.py
index ab74ea9..a1f9c31 100644
--- a/trackintel/preprocessing/util.py
+++ b/trackintel/preprocessing/util.py
@@ -1,6 +1,11 @@
from datetime import timedelta
+
+import geopandas as gpd
+import numpy as np
import pandas as pd
+import pygeos
from joblib import Parallel, delayed
+from shapely.geometry.base import BaseGeometry
from tqdm import tqdm
@@ -103,3 +108,33 @@ def _explode_agg(column, agg, orig_df, agg_df):
temp = temp[temp[column].notna()]
temp.index = temp[column]
return orig_df.join(temp[agg], how="left")
+
+
+def angle_centroid_multipoints(geometry):
+ """Calculate the mean of angles of MultiPoints
+
+ Parameters
+ ----------
+ geometry : GeoSeries, shapely.geometry.Point, shapely.geometry.MultiPoint
+ Should contain only Points or MultiPoints any other lead to wrong results.
+
+ Returns
+ -------
+ geopandas.GeometryArray
+ Centroid of geometries (shapely.Point)
+ """
+ g = pygeos.from_shapely(geometry)
+ g, index = pygeos.get_coordinates(g, return_index=True)
+ # number of coordinate pairs per MultiPoint
+ count = np.bincount(index)
+ x, y = g[:, 0], g[:, 1]
+ # calculate mean of y Coordinates -> no wrapping
+ y = np.bincount(index, weights=y) / count
+ # calculate mean of x Coordinates with wrapping
+ x_rad = np.deg2rad(x)
+ x_sin = np.bincount(index, weights=np.sin(x_rad)) / count
+ x_cos = np.bincount(index, weights=np.cos(x_rad)) / count
+ x = np.rad2deg(np.arctan2(x_sin, x_cos))
+ # shapely Geometry has no crs information
+ crs = None if isinstance(geometry, BaseGeometry) else geometry.crs
+ return gpd.points_from_xy(x, y, crs=crs)
|
mie-lab/trackintel
|
d3ade20819306daa1b8ebd8cbcf508bbaab60dfc
|
diff --git a/tests/data/geolife_long/trips.csv b/tests/data/geolife_long/trips.csv
index e910b17..88202da 100644
--- a/tests/data/geolife_long/trips.csv
+++ b/tests/data/geolife_long/trips.csv
@@ -1,15 +1,15 @@
id,user_id,started_at,finished_at,origin_staypoint_id,destination_staypoint_id,geom
-0,0,2008-10-23 02:53:04+00:00,2008-10-23 03:05:00+00:00,,0.0,"MULTIPOINT (116.3184169999999966 39.9847019999999986, 116.2987250000000046 39.9840190000000035)"
-1,0,2008-10-23 04:08:07+00:00,2008-10-23 04:34:37+00:00,0.0,1.0,"MULTIPOINT (116.2987250000000046 39.9840190000000035, 116.3248009999999937 39.9997290000000021)"
-2,0,2008-10-23 09:42:25+00:00,2008-10-23 10:45:41+00:00,1.0,5.0,"MULTIPOINT (116.3248009999999937 39.9997290000000021, 116.3216824999999943 40.0089049999999986)"
-3,0,2008-10-23 11:09:22+00:00,2008-10-23 11:10:57+00:00,5.0,6.0,"MULTIPOINT (116.3216824999999943 40.0089049999999986, 116.3209779999999967 40.0092690000000033)"
-4,0,2008-10-24 02:09:59+00:00,2008-10-24 02:47:06+00:00,6.0,,"MULTIPOINT (116.3209779999999967 40.0092690000000033, 116.3211620000000011 40.0092089999999985)"
-5,1,2008-10-23 05:53:05+00:00,2008-10-23 06:01:42+00:00,,10.0,"MULTIPOINT (116.3192360000000036 39.9840939999999989, 116.3275380000000041 39.9780510000000007)"
-6,1,2008-10-23 10:32:53+00:00,2008-10-23 11:10:19+00:00,10.0,11.0,"MULTIPOINT (116.3275380000000041 39.9780510000000007, 116.3064135000000050 40.0138019999999983)"
-7,1,2008-10-23 11:49:08+00:00,2008-10-23 11:49:48+00:00,11.0,12.0,"MULTIPOINT (116.3064135000000050 40.0138019999999983, 116.3065320000000042 40.0138200000000026)"
-8,1,2008-10-23 23:43:46+00:00,2008-10-24 00:23:03+00:00,12.0,14.0,"MULTIPOINT (116.3065320000000042 40.0138200000000026, 116.3266370000000052 39.9779920000000004)"
-9,1,2008-10-24 01:45:41+00:00,2008-10-24 02:02:46+00:00,14.0,15.0,"MULTIPOINT (116.3266370000000052 39.9779920000000004, 116.3084029999999984 39.9810390000000027)"
-10,1,2008-10-24 02:28:19+00:00,2008-10-24 02:31:32+00:00,15.0,16.0,"MULTIPOINT (116.3084029999999984 39.9810390000000027, 116.3101889999999941 39.9813740000000024)"
-11,1,2008-10-24 03:16:35+00:00,2008-10-24 04:12:50+00:00,16.0,20.0,"MULTIPOINT (116.3101889999999941 39.9813740000000024, 116.3267620000000022 39.9779330000000002)"
-12,1,2008-10-24 05:28:05+00:00,2008-10-24 05:39:50+00:00,20.0,21.0,"MULTIPOINT (116.3267620000000022 39.9779330000000002, 116.3112610000000018 39.9827119999999994)"
-13,1,2008-10-24 06:08:42+00:00,2008-10-24 06:35:50+00:00,21.0,,"MULTIPOINT (116.3112610000000018 39.9827119999999994, 116.3270629999999954 39.9778990000000007)"
+0,0,2008-10-23 02:53:04+00:00,2008-10-23 03:05:00+00:00,,0.0,"MULTIPOINT (116.318417 39.984702, 116.29872033333335 39.98398866666667)"
+1,0,2008-10-23 04:08:07+00:00,2008-10-23 04:34:37+00:00,0.0,1.0,"MULTIPOINT (116.29872033333335 39.98398866666667, 116.324803 39.99972133333333)"
+2,0,2008-10-23 09:42:25+00:00,2008-10-23 10:45:41+00:00,1.0,5.0,"MULTIPOINT (116.324803 39.99972133333333, 116.321629 40.00890557142857)"
+3,0,2008-10-23 11:09:22+00:00,2008-10-23 11:10:57+00:00,5.0,6.0,"MULTIPOINT (116.321629 40.00890557142857, 116.32097166666667 40.00928)"
+4,0,2008-10-24 02:09:59+00:00,2008-10-24 02:47:06+00:00,6.0,,"MULTIPOINT (116.32097166666667 40.00928, 116.321162 40.009209)"
+5,1,2008-10-23 05:53:05+00:00,2008-10-23 06:01:42+00:00,,10.0,"MULTIPOINT (116.319236 39.984094, 116.32752433333334 39.978049666666664)"
+6,1,2008-10-23 10:32:53+00:00,2008-10-23 11:10:19+00:00,10.0,11.0,"MULTIPOINT (116.32752433333334 39.978049666666664, 116.30641350000002 40.013802)"
+7,1,2008-10-23 11:49:08+00:00,2008-10-23 11:49:48+00:00,11.0,12.0,"MULTIPOINT (116.30641350000002 40.013802, 116.30653043609027 40.01383060902254)"
+8,1,2008-10-23 23:43:46+00:00,2008-10-24 00:23:03+00:00,12.0,14.0,"MULTIPOINT (116.30653043609027 40.01383060902254, 116.32663666666667 39.977995)"
+9,1,2008-10-24 01:45:41+00:00,2008-10-24 02:02:46+00:00,14.0,15.0,"MULTIPOINT (116.32663666666667 39.977995, 116.3084012 39.981039200000005)"
+10,1,2008-10-24 02:28:19+00:00,2008-10-24 02:31:32+00:00,15.0,16.0,"MULTIPOINT (116.3084012 39.981039200000005, 116.31014576923077 39.98138838461539)"
+11,1,2008-10-24 03:16:35+00:00,2008-10-24 04:12:50+00:00,16.0,20.0,"MULTIPOINT (116.31014576923077 39.98138838461539, 116.32675288888888 39.97793)"
+12,1,2008-10-24 05:28:05+00:00,2008-10-24 05:39:50+00:00,20.0,21.0,"MULTIPOINT (116.32675288888888 39.97793, 116.31125442857143 39.982705714285714)"
+13,1,2008-10-24 06:08:42+00:00,2008-10-24 06:35:50+00:00,21.0,,"MULTIPOINT (116.31125442857143 39.982705714285714, 116.327063 39.977899)"
diff --git a/tests/preprocessing/test_positionfixes.py b/tests/preprocessing/test_positionfixes.py
index d63b1d8..ed5ed81 100644
--- a/tests/preprocessing/test_positionfixes.py
+++ b/tests/preprocessing/test_positionfixes.py
@@ -305,6 +305,24 @@ class Test_Generate_staypoints_sliding_user:
)
+class Test__create_new_staypoints:
+ """Test __create_new_staypoints."""
+
+ def test_planar_crs(self, geolife_pfs_sp_long):
+ """Test if planar crs are handled as well"""
+ pfs, _ = geolife_pfs_sp_long
+ _, sp_wgs84 = pfs.as_positionfixes.generate_staypoints(
+ method="sliding", dist_threshold=100, time_threshold=5.0, include_last=True
+ )
+ pfs = pfs.set_crs(2056, allow_override=True)
+ _, sp_lv95 = pfs.as_positionfixes.generate_staypoints(
+ method="sliding", dist_threshold=100, time_threshold=5.0, include_last=True
+ )
+ sp_lv95.set_crs(4326, allow_override=True, inplace=True)
+ # planar and non-planar differ only if we experience a wrap in coords like [+180, -180]
+ assert_geodataframe_equal(sp_wgs84, sp_lv95, check_less_precise=True)
+
+
class TestGenerate_triplegs:
"""Tests for generate_triplegs() method."""
diff --git a/tests/preprocessing/test_staypoints.py b/tests/preprocessing/test_staypoints.py
index 5f50f4a..0b7e86b 100644
--- a/tests/preprocessing/test_staypoints.py
+++ b/tests/preprocessing/test_staypoints.py
@@ -7,7 +7,7 @@ import pandas as pd
import pytest
from shapely.geometry import Point
from sklearn.cluster import DBSCAN
-from geopandas.testing import assert_geodataframe_equal
+from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal
import trackintel as ti
from trackintel.geogr.distances import calculate_distance_matrix
@@ -219,8 +219,7 @@ class TestGenerate_locations:
other_locs = gpd.GeoDataFrame(other_locs, columns=["user_id", "id", "center"], geometry="center", crs=sp.crs)
other_locs.set_index("id", inplace=True)
- assert all(other_locs["center"] == locs["center"])
- assert all(other_locs.index == locs.index)
+ assert_geoseries_equal(other_locs["center"], locs["center"], check_less_precise=True)
def test_dbscan_user_dataset(self):
"""Test user and dataset location generation."""
diff --git a/tests/preprocessing/test_triplegs.py b/tests/preprocessing/test_triplegs.py
index 341eda2..1fe7cc0 100644
--- a/tests/preprocessing/test_triplegs.py
+++ b/tests/preprocessing/test_triplegs.py
@@ -71,7 +71,7 @@ class TestGenerate_trips:
["user_id", "started_at", "finished_at", "origin_staypoint_id", "destination_staypoint_id", "geom"]
]
# test if generated trips are equal
- assert_geodataframe_equal(trips_loaded, trips)
+ assert_geodataframe_equal(trips_loaded, trips, check_less_precise=True)
def test_trip_wo_geom(self, example_triplegs_higher_gap_threshold):
"""Test if the add_geometry parameter shows correct behavior"""
diff --git a/tests/preprocessing/test_util.py b/tests/preprocessing/test_util.py
index cfca5aa..426322d 100644
--- a/tests/preprocessing/test_util.py
+++ b/tests/preprocessing/test_util.py
@@ -1,10 +1,13 @@
import datetime
+import geopandas as gpd
+from geopandas.testing import assert_geoseries_equal
import pandas as pd
-from pandas.testing import assert_frame_equal
import pytest
+from pandas.testing import assert_frame_equal
+from shapely.geometry import MultiPoint, Point
-from trackintel.preprocessing.util import calc_temp_overlap, _explode_agg
+from trackintel.preprocessing.util import _explode_agg, calc_temp_overlap, angle_centroid_multipoints
@pytest.fixture
@@ -88,3 +91,18 @@ class TestExplodeAgg:
returned_df = _explode_agg("id", "c", orig_df, agg_df)
solution_df = pd.DataFrame(orig)
assert_frame_equal(returned_df, solution_df)
+
+
+class TestAngleCentroidMultipoints:
+ """Test util method angle_centroid_multipoints"""
+
+ # test adapted from https://rosettacode.org/wiki/Averages/Mean_angle
+ a = Point((130, 45))
+ b = MultiPoint([(160, 10), (-170, 20)])
+ c = MultiPoint([(20, 0), (30, 10), (40, 20)])
+ d = MultiPoint([(350, 0), (10, 0)])
+ e = MultiPoint([(90, 0), (180, 0), (270, 0), (360, 0)])
+ g = gpd.GeoSeries([a, b, c, d, e])
+ g_solution = gpd.GeoSeries([a, Point([175, 15]), Point([30, 10]), Point(0, 0), Point(-90, 0)])
+ g = gpd.GeoSeries(angle_centroid_multipoints(g))
+ assert_geoseries_equal(g, g_solution, check_less_precise=True)
|
BUG: `.centroid` does not wrap for angles
In #431 we discovered an error in calculating the mean angle for a location.
We calculate the average coordinates with `.centroid`. This function assumes that the coordinate system is projected and therefor does not wrap around in a circle.
For example, if we compute the centroid of the multipoint `[(0, 179), (0, -179)]`, we get the point `(0, 0)` instead of the desired `(0, 180)`.
This is equivalent to this problem (https://rosettacode.org/wiki/Averages/Mean_angle), for a geographical explanation see (https://carto.com/blog/center-of-points/).
A suggested solution would be to implement a new function in `util.py`. You can also take a look at https://github.com/mie-lab/trackintel/blob/4f4868b6547bfceb05447e221bb681f8cd389fd7/trackintel/preprocessing/positionfixes.py#L455
That suffers from the same bug.
|
0.0
|
d3ade20819306daa1b8ebd8cbcf508bbaab60dfc
|
[
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_empty_generation",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_parallel_computing",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_duplicate_pfs_warning",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_duplicate_pfs_filtering",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_duplicate_columns",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_sliding_min",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_sliding_max",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_missing_link",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_dtype_consistent",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_index_start",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_include_last",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_print_progress",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_temporal",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_gap_threshold",
"tests/preprocessing/test_positionfixes.py::Test_Generate_staypoints_sliding_user::test_unknown_distance_metric",
"tests/preprocessing/test_positionfixes.py::Test__create_new_staypoints::test_planar_crs",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_empty_generation",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_noncontinuous_unordered_index",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_invalid_isolates",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_duplicate_columns",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_user_without_sp",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_pfs_without_sp",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_stability",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_random_order",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_pfs_index",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_dtype_consistent",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_missing_link",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_index_start",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_invalid_inputs",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_temporal",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_sp_tpls_overlap",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_empty_generation",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_parallel_computing",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_dbscan_hav_euc",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_dbscan_haversine",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_dbscan_loc",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_dbscan_user_dataset",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_crs",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_dbscan_min",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_dbscan_max",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_missing_link",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_num_samples_high",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_num_samples_3",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_dtype_consistent",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_index_start",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_print_progress_flag",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_index_stability",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_location_ids_and_noise",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_location_ids_and_noise_dataset",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_agg_level_error",
"tests/preprocessing/test_staypoints.py::TestGenerate_locations::test_method_error",
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints",
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints_triplegs",
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints_time",
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints_max_time_gap",
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints_time_gap_error",
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints_agg",
"tests/preprocessing/test_staypoints.py::TestMergeStaypoints::test_merge_staypoints_error",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_duplicate_columns",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_generate_trips",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_trip_wo_geom",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_trip_coordinates",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_accessor",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_accessor_arguments",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_generate_trips_missing_link",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_generate_trips_dtype_consistent",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_compare_to_old_trip_function",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_generate_trips_index_start",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_generate_trips_gap_detection",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_generate_trips_id_management",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_only_staypoints_in_trip",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_loop_linestring_case",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_keeping_all_columns_sp_tpls",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_missing_is_activity_column",
"tests/preprocessing/test_triplegs.py::TestGenerate_trips::test_crs",
"tests/preprocessing/test_util.py::TestCalc_temp_overlap::test_same_interval",
"tests/preprocessing/test_util.py::TestCalc_temp_overlap::test_1_in_2",
"tests/preprocessing/test_util.py::TestCalc_temp_overlap::test_2_in_1",
"tests/preprocessing/test_util.py::TestCalc_temp_overlap::test_no_overlap",
"tests/preprocessing/test_util.py::TestCalc_temp_overlap::test_no_duration",
"tests/preprocessing/test_util.py::TestExplodeAgg::test_empty_agg"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-12-05 16:01:34+00:00
|
mit
| 3,931 |
|
mie-lab__trackintel-492
|
diff --git a/environment-dev.yml b/environment-dev.yml
index c1dfc62..0353105 100644
--- a/environment-dev.yml
+++ b/environment-dev.yml
@@ -6,7 +6,7 @@ dependencies:
- python
- numpy
- matplotlib
-- geopandas>=0.10.0
+- geopandas>=0.12.0
- scikit-learn
- networkx
- pint
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 10b132e..5e81a7f 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -1,6 +1,6 @@
numpy
matplotlib
-geopandas>=0.10.0
+geopandas>=0.12.0
scikit-learn
networkx
pint
diff --git a/trackintel/io/file.py b/trackintel/io/file.py
index 4d2263d..9e9743a 100644
--- a/trackintel/io/file.py
+++ b/trackintel/io/file.py
@@ -1,3 +1,4 @@
+import ast
import warnings
from functools import wraps
from inspect import signature
@@ -555,6 +556,7 @@ def read_tours_csv(*args, columns=None, index_col=None, tz=None, **kwargs):
>>> trackintel.read_tours_csv('data.csv', columns={'uuid':'user_id'})
"""
columns = {} if columns is None else columns
+ kwargs.setdefault("converters", {}).setdefault("trips", ast.literal_eval)
tours = pd.read_csv(*args, index_col=index_col, **kwargs)
tours.rename(columns=columns, inplace=True)
diff --git a/trackintel/io/postgis.py b/trackintel/io/postgis.py
index 97cb91b..93d98f4 100644
--- a/trackintel/io/postgis.py
+++ b/trackintel/io/postgis.py
@@ -7,6 +7,7 @@ from shapely import wkb
import pandas as pd
from geoalchemy2 import Geometry
from sqlalchemy import create_engine
+from sqlalchemy.types import JSON
import trackintel as ti
@@ -50,7 +51,7 @@ def read_positionfixes_postgis(
parse_dates=None,
params=None,
chunksize=None,
- **kwargs
+ read_gpd_kws=None,
):
"""Reads positionfixes from a PostGIS database.
@@ -92,7 +93,7 @@ def read_positionfixes_postgis(
If specified, return an iterator where chunksize is the number
of rows to include in each chunk.
- **kwargs
+ read_gpd_kws : dict, default None
Further keyword arguments as available in trackintels trackintel.io.read_positionfixes_gpd().
Especially useful to rename column names from the SQL table to trackintel conform column names.
See second example how to use it in code.
@@ -106,7 +107,8 @@ def read_positionfixes_postgis(
--------
>>> pfs = ti.io.read_positionfixes_postgis("SELECT * FROM positionfixes", con, geom_col="geom")
>>> pfs = ti.io.read_positionfixes_postgis("SELECT * FROM positionfixes", con, geom_col="geom",
- ... index_col="id", user_id="USER", tracked_at="time")
+ ... index_col="id",
+ read_gpd_kws={"user_id"="USER", "tracked_at": "time"})
"""
pfs = gpd.GeoDataFrame.from_postgis(
sql,
@@ -119,7 +121,7 @@ def read_positionfixes_postgis(
params=params,
chunksize=chunksize,
)
- return ti.io.read_positionfixes_gpd(pfs, **kwargs)
+ return ti.io.read_positionfixes_gpd(pfs, **(read_gpd_kws or {}))
@_handle_con_string
@@ -149,7 +151,7 @@ def read_triplegs_postgis(
parse_dates=None,
params=None,
chunksize=None,
- **kwargs
+ read_gpd_kws=None,
):
"""Reads triplegs from a PostGIS database.
@@ -191,7 +193,7 @@ def read_triplegs_postgis(
If specified, return an iterator where chunksize is the number
of rows to include in each chunk.
- **kwargs
+ read_gpd_kws : dict, default None
Further keyword arguments as available in trackintels trackintel.io.read_triplegs_gpd().
Especially useful to rename column names from the SQL table to trackintel conform column names.
See second example how to use it in code.
@@ -205,7 +207,7 @@ def read_triplegs_postgis(
--------
>>> tpls = ti.io.read_triplegs_postgis("SELECT * FROM triplegs", con, geom_col="geom")
>>> tpls = ti.io.read_triplegs_postgis("SELECT * FROM triplegs", con, geom_col="geom", index_col="id",
- ... started_at="start_time", finished_at="end_time", user_id="USER")
+ ... read_gpd_kws={"user_id": "USER"})
"""
tpls = gpd.GeoDataFrame.from_postgis(
sql,
@@ -218,7 +220,7 @@ def read_triplegs_postgis(
params=params,
chunksize=chunksize,
)
- return ti.io.read_triplegs_gpd(tpls, **kwargs)
+ return ti.io.read_triplegs_gpd(tpls, **(read_gpd_kws or {}))
@_handle_con_string
@@ -248,7 +250,7 @@ def read_staypoints_postgis(
parse_dates=None,
params=None,
chunksize=None,
- **kwargs
+ read_gpd_kws=None,
):
"""Read staypoints from a PostGIS database.
@@ -290,7 +292,7 @@ def read_staypoints_postgis(
If specified, return an iterator where chunksize is the number
of rows to include in each chunk.
- **kwargs
+ read_gpd_kws : dict, default None
Further keyword arguments as available in trackintels trackintel.io.read_staypoints_gpd().
Especially useful to rename column names from the SQL table to trackintel conform column names.
See second example how to use it in code.
@@ -305,7 +307,7 @@ def read_staypoints_postgis(
--------
>>> sp = ti.io.read_staypoints_postgis("SELECT * FROM staypoints", con, geom_col="geom")
>>> sp = ti.io.read_staypoints_postgis("SELECT * FROM staypoints", con, geom_col="geom", index_col="id",
- ... started_at="start_time", finished_at="end_time", user_id="USER")
+ ... read_gpd_kws={"user_id": "USER"})
"""
sp = gpd.GeoDataFrame.from_postgis(
sql,
@@ -319,7 +321,7 @@ def read_staypoints_postgis(
chunksize=chunksize,
)
- return ti.io.read_staypoints_gpd(sp, **kwargs)
+ return ti.io.read_staypoints_gpd(sp, **(read_gpd_kws or {}))
@_handle_con_string
@@ -349,7 +351,8 @@ def read_locations_postgis(
parse_dates=None,
params=None,
chunksize=None,
- **kwargs
+ extent=None,
+ read_gpd_kws=None,
):
"""Reads locations from a PostGIS database.
@@ -391,7 +394,10 @@ def read_locations_postgis(
If specified, return an iterator where chunksize is the number
of rows to include in each chunk.
- **kwargs
+ extent : string, default None
+ If specified read the extent column as geometry column.
+
+ read_gpd_kws : dict, default None
Further keyword arguments as available in trackintels trackintel.io.read_locations_gpd().
Especially useful to rename column names from the SQL table to trackintel conform column names.
See second example how to use it in code.
@@ -405,7 +411,7 @@ def read_locations_postgis(
--------
>>> locs = ti.io.read_locations_postgis("SELECT * FROM locations", con, center="center")
>>> locs = ti.io.read_locations_postgis("SELECT * FROM locations", con, center="geom", index_col="id",
- ... user_id="USER", extent="extent")
+ ... extent="extent, read_gpd_kws={"user_id": "USER"})
)
"""
locs = gpd.GeoDataFrame.from_postgis(
@@ -419,10 +425,10 @@ def read_locations_postgis(
params=params,
chunksize=chunksize,
)
- if "extent" in kwargs:
- locs[kwargs["extent"]] = gpd.GeoSeries.from_wkb(locs[kwargs["extent"]])
+ if extent is not None:
+ locs[extent] = gpd.GeoSeries.from_wkb(locs[extent])
- return ti.io.read_locations_gpd(locs, center=center, **kwargs)
+ return ti.io.read_locations_gpd(locs, center=center, **(read_gpd_kws or {}))
@_handle_con_string
@@ -466,7 +472,7 @@ def read_trips_postgis(
parse_dates=None,
params=None,
chunksize=None,
- **kwargs
+ read_gpd_kws=None,
):
"""Read trips from a PostGIS database.
@@ -508,7 +514,7 @@ def read_trips_postgis(
If specified, return an iterator where chunksize is the number
of rows to include in each chunk.
- **kwargs
+ read_gpd_kws : dict, default None
Further keyword arguments as available in trackintels trackintel.io.read_trips_gpd().
Especially useful to rename column names from the SQL table to trackintel conform column names.
See second example how to use it in code.
@@ -523,8 +529,8 @@ def read_trips_postgis(
--------
>>> trips = ti.io.read_trips_postgis("SELECT * FROM trips", con)
>>> trips = ti.io.read_trips_postgis("SELECT * FROM trips", con, geom_col="geom", index_col="id",
- ... started_at="start_time", finished_at="end_time", user_id="USER",
- ... origin_staypoint_id="ORIGIN", destination_staypoint_id="DEST")
+ ... read_gpd_kws={"user_id": "USER", "origin_staypoint_id": "ORIGIN",
+ "destination_staypoint_id": "DEST"})
"""
if geom_col is None:
@@ -550,13 +556,16 @@ def read_trips_postgis(
chunksize=chunksize,
)
- return ti.io.read_trips_gpd(trips, **kwargs)
+ return ti.io.read_trips_gpd(trips, **(read_gpd_kws or {}))
@_handle_con_string
def write_trips_postgis(
trips, name, con, schema=None, if_exists="fail", index=True, index_label=None, chunksize=None, dtype=None
):
+ if "trips" in trips.columns:
+ dtype = dtype or {}
+ dtype.setdefault("trips", JSON)
if isinstance(trips, gpd.GeoDataFrame):
trips.to_postgis(
name,
@@ -592,7 +601,7 @@ def read_tours_postgis(
parse_dates=None,
params=None,
chunksize=None,
- **kwargs
+ read_gpd_kws=None,
):
"""Read tours from a PostGIS database.
@@ -634,7 +643,7 @@ def read_tours_postgis(
If specified, return an iterator where chunksize is the number
of rows to include in each chunk.
- **kwargs
+ read_gpd_kws : dict, default None
Further keyword arguments as available in trackintels trackintel.io.read_tours_gpd().
Especially useful to rename column names from the SQL table to trackintel conform column names.
See second example how to use it in code.
@@ -647,8 +656,8 @@ def read_tours_postgis(
Examples
--------
>>> tours = ti.io.read_tours_postgis("SELECT * FROM tours", con)
- >>> tours = ti.io.read_tours_postgis("SELECT * FROM tours", con, index_col="id", started_at="start_time",
- ... finished_at="end_time", user_id="USER")
+ >>> tours = ti.io.read_tours_postgis("SELECT * FROM tours", con, index_col="id",
+ read_gpd_kws={"user_id": "USER"})
"""
if geom_col is None:
tours = pd.read_sql(
@@ -673,7 +682,7 @@ def read_tours_postgis(
chunksize=chunksize,
)
- return ti.io.read_tours_gpd(tours, **kwargs)
+ return ti.io.read_tours_gpd(tours, **(read_gpd_kws or {}))
@_handle_con_string
|
mie-lab/trackintel
|
eb7d6ce6986e30a29cd9acdc34c5c03b75bec082
|
diff --git a/tests/io/test_file.py b/tests/io/test_file.py
index fc78e63..01a50f0 100644
--- a/tests/io/test_file.py
+++ b/tests/io/test_file.py
@@ -2,6 +2,7 @@ import filecmp
import os
import pytest
import pandas as pd
+from pandas.testing import assert_frame_equal
import trackintel as ti
@@ -312,6 +313,25 @@ class TestTrips:
assert gdf.index.name is None
[email protected]
+def example_tours():
+ """Tours to load into the database."""
+ t1 = pd.Timestamp("1971-01-01 00:00:00", tz="utc")
+ t2 = pd.Timestamp("1971-01-01 05:00:00", tz="utc")
+ t3 = pd.Timestamp("1971-01-02 07:00:00", tz="utc")
+ h = pd.Timedelta(hours=1)
+
+ list_dict = [
+ {"user_id": 0, "started_at": t1, "finished_at": t1 + h, "trips": [0, 1, 2]},
+ {"user_id": 0, "started_at": t2, "finished_at": t2 + h, "trips": [2, 3, 4]},
+ {"user_id": 1, "started_at": t3, "finished_at": t3 + h, "trips": [4, 5, 6]},
+ ]
+ tours = pd.DataFrame(data=list_dict)
+ tours.index.name = "id"
+ tours.as_tours
+ return tours
+
+
class TestTours:
"""Test for 'read_tours_csv' and 'write_tours_csv' functions."""
@@ -325,6 +345,14 @@ class TestTours:
assert filecmp.cmp(orig_file, tmp_file, shallow=False)
os.remove(tmp_file)
+ def test_to_from_csv(self, example_tours):
+ """Test writing then reading functionality."""
+ tmp_file = os.path.join("tests", "data", "tours_test.csv")
+ example_tours.as_tours.to_csv(tmp_file)
+ read_tours = ti.read_tours_csv(tmp_file, index_col="id")
+ os.remove(tmp_file)
+ assert_frame_equal(example_tours, read_tours)
+
def test_to_csv_accessor(self):
"""Test basic reading and writing functions."""
orig_file = os.path.join("tests", "data", "geolife_long", "tours.csv")
@@ -334,7 +362,3 @@ class TestTours:
tours.as_tours.to_csv(tmp_file)
assert filecmp.cmp(orig_file, tmp_file, shallow=False)
os.remove(tmp_file)
-
- def test_from_to_postgis(self):
- # TODO Implement some tests for reading and writing tours.
- pass
diff --git a/tests/io/test_postgis.py b/tests/io/test_postgis.py
index 021b2b3..74c59cb 100644
--- a/tests/io/test_postgis.py
+++ b/tests/io/test_postgis.py
@@ -312,7 +312,7 @@ class TestPositionfixes:
try:
pfs.as_positionfixes.to_postgis(table, conn_string)
with pytest.warns(UserWarning):
- pfs_db = ti.io.read_positionfixes_postgis(sql, conn, geom_col, index_col="id", **rename_dict)
+ pfs_db = ti.io.read_positionfixes_postgis(sql, conn, geom_col, index_col="id", read_gpd_kws=rename_dict)
assert_geodataframe_equal(example_positionfixes, pfs_db)
finally:
del_table(conn, table)
@@ -406,7 +406,7 @@ class TestTriplegs:
try:
tpls.as_triplegs.to_postgis(table, conn_string)
with pytest.warns(UserWarning):
- tpls_db = ti.io.read_triplegs_postgis(sql, conn, geom_col, index_col="id", **rename_dict)
+ tpls_db = ti.io.read_triplegs_postgis(sql, conn, geom_col, index_col="id", read_gpd_kws=rename_dict)
assert_geodataframe_equal(example_triplegs, tpls_db)
finally:
del_table(conn, table)
@@ -571,7 +571,7 @@ class TestLocations:
try:
locs.as_locations.to_postgis(table, conn_string)
with pytest.warns(UserWarning):
- tpls_db = ti.io.read_locations_postgis(sql, conn, geom_col, index_col="id", **rename_dict)
+ tpls_db = ti.io.read_locations_postgis(sql, conn, geom_col, index_col="id", read_gpd_kws=rename_dict)
assert_geodataframe_equal(example_locations, tpls_db)
finally:
del_table(conn, table)
@@ -624,7 +624,7 @@ class TestTrips:
try:
trips.as_trips.to_postgis(table, create_engine(conn_string))
with pytest.warns(UserWarning):
- tpls_db = ti.io.read_trips_postgis(sql, conn, index_col="id", **rename_dict)
+ tpls_db = ti.io.read_trips_postgis(sql, conn, index_col="id", read_gpd_kws=rename_dict)
assert_frame_equal(example_trips, tpls_db)
finally:
del_table(conn, table)
@@ -716,6 +716,23 @@ class TestTours:
finally:
del_table(conn, table)
+ def test_trips_column(self, example_tours, conn_postgis):
+ """Test if list of trips is read correctly."""
+ tours = example_tours
+ tours["trips"] = [[1 + i, 10 + i, 100 + i] for i in range(len(tours))]
+ conn_string, conn = conn_postgis
+ table = "tours"
+ sql = f"SELECT * FROM {table}"
+
+ engine = create_engine(conn_string)
+ try:
+ tours.as_tours.to_postgis(table, engine)
+ with pytest.warns(UserWarning):
+ tours_db = ti.io.read_tours_postgis(sql, conn, index_col="id")
+ assert_frame_equal(tours, tours_db)
+ finally:
+ del_table(conn, table)
+
class TestGetSrid:
def test_srid(self, example_positionfixes):
|
List of trip ids in a tour dataframe is unstructured when reading it from csv.
similar to #443 but easier to fix.
Tours store a list of the trips they aggregate in the field "trips". Unfortunately the datatype of this list is not stable when writing+reading it from a csv file.
This can be easily fixed in the function `ti.read_tours_csv` by adding the following line if the column `trips` is present
```python
tours['trips'] = tours['trips'].apply(eval)
```
|
0.0
|
eb7d6ce6986e30a29cd9acdc34c5c03b75bec082
|
[
"tests/io/test_file.py::TestTours::test_to_from_csv"
] |
[
"tests/io/test_file.py::TestPositionfixes::test_from_to_csv",
"tests/io/test_file.py::TestPositionfixes::test_set_crs",
"tests/io/test_file.py::TestPositionfixes::test_set_datatime_tz",
"tests/io/test_file.py::TestPositionfixes::test_set_index_warning",
"tests/io/test_file.py::TestPositionfixes::test_set_index",
"tests/io/test_file.py::TestTriplegs::test_from_to_csv",
"tests/io/test_file.py::TestTriplegs::test_set_crs",
"tests/io/test_file.py::TestTriplegs::test_set_datatime_tz",
"tests/io/test_file.py::TestTriplegs::test_set_index_warning",
"tests/io/test_file.py::TestTriplegs::test_set_index",
"tests/io/test_file.py::TestStaypoints::test_from_to_csv",
"tests/io/test_file.py::TestStaypoints::test_set_crs",
"tests/io/test_file.py::TestStaypoints::test_set_datatime_tz",
"tests/io/test_file.py::TestStaypoints::test_set_index_warning",
"tests/io/test_file.py::TestStaypoints::test_set_index",
"tests/io/test_file.py::TestLocations::test_from_to_csv",
"tests/io/test_file.py::TestLocations::test_set_crs",
"tests/io/test_file.py::TestLocations::test_set_index_warning",
"tests/io/test_file.py::TestLocations::test_set_index",
"tests/io/test_file.py::TestTrips::test_from_to_csv",
"tests/io/test_file.py::TestTrips::test_set_datatime_tz",
"tests/io/test_file.py::TestTrips::test_set_index_warning",
"tests/io/test_file.py::TestTrips::test_set_index",
"tests/io/test_file.py::TestTours::test_from_to_csv",
"tests/io/test_file.py::TestTours::test_to_csv_accessor",
"tests/io/test_postgis.py::TestGetSrid::test_srid"
] |
{
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-07-26 17:53:30+00:00
|
mit
| 3,932 |
|
mie-lab__trackintel-497
|
diff --git a/trackintel/analysis/location_identification.py b/trackintel/analysis/location_identification.py
index e469491..79bafc6 100644
--- a/trackintel/analysis/location_identification.py
+++ b/trackintel/analysis/location_identification.py
@@ -197,6 +197,8 @@ def freq_method(staypoints, *labels):
group["duration"] = group["finished_at"] - group["started_at"]
# pandas keeps inner order of groups
sp.loc[sp["user_id"] == name, "purpose"] = _freq_transform(group, *labels)
+ if "purpose" not in sp.columns: # if empty sp
+ sp["purpose"] = None
return sp
diff --git a/trackintel/io/file.py b/trackintel/io/file.py
index 4d2263d..9e9743a 100644
--- a/trackintel/io/file.py
+++ b/trackintel/io/file.py
@@ -1,3 +1,4 @@
+import ast
import warnings
from functools import wraps
from inspect import signature
@@ -555,6 +556,7 @@ def read_tours_csv(*args, columns=None, index_col=None, tz=None, **kwargs):
>>> trackintel.read_tours_csv('data.csv', columns={'uuid':'user_id'})
"""
columns = {} if columns is None else columns
+ kwargs.setdefault("converters", {}).setdefault("trips", ast.literal_eval)
tours = pd.read_csv(*args, index_col=index_col, **kwargs)
tours.rename(columns=columns, inplace=True)
|
mie-lab/trackintel
|
4e76060bf415c477f688b930b1a9aa1a0f7da5aa
|
diff --git a/tests/analysis/test_location_identification.py b/tests/analysis/test_location_identification.py
index af0917c..dd4965b 100644
--- a/tests/analysis/test_location_identification.py
+++ b/tests/analysis/test_location_identification.py
@@ -182,6 +182,13 @@ class TestFreq_method:
assert freq["purpose"].count() == example_freq["purpose"].count()
assert_geodataframe_equal(example_freq, freq)
+ def test_empty_sp(self, example_freq):
+ """Test if empty sp also get purpose column."""
+ example_freq.drop(example_freq.index, inplace=True)
+ freq = freq_method(example_freq)
+ example_freq["purpose"] = None
+ assert_geodataframe_equal(example_freq, freq)
+
class Test_Freq_Transform:
"""Test help function _freq_transform."""
diff --git a/tests/io/test_file.py b/tests/io/test_file.py
index fc78e63..01a50f0 100644
--- a/tests/io/test_file.py
+++ b/tests/io/test_file.py
@@ -2,6 +2,7 @@ import filecmp
import os
import pytest
import pandas as pd
+from pandas.testing import assert_frame_equal
import trackintel as ti
@@ -312,6 +313,25 @@ class TestTrips:
assert gdf.index.name is None
[email protected]
+def example_tours():
+ """Tours to load into the database."""
+ t1 = pd.Timestamp("1971-01-01 00:00:00", tz="utc")
+ t2 = pd.Timestamp("1971-01-01 05:00:00", tz="utc")
+ t3 = pd.Timestamp("1971-01-02 07:00:00", tz="utc")
+ h = pd.Timedelta(hours=1)
+
+ list_dict = [
+ {"user_id": 0, "started_at": t1, "finished_at": t1 + h, "trips": [0, 1, 2]},
+ {"user_id": 0, "started_at": t2, "finished_at": t2 + h, "trips": [2, 3, 4]},
+ {"user_id": 1, "started_at": t3, "finished_at": t3 + h, "trips": [4, 5, 6]},
+ ]
+ tours = pd.DataFrame(data=list_dict)
+ tours.index.name = "id"
+ tours.as_tours
+ return tours
+
+
class TestTours:
"""Test for 'read_tours_csv' and 'write_tours_csv' functions."""
@@ -325,6 +345,14 @@ class TestTours:
assert filecmp.cmp(orig_file, tmp_file, shallow=False)
os.remove(tmp_file)
+ def test_to_from_csv(self, example_tours):
+ """Test writing then reading functionality."""
+ tmp_file = os.path.join("tests", "data", "tours_test.csv")
+ example_tours.as_tours.to_csv(tmp_file)
+ read_tours = ti.read_tours_csv(tmp_file, index_col="id")
+ os.remove(tmp_file)
+ assert_frame_equal(example_tours, read_tours)
+
def test_to_csv_accessor(self):
"""Test basic reading and writing functions."""
orig_file = os.path.join("tests", "data", "geolife_long", "tours.csv")
@@ -334,7 +362,3 @@ class TestTours:
tours.as_tours.to_csv(tmp_file)
assert filecmp.cmp(orig_file, tmp_file, shallow=False)
os.remove(tmp_file)
-
- def test_from_to_postgis(self):
- # TODO Implement some tests for reading and writing tours.
- pass
|
location_identifier raises key error if all staypoints are pre filtered
If the tracking quality is bad and all staypoints are pre-filtered, then the variable `method_val` does not get the column purpose and therefore [the line 74](https://github.com/mie-lab/trackintel/blob/master/trackintel/analysis/location_identification.py#L74) raises a key error.
I think the behavior should be that the staypoints are returned with a nan-filled or empty purpose column.
|
0.0
|
4e76060bf415c477f688b930b1a9aa1a0f7da5aa
|
[
"tests/analysis/test_location_identification.py::TestFreq_method::test_empty_sp",
"tests/io/test_file.py::TestTours::test_to_from_csv"
] |
[
"tests/analysis/test_location_identification.py::TestPre_Filter::test_no_kw",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_thresh_sp",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_thresh_loc",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_thresh_sp_at_loc",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_tresh_loc_time",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_loc_period",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_agg_level",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_agg_level_error",
"tests/analysis/test_location_identification.py::TestPre_Filter::test_non_continous_index",
"tests/analysis/test_location_identification.py::TestFreq_method::test_default_labels",
"tests/analysis/test_location_identification.py::TestFreq_method::test_custom_labels",
"tests/analysis/test_location_identification.py::TestFreq_method::test_duration",
"tests/analysis/test_location_identification.py::Test_Freq_Transform::test_function",
"tests/analysis/test_location_identification.py::Test_Freq_Assign::test_function",
"tests/analysis/test_location_identification.py::Test_Freq_Assign::test_more_labels_than_entries",
"tests/analysis/test_location_identification.py::TestLocation_Identifier::test_unkown_method",
"tests/analysis/test_location_identification.py::TestLocation_Identifier::test_no_location_column",
"tests/analysis/test_location_identification.py::TestLocation_Identifier::test_pre_filter",
"tests/analysis/test_location_identification.py::TestLocation_Identifier::test_freq_method",
"tests/analysis/test_location_identification.py::TestLocation_Identifier::test_osna_method",
"tests/analysis/test_location_identification.py::TestLocation_Identifier::test_pre_filter_index",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_default",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_overlap",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_only_weekends",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_two_users",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_leisure_weighting",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_only_one_work_location",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_only_one_rest_location",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_only_one_leisure_location",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_prior_activity_label",
"tests/analysis/test_location_identification.py::TestOsna_Method::test_multiple_users_with_only_one_location",
"tests/analysis/test_location_identification.py::Test_osna_label_timeframes::test_weekend",
"tests/analysis/test_location_identification.py::Test_osna_label_timeframes::test_weekday",
"tests/io/test_file.py::TestPositionfixes::test_from_to_csv",
"tests/io/test_file.py::TestPositionfixes::test_set_crs",
"tests/io/test_file.py::TestPositionfixes::test_set_datatime_tz",
"tests/io/test_file.py::TestPositionfixes::test_set_index_warning",
"tests/io/test_file.py::TestPositionfixes::test_set_index",
"tests/io/test_file.py::TestTriplegs::test_from_to_csv",
"tests/io/test_file.py::TestTriplegs::test_set_crs",
"tests/io/test_file.py::TestTriplegs::test_set_datatime_tz",
"tests/io/test_file.py::TestTriplegs::test_set_index_warning",
"tests/io/test_file.py::TestTriplegs::test_set_index",
"tests/io/test_file.py::TestStaypoints::test_from_to_csv",
"tests/io/test_file.py::TestStaypoints::test_set_crs",
"tests/io/test_file.py::TestStaypoints::test_set_datatime_tz",
"tests/io/test_file.py::TestStaypoints::test_set_index_warning",
"tests/io/test_file.py::TestStaypoints::test_set_index",
"tests/io/test_file.py::TestLocations::test_from_to_csv",
"tests/io/test_file.py::TestLocations::test_set_crs",
"tests/io/test_file.py::TestLocations::test_set_index_warning",
"tests/io/test_file.py::TestLocations::test_set_index",
"tests/io/test_file.py::TestTrips::test_from_to_csv",
"tests/io/test_file.py::TestTrips::test_set_datatime_tz",
"tests/io/test_file.py::TestTrips::test_set_index_warning",
"tests/io/test_file.py::TestTrips::test_set_index",
"tests/io/test_file.py::TestTours::test_from_to_csv",
"tests/io/test_file.py::TestTours::test_to_csv_accessor"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-07-31 09:41:02+00:00
|
mit
| 3,933 |
|
mie-lab__trackintel-516
|
diff --git a/README.md b/README.md
index 5c6ffdf..833c49e 100644
--- a/README.md
+++ b/README.md
@@ -8,6 +8,7 @@
[](https://codecov.io/gh/mie-lab/trackintel)
[](https://github.com/psf/black)
[](https://mie-lab.github.io/trackintel/)
+[](https://pepy.tech/project/trackintel)
*trackintel* is a library for the analysis of spatio-temporal tracking data with a focus on human mobility. The core of *trackintel* is the hierarchical data model for movement data that is used in GIS, transport planning and related fields. We provide functionalities for the full life-cycle of human mobility data analysis: import and export of tracking data of different types (e.g, trackpoints, check-ins, trajectories), preprocessing, data quality assessment, semantic enrichment, quantitative analysis and mining tasks, and visualization of data and results.
Trackintel is based on [Pandas](https://pandas.pydata.org/) and [GeoPandas](https://geopandas.org/#).
diff --git a/trackintel/preprocessing/positionfixes.py b/trackintel/preprocessing/positionfixes.py
index 3e57066..07af030 100644
--- a/trackintel/preprocessing/positionfixes.py
+++ b/trackintel/preprocessing/positionfixes.py
@@ -362,7 +362,7 @@ def generate_triplegs(
posfix_grouper = pfs.groupby("tripleg_id")
tpls = posfix_grouper.agg(
- {"user_id": ["mean"], "tracked_at": [min, max], pfs.geometry.name: list}
+ {"user_id": ["first"], "tracked_at": [min, max], pfs.geometry.name: list}
) # could add a "number of pfs": can be any column "count"
# prepare dataframe: Rename columns; read/set geometry/crs;
|
mie-lab/trackintel
|
11c36f3b2567b2d1c012e284ee3f89b5723caea0
|
diff --git a/tests/preprocessing/test_positionfixes.py b/tests/preprocessing/test_positionfixes.py
index d0362b9..e8c6f71 100644
--- a/tests/preprocessing/test_positionfixes.py
+++ b/tests/preprocessing/test_positionfixes.py
@@ -552,5 +552,5 @@ class TestGenerate_triplegs:
# remove isolated - not needed for this test
pfs = pfs[~pfs.index.isin([1, 2])].copy()
# set user ID to string
- pfs["user_id"] = pfs["user_id"].astype(str)
- pfs, tpls = pfs.as_positionfixes.generate_triplegs()
+ pfs["user_id"] = pfs["user_id"].astype(str) + "not_numerical_interpretable_str"
+ pfs, _ = pfs.as_positionfixes.generate_triplegs()
|
Positionfix aggregation by tripleg id fails if user_id is a string
The following minimal working example triggers an exception in `generate_triplegs` because `user_id` cannot be aggregated with the `mean` method:
```python
import pandas as pd
import geopandas as gpd
import pandas as pd
from trackintel.preprocessing.positionfixes import (
generate_staypoints,
generate_triplegs,
)
from trackintel.preprocessing.staypoints import generate_locations
positionfixes = gpd.GeoDataFrame(
{
"user_id": "test",
"tracked_at": pd.date_range("2023-01-01T00:00:00Z", freq="60s", periods=10),
},
geometry=gpd.points_from_xy([0] * 5 + [1] * 5, [0] * 5 + [1] * 5),
crs="EPSG:4326",
)
positionfixes, staypoints = generate_staypoints(positionfixes)
staypoints, _ = generate_locations(staypoints)
positionfixes, triplegs = generate_triplegs(positionfixes)
```
Traceback:
```python
Traceback (most recent call last):
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/groupby/groupby.py", line 1490, in array_func
result = self.grouper._cython_operation(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/groupby/ops.py", line 959, in _cython_operation
return cy_op.cython_operation(
^^^^^^^^^^^^^^^^^^^^^^^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/groupby/ops.py", line 657, in cython_operation
return self._cython_op_ndim_compat(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/groupby/ops.py", line 482, in _cython_op_ndim_compat
res = self._call_cython_op(
^^^^^^^^^^^^^^^^^^^^^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/groupby/ops.py", line 541, in _call_cython_op
func = self._get_cython_function(self.kind, self.how, values.dtype, is_numeric)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/groupby/ops.py", line 173, in _get_cython_function
raise NotImplementedError(
NotImplementedError: function is not implemented for this dtype: [how->mean,dtype->object]
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/nanops.py", line 1692, in _ensure_numeric
x = float(x)
^^^^^^^^
ValueError: could not convert string to float: 'testtesttesttesttest'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/nanops.py", line 1696, in _ensure_numeric
x = complex(x)
^^^^^^^^^^
ValueError: complex() arg is a malformed string
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/kpenner/dev/trackintel/test.py", line 12, in <module>
positionfixes, triplegs = generate_triplegs(positionfixes)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/trackintel/preprocessing/positionfixes.py", line 364, in generate_triplegs
tpls = posfix_grouper.agg(
^^^^^^^^^^^^^^^^^^^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/groupby/generic.py", line 1269, in aggregate
result = op.agg()
^^^^^^^^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/apply.py", line 163, in agg
return self.agg_dict_like()
^^^^^^^^^^^^^^^^^^^^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/apply.py", line 420, in agg_dict_like
results = {
^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/apply.py", line 421, in <dictcomp>
key: obj._gotitem(key, ndim=1).agg(how) for key, how in arg.items()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/groupby/generic.py", line 238, in aggregate
ret = self._aggregate_multiple_funcs(func, *args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/groupby/generic.py", line 316, in _aggregate_multiple_funcs
results[key] = self.aggregate(func, *args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/groupby/generic.py", line 232, in aggregate
return getattr(self, func)(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/groupby/groupby.py", line 1855, in mean
result = self._cython_agg_general(
^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/groupby/groupby.py", line 1507, in _cython_agg_general
new_mgr = data.grouped_reduce(array_func)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/internals/base.py", line 197, in grouped_reduce
res = func(arr)
^^^^^^^^^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/groupby/groupby.py", line 1503, in array_func
result = self._agg_py_fallback(values, ndim=data.ndim, alt=alt)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/groupby/groupby.py", line 1457, in _agg_py_fallback
res_values = self.grouper.agg_series(ser, alt, preserve_dtype=True)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/groupby/ops.py", line 994, in agg_series
result = self._aggregate_series_pure_python(obj, func)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/groupby/ops.py", line 1015, in _aggregate_series_pure_python
res = func(group)
^^^^^^^^^^^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/groupby/groupby.py", line 1857, in <lambda>
alt=lambda x: Series(x).mean(numeric_only=numeric_only),
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/generic.py", line 11556, in mean
return NDFrame.mean(self, axis, skipna, numeric_only, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/generic.py", line 11201, in mean
return self._stat_function(
^^^^^^^^^^^^^^^^^^^^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/generic.py", line 11158, in _stat_function
return self._reduce(
^^^^^^^^^^^^^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/series.py", line 4670, in _reduce
return op(delegate, skipna=skipna, **kwds)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/nanops.py", line 96, in _f
return f(*args, **kwargs)
^^^^^^^^^^^^^^^^^^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/nanops.py", line 158, in f
result = alt(values, axis=axis, skipna=skipna, **kwds)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/nanops.py", line 421, in new_func
result = func(values, axis=axis, skipna=skipna, mask=mask, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/nanops.py", line 727, in nanmean
the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_sum))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/kpenner/mambaforge/envs/trackintel/lib/python3.11/site-packages/pandas/core/nanops.py", line 1699, in _ensure_numeric
raise TypeError(f"Could not convert {x} to numeric") from err
TypeError: Could not convert testtesttesttesttest to numeric
```
https://github.com/mie-lab/trackintel/blob/11c36f3b2567b2d1c012e284ee3f89b5723caea0/trackintel/preprocessing/positionfixes.py#L364
|
0.0
|
11c36f3b2567b2d1c012e284ee3f89b5723caea0
|
[
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_str_userid"
] |
[
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_empty_generation",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_parallel_computing",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_duplicate_pfs_warning",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_duplicate_pfs_filtering",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_duplicate_columns",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_sliding_min",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_sliding_max",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_missing_link",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_dtype_consistent",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_index_start",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_include_last",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_print_progress",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_temporal",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_gap_threshold",
"tests/preprocessing/test_positionfixes.py::TestGenerate_staypoints::test_str_userid",
"tests/preprocessing/test_positionfixes.py::Test_Generate_staypoints_sliding_user::test_unknown_distance_metric",
"tests/preprocessing/test_positionfixes.py::Test__create_new_staypoints::test_planar_crs",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_empty_generation",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_noncontinuous_unordered_index",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_invalid_isolates",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_duplicate_columns",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_user_without_sp",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_pfs_without_sp",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_random_order",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_pfs_index",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_dtype_consistent",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_missing_link",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_index_start",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_invalid_inputs",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_temporal",
"tests/preprocessing/test_positionfixes.py::TestGenerate_triplegs::test_sp_tpls_overlap"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-08-25 05:56:07+00:00
|
mit
| 3,934 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.