Dataset schema (one row per task instance; ranges are the minimum and maximum observed values, "⌀" marks a nullable column):

| column | type | observed range |
|---|---|---|
| instance_id | string | length 10 to 57 |
| base_commit | string | length 40 |
| created_at | string (date) | 2014-04-30 14:58:36 to 2025-04-30 20:14:11 |
| environment_setup_commit | string | length 40 |
| hints_text | string | length 0 to 273k |
| patch | string | length 251 to 7.06M |
| problem_statement | string | length 11 to 52.5k |
| repo | string | length 7 to 53 |
| test_patch | string | length 231 to 997k |
| meta | dict | n/a |
| version | string | 851 distinct values |
| install_config | dict | n/a |
| requirements | string ⌀ | length 93 to 34.2k |
| environment | string ⌀ | length 760 to 20.5k |
| FAIL_TO_PASS | list | length 1 to 9.39k |
| FAIL_TO_FAIL | list | length 0 to 2.69k |
| PASS_TO_PASS | list | length 0 to 7.87k |
| PASS_TO_FAIL | list | length 0 to 192 |
| license_name | string | 55 distinct values |
| __index_level_0__ | int64 | 0 to 21.4k |
| before_filepaths | list | length 1 to 105 |
| after_filepaths | list | length 1 to 105 |
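For orientation, a row with this schema could be inspected with the `datasets` library. This is a hedged sketch only: the Hub identifier `some-org/some-dataset` and the `train` split are placeholders, since the dataset's real name is not given on this page.

```python
# Hypothetical sketch -- "some-org/some-dataset" and "train" are
# placeholders, not the real identifiers for this dataset.
from datasets import load_dataset

ds = load_dataset("some-org/some-dataset", split="train")
row = ds[0]
print(row["instance_id"])               # e.g. marshmallow-code__marshmallow-293
print(row["repo"], row["base_commit"])  # repo slug and commit to check out
print(row["FAIL_TO_PASS"])              # tests expected to flip to passing
```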
marshmallow-code__marshmallow-293 | 39bed8d628e2d08da5026df2df5ec6b9e9bbadf3 | 2015-10-07 22:51:36 | 1dbcae9c439d1a268717feb089351fc3c5180ac3 |
diff --git a/docs/quickstart.rst b/docs/quickstart.rst
index 4e7dcdb5..959e915a 100644
--- a/docs/quickstart.rst
+++ b/docs/quickstart.rst
@@ -303,6 +303,22 @@ Dictionaries or lists are also accepted as the custom error message, in case you
# 'age': ['Age is required.'],
# 'city': {'message': 'City required', 'code': 400}}
+Partial Loading
++++++++++++++++
+
+When using the same schema in multiple places, you may only want to check required fields some of the time when deserializing. You can ignore missing fields entirely by setting ``partial=True``.
+
+.. code-block:: python
+ :emphasize-lines: 5,6
+
+ class UserSchema(Schema):
+ name = fields.String(required=True)
+ age = fields.Integer(required=True)
+
+ data, errors = UserSchema().load({'age': 42}, partial=True)
+ # OR UserSchema(partial=True).load({'age': 42})
+ data, errors # => ({'age': 42}, {})
+
Schema.validate
+++++++++++++++
diff --git a/marshmallow/marshalling.py b/marshmallow/marshalling.py
index 5caca583..6c315c03 100644
--- a/marshmallow/marshalling.py
+++ b/marshmallow/marshalling.py
@@ -208,7 +208,7 @@ class Unmarshaller(ErrorStore):
data=output
)
- def deserialize(self, data, fields_dict, many=False,
+ def deserialize(self, data, fields_dict, many=False, partial=False,
dict_class=dict, index_errors=True, index=None):
"""Deserialize ``data`` based on the schema defined by ``fields_dict``.
@@ -216,6 +216,7 @@ class Unmarshaller(ErrorStore):
:param dict fields_dict: Mapping of field names to :class:`Field` objects.
:param bool many: Set to `True` if ``data`` should be deserialized as
a collection.
+ :param bool partial: If `True`, ignore missing fields.
:param type dict_class: Dictionary class used to construct the output.
:param bool index_errors: Whether to store the index of invalid items in
``self.errors`` when ``many=True``.
@@ -229,7 +230,7 @@ class Unmarshaller(ErrorStore):
if many and data is not None:
self._pending = True
ret = [self.deserialize(d, fields_dict, many=False,
- dict_class=dict_class,
+ partial=partial, dict_class=dict_class,
index=idx, index_errors=index_errors)
for idx, d in enumerate(data)]
@@ -265,6 +266,8 @@ class Unmarshaller(ErrorStore):
field_name = field_obj.load_from
raw_value = data.get(field_obj.load_from, missing)
if raw_value is missing:
+ if partial:
+ continue
_miss = field_obj.missing
raw_value = _miss() if callable(_miss) else _miss
if raw_value is missing and not field_obj.required:
diff --git a/marshmallow/schema.py b/marshmallow/schema.py
index 6275bd0a..6b38c984 100644
--- a/marshmallow/schema.py
+++ b/marshmallow/schema.py
@@ -12,7 +12,7 @@ import types
import uuid
import warnings
from collections import namedtuple
-from functools import partial
+import functools
from marshmallow import base, fields, utils, class_registry, marshalling
from marshmallow.compat import (with_metaclass, iteritems, text_type,
@@ -251,6 +251,7 @@ class BaseSchema(base.SchemaABC):
:param tuple load_only: A list or tuple of fields to skip during serialization
:param tuple dump_only: A list or tuple of fields to skip during
deserialization, read-only fields
+ :param bool partial: If `True`, ignore missing fields when deserializing.
.. versionchanged:: 2.0.0
`__validators__`, `__preprocessors__`, and `__data_handlers__` are removed in favor of
@@ -320,7 +321,8 @@ class BaseSchema(base.SchemaABC):
pass
def __init__(self, extra=None, only=(), exclude=(), prefix='', strict=False,
- many=False, context=None, load_only=(), dump_only=()):
+ many=False, context=None, load_only=(), dump_only=(),
+ partial=False):
# copy declared fields from metaclass
self.declared_fields = copy.deepcopy(self._declared_fields)
self.many = many
@@ -331,6 +333,7 @@ class BaseSchema(base.SchemaABC):
self.ordered = self.opts.ordered
self.load_only = set(load_only) or set(self.opts.load_only)
self.dump_only = set(dump_only) or set(self.opts.dump_only)
+ self.partial = partial
#: Dictionary mapping field_names -> :class:`Field` objects
self.fields = self.dict_class()
#: Callable marshalling object
@@ -519,19 +522,21 @@ class BaseSchema(base.SchemaABC):
ret = self.opts.json_module.dumps(deserialized, *args, **kwargs)
return MarshalResult(ret, errors)
- def load(self, data, many=None):
+ def load(self, data, many=None, partial=None):
"""Deserialize a data structure to an object defined by this Schema's
fields and :meth:`make_object`.
:param dict data: The data to deserialize.
:param bool many: Whether to deserialize `data` as a collection. If `None`, the
value for `self.many` is used.
+ :param bool partial: Whether to ignore missing fields. If `None`, the
+ value for `self.partial` is used.
:return: A tuple of the form (``data``, ``errors``)
:rtype: `UnmarshalResult`, a `collections.namedtuple`
.. versionadded:: 1.0.0
"""
- result, errors = self._do_load(data, many, postprocess=True)
+ result, errors = self._do_load(data, many, partial, postprocess=True)
return UnmarshalResult(data=result, errors=errors)
def loads(self, json_data, many=None, *args, **kwargs):
@@ -540,13 +545,20 @@ class BaseSchema(base.SchemaABC):
:param str json_data: A JSON string of the data to deserialize.
:param bool many: Whether to deserialize `obj` as a collection. If `None`, the
value for `self.many` is used.
+ :param bool partial: Whether to ignore missing fields. If `None`, the
+ value for `self.partial` is used.
:return: A tuple of the form (``data``, ``errors``)
:rtype: `UnmarshalResult`, a `collections.namedtuple`
.. versionadded:: 1.0.0
"""
+ # TODO: This avoids breaking backward compatibility if people were
+ # passing in positional args after `many` for use by `json.loads`, but
+ # ideally we shouldn't have to do this.
+ partial = kwargs.pop('partial', None)
+
data = self.opts.json_module.loads(json_data, *args, **kwargs)
- return self.load(data, many=many)
+ return self.load(data, many=many, partial=partial)
def validate(self, data, many=None):
"""Validate `data` against the schema, returning a dictionary of
@@ -565,17 +577,20 @@ class BaseSchema(base.SchemaABC):
##### Private Helpers #####
- def _do_load(self, data, many=None, postprocess=True):
+ def _do_load(self, data, many=None, partial=None, postprocess=True):
"""Deserialize `data`, returning the deserialized result and a dictonary of
validation errors.
:param data: The data to deserialize.
:param bool many: Whether to deserialize `data` as a collection. If `None`, the
value for `self.many` is used.
+ :param bool partial: Whether to ignore missing fields. If `None`, the
+ value for `self.partial` is used.
:param bool postprocess: Whether to run post_load methods..
:return: A tuple of the form (`data`, `errors`)
"""
many = self.many if many is None else bool(many)
+ partial = self.partial if partial is None else bool(partial)
processed_data = self._invoke_load_processors(PRE_LOAD, data, many, original_data=data)
@@ -584,6 +599,7 @@ class BaseSchema(base.SchemaABC):
processed_data,
self.fields,
many=many,
+ partial=partial,
dict_class=self.dict_class,
index_errors=self.opts.index_errors,
)
@@ -780,7 +796,7 @@ class BaseSchema(base.SchemaABC):
validator_kwargs = validator.__marshmallow_kwargs__[(VALIDATES_SCHEMA, pass_many)]
pass_original = validator_kwargs.get('pass_original', False)
if pass_many:
- validator = partial(validator, many=many)
+ validator = functools.partial(validator, many=many)
if many:
for idx, item in enumerate(data):
try:
| "Partial" deserialization support
For implementing `PATCH` handlers on REST endpoints, it would be useful to have a concept of partial deserialization.
This would mean ignoring missing required fields and default values for missing fields.
I know this sounds a bit weird, but it matches a standard CRUD endpoint fairly well - `POST` or `PUT` to that endpoint should use the full validation w/r/t required fields or defaults, but `PATCH` is intended to apply a partial update and only modify what was actually changed.
I can handle this in userspace by catching `ValidationError`s for missing fields, but I don't think I can do the same for ignoring default field values.
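As an illustration of that userspace workaround, here is a hedged sketch against the marshmallow 2.x `load` API; `UserSchema` and `patch_load` are hypothetical names, and as the issue notes, this only covers the required-field half of the problem, not injected defaults.

```python
# Hedged sketch: emulate partial loading by discarding only the
# "Missing data for required field." errors after a normal 2.x load.
from marshmallow import Schema, fields

class UserSchema(Schema):
    name = fields.String(required=True)
    age = fields.Integer(required=True)

def patch_load(schema, data):
    result, errors = schema.load(data)  # 2.x returns (data, errors)
    # Keep every error except pure missing-field complaints; this does
    # nothing about defaults injected for absent fields, which is the
    # part the issue says cannot be handled from userspace.
    remaining = {
        name: msgs for name, msgs in errors.items()
        if msgs != ['Missing data for required field.']
    }
    return result, remaining

result, errors = patch_load(UserSchema(), {'age': 42})
# result == {'age': 42}, errors == {}
```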
Here's the equivalent API in DRF: http://www.django-rest-framework.org/api-guide/serializers/#partial-updates. From my POV I think any `partial` arg (if you think it makes sense) would best be positioned as a named argument on `load`, though.
| marshmallow-code/marshmallow |
diff --git a/tests/test_deserialization.py b/tests/test_deserialization.py
index 3b26dc5c..f886f5e6 100644
--- a/tests/test_deserialization.py
+++ b/tests/test_deserialization.py
@@ -1089,6 +1089,28 @@ class TestSchemaDeserialization:
assert len(errors['foo']) == 1
assert 'Missing data for required field.' in errors['foo']
+ @pytest.mark.parametrize('partial_schema',
+ [
+ True,
+ False
+ ])
+ def test_partial_deserialization(self, partial_schema):
+ class MySchema(Schema):
+ foo = fields.Field(required=True)
+ bar = fields.Field(required=True)
+
+ schema_args = {}
+ load_args = {}
+ if partial_schema:
+ schema_args['partial'] = True
+ else:
+ load_args['partial'] = True
+ data, errors = MySchema(**schema_args).load({'foo': 3}, **load_args)
+
+ assert data['foo'] == 3
+ assert 'bar' not in data
+ assert not errors
+
validators_gen = (func for func in [lambda x: x <= 24, lambda x: 18 <= x])
validators_gen_float = (func for func in
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 3
}
| 2.1 |
{
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"dev-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} |
backports.tarfile==1.2.0
cachetools==5.5.2
certifi==2025.1.31
cffi==1.17.1
chardet==5.2.0
charset-normalizer==3.4.1
colorama==0.4.6
coverage==7.8.0
cryptography==44.0.2
distlib==0.3.9
docutils==0.21.2
exceptiongroup==1.2.2
execnet==2.1.1
filelock==3.18.0
flake8==2.4.1
id==1.5.0
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
invoke==2.2.0
jaraco.classes==3.4.0
jaraco.context==6.0.1
jaraco.functools==4.1.0
jeepney==0.9.0
keyring==25.6.0
markdown-it-py==3.0.0
-e git+https://github.com/marshmallow-code/marshmallow.git@39bed8d628e2d08da5026df2df5ec6b9e9bbadf3#egg=marshmallow
mccabe==0.3.1
mdurl==0.1.2
more-itertools==10.6.0
nh3==0.2.21
packaging==24.2
pep8==1.7.1
platformdirs==4.3.7
pluggy==1.5.0
py==1.11.0
pycparser==2.22
pyflakes==0.8.1
Pygments==2.19.1
pyproject-api==1.9.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
pytz==2025.2
readme_renderer==44.0
requests==2.32.3
requests-toolbelt==1.0.0
rfc3986==2.0.0
rich==14.0.0
SecretStorage==3.3.3
simplejson==3.20.1
six==1.17.0
tomli==2.2.1
tox==4.25.0
twine==6.1.0
typing_extensions==4.13.0
urllib3==2.3.0
virtualenv==20.29.3
zipp==3.21.0
| name: marshmallow
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- backports-tarfile==1.2.0
- cachetools==5.5.2
- certifi==2025.1.31
- cffi==1.17.1
- chardet==5.2.0
- charset-normalizer==3.4.1
- colorama==0.4.6
- coverage==7.8.0
- cryptography==44.0.2
- distlib==0.3.9
- docutils==0.21.2
- exceptiongroup==1.2.2
- execnet==2.1.1
- filelock==3.18.0
- flake8==2.4.1
- id==1.5.0
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- invoke==2.2.0
- jaraco-classes==3.4.0
- jaraco-context==6.0.1
- jaraco-functools==4.1.0
- jeepney==0.9.0
- keyring==25.6.0
- markdown-it-py==3.0.0
- mccabe==0.3.1
- mdurl==0.1.2
- more-itertools==10.6.0
- nh3==0.2.21
- packaging==24.2
- pep8==1.7.1
- platformdirs==4.3.7
- pluggy==1.5.0
- py==1.11.0
- pycparser==2.22
- pyflakes==0.8.1
- pygments==2.19.1
- pyproject-api==1.9.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- readme-renderer==44.0
- requests==2.32.3
- requests-toolbelt==1.0.0
- rfc3986==2.0.0
- rich==14.0.0
- secretstorage==3.3.3
- simplejson==3.20.1
- six==1.17.0
- tomli==2.2.1
- tox==4.25.0
- twine==6.1.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/marshmallow
| [
"tests/test_deserialization.py::TestSchemaDeserialization::test_partial_deserialization[True]",
"tests/test_deserialization.py::TestSchemaDeserialization::test_partial_deserialization[False]"
]
| []
| [
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[String]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Integer]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Boolean]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Float]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Number]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[DateTime]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[LocalDateTime]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Time]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Date]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[TimeDelta]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Dict]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Url]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Email]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[FormattedString]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[UUID]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Decimal]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[String]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Integer]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Boolean]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Float]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Number]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[DateTime]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[LocalDateTime]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Time]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Date]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[TimeDelta]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Dict]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Url]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Email]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[FormattedString]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[UUID]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Decimal]",
"tests/test_deserialization.py::TestDeserializingNone::test_allow_none_is_true_if_missing_is_true",
"tests/test_deserialization.py::TestDeserializingNone::test_list_field_deserialize_none_to_empty_list",
"tests/test_deserialization.py::TestFieldDeserialization::test_float_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_float_field_deserialization[bad]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_float_field_deserialization[]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_float_field_deserialization[in_val2]",
"tests/test_deserialization.py::TestFieldDeserialization::test_integer_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_with_places",
"tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_with_places_and_rounding",
"tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_deserialization_string",
"tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_special_values",
"tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_special_values_not_permitted",
"tests/test_deserialization.py::TestFieldDeserialization::test_string_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization_with_custom_truthy_values",
"tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization_with_custom_truthy_values_invalid[notvalid]",
"tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization_with_custom_truthy_values_invalid[123]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_datetime_deserialization[not-a-datetime]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_datetime_deserialization[42]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_datetime_deserialization[]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_datetime_deserialization[in_value3]",
"tests/test_deserialization.py::TestFieldDeserialization::test_custom_date_format_datetime_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_rfc_datetime_field_deserialization[rfc]",
"tests/test_deserialization.py::TestFieldDeserialization::test_rfc_datetime_field_deserialization[rfc822]",
"tests/test_deserialization.py::TestFieldDeserialization::test_iso_datetime_field_deserialization[iso]",
"tests/test_deserialization.py::TestFieldDeserialization::test_iso_datetime_field_deserialization[iso8601]",
"tests/test_deserialization.py::TestFieldDeserialization::test_localdatetime_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_time_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_time_field_deserialization[badvalue]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_time_field_deserialization[]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_time_field_deserialization[in_data2]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_time_field_deserialization[42]",
"tests/test_deserialization.py::TestFieldDeserialization::test_timedelta_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_timedelta_field_deserialization[]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_timedelta_field_deserialization[badvalue]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_timedelta_field_deserialization[in_value2]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_timedelta_field_deserialization[9999999999]",
"tests/test_deserialization.py::TestFieldDeserialization::test_date_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_date_field_deserialization[]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_date_field_deserialization[123]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_date_field_deserialization[in_value2]",
"tests/test_deserialization.py::TestFieldDeserialization::test_dict_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_url_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_relative_url_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_email_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_function_field_deserialization_is_noop_by_default",
"tests/test_deserialization.py::TestFieldDeserialization::test_function_field_deserialization_with_callable",
"tests/test_deserialization.py::TestFieldDeserialization::test_uuid_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_uuid_deserialization[malformed]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_uuid_deserialization[123]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_uuid_deserialization[in_value2]",
"tests/test_deserialization.py::TestFieldDeserialization::test_deserialization_function_must_be_callable",
"tests/test_deserialization.py::TestFieldDeserialization::test_method_field_deserialization_is_noop_by_default",
"tests/test_deserialization.py::TestFieldDeserialization::test_deserialization_method",
"tests/test_deserialization.py::TestFieldDeserialization::test_deserialization_method_must_be_a_method",
"tests/test_deserialization.py::TestFieldDeserialization::test_query_select_field_func_key_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_query_select_field_string_key_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_query_select_list_field_func_key_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_query_select_list_field_string_key_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_datetime_list_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_list_field_deserialize_invalid_item",
"tests/test_deserialization.py::TestFieldDeserialization::test_list_field_deserialize_value_that_is_not_a_list[notalist]",
"tests/test_deserialization.py::TestFieldDeserialization::test_list_field_deserialize_value_that_is_not_a_list[42]",
"tests/test_deserialization.py::TestFieldDeserialization::test_list_field_deserialize_value_that_is_not_a_list[value2]",
"tests/test_deserialization.py::TestFieldDeserialization::test_constant_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_constant_is_always_included_in_deserialized_data",
"tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_user_validator_function",
"tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_user_validator_class_that_returns_bool",
"tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_user_validator_that_raises_error_with_list",
"tests/test_deserialization.py::TestFieldDeserialization::test_validator_must_return_false_to_raise_error",
"tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_validator_with_nonascii_input",
"tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_user_validators",
"tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_custom_error_message",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_to_dict",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_missing_values",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_many",
"tests/test_deserialization.py::TestSchemaDeserialization::test_exclude",
"tests/test_deserialization.py::TestSchemaDeserialization::test_nested_single_deserialization_to_dict",
"tests/test_deserialization.py::TestSchemaDeserialization::test_nested_list_deserialization_to_dict",
"tests/test_deserialization.py::TestSchemaDeserialization::test_nested_single_none_not_allowed",
"tests/test_deserialization.py::TestSchemaDeserialization::test_nested_many_non_not_allowed",
"tests/test_deserialization.py::TestSchemaDeserialization::test_nested_single_required_missing",
"tests/test_deserialization.py::TestSchemaDeserialization::test_nested_many_required_missing",
"tests/test_deserialization.py::TestSchemaDeserialization::test_none_deserialization",
"tests/test_deserialization.py::TestSchemaDeserialization::test_nested_none_deserialization",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_attribute_param",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_attribute_param_error_returns_field_name_not_attribute_name",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_attribute_param_error_returns_load_from_not_attribute_name",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_load_from_param",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_dump_only_param",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_missing_param_value",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_missing_param_callable",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_missing_param_none",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialization_returns_errors",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialization_returns_errors_with_multiple_validators",
"tests/test_deserialization.py::TestSchemaDeserialization::test_strict_mode_deserialization",
"tests/test_deserialization.py::TestSchemaDeserialization::test_strict_mode_many",
"tests/test_deserialization.py::TestSchemaDeserialization::test_strict_mode_deserialization_with_multiple_validators",
"tests/test_deserialization.py::TestSchemaDeserialization::test_uncaught_validation_errors_are_stored",
"tests/test_deserialization.py::TestSchemaDeserialization::test_multiple_errors_can_be_stored_for_a_field",
"tests/test_deserialization.py::TestSchemaDeserialization::test_multiple_errors_can_be_stored_for_an_email_field",
"tests/test_deserialization.py::TestSchemaDeserialization::test_multiple_errors_can_be_stored_for_a_url_field",
"tests/test_deserialization.py::TestSchemaDeserialization::test_required_value_only_passed_to_validators_if_provided",
"tests/test_deserialization.py::TestValidation::test_integer_with_validator",
"tests/test_deserialization.py::TestValidation::test_integer_with_validators[field0]",
"tests/test_deserialization.py::TestValidation::test_integer_with_validators[field1]",
"tests/test_deserialization.py::TestValidation::test_integer_with_validators[field2]",
"tests/test_deserialization.py::TestValidation::test_float_with_validators[field0]",
"tests/test_deserialization.py::TestValidation::test_float_with_validators[field1]",
"tests/test_deserialization.py::TestValidation::test_float_with_validators[field2]",
"tests/test_deserialization.py::TestValidation::test_string_validator",
"tests/test_deserialization.py::TestValidation::test_function_validator",
"tests/test_deserialization.py::TestValidation::test_function_validators[field0]",
"tests/test_deserialization.py::TestValidation::test_function_validators[field1]",
"tests/test_deserialization.py::TestValidation::test_function_validators[field2]",
"tests/test_deserialization.py::TestValidation::test_method_validator",
"tests/test_deserialization.py::TestValidation::test_nested_data_is_stored_when_validation_fails",
"tests/test_deserialization.py::test_required_field_failure[String]",
"tests/test_deserialization.py::test_required_field_failure[Integer]",
"tests/test_deserialization.py::test_required_field_failure[Boolean]",
"tests/test_deserialization.py::test_required_field_failure[Float]",
"tests/test_deserialization.py::test_required_field_failure[Number]",
"tests/test_deserialization.py::test_required_field_failure[DateTime]",
"tests/test_deserialization.py::test_required_field_failure[LocalDateTime]",
"tests/test_deserialization.py::test_required_field_failure[Time]",
"tests/test_deserialization.py::test_required_field_failure[Date]",
"tests/test_deserialization.py::test_required_field_failure[TimeDelta]",
"tests/test_deserialization.py::test_required_field_failure[Dict]",
"tests/test_deserialization.py::test_required_field_failure[Url]",
"tests/test_deserialization.py::test_required_field_failure[Email]",
"tests/test_deserialization.py::test_required_field_failure[UUID]",
"tests/test_deserialization.py::test_required_field_failure[Decimal]",
"tests/test_deserialization.py::test_required_message_can_be_changed[My",
"tests/test_deserialization.py::test_required_message_can_be_changed[message1]",
"tests/test_deserialization.py::test_required_message_can_be_changed[message2]",
"tests/test_deserialization.py::test_deserialize_doesnt_raise_exception_if_strict_is_false_and_input_type_is_incorrect",
"tests/test_deserialization.py::test_deserialize_raises_exception_if_strict_is_true_and_input_type_is_incorrect"
]
| []
| MIT License | 263 | [
"docs/quickstart.rst",
"marshmallow/schema.py",
"marshmallow/marshalling.py"
]
| [
"docs/quickstart.rst",
"marshmallow/schema.py",
"marshmallow/marshalling.py"
]
|
|
docker__docker-py-806 | f479720d517a7db7f886916190b3032d29d18f10 | 2015-10-09 19:03:05 | f479720d517a7db7f886916190b3032d29d18f10 |
dnephin: some CI failures, otherwise looks good |
diff --git a/docker/auth/auth.py b/docker/auth/auth.py
index 366bc67e..1ee9f812 100644
--- a/docker/auth/auth.py
+++ b/docker/auth/auth.py
@@ -102,7 +102,7 @@ def decode_auth(auth):
def encode_header(auth):
auth_json = json.dumps(auth).encode('ascii')
- return base64.b64encode(auth_json)
+ return base64.urlsafe_b64encode(auth_json)
def parse_auth(entries):
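To make the one-line fix above concrete, here is a hedged sketch (not docker-py code) using the same credentials as the regression test further down: the standard base64 alphabet can emit `+` and `/`, which the JWT-style token handling on the daemon side rejects, while the URL-safe alphabet substitutes `-` and `_`.

```python
# Hedged sketch: the same JSON auth payload under both base64 alphabets.
import base64
import json

auth = {'username': 'root', 'password': 'GR?XGR?XGR?XGR?X'}
payload = json.dumps(auth).encode('ascii')

standard = base64.b64encode(payload)          # contains b'/' for this input
url_safe = base64.urlsafe_b64encode(payload)  # b'/' becomes b'_'

assert b'/' in standard and b'_' not in standard
assert b'_' in url_safe and b'/' not in url_safe
```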
| Auth fails with long passwords
See https://github.com/docker/docker/issues/16840
docker-py is encoding `X-Registry-Auth` with regular base64 and not the URL-safe variant of base64 that JWT tokens use.
| docker/docker-py |
diff --git a/tests/utils_test.py b/tests/utils_test.py
index b1adde26..04183f9f 100644
--- a/tests/utils_test.py
+++ b/tests/utils_test.py
@@ -19,7 +19,9 @@ from docker.utils import (
exclude_paths, convert_volume_binds, decode_json_header
)
from docker.utils.ports import build_port_bindings, split_port
-from docker.auth import resolve_repository_name, resolve_authconfig
+from docker.auth import (
+ resolve_repository_name, resolve_authconfig, encode_header
+)
from . import base
from .helpers import make_tree
@@ -376,12 +378,21 @@ class UtilsTest(base.BaseTestCase):
obj = {'a': 'b', 'c': 1}
data = None
if six.PY3:
- data = base64.b64encode(bytes(json.dumps(obj), 'utf-8'))
+ data = base64.urlsafe_b64encode(bytes(json.dumps(obj), 'utf-8'))
else:
- data = base64.b64encode(json.dumps(obj))
+ data = base64.urlsafe_b64encode(json.dumps(obj))
decoded_data = decode_json_header(data)
self.assertEqual(obj, decoded_data)
+ def test_803_urlsafe_encode(self):
+ auth_data = {
+ 'username': 'root',
+ 'password': 'GR?XGR?XGR?XGR?X'
+ }
+ encoded = encode_header(auth_data)
+ assert b'/' not in encoded
+ assert b'_' in encoded
+
def test_resolve_repository_name(self):
# docker hub library image
self.assertEqual(
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
}
| 1.4 |
{
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} |
certifi @ file:///croot/certifi_1671487769961/work/certifi
coverage==7.2.7
-e git+https://github.com/docker/docker-py.git@f479720d517a7db7f886916190b3032d29d18f10#egg=docker_py
exceptiongroup==1.2.2
importlib-metadata==6.7.0
iniconfig==2.0.0
packaging==24.0
pluggy==1.2.0
pytest==7.4.4
pytest-cov==4.1.0
requests==2.5.3
six==1.17.0
tomli==2.0.1
typing_extensions==4.7.1
websocket-client==0.32.0
zipp==3.15.0
| name: docker-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.2.7
- exceptiongroup==1.2.2
- importlib-metadata==6.7.0
- iniconfig==2.0.0
- packaging==24.0
- pluggy==1.2.0
- pytest==7.4.4
- pytest-cov==4.1.0
- requests==2.5.3
- six==1.17.0
- tomli==2.0.1
- typing-extensions==4.7.1
- websocket-client==0.32.0
- zipp==3.15.0
prefix: /opt/conda/envs/docker-py
| [
"tests/utils_test.py::UtilsTest::test_803_urlsafe_encode"
]
| []
| [
"tests/utils_test.py::HostConfigTest::test_create_host_config_invalid_cpu_cfs_types",
"tests/utils_test.py::HostConfigTest::test_create_host_config_no_options",
"tests/utils_test.py::HostConfigTest::test_create_host_config_no_options_newer_api_version",
"tests/utils_test.py::HostConfigTest::test_create_host_config_with_cpu_period",
"tests/utils_test.py::HostConfigTest::test_create_host_config_with_cpu_quota",
"tests/utils_test.py::UlimitTest::test_create_host_config_dict_ulimit",
"tests/utils_test.py::UlimitTest::test_create_host_config_dict_ulimit_capitals",
"tests/utils_test.py::UlimitTest::test_create_host_config_obj_ulimit",
"tests/utils_test.py::UlimitTest::test_ulimit_invalid_type",
"tests/utils_test.py::LogConfigTest::test_create_host_config_dict_logconfig",
"tests/utils_test.py::LogConfigTest::test_create_host_config_obj_logconfig",
"tests/utils_test.py::LogConfigTest::test_logconfig_invalid_config_type",
"tests/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_empty",
"tests/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_no_cert_path",
"tests/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_tls",
"tests/utils_test.py::UtilsTest::test_convert_filters",
"tests/utils_test.py::UtilsTest::test_convert_volume_binds_compact",
"tests/utils_test.py::UtilsTest::test_convert_volume_binds_complete",
"tests/utils_test.py::UtilsTest::test_convert_volume_binds_empty",
"tests/utils_test.py::UtilsTest::test_convert_volume_binds_list",
"tests/utils_test.py::UtilsTest::test_convert_volume_binds_no_mode",
"tests/utils_test.py::UtilsTest::test_convert_volume_binds_unicode_bytes_input",
"tests/utils_test.py::UtilsTest::test_convert_volume_binds_unicode_unicode_input",
"tests/utils_test.py::UtilsTest::test_decode_json_header",
"tests/utils_test.py::UtilsTest::test_parse_bytes",
"tests/utils_test.py::UtilsTest::test_parse_env_file_commented_line",
"tests/utils_test.py::UtilsTest::test_parse_env_file_invalid_line",
"tests/utils_test.py::UtilsTest::test_parse_env_file_proper",
"tests/utils_test.py::UtilsTest::test_parse_host",
"tests/utils_test.py::UtilsTest::test_parse_host_empty_value",
"tests/utils_test.py::UtilsTest::test_parse_repository_tag",
"tests/utils_test.py::UtilsTest::test_resolve_authconfig",
"tests/utils_test.py::UtilsTest::test_resolve_registry_and_auth",
"tests/utils_test.py::UtilsTest::test_resolve_repository_name",
"tests/utils_test.py::PortsTest::test_build_port_bindings_with_matching_internal_port_ranges",
"tests/utils_test.py::PortsTest::test_build_port_bindings_with_matching_internal_ports",
"tests/utils_test.py::PortsTest::test_build_port_bindings_with_nonmatching_internal_port_ranges",
"tests/utils_test.py::PortsTest::test_build_port_bindings_with_nonmatching_internal_ports",
"tests/utils_test.py::PortsTest::test_build_port_bindings_with_one_port",
"tests/utils_test.py::PortsTest::test_build_port_bindings_with_port_range",
"tests/utils_test.py::PortsTest::test_host_only_with_colon",
"tests/utils_test.py::PortsTest::test_non_matching_length_port_ranges",
"tests/utils_test.py::PortsTest::test_port_and_range_invalid",
"tests/utils_test.py::PortsTest::test_port_only_with_colon",
"tests/utils_test.py::PortsTest::test_split_port_invalid",
"tests/utils_test.py::PortsTest::test_split_port_no_host_port",
"tests/utils_test.py::PortsTest::test_split_port_range_no_host_port",
"tests/utils_test.py::PortsTest::test_split_port_range_with_host_ip_no_port",
"tests/utils_test.py::PortsTest::test_split_port_range_with_host_port",
"tests/utils_test.py::PortsTest::test_split_port_range_with_protocol",
"tests/utils_test.py::PortsTest::test_split_port_with_host_ip",
"tests/utils_test.py::PortsTest::test_split_port_with_host_ip_no_port",
"tests/utils_test.py::PortsTest::test_split_port_with_host_port",
"tests/utils_test.py::PortsTest::test_split_port_with_protocol",
"tests/utils_test.py::ExcludePathsTest::test_directory",
"tests/utils_test.py::ExcludePathsTest::test_directory_with_single_exception",
"tests/utils_test.py::ExcludePathsTest::test_directory_with_subdir_exception",
"tests/utils_test.py::ExcludePathsTest::test_directory_with_trailing_slash",
"tests/utils_test.py::ExcludePathsTest::test_directory_with_wildcard_exception",
"tests/utils_test.py::ExcludePathsTest::test_exclude_custom_dockerfile",
"tests/utils_test.py::ExcludePathsTest::test_exclude_dockerfile_dockerignore",
"tests/utils_test.py::ExcludePathsTest::test_no_dupes",
"tests/utils_test.py::ExcludePathsTest::test_no_excludes",
"tests/utils_test.py::ExcludePathsTest::test_question_mark",
"tests/utils_test.py::ExcludePathsTest::test_single_filename",
"tests/utils_test.py::ExcludePathsTest::test_single_filename_trailing_slash",
"tests/utils_test.py::ExcludePathsTest::test_single_subdir_single_filename",
"tests/utils_test.py::ExcludePathsTest::test_single_subdir_wildcard_filename",
"tests/utils_test.py::ExcludePathsTest::test_subdirectory",
"tests/utils_test.py::ExcludePathsTest::test_wildcard_exclude",
"tests/utils_test.py::ExcludePathsTest::test_wildcard_filename_end",
"tests/utils_test.py::ExcludePathsTest::test_wildcard_filename_start",
"tests/utils_test.py::ExcludePathsTest::test_wildcard_subdir_single_filename",
"tests/utils_test.py::ExcludePathsTest::test_wildcard_subdir_wildcard_filename",
"tests/utils_test.py::ExcludePathsTest::test_wildcard_with_exception",
"tests/utils_test.py::ExcludePathsTest::test_wildcard_with_wildcard_exception"
]
| []
| Apache License 2.0 | 264 | [
"docker/auth/auth.py"
]
| [
"docker/auth/auth.py"
]
|
vortec__versionbump-6 | 1b62d24471870d220f1f05ee1421a688f51d4923 | 2015-10-10 14:43:31 | 1b62d24471870d220f1f05ee1421a688f51d4923 |
diff --git a/.travis.yml b/.travis.yml
index 2ab01b1..b7ed065 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -3,13 +3,9 @@ python:
- 2.7
- 3.4
- pypy
-before_install:
- - pip install codecov
install:
- pip install .
- pip install behave dont-fudge-up
script:
- py.test -v tests
- behave tests/features
-after_success:
- - codecov
diff --git a/versionbump/filebump.py b/versionbump/filebump.py
index 65713cb..98d29f9 100644
--- a/versionbump/filebump.py
+++ b/versionbump/filebump.py
@@ -32,6 +32,7 @@ class FileBump(object):
def write_cache_to_file(self):
self.fo.seek(0)
self.fo.write(self.file_cache)
+ self.fo.truncate()
def replace_version_in_cache(self, old_version, new_version):
self.file_cache = self.file_cache.replace(old_version, new_version)
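The `truncate()` call added above matters because an in-place overwrite never shortens a file. A hedged sketch of the mechanism, using `StringIO` as the same stand-in the unit tests use:

```python
# Hedged sketch: seek(0) + write() overwrites characters but keeps the
# old length, so a shorter version string leaves the previous tail behind.
from io import StringIO

fo = StringIO('0.1.22222')
fo.seek(0)
fo.write('0.2.0')            # replaces only the first five characters
print(repr(fo.getvalue()))   # '0.2.02222' -- stale tail still present

fo.truncate()                # cut the stream at the current position
print(repr(fo.getvalue()))   # '0.2.0'
```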
| FileBump does not correctly adjust the target file size
When adjusting the version in a file, the file size is not correctly adjusted.
For example:
File contents before bump:
```
0.1.22222
```
Command used:
```
versionbump -c 0.1.22222 minor version.txt
```
File contents after bump:
```
0.2.0
222
```
Ideally the file contents would be:
```
0.2.0
```
| vortec/versionbump |
diff --git a/tests/unit/test_filebump.py b/tests/unit/test_filebump.py
index 7bddb5f..12bc26f 100644
--- a/tests/unit/test_filebump.py
+++ b/tests/unit/test_filebump.py
@@ -51,3 +51,12 @@ def test_it_doesnt_find_the_version():
empty_fo = StringIO('')
with pytest.raises(ValueError):
FileBump(empty_fo, '1.2.3')
+
+
+def test_it_truncates_the_file():
+ version = '0.0.111'
+ fo = StringIO(version)
+ fb = FileBump(fo, version)
+ fb.bump('minor')
+ fo.seek(0)
+ assert fo.read() == '0.1.0'
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 2
}
| unknown |
{
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"behave",
"pytest",
"codecov"
],
"pre_install": null,
"python": "3.4",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} |
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
behave==1.2.6
certifi==2021.5.30
charset-normalizer==2.0.12
codecov==2.1.13
coverage==6.2
idna==3.10
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
parse==1.20.2
parse_type==0.6.4
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
requests==2.27.1
six==1.17.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
-e git+https://github.com/vortec/versionbump.git@1b62d24471870d220f1f05ee1421a688f51d4923#egg=versionbump
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: versionbump
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- behave==1.2.6
- charset-normalizer==2.0.12
- codecov==2.1.13
- coverage==6.2
- idna==3.10
- parse==1.20.2
- parse-type==0.6.4
- requests==2.27.1
- six==1.17.0
- urllib3==1.26.20
prefix: /opt/conda/envs/versionbump
| [
"tests/unit/test_filebump.py::test_it_truncates_the_file"
]
| []
| [
"tests/unit/test_filebump.py::test_file_gets_parsed_correctly",
"tests/unit/test_filebump.py::test_file_cache",
"tests/unit/test_filebump.py::test_bump",
"tests/unit/test_filebump.py::test_cache_write_to_file",
"tests/unit/test_filebump.py::test_print_output",
"tests/unit/test_filebump.py::test_it_doesnt_find_the_version"
]
| []
| MIT License | 265 | [
".travis.yml",
"versionbump/filebump.py"
]
| [
".travis.yml",
"versionbump/filebump.py"
]
|
|
marshmallow-code__marshmallow-299 | 1dbcae9c439d1a268717feb089351fc3c5180ac3 | 2015-10-14 03:40:58 | 1dbcae9c439d1a268717feb089351fc3c5180ac3 |
nelfin: Not really sure what's happened, all the Travis builds on Python 3.x failed with:
```pytb
Traceback (most recent call last):
File "setup.py", line 81, in <module>
cmdclass={'test': PyTest},
File "/opt/python/3.3.5/lib/python3.3/distutils/core.py", line 148, in setup
dist.run_commands()
File "/opt/python/3.3.5/lib/python3.3/distutils/dist.py", line 930, in run_commands
self.run_command(cmd)
File "/opt/python/3.3.5/lib/python3.3/distutils/dist.py", line 948, in run_command
cmd_obj.ensure_finalized()
File "/opt/python/3.3.5/lib/python3.3/distutils/cmd.py", line 107, in ensure_finalized
self.finalize_options()
File "setup.py", line 15, in finalize_options
self.test_args = ['--verbose', 'tests/']
AttributeError: can't set attribute
```
but `tox -e py34` works fine for me. :confused:
nelfin: Looks like this is being bitten by https://bitbucket.org/pypa/setuptools/commits/cf565b66b855dd4df189b679206f9fb113681737:
```diff
diff --git a/setuptools/command/test.py b/setuptools/command/test.py
--- a/setuptools/command/test.py
+++ b/setuptools/command/test.py
@@ -72,10 +72,6 @@
"You may specify a module or a suite, but not both"
)
- self.test_args = [self.test_suite]
-
- if self.verbose:
- self.test_args.insert(0, '--verbose')
if self.test_loader is None:
self.test_loader = getattr(self.distribution, 'test_loader', None)
if self.test_loader is None:
@@ -83,6 +79,11 @@
if self.test_runner is None:
self.test_runner = getattr(self.distribution, 'test_runner', None)
+ @property
+ def test_args(self):
+ verbose = ['--verbose'] if self.verbose else []
+ return verbose + [self.test_suite]
+
def with_project_on_sys_path(self, func):
with_2to3 = PY3 and getattr(self.distribution, 'use_2to3', False)
```
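A hedged sketch of the usual workaround for that setuptools change (how #301 actually resolved it is not shown here): since `test_args` became a read-only property, a custom test command should avoid assigning to it and pass arguments straight to `pytest.main`.

```python
# Hedged sketch: a setup.py test command that survives setuptools
# turning `test_args` into a read-only property.
import sys
from setuptools.command.test import test as TestCommand

class PyTest(TestCommand):
    def finalize_options(self):
        TestCommand.finalize_options(self)
        # Do not touch self.test_args here; it can no longer be set.

    def run_tests(self):
        import pytest  # imported late so setup.py works without pytest
        sys.exit(pytest.main(['--verbose', 'tests/']))
```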
nelfin: Depends on #301
sloria: Thanks for this patch. I will look into this more closely tomorrow.
Updating to the latest 2.1-line branch should fix the builds. |
diff --git a/marshmallow/marshalling.py b/marshmallow/marshalling.py
index 5caca583..7961f087 100644
--- a/marshmallow/marshalling.py
+++ b/marshmallow/marshalling.py
@@ -21,6 +21,8 @@ __all__ = [
'Unmarshaller',
]
+# Key used for field-level validation errors on nested fields
+FIELD = '_field'
class ErrorStore(object):
@@ -68,6 +70,8 @@ class ErrorStore(object):
# Warning: Mutation!
if isinstance(err.messages, dict):
errors[field_name] = err.messages
+ elif isinstance(errors.get(field_name), dict):
+ errors[field_name].setdefault(FIELD, []).extend(err.messages)
else:
errors.setdefault(field_name, []).extend(err.messages)
# When a Nested field fails validation, the marshalled data is stored
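A minimal standalone sketch of the merge rule the added `elif` branch implements (not the library code itself): when indexed per-item errors from a `many=True` nested field already occupy the slot as a dict, field-level validator messages are filed under the reserved `_field` key rather than extending a list.

```python
# Standalone sketch of the error-merge rule from the patch above.
FIELD = '_field'  # reserved key for field-level errors on nested fields

def store_error(errors, field_name, messages):
    if isinstance(errors.get(field_name), dict):
        # Indexed per-item errors already live here; keep them and file
        # the field-level messages under the reserved key instead.
        errors[field_name].setdefault(FIELD, []).extend(messages)
    else:
        errors.setdefault(field_name, []).extend(messages)

errors = {'inner': {0: {'req': ['Missing data for required field.']}}}
store_error(errors, 'inner', ['not a chance'])
print(errors)
# {'inner': {0: {'req': [...]}, '_field': ['not a chance']}}
```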
| Field-level validation errors cannot be saved on a nested field with many=True
When validating a field, all of its error messages are saved and appended to the list of messages for that field name in `call_and_store`, but for `fields.Nested` with `many=True` the error messages for child elements of the collection are saved and stored in a dictionary keyed by the element index. This causes the following exception on this minimal test case:
```python
class Inner(Schema):
req = fields.Field(required=True)
class Outer(Schema):
inner = fields.Nested(Inner, many=True)
@validates('inner')
def validates_inner(self, data):
raise ValidationError('not a chance')
outer = Outer()
_, errors = outer.load({'inner': [{}]})
```
```pytb
Traceback (most recent call last):
File "/home/andrew/misc/marshmallow/tests/test_schema.py", line 1238, in test_all_errors_on_many_nested_field_with_validates_decorator
_, errors = outer.load({'inner': [{}]})
File "/home/andrew/misc/marshmallow/marshmallow/schema.py", line 539, in load
result, errors = self._do_load(data, many, partial=partial, postprocess=True)
File "/home/andrew/misc/marshmallow/marshmallow/schema.py", line 610, in _do_load
self._invoke_field_validators(data=result, many=many)
File "/home/andrew/misc/marshmallow/marshmallow/schema.py", line 789, in _invoke_field_validators
field_obj=field_obj
File "/home/andrew/misc/marshmallow/marshmallow/marshalling.py", line 74, in call_and_store
errors.setdefault(field_name, []).extend(err.messages)
AttributeError: 'dict' object has no attribute 'extend'
```
| marshmallow-code/marshmallow |
diff --git a/tests/test_schema.py b/tests/test_schema.py
index 81d5d0fb..a18c7787 100644
--- a/tests/test_schema.py
+++ b/tests/test_schema.py
@@ -1222,6 +1222,23 @@ class TestNestedSchema:
_, errors = schema.load({'inner': 1})
assert errors['inner']['_schema'] == ['Invalid input type.']
+ # regression test for https://github.com/marshmallow-code/marshmallow/issues/298
+ def test_all_errors_on_many_nested_field_with_validates_decorator(self):
+ class Inner(Schema):
+ req = fields.Field(required=True)
+
+ class Outer(Schema):
+ inner = fields.Nested(Inner, many=True)
+
+ @validates('inner')
+ def validates_inner(self, data):
+ raise ValidationError('not a chance')
+
+ outer = Outer()
+ _, errors = outer.load({'inner': [{}]})
+ assert 'inner' in errors
+ assert '_field' in errors['inner']
+
def test_missing_required_nested_field(self):
class Inner(Schema):
inner_req = fields.Field(required=True, error_messages={'required': 'Oops'})
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 1
}
| 2.1 |
{
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"dev-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} |
backports.tarfile==1.2.0
cachetools==5.5.2
certifi==2025.1.31
cffi==1.17.1
chardet==5.2.0
charset-normalizer==3.4.1
colorama==0.4.6
coverage==7.8.0
cryptography==44.0.2
distlib==0.3.9
docutils==0.21.2
exceptiongroup==1.2.2
execnet==2.1.1
filelock==3.18.0
flake8==2.4.1
id==1.5.0
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
invoke==2.2.0
jaraco.classes==3.4.0
jaraco.context==6.0.1
jaraco.functools==4.1.0
jeepney==0.9.0
keyring==25.6.0
markdown-it-py==3.0.0
-e git+https://github.com/marshmallow-code/marshmallow.git@1dbcae9c439d1a268717feb089351fc3c5180ac3#egg=marshmallow
mccabe==0.3.1
mdurl==0.1.2
more-itertools==10.6.0
nh3==0.2.21
packaging==24.2
pep8==1.7.1
platformdirs==4.3.7
pluggy==1.5.0
py==1.11.0
pycparser==2.22
pyflakes==0.8.1
Pygments==2.19.1
pyproject-api==1.9.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
pytz==2025.2
readme_renderer==44.0
requests==2.32.3
requests-toolbelt==1.0.0
rfc3986==2.0.0
rich==14.0.0
SecretStorage==3.3.3
simplejson==3.20.1
six==1.17.0
tomli==2.2.1
tox==4.25.0
twine==6.1.0
typing_extensions==4.13.0
urllib3==2.3.0
virtualenv==20.29.3
zipp==3.21.0
| name: marshmallow
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- backports-tarfile==1.2.0
- cachetools==5.5.2
- certifi==2025.1.31
- cffi==1.17.1
- chardet==5.2.0
- charset-normalizer==3.4.1
- colorama==0.4.6
- coverage==7.8.0
- cryptography==44.0.2
- distlib==0.3.9
- docutils==0.21.2
- exceptiongroup==1.2.2
- execnet==2.1.1
- filelock==3.18.0
- flake8==2.4.1
- id==1.5.0
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- invoke==2.2.0
- jaraco-classes==3.4.0
- jaraco-context==6.0.1
- jaraco-functools==4.1.0
- jeepney==0.9.0
- keyring==25.6.0
- markdown-it-py==3.0.0
- mccabe==0.3.1
- mdurl==0.1.2
- more-itertools==10.6.0
- nh3==0.2.21
- packaging==24.2
- pep8==1.7.1
- platformdirs==4.3.7
- pluggy==1.5.0
- py==1.11.0
- pycparser==2.22
- pyflakes==0.8.1
- pygments==2.19.1
- pyproject-api==1.9.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- readme-renderer==44.0
- requests==2.32.3
- requests-toolbelt==1.0.0
- rfc3986==2.0.0
- rich==14.0.0
- secretstorage==3.3.3
- simplejson==3.20.1
- six==1.17.0
- tomli==2.2.1
- tox==4.25.0
- twine==6.1.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/marshmallow
| [
"tests/test_schema.py::TestNestedSchema::test_all_errors_on_many_nested_field_with_validates_decorator"
]
| []
| [
"tests/test_schema.py::test_serializing_basic_object[UserSchema]",
"tests/test_schema.py::test_serializing_basic_object[UserMetaSchema]",
"tests/test_schema.py::test_serializer_dump",
"tests/test_schema.py::test_dump_returns_dict_of_errors",
"tests/test_schema.py::test_dump_with_strict_mode_raises_error[UserSchema]",
"tests/test_schema.py::test_dump_with_strict_mode_raises_error[UserMetaSchema]",
"tests/test_schema.py::test_dump_resets_errors",
"tests/test_schema.py::test_load_resets_errors",
"tests/test_schema.py::test_dump_resets_error_fields",
"tests/test_schema.py::test_load_resets_error_fields",
"tests/test_schema.py::test_errored_fields_do_not_appear_in_output",
"tests/test_schema.py::test_load_many_stores_error_indices",
"tests/test_schema.py::test_dump_many",
"tests/test_schema.py::test_multiple_errors_can_be_stored_for_a_given_index",
"tests/test_schema.py::test_dump_many_stores_error_indices",
"tests/test_schema.py::test_dump_many_doesnt_stores_error_indices_when_index_errors_is_false",
"tests/test_schema.py::test_dump_returns_a_marshalresult",
"tests/test_schema.py::test_dumps_returns_a_marshalresult",
"tests/test_schema.py::test_dumping_single_object_with_collection_schema",
"tests/test_schema.py::test_loading_single_object_with_collection_schema",
"tests/test_schema.py::test_dumps_many",
"tests/test_schema.py::test_load_returns_an_unmarshalresult",
"tests/test_schema.py::test_load_many",
"tests/test_schema.py::test_loads_returns_an_unmarshalresult",
"tests/test_schema.py::test_loads_many",
"tests/test_schema.py::test_loads_deserializes_from_json",
"tests/test_schema.py::test_serializing_none",
"tests/test_schema.py::test_default_many_symmetry",
"tests/test_schema.py::test_on_bind_field_hook",
"tests/test_schema.py::TestValidate::test_validate_returns_errors_dict",
"tests/test_schema.py::TestValidate::test_validate_many",
"tests/test_schema.py::TestValidate::test_validate_many_doesnt_store_index_if_index_errors_option_is_false",
"tests/test_schema.py::TestValidate::test_validate_strict",
"tests/test_schema.py::TestValidate::test_validate_required",
"tests/test_schema.py::test_fields_are_not_copies[UserSchema]",
"tests/test_schema.py::test_fields_are_not_copies[UserMetaSchema]",
"tests/test_schema.py::test_dumps_returns_json",
"tests/test_schema.py::test_naive_datetime_field",
"tests/test_schema.py::test_datetime_formatted_field",
"tests/test_schema.py::test_datetime_iso_field",
"tests/test_schema.py::test_tz_datetime_field",
"tests/test_schema.py::test_local_datetime_field",
"tests/test_schema.py::test_class_variable",
"tests/test_schema.py::test_serialize_many[UserSchema]",
"tests/test_schema.py::test_serialize_many[UserMetaSchema]",
"tests/test_schema.py::test_inheriting_schema",
"tests/test_schema.py::test_custom_field",
"tests/test_schema.py::test_url_field",
"tests/test_schema.py::test_relative_url_field",
"tests/test_schema.py::test_stores_invalid_url_error[UserSchema]",
"tests/test_schema.py::test_stores_invalid_url_error[UserMetaSchema]",
"tests/test_schema.py::test_email_field[UserSchema]",
"tests/test_schema.py::test_email_field[UserMetaSchema]",
"tests/test_schema.py::test_stored_invalid_email",
"tests/test_schema.py::test_integer_field",
"tests/test_schema.py::test_as_string",
"tests/test_schema.py::test_extra",
"tests/test_schema.py::test_extra_many",
"tests/test_schema.py::test_method_field[UserSchema]",
"tests/test_schema.py::test_method_field[UserMetaSchema]",
"tests/test_schema.py::test_function_field",
"tests/test_schema.py::test_prefix[UserSchema]",
"tests/test_schema.py::test_prefix[UserMetaSchema]",
"tests/test_schema.py::test_fields_must_be_declared_as_instances",
"tests/test_schema.py::test_serializing_generator[UserSchema]",
"tests/test_schema.py::test_serializing_generator[UserMetaSchema]",
"tests/test_schema.py::test_serializing_empty_list_returns_empty_list",
"tests/test_schema.py::test_serializing_dict",
"tests/test_schema.py::test_serializing_dict_with_meta_fields",
"tests/test_schema.py::test_exclude_in_init[UserSchema]",
"tests/test_schema.py::test_exclude_in_init[UserMetaSchema]",
"tests/test_schema.py::test_only_in_init[UserSchema]",
"tests/test_schema.py::test_only_in_init[UserMetaSchema]",
"tests/test_schema.py::test_invalid_only_param",
"tests/test_schema.py::test_can_serialize_uuid",
"tests/test_schema.py::test_can_serialize_time",
"tests/test_schema.py::test_invalid_time",
"tests/test_schema.py::test_invalid_date",
"tests/test_schema.py::test_invalid_email",
"tests/test_schema.py::test_invalid_url",
"tests/test_schema.py::test_invalid_dict_but_okay",
"tests/test_schema.py::test_custom_json",
"tests/test_schema.py::test_custom_error_message",
"tests/test_schema.py::test_load_errors_with_many",
"tests/test_schema.py::test_error_raised_if_fields_option_is_not_list",
"tests/test_schema.py::test_error_raised_if_additional_option_is_not_list",
"tests/test_schema.py::test_only_and_exclude",
"tests/test_schema.py::test_exclude_invalid_attribute",
"tests/test_schema.py::test_only_with_invalid_attribute",
"tests/test_schema.py::test_only_bounded_by_fields",
"tests/test_schema.py::test_nested_only_and_exclude",
"tests/test_schema.py::test_nested_with_sets",
"tests/test_schema.py::test_meta_serializer_fields",
"tests/test_schema.py::test_meta_fields_mapping",
"tests/test_schema.py::test_meta_field_not_on_obj_raises_attribute_error",
"tests/test_schema.py::test_exclude_fields",
"tests/test_schema.py::test_fields_option_must_be_list_or_tuple",
"tests/test_schema.py::test_exclude_option_must_be_list_or_tuple",
"tests/test_schema.py::test_dateformat_option",
"tests/test_schema.py::test_default_dateformat",
"tests/test_schema.py::test_inherit_meta",
"tests/test_schema.py::test_inherit_meta_override",
"tests/test_schema.py::test_additional",
"tests/test_schema.py::test_cant_set_both_additional_and_fields",
"tests/test_schema.py::test_serializing_none_meta",
"tests/test_schema.py::TestErrorHandler::test_error_handler_decorator_is_deprecated",
"tests/test_schema.py::TestErrorHandler::test_dump_with_custom_error_handler",
"tests/test_schema.py::TestErrorHandler::test_load_with_custom_error_handler",
"tests/test_schema.py::TestErrorHandler::test_load_with_custom_error_handler_and_partially_valid_data",
"tests/test_schema.py::TestErrorHandler::test_custom_error_handler_with_validates_decorator",
"tests/test_schema.py::TestErrorHandler::test_custom_error_handler_with_validates_schema_decorator",
"tests/test_schema.py::TestErrorHandler::test_validate_with_custom_error_handler",
"tests/test_schema.py::TestFieldValidation::test_errors_are_cleared_after_loading_collection",
"tests/test_schema.py::TestFieldValidation::test_raises_error_with_list",
"tests/test_schema.py::TestFieldValidation::test_raises_error_with_dict",
"tests/test_schema.py::test_schema_repr",
"tests/test_schema.py::TestNestedSchema::test_flat_nested",
"tests/test_schema.py::TestNestedSchema::test_nested_many_with_missing_attribute",
"tests/test_schema.py::TestNestedSchema::test_nested_with_attribute_none",
"tests/test_schema.py::TestNestedSchema::test_flat_nested2",
"tests/test_schema.py::TestNestedSchema::test_nested_field_does_not_validate_required",
"tests/test_schema.py::TestNestedSchema::test_nested_none",
"tests/test_schema.py::TestNestedSchema::test_nested",
"tests/test_schema.py::TestNestedSchema::test_nested_many_fields",
"tests/test_schema.py::TestNestedSchema::test_nested_meta_many",
"tests/test_schema.py::TestNestedSchema::test_nested_only",
"tests/test_schema.py::TestNestedSchema::test_exclude",
"tests/test_schema.py::TestNestedSchema::test_list_field",
"tests/test_schema.py::TestNestedSchema::test_nested_load_many",
"tests/test_schema.py::TestNestedSchema::test_nested_errors",
"tests/test_schema.py::TestNestedSchema::test_nested_strict",
"tests/test_schema.py::TestNestedSchema::test_nested_method_field",
"tests/test_schema.py::TestNestedSchema::test_nested_function_field",
"tests/test_schema.py::TestNestedSchema::test_nested_prefixed_field",
"tests/test_schema.py::TestNestedSchema::test_nested_prefixed_many_field",
"tests/test_schema.py::TestNestedSchema::test_invalid_float_field",
"tests/test_schema.py::TestNestedSchema::test_serializer_meta_with_nested_fields",
"tests/test_schema.py::TestNestedSchema::test_serializer_with_nested_meta_fields",
"tests/test_schema.py::TestNestedSchema::test_nested_fields_must_be_passed_a_serializer",
"tests/test_schema.py::TestNestedSchema::test_invalid_type_passed_to_nested_field",
"tests/test_schema.py::TestNestedSchema::test_missing_required_nested_field",
"tests/test_schema.py::TestSelfReference::test_nesting_schema_within_itself",
"tests/test_schema.py::TestSelfReference::test_nesting_schema_by_passing_class_name",
"tests/test_schema.py::TestSelfReference::test_nesting_within_itself_meta",
"tests/test_schema.py::TestSelfReference::test_recursive_missing_required_field",
"tests/test_schema.py::TestSelfReference::test_recursive_missing_required_field_one_level_in",
"tests/test_schema.py::TestSelfReference::test_nested_self_with_only_param",
"tests/test_schema.py::TestSelfReference::test_multiple_nested_self_fields",
"tests/test_schema.py::TestSelfReference::test_nested_many",
"tests/test_schema.py::test_serialization_with_required_field",
"tests/test_schema.py::test_deserialization_with_required_field",
"tests/test_schema.py::test_deserialization_with_required_field_and_custom_validator",
"tests/test_schema.py::TestContext::test_context_method",
"tests/test_schema.py::TestContext::test_context_method_function",
"tests/test_schema.py::TestContext::test_function_field_raises_error_when_context_not_available",
"tests/test_schema.py::TestContext::test_fields_context",
"tests/test_schema.py::TestContext::test_nested_fields_inherit_context",
"tests/test_schema.py::test_serializer_can_specify_nested_object_as_attribute",
"tests/test_schema.py::TestFieldInheritance::test_inherit_fields_from_schema_subclass",
"tests/test_schema.py::TestFieldInheritance::test_inherit_fields_from_non_schema_subclass",
"tests/test_schema.py::TestFieldInheritance::test_inheritance_follows_mro",
"tests/test_schema.py::TestAccessor::test_accessor_decorator_is_deprecated",
"tests/test_schema.py::TestAccessor::test_accessor_is_used",
"tests/test_schema.py::TestAccessor::test_accessor_with_many",
"tests/test_schema.py::TestRequiredFields::test_required_string_field_missing",
"tests/test_schema.py::TestRequiredFields::test_required_string_field_failure",
"tests/test_schema.py::TestRequiredFields::test_allow_none_param",
"tests/test_schema.py::TestRequiredFields::test_allow_none_custom_message",
"tests/test_schema.py::TestDefaults::test_missing_inputs_are_excluded_from_dump_output",
"tests/test_schema.py::TestDefaults::test_none_is_serialized_to_none",
"tests/test_schema.py::TestDefaults::test_default_and_value_missing",
"tests/test_schema.py::TestDefaults::test_loading_none",
"tests/test_schema.py::TestDefaults::test_missing_inputs_are_excluded_from_load_output",
"tests/test_schema.py::TestLoadOnly::test_load_only",
"tests/test_schema.py::TestLoadOnly::test_dump_only"
]
| []
| MIT License | 266 | [
"marshmallow/marshalling.py"
]
| [
"marshmallow/marshalling.py"
]
|
docker__docker-py-822 | 4c8c761bc15160be5eaa76d81edda17b067aa641 | 2015-10-21 22:58:11 | 9050e1c6e05b5b6807357def0aafc59e3b3ae378 | dnephin: Might be worth thinking about any other breaking changes you'd like to make before doing a 2.0 release. Sounds reasonable to me.
bfirsh: LGTM | diff --git a/docker/api/container.py b/docker/api/container.py
index 72c5852d..953a5f52 100644
--- a/docker/api/container.py
+++ b/docker/api/container.py
@@ -997,19 +997,16 @@ class ContainerApiMixin(object):
self._raise_for_status(res)
@utils.check_resource
- def start(self, container, binds=None, port_bindings=None, lxc_conf=None,
- publish_all_ports=None, links=None, privileged=None,
- dns=None, dns_search=None, volumes_from=None, network_mode=None,
- restart_policy=None, cap_add=None, cap_drop=None, devices=None,
- extra_hosts=None, read_only=None, pid_mode=None, ipc_mode=None,
- security_opt=None, ulimits=None):
+ def start(self, container, *args, **kwargs):
"""
Start a container. Similar to the ``docker start`` command, but
doesn't support attach options.
- **Deprecation warning:** For API version > 1.15, it is highly
- recommended to provide host config options in the ``host_config``
- parameter of :py:meth:`~ContainerApiMixin.create_container`.
+ **Deprecation warning:** Passing configuration options in ``start`` is
+ no longer supported. Users are expected to provide host config options
+ in the ``host_config`` parameter of
+ :py:meth:`~ContainerApiMixin.create_container`.
+
Args:
container (str): The container to start
@@ -1017,6 +1014,8 @@ class ContainerApiMixin(object):
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
+ :py:class:`docker.errors.DeprecatedMethod`
+                If any arguments besides ``container`` are provided.
Example:
@@ -1025,64 +1024,14 @@ class ContainerApiMixin(object):
... command='/bin/sleep 30')
>>> cli.start(container=container.get('Id'))
"""
- if utils.compare_version('1.10', self._version) < 0:
- if dns is not None:
- raise errors.InvalidVersion(
- 'dns is only supported for API version >= 1.10'
- )
- if volumes_from is not None:
- raise errors.InvalidVersion(
- 'volumes_from is only supported for API version >= 1.10'
- )
-
- if utils.compare_version('1.15', self._version) < 0:
- if security_opt is not None:
- raise errors.InvalidVersion(
- 'security_opt is only supported for API version >= 1.15'
- )
- if ipc_mode:
- raise errors.InvalidVersion(
- 'ipc_mode is only supported for API version >= 1.15'
- )
-
- if utils.compare_version('1.17', self._version) < 0:
- if read_only is not None:
- raise errors.InvalidVersion(
- 'read_only is only supported for API version >= 1.17'
- )
- if pid_mode is not None:
- raise errors.InvalidVersion(
- 'pid_mode is only supported for API version >= 1.17'
- )
-
- if utils.compare_version('1.18', self._version) < 0:
- if ulimits is not None:
- raise errors.InvalidVersion(
- 'ulimits is only supported for API version >= 1.18'
- )
-
- start_config_kwargs = dict(
- binds=binds, port_bindings=port_bindings, lxc_conf=lxc_conf,
- publish_all_ports=publish_all_ports, links=links, dns=dns,
- privileged=privileged, dns_search=dns_search, cap_add=cap_add,
- cap_drop=cap_drop, volumes_from=volumes_from, devices=devices,
- network_mode=network_mode, restart_policy=restart_policy,
- extra_hosts=extra_hosts, read_only=read_only, pid_mode=pid_mode,
- ipc_mode=ipc_mode, security_opt=security_opt, ulimits=ulimits,
- )
- start_config = None
-
- if any(v is not None for v in start_config_kwargs.values()):
- if utils.compare_version('1.15', self._version) > 0:
- warnings.warn(
- 'Passing host config parameters in start() is deprecated. '
- 'Please use host_config in create_container instead!',
- DeprecationWarning
- )
- start_config = self.create_host_config(**start_config_kwargs)
-
+ if args or kwargs:
+ raise errors.DeprecatedMethod(
+ 'Providing configuration in the start() method is no longer '
+ 'supported. Use the host_config param in create_container '
+ 'instead.'
+ )
url = self._url("/containers/{0}/start", container)
- res = self._post_json(url, data=start_config)
+ res = self._post(url)
self._raise_for_status(res)
@utils.minimum_version('1.17')
| Passing host_config parameters in start() overrides the host_config that was passed in create()?
I had a `host_config` with `extra_hosts` defined. I used this `host_config` to create a container. When starting the container, I passed an extra `volumes_from` parameter. However, this led to `extra_hosts` seemingly not doing their job.
After I moved `volumes_from` from `start()` to `create_host_config`, everything started working OK.
I figured I should report this, since I spent a couple of hours figuring it out. IMO, `start()` should override only the necessary parts of `host_config` (and not the whole thing), or raise an exception.
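A minimal sketch of the pattern described above, using the docker-py 1.x `Client` API (the image, address, and volume names here are illustrative):

```python
from docker import Client  # docker-py 1.x style client

cli = Client(base_url='unix://var/run/docker.sock')

# extra_hosts is set once, at create time, via host_config.
host_config = cli.create_host_config(extra_hosts={'db': '10.0.0.5'})
container = cli.create_container(image='busybox:latest',
                                 command='/bin/sleep 30',
                                 host_config=host_config)

# Passing any host config option here caused the whole host config to be
# rebuilt from the start() kwargs, silently discarding extra_hosts above.
cli.start(container.get('Id'), volumes_from=['data-container'])
```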
| docker/docker-py | diff --git a/tests/unit/api_container_test.py b/tests/unit/api_container_test.py
index 6c080641..abf36138 100644
--- a/tests/unit/api_container_test.py
+++ b/tests/unit/api_container_test.py
@@ -34,10 +34,7 @@ class StartContainerTest(BaseAPIClientTest):
args[0][1],
url_prefix + 'containers/3cc2351ab11b/start'
)
- self.assertEqual(json.loads(args[1]['data']), {})
- self.assertEqual(
- args[1]['headers'], {'Content-Type': 'application/json'}
- )
+ assert 'data' not in args[1]
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
@@ -63,25 +60,21 @@ class StartContainerTest(BaseAPIClientTest):
self.client.start(**{'container': fake_api.FAKE_CONTAINER_ID})
def test_start_container_with_lxc_conf(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID,
lxc_conf={'lxc.conf.k': 'lxc.conf.value'}
)
- pytest.deprecated_call(call_start)
-
def test_start_container_with_lxc_conf_compat(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID,
lxc_conf=[{'Key': 'lxc.conf.k', 'Value': 'lxc.conf.value'}]
)
- pytest.deprecated_call(call_start)
-
def test_start_container_with_binds_ro(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID, binds={
'/tmp': {
@@ -91,22 +84,18 @@ class StartContainerTest(BaseAPIClientTest):
}
)
- pytest.deprecated_call(call_start)
-
def test_start_container_with_binds_rw(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID, binds={
'/tmp': {"bind": '/mnt', "ro": False}
}
)
- pytest.deprecated_call(call_start)
-
def test_start_container_with_port_binds(self):
self.maxDiff = None
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(fake_api.FAKE_CONTAINER_ID, port_bindings={
1111: None,
2222: 2222,
@@ -116,18 +105,14 @@ class StartContainerTest(BaseAPIClientTest):
6666: [('127.0.0.1',), ('192.168.0.1',)]
})
- pytest.deprecated_call(call_start)
-
def test_start_container_with_links(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID, links={'path': 'alias'}
)
- pytest.deprecated_call(call_start)
-
def test_start_container_with_multiple_links(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID,
links={
@@ -136,21 +121,15 @@ class StartContainerTest(BaseAPIClientTest):
}
)
- pytest.deprecated_call(call_start)
-
def test_start_container_with_links_as_list_of_tuples(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(fake_api.FAKE_CONTAINER_ID,
links=[('path', 'alias')])
- pytest.deprecated_call(call_start)
-
def test_start_container_privileged(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(fake_api.FAKE_CONTAINER_ID, privileged=True)
- pytest.deprecated_call(call_start)
-
def test_start_container_with_dict_instead_of_id(self):
self.client.start({'Id': fake_api.FAKE_CONTAINER_ID})
@@ -159,10 +138,7 @@ class StartContainerTest(BaseAPIClientTest):
args[0][1],
url_prefix + 'containers/3cc2351ab11b/start'
)
- self.assertEqual(json.loads(args[1]['data']), {})
- self.assertEqual(
- args[1]['headers'], {'Content-Type': 'application/json'}
- )
+ assert 'data' not in args[1]
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 1.10 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
-e git+https://github.com/docker/docker-py.git@4c8c761bc15160be5eaa76d81edda17b067aa641#egg=docker_py
docker-pycreds==0.2.1
importlib-metadata==4.8.3
iniconfig==1.1.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
requests==2.11.1
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
websocket-client==0.32.0
zipp==3.6.0
| name: docker-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- docker-pycreds==0.2.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- requests==2.11.1
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- websocket-client==0.32.0
- zipp==3.6.0
prefix: /opt/conda/envs/docker-py
| [
"tests/unit/api_container_test.py::StartContainerTest::test_start_container",
"tests/unit/api_container_test.py::StartContainerTest::test_start_container_privileged",
"tests/unit/api_container_test.py::StartContainerTest::test_start_container_with_binds_ro",
"tests/unit/api_container_test.py::StartContainerTest::test_start_container_with_binds_rw",
"tests/unit/api_container_test.py::StartContainerTest::test_start_container_with_dict_instead_of_id",
"tests/unit/api_container_test.py::StartContainerTest::test_start_container_with_links",
"tests/unit/api_container_test.py::StartContainerTest::test_start_container_with_links_as_list_of_tuples",
"tests/unit/api_container_test.py::StartContainerTest::test_start_container_with_lxc_conf",
"tests/unit/api_container_test.py::StartContainerTest::test_start_container_with_lxc_conf_compat",
"tests/unit/api_container_test.py::StartContainerTest::test_start_container_with_multiple_links",
"tests/unit/api_container_test.py::StartContainerTest::test_start_container_with_port_binds"
]
| []
| [
"tests/unit/api_container_test.py::StartContainerTest::test_start_container_none",
"tests/unit/api_container_test.py::StartContainerTest::test_start_container_regression_573",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_empty_volumes_from",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_privileged",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_added_capabilities",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_aliases",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_binds",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_binds_list",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_binds_mode",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_binds_mode_and_ro_error",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_binds_ro",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_binds_rw",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_cgroup_parent",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_cpu_shares",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_cpuset",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_devices",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_dropped_capabilities",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_entrypoint",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_host_config_cpu_shares",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_host_config_cpuset",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_labels_dict",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_labels_list",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_links",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_links_as_list_of_tuples",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_lxc_conf",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_lxc_conf_compat",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_mac_address",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_mem_limit_as_int",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_mem_limit_as_string",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_mem_limit_as_string_with_g_unit",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_mem_limit_as_string_with_k_unit",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_mem_limit_as_string_with_m_unit",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_mem_limit_as_string_with_wrong_value",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_multiple_links",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_named_volume",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_port_binds",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_ports",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_restart_policy",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_stdin_open",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_stop_signal",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_sysctl",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_tmpfs_dict",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_tmpfs_list",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_unicode_envvars",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_volume_string",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_volumes_from",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_container_with_working_dir",
"tests/unit/api_container_test.py::CreateContainerTest::test_create_named_container",
"tests/unit/api_container_test.py::ContainerTest::test_container_stats",
"tests/unit/api_container_test.py::ContainerTest::test_container_top",
"tests/unit/api_container_test.py::ContainerTest::test_container_top_with_psargs",
"tests/unit/api_container_test.py::ContainerTest::test_container_update",
"tests/unit/api_container_test.py::ContainerTest::test_diff",
"tests/unit/api_container_test.py::ContainerTest::test_diff_with_dict_instead_of_id",
"tests/unit/api_container_test.py::ContainerTest::test_export",
"tests/unit/api_container_test.py::ContainerTest::test_export_with_dict_instead_of_id",
"tests/unit/api_container_test.py::ContainerTest::test_inspect_container",
"tests/unit/api_container_test.py::ContainerTest::test_inspect_container_undefined_id",
"tests/unit/api_container_test.py::ContainerTest::test_kill_container",
"tests/unit/api_container_test.py::ContainerTest::test_kill_container_with_dict_instead_of_id",
"tests/unit/api_container_test.py::ContainerTest::test_kill_container_with_signal",
"tests/unit/api_container_test.py::ContainerTest::test_list_containers",
"tests/unit/api_container_test.py::ContainerTest::test_log_following",
"tests/unit/api_container_test.py::ContainerTest::test_log_following_backwards",
"tests/unit/api_container_test.py::ContainerTest::test_log_since",
"tests/unit/api_container_test.py::ContainerTest::test_log_since_with_datetime",
"tests/unit/api_container_test.py::ContainerTest::test_log_streaming",
"tests/unit/api_container_test.py::ContainerTest::test_log_streaming_and_following",
"tests/unit/api_container_test.py::ContainerTest::test_log_tail",
"tests/unit/api_container_test.py::ContainerTest::test_log_tty",
"tests/unit/api_container_test.py::ContainerTest::test_logs",
"tests/unit/api_container_test.py::ContainerTest::test_logs_with_dict_instead_of_id",
"tests/unit/api_container_test.py::ContainerTest::test_pause_container",
"tests/unit/api_container_test.py::ContainerTest::test_port",
"tests/unit/api_container_test.py::ContainerTest::test_remove_container",
"tests/unit/api_container_test.py::ContainerTest::test_remove_container_with_dict_instead_of_id",
"tests/unit/api_container_test.py::ContainerTest::test_rename_container",
"tests/unit/api_container_test.py::ContainerTest::test_resize_container",
"tests/unit/api_container_test.py::ContainerTest::test_restart_container",
"tests/unit/api_container_test.py::ContainerTest::test_restart_container_with_dict_instead_of_id",
"tests/unit/api_container_test.py::ContainerTest::test_stop_container",
"tests/unit/api_container_test.py::ContainerTest::test_stop_container_with_dict_instead_of_id",
"tests/unit/api_container_test.py::ContainerTest::test_unpause_container",
"tests/unit/api_container_test.py::ContainerTest::test_wait",
"tests/unit/api_container_test.py::ContainerTest::test_wait_with_dict_instead_of_id"
]
| []
| Apache License 2.0 | 270 | [
"docker/api/container.py"
]
| [
"docker/api/container.py"
]
|
mogproject__mog-commons-python-8 | 71a072abdbeff70c14543ef9b307fae3277dc24a | 2015-10-24 16:43:22 | 0a6ffc13e621b0c2cbe35d20a6b938de570c0626 | diff --git a/src/mog_commons/__init__.py b/src/mog_commons/__init__.py
index 8ce9b36..7525d19 100644
--- a/src/mog_commons/__init__.py
+++ b/src/mog_commons/__init__.py
@@ -1,1 +1,1 @@
-__version__ = '0.1.3'
+__version__ = '0.1.4'
diff --git a/src/mog_commons/collection.py b/src/mog_commons/collection.py
index 21a8f62..621e572 100644
--- a/src/mog_commons/collection.py
+++ b/src/mog_commons/collection.py
@@ -2,6 +2,8 @@ from __future__ import division, print_function, absolute_import, unicode_litera
import six
+__all__ = ['get_single_item', 'get_single_key', 'get_single_value', 'distinct']
+
def get_single_item(d):
"""Get an item from a dict which contains just one item."""
@@ -19,3 +21,10 @@ def get_single_value(d):
"""Get a value from a dict which contains just one item."""
assert len(d) == 1, 'Single-item dict must have just one item, not %d.' % len(d)
return next(six.itervalues(d))
+
+
+def distinct(xs):
+    """Get the list of distinct values while preserving order."""
+ # don't use collections.OrderedDict because we do support Python 2.6
+ seen = set()
+ return [x for x in xs if x not in seen and not seen.add(x)]
diff --git a/src/mog_commons/string.py b/src/mog_commons/string.py
index 0e6c894..321c6a2 100644
--- a/src/mog_commons/string.py
+++ b/src/mog_commons/string.py
@@ -3,6 +3,21 @@ from __future__ import division, print_function, absolute_import, unicode_litera
from unicodedata import east_asian_width
import six
+from mog_commons.collection import distinct
+
+__all__ = [
+ 'is_unicode',
+ 'is_strlike',
+ 'unicode_width',
+ 'to_unicode',
+ 'to_str',
+ 'to_bytes',
+ 'edge_just',
+ 'unicode_right',
+ 'unicode_left',
+ 'unicode_decode',
+]
+
__unicode_width_mapping = {'F': 2, 'H': 1, 'W': 2, 'Na': 1, 'A': 2, 'N': 1}
@@ -104,3 +119,22 @@ def unicode_right(s, width):
break
i -= 1
return s[i:]
+
+
+def unicode_decode(data, encoding_list):
+ """
+ Decode string data with one or more encodings, trying sequentially
+ :param data: bytes: encoded string data
+ :param encoding_list: list[string] or string: encoding names
+ :return: string: decoded string
+ """
+ assert encoding_list, 'encodings must not be empty.'
+
+ xs = distinct(encoding_list if isinstance(encoding_list, list) else [encoding_list])
+ init, last = xs[:-1], xs[-1]
+ for encoding in init:
+ try:
+ return data.decode(encoding)
+ except UnicodeDecodeError:
+ pass
+ return data.decode(last)
| Decode string with multiple encodings | mogproject/mog-commons-python | diff --git a/src/mog_commons/unittest.py b/src/mog_commons/unittest.py
index 27445ba..06bc135 100644
--- a/src/mog_commons/unittest.py
+++ b/src/mog_commons/unittest.py
@@ -19,6 +19,7 @@ class StringBuffer(object):
We don't use StringIO because there are many differences between PY2 and PY3.
"""
+
def __init__(self, init_buffer=None):
self._buffer = init_buffer or b''
@@ -37,9 +38,17 @@ class StringBuffer(object):
class TestCase(base_unittest.TestCase):
def assertRaisesRegexp(self, expected_exception, expected_regexp, callable_obj=None, *args, **kwargs):
- """Accept difference of the function name between PY2 and PY3."""
- f = base_unittest.TestCase.assertRaisesRegex if six.PY3 else base_unittest.TestCase.assertRaisesRegexp
- f(self, expected_exception, expected_regexp, callable_obj, *args, **kwargs)
+ """
+ Accept difference of the function name between PY2 and PY3.
+
+ We don't use built-in assertRaisesRegexp because it is unicode-unsafe.
+ """
+ with self.assertRaises(expected_exception) as cm:
+ callable_obj(*args, **kwargs)
+ if six.PY2:
+ self.assertRegexpMatches(str(cm.exception), expected_regexp)
+ else:
+ self.assertRegex(str(cm.exception), expected_regexp)
def assertOutput(self, expected_stdout, expected_stderr, function, encoding='utf-8'):
with self.withOutput() as (out, err):
diff --git a/tests/mog_commons/test_collection.py b/tests/mog_commons/test_collection.py
index 2bd136e..da6fda2 100644
--- a/tests/mog_commons/test_collection.py
+++ b/tests/mog_commons/test_collection.py
@@ -1,6 +1,6 @@
from __future__ import division, print_function, absolute_import, unicode_literals
-from mog_commons.collection import get_single_item, get_single_key, get_single_value
+from mog_commons.collection import *
from mog_commons import unittest
@@ -31,3 +31,12 @@ class TestCollection(unittest.TestCase):
{})
self.assertRaisesRegexp(AssertionError, 'Single-item dict must have just one item, not 2.', get_single_value,
{'x': 123, 'y': 45})
+
+ def test_distinct(self):
+ self.assertEqual(distinct([]), [])
+ self.assertEqual(distinct([1]), [1])
+ self.assertEqual(distinct([1] * 100), [1])
+ self.assertEqual(distinct([1, 2, 3, 4, 5]), [1, 2, 3, 4, 5])
+ self.assertEqual(distinct([1, 2, 1, 2, 1]), [1, 2])
+ self.assertEqual(distinct([2, 1, 2, 1, 1]), [2, 1])
+ self.assertEqual(distinct('mog-commons-python'), ['m', 'o', 'g', '-', 'c', 'n', 's', 'p', 'y', 't', 'h'])
diff --git a/tests/mog_commons/test_string.py b/tests/mog_commons/test_string.py
index 2f00173..399abc8 100644
--- a/tests/mog_commons/test_string.py
+++ b/tests/mog_commons/test_string.py
@@ -92,3 +92,19 @@ class TestString(unittest.TestCase):
self.assertEqual(string.unicode_right('あいうえお', 11), 'あいうえお')
self.assertEqual(string.unicode_right('あxいxうxえxお', 4), 'xお')
self.assertEqual(string.unicode_right('あxいxうxえxお', 5), 'えxお')
+
+ def test_unicode_decode(self):
+ self.assertRaisesRegexp(AssertionError, 'encodings must not be empty.', string.unicode_decode, 'abc', [])
+ self.assertEqual(string.unicode_decode(b'abc', 'ascii'), 'abc')
+ self.assertEqual(string.unicode_decode(b'abc', ['ascii']), 'abc')
+ self.assertRaisesRegexp(
+ UnicodeDecodeError, "'ascii' codec can't decode",
+ string.unicode_decode, 'あいうえお'.encode('utf-8'), 'ascii')
+ self.assertEqual(string.unicode_decode('あいうえお'.encode('utf-8'), ['ascii', 'sjis', 'utf-8']), 'あいうえお')
+ self.assertEqual(string.unicode_decode('あいうえお'.encode('utf-8'), ['ascii', 'utf-8', 'sjis']), 'あいうえお')
+ self.assertEqual(string.unicode_decode('あいうえお'.encode('utf-8'), ['utf-8', 'ascii', 'sjis']), 'あいうえお')
+ self.assertEqual(string.unicode_decode('あいうえお'.encode('utf-8'), ['utf-8', 'utf-8', 'utf-8']), 'あいうえお')
+ self.assertEqual(string.unicode_decode('あいうえお'.encode('sjis'), ['ascii', 'utf-8', 'sjis']), 'あいうえお')
+ self.assertRaisesRegexp(
+ UnicodeDecodeError, "'shift_jis' codec can't decode",
+ string.unicode_decode, 'あいうえお'.encode('utf-8'), ['ascii', 'sjis'])
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 3,
"test_score": 3
},
"num_modified_files": 3
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"coverage",
"coveralls",
"pytest"
],
"pre_install": null,
"python": "3.4",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
coveralls==3.3.1
docopt==0.6.2
idna==3.10
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
linecache2==1.0.0
-e git+https://github.com/mogproject/mog-commons-python.git@71a072abdbeff70c14543ef9b307fae3277dc24a#egg=mog_commons
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
requests==2.27.1
six==1.17.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
traceback2==1.4.0
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
unittest2==1.1.0
urllib3==1.26.20
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: mog-commons-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- argparse==1.4.0
- charset-normalizer==2.0.12
- coverage==6.2
- coveralls==3.3.1
- docopt==0.6.2
- idna==3.10
- linecache2==1.0.0
- requests==2.27.1
- six==1.17.0
- traceback2==1.4.0
- unittest2==1.1.0
- urllib3==1.26.20
prefix: /opt/conda/envs/mog-commons-python
| [
"tests/mog_commons/test_collection.py::TestCollection::test_distinct",
"tests/mog_commons/test_string.py::TestString::test_unicode_decode"
]
| []
| [
"tests/mog_commons/test_collection.py::TestCollection::test_get_single_item",
"tests/mog_commons/test_collection.py::TestCollection::test_get_single_item_error",
"tests/mog_commons/test_collection.py::TestCollection::test_get_single_key",
"tests/mog_commons/test_collection.py::TestCollection::test_get_single_key_error",
"tests/mog_commons/test_collection.py::TestCollection::test_get_single_value",
"tests/mog_commons/test_collection.py::TestCollection::test_get_single_value_error",
"tests/mog_commons/test_string.py::TestString::test_edge_just",
"tests/mog_commons/test_string.py::TestString::test_edge_just_unicode",
"tests/mog_commons/test_string.py::TestString::test_to_bytes",
"tests/mog_commons/test_string.py::TestString::test_to_str",
"tests/mog_commons/test_string.py::TestString::test_to_str_py3",
"tests/mog_commons/test_string.py::TestString::test_to_unicode",
"tests/mog_commons/test_string.py::TestString::test_unicode_left",
"tests/mog_commons/test_string.py::TestString::test_unicode_right",
"tests/mog_commons/test_string.py::TestString::test_unicode_width"
]
| []
| Apache License 2.0 | 273 | [
"src/mog_commons/string.py",
"src/mog_commons/__init__.py",
"src/mog_commons/collection.py"
]
| [
"src/mog_commons/string.py",
"src/mog_commons/__init__.py",
"src/mog_commons/collection.py"
]
|
|
davidhalter__jedi-641 | e0753da6f1ed83185b84c0f9ab65eed67c80dcb4 | 2015-10-24 23:35:37 | 2ba78ab725f1e02dfef8bc50b0204cf656e8ee23 | diff --git a/AUTHORS.txt b/AUTHORS.txt
index 64aae316..bc258d6b 100644
--- a/AUTHORS.txt
+++ b/AUTHORS.txt
@@ -33,5 +33,6 @@ Savor d'Isavano (@KenetJervet) <[email protected]>
Phillip Berndt (@phillipberndt) <[email protected]>
Ian Lee (@IanLee1521) <[email protected]>
Farkhad Khatamov (@hatamov) <[email protected]>
+Kevin Kelley (@kelleyk) <[email protected]>
Note: (@user) means a github user name.
diff --git a/jedi/api/classes.py b/jedi/api/classes.py
index a1d42bd0..75275dd2 100644
--- a/jedi/api/classes.py
+++ b/jedi/api/classes.py
@@ -336,7 +336,7 @@ class BaseDefinition(object):
raise AttributeError()
followed = followed[0] # only check the first one.
- if followed.type == 'funcdef':
+ if followed.type in ('funcdef', 'lambda'):
if isinstance(followed, er.InstanceElement):
params = followed.params[1:]
else:
diff --git a/jedi/parser/tree.py b/jedi/parser/tree.py
index 619067e1..899d5f1c 100644
--- a/jedi/parser/tree.py
+++ b/jedi/parser/tree.py
@@ -748,6 +748,15 @@ def _create_params(parent, argslist_list):
class Function(ClassOrFunc):
"""
Used to store the parsed contents of a python function.
+
+ Children:
+ 0) <Keyword: def>
+ 1) <Name>
+ 2) parameter list (including open-paren and close-paren <Operator>s)
+ 3) <Operator: :>
+ 4) Node() representing function body
+ 5) ??
+ 6) annotation (if present)
"""
__slots__ = ('listeners',)
type = 'funcdef'
@@ -760,6 +769,7 @@ class Function(ClassOrFunc):
@property
def params(self):
+        # Contents of the parameter list minus the leading <Operator: (> and the trailing <Operator: )>.
return self.children[2].children[1:-1]
@property
@@ -791,10 +801,13 @@ class Function(ClassOrFunc):
:rtype: str
"""
- func_name = func_name or self.children[1]
- code = unicode(func_name) + self.children[2].get_code()
+ func_name = func_name or self.name
+ code = unicode(func_name) + self._get_paramlist_code()
return '\n'.join(textwrap.wrap(code, width))
+ def _get_paramlist_code(self):
+ return self.children[2].get_code()
+
@property
def doc(self):
""" Return a document string including call signature. """
@@ -805,6 +818,12 @@ class Function(ClassOrFunc):
class Lambda(Function):
"""
Lambdas are basically trimmed functions, so give it the same interface.
+
+ Children:
+ 0) <Keyword: lambda>
+ *) <Param x> for each argument x
+ -2) <Operator: :>
+ -1) Node() representing body
"""
type = 'lambda'
__slots__ = ()
@@ -813,9 +832,17 @@ class Lambda(Function):
# We don't want to call the Function constructor, call its parent.
super(Function, self).__init__(children)
self.listeners = set() # not used here, but in evaluation.
- lst = self.children[1:-2] # After `def foo`
+ lst = self.children[1:-2] # Everything between `lambda` and the `:` operator is a parameter.
self.children[1:-2] = _create_params(self, lst)
+ @property
+ def name(self):
+ # Borrow the position of the <Keyword: lambda> AST node.
+ return Name(self.children[0].position_modifier, '<lambda>', self.children[0].start_pos)
+
+ def _get_paramlist_code(self):
+ return '(' + ''.join(param.get_code() for param in self.params).strip() + ')'
+
@property
def params(self):
return self.children[1:-2]
@@ -823,6 +850,7 @@ class Lambda(Function):
def is_generator(self):
return False
+ @property
def yields(self):
return []
| 'Lambda' object has no attribute 'get_subscope_by_name'
Jedi 0.9.0, Python 2.7.3
Reproduction:
```Python
import jedi
script = """\
my_lambda = lambda x: x+1
my_lambda(1)
"""
sigs = jedi.Script(script, line=2, column=11).call_signatures()
print sigs[0].index
```
Traceback:
```
Traceback (most recent call last):
File "/home/forcer/Projects/elpy/elpy/rpc.py", line 100, in handle_request
result = method(*params)
File "/home/forcer/Projects/elpy/elpy/server.py", line 87, in rpc_get_calltip
get_source(source), offset)
File "/home/forcer/Projects/elpy/elpy/server.py", line 49, in _call_backend
return meth(*args, **kwargs)
File "/home/forcer/Projects/elpy/elpy/jedibackend.py", line 129, in rpc_get_calltip
"index": call.index,
File "/home/forcer/.virtualenvs/elpy/local/lib/python2.7/site-packages/jedi/api/classes.py", line 653, in index
if self._index >= len(self.params):
File "/home/forcer/.virtualenvs/elpy/local/lib/python2.7/site-packages/jedi/evaluate/cache.py", line 41, in wrapper
rv = function(obj, *args, **kwargs)
File "/home/forcer/.virtualenvs/elpy/local/lib/python2.7/site-packages/jedi/api/classes.py", line 348, in params
sub = followed.get_subscope_by_name('__init__')
File "/home/forcer/.virtualenvs/elpy/local/lib/python2.7/site-packages/jedi/evaluate/representation.py", line 563, in __getattr__
return getattr(self.base_func, name)
AttributeError: 'Lambda' object has no attribute 'get_subscope_by_name'
``` | davidhalter/jedi | diff --git a/test/test_parser/test_parser_tree.py b/test/test_parser/test_parser_tree.py
new file mode 100644
index 00000000..480230ba
--- /dev/null
+++ b/test/test_parser/test_parser_tree.py
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 # This file contains Unicode characters.
+
+from textwrap import dedent
+
+import pytest
+
+from jedi._compatibility import u, unicode
+from jedi.parser import Parser, load_grammar
+from jedi.parser import tree as pt
+
+
+class TestsFunctionAndLambdaParsing(object):
+
+ FIXTURES = [
+ ('def my_function(x, y, z):\n return x + y * z\n', {
+ 'name': 'my_function',
+ 'call_sig': 'my_function(x, y, z)',
+ 'params': ['x', 'y', 'z'],
+ }),
+ ('lambda x, y, z: x + y * z\n', {
+ 'name': '<lambda>',
+ 'call_sig': '<lambda>(x, y, z)',
+ 'params': ['x', 'y', 'z'],
+ }),
+ ]
+
+ @pytest.fixture(params=FIXTURES)
+ def node(self, request):
+ parsed = Parser(load_grammar(), dedent(u(request.param[0])))
+ request.keywords['expected'] = request.param[1]
+ return parsed.module.subscopes[0]
+
+ @pytest.fixture()
+ def expected(self, request, node):
+ return request.keywords['expected']
+
+ def test_name(self, node, expected):
+ assert isinstance(node.name, pt.Name)
+ assert unicode(node.name) == u(expected['name'])
+
+ def test_params(self, node, expected):
+ assert isinstance(node.params, list)
+ assert all(isinstance(x, pt.Param) for x in node.params)
+ assert [unicode(x.name) for x in node.params] == [u(x) for x in expected['params']]
+
+ def test_is_generator(self, node, expected):
+ assert node.is_generator() is expected.get('is_generator', False)
+
+ def test_yields(self, node, expected):
+ # TODO: There's a comment in the code noting that the current implementation is incorrect. This returns an
+ # empty list at the moment (not e.g. False).
+ if expected.get('yields', False):
+ assert node.yields
+ else:
+ assert not node.yields
+
+ def test_annotation(self, node, expected):
+ assert node.annotation() is expected.get('annotation', None)
+
+ def test_get_call_signature(self, node, expected):
+ assert node.get_call_signature() == expected['call_sig']
+
+ def test_doc(self, node, expected):
+ assert node.doc == expected.get('doc') or (expected['call_sig'] + '\n\n')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 3
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cache",
"docopt",
"colorama"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | colorama==0.4.6
docopt==0.6.2
exceptiongroup==1.2.2
execnet==2.1.1
iniconfig==2.1.0
-e git+https://github.com/davidhalter/jedi.git@e0753da6f1ed83185b84c0f9ab65eed67c80dcb4#egg=jedi
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-cache==1.0
tomli==2.2.1
| name: jedi
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- colorama==0.4.6
- docopt==0.6.2
- exceptiongroup==1.2.2
- execnet==2.1.1
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-cache==1.0
- tomli==2.2.1
prefix: /opt/conda/envs/jedi
| [
"test/test_parser/test_parser_tree.py::TestsFunctionAndLambdaParsing::test_name[node1]",
"test/test_parser/test_parser_tree.py::TestsFunctionAndLambdaParsing::test_yields[node1]",
"test/test_parser/test_parser_tree.py::TestsFunctionAndLambdaParsing::test_get_call_signature[node1]"
]
| []
| [
"test/test_parser/test_parser_tree.py::TestsFunctionAndLambdaParsing::test_name[node0]",
"test/test_parser/test_parser_tree.py::TestsFunctionAndLambdaParsing::test_params[node0]",
"test/test_parser/test_parser_tree.py::TestsFunctionAndLambdaParsing::test_params[node1]",
"test/test_parser/test_parser_tree.py::TestsFunctionAndLambdaParsing::test_is_generator[node0]",
"test/test_parser/test_parser_tree.py::TestsFunctionAndLambdaParsing::test_is_generator[node1]",
"test/test_parser/test_parser_tree.py::TestsFunctionAndLambdaParsing::test_yields[node0]",
"test/test_parser/test_parser_tree.py::TestsFunctionAndLambdaParsing::test_annotation[node0]",
"test/test_parser/test_parser_tree.py::TestsFunctionAndLambdaParsing::test_annotation[node1]",
"test/test_parser/test_parser_tree.py::TestsFunctionAndLambdaParsing::test_get_call_signature[node0]",
"test/test_parser/test_parser_tree.py::TestsFunctionAndLambdaParsing::test_doc[node0]",
"test/test_parser/test_parser_tree.py::TestsFunctionAndLambdaParsing::test_doc[node1]"
]
| []
| MIT License | 274 | [
"jedi/parser/tree.py",
"AUTHORS.txt",
"jedi/api/classes.py"
]
| [
"jedi/parser/tree.py",
"AUTHORS.txt",
"jedi/api/classes.py"
]
|
|
pystorm__pystorm-14 | 111356b63c7a44261fb4d0c827745e793ca8717e | 2015-10-27 16:46:13 | eaa0bf28f57e43950379dfaabac7174ad5db4740 | diff --git a/pystorm/component.py b/pystorm/component.py
index 23d82e8..80c5abf 100644
--- a/pystorm/component.py
+++ b/pystorm/component.py
@@ -3,14 +3,17 @@ from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
+import re
import signal
import sys
-from collections import deque, namedtuple
+from collections import defaultdict, deque, namedtuple
from logging.handlers import RotatingFileHandler
from os.path import join
from threading import RLock
from traceback import format_exc
+from six import iteritems
+
from .exceptions import StormWentAwayError
from .serializers.msgpack_serializer import MsgpackSerializer
from .serializers.json_serializer import JSONSerializer
@@ -36,6 +39,9 @@ _PYTHON_LOG_LEVELS = {'critical': logging.CRITICAL,
'debug': logging.DEBUG,
'trace': logging.DEBUG}
_SERIALIZERS = {"json": JSONSerializer, "msgpack": MsgpackSerializer}
+# Convert names to valid Python identifiers by replacing non-word characters,
+# whitespace, and leading digits with underscores.
+_IDENTIFIER_RE = re.compile(r'\W|^(?=\d)')
log = logging.getLogger(__name__)
@@ -121,7 +127,7 @@ Tuple = namedtuple('Tuple', 'id component stream task values')
:ivar task: the task the Tuple was generated from.
:type task: int
:ivar values: the payload of the Tuple where data is stored.
-:type values: list
+:type values: tuple (or namedtuple for Storm 0.10.0+)
"""
@@ -177,6 +183,7 @@ class Component(object):
self.context = None
self.pid = os.getpid()
self.logger = None
+ self._source_tuple_types = defaultdict(dict)
# pending commands/Tuples we read while trying to read task IDs
self._pending_commands = deque()
# pending task IDs we read while trying to read commands/Tuples
@@ -207,6 +214,15 @@ class Component(object):
self.topology_name = storm_conf.get('topology.name', '')
self.task_id = context.get('taskid', '')
self.component_name = context.get('componentid')
+ # source->stream->fields requires Storm 0.10.0 or later
+ source_stream_fields = context.get('source->stream->fields', {})
+ for source, stream_fields in iteritems(source_stream_fields):
+ for stream, fields in iteritems(stream_fields):
+ type_name = (_IDENTIFIER_RE.sub('_', source.title()) +
+ _IDENTIFIER_RE.sub('_', stream.title()) +
+ 'Tuple')
+ self._source_tuple_types[source][stream] = namedtuple(type_name,
+ fields)
# If using Storm before 0.10.0 componentid is not available
if self.component_name is None:
self.component_name = context.get('task->component', {})\
@@ -280,8 +296,12 @@ class Component(object):
def read_tuple(self):
cmd = self.read_command()
- return Tuple(cmd['id'], cmd['comp'], cmd['stream'], cmd['task'],
- cmd['tuple'])
+ source = cmd['comp']
+ stream = cmd['stream']
+ values = cmd['tuple']
+ val_type = self._source_tuple_types[source].get(stream)
+ return Tuple(cmd['id'], source, stream, cmd['task'],
+ tuple(values) if val_type is None else val_type(*values))
def read_handshake(self):
"""Read and process an initial handshake message from Storm."""
| Have Tuple.values be a namedtuple so fields can be accessed by name
_From @dan-blanchard on April 15, 2015 13:57_
This was brought up as part of our discussion of the rejected #120. What we want to do is:
- [x] Submit a PR to Storm that serializes [`TopologyContext.componentToStreamToFields`](https://github.com/apache/storm/blob/master/storm-core/src/jvm/backtype/storm/task/TopologyContext.java#L61) and sends that along as part of Multi-Lang handshake.
- [x] Add a `_stream_fields` dictionary attribute to the `Component` class that maps from streams to `namedtuple` types representing the names of fields/values in the tuple. This should get created at handshake time based on the contents of `componentToStreamToFields`.
- [x] Modify `Component.read_tuple()` to set `Tuple.values` to be a `namedtuple` of the appropriate type for the current stream (by looking it up in `Component._stream_fields`).
This will allow users to get values out of their tuples by accessing values directly by name (`word = tup.values.word`), or by unpacking (`word, count = tup.values`).
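A minimal runnable sketch of the resulting access pattern (the `WordCountTuple` name and fields here are illustrative; pystorm derives the real type name from the source component and stream names):

```python
from collections import namedtuple

# Stand-in for the type pystorm would generate from the handshake metadata.
WordCountTuple = namedtuple('WordCountTuple', ['word', 'count'])
values = WordCountTuple(word='dog', count=3)

print(values.word)    # access by field name -> 'dog'
word, count = values  # positional unpacking still works
```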
_Copied from original issue: Parsely/streamparse#127_ | pystorm/pystorm | diff --git a/test/pystorm/test_bolt.py b/test/pystorm/test_bolt.py
index f9586c3..80cf5c8 100644
--- a/test/pystorm/test_bolt.py
+++ b/test/pystorm/test_bolt.py
@@ -34,7 +34,7 @@ class BoltTests(unittest.TestCase):
tup_json = "{}\nend\n".format(json.dumps(self.tup_dict)).encode('utf-8')
self.tup = Tuple(self.tup_dict['id'], self.tup_dict['comp'],
self.tup_dict['stream'], self.tup_dict['task'],
- self.tup_dict['tuple'],)
+ tuple(self.tup_dict['tuple']),)
self.bolt = Bolt(input_stream=BytesIO(tup_json),
output_stream=BytesIO())
self.bolt.initialize({}, {})
@@ -190,7 +190,7 @@ class BoltTests(unittest.TestCase):
def test_heartbeat_response(self, send_message_mock, read_tuple_mock):
# Make sure we send sync for heartbeats
read_tuple_mock.return_value = Tuple(id='foo', task=-1,
- stream='__heartbeat', values=[],
+ stream='__heartbeat', values=(),
component='__system')
self.bolt._run()
send_message_mock.assert_called_with(self.bolt, {'command': 'sync'})
@@ -201,7 +201,7 @@ class BoltTests(unittest.TestCase):
# Make sure we send sync for heartbeats
read_tuple_mock.return_value = Tuple(id=None, task=-1,
component='__system',
- stream='__tick', values=[50])
+ stream='__tick', values=(50,))
self.bolt._run()
process_tick_mock.assert_called_with(self.bolt,
read_tuple_mock.return_value)
@@ -239,8 +239,8 @@ class BatchingBoltTests(unittest.TestCase):
tups_json = '\nend\n'.join([json.dumps(tup_dict) for tup_dict in
self.tup_dicts] + [''])
self.tups = [Tuple(tup_dict['id'], tup_dict['comp'], tup_dict['stream'],
- tup_dict['task'], tup_dict['tuple']) for tup_dict in
- self.tup_dicts]
+ tup_dict['task'], tuple(tup_dict['tuple']))
+ for tup_dict in self.tup_dicts]
self.nontick_tups = [tup for tup in self.tups if tup.stream != '__tick']
self.bolt = BatchingBolt(input_stream=BytesIO(tups_json.encode('utf-8')),
output_stream=BytesIO())
@@ -364,7 +364,7 @@ class BatchingBoltTests(unittest.TestCase):
def test_heartbeat_response(self, send_message_mock, read_tuple_mock):
# Make sure we send sync for heartbeats
read_tuple_mock.return_value = Tuple(id='foo', task=-1,
- stream='__heartbeat', values=[],
+ stream='__heartbeat', values=(),
component='__system')
self.bolt._run()
send_message_mock.assert_called_with(self.bolt, {'command': 'sync'})
@@ -375,7 +375,7 @@ class BatchingBoltTests(unittest.TestCase):
# Make sure we send sync for heartbeats
read_tuple_mock.return_value = Tuple(id=None, task=-1,
component='__system',
- stream='__tick', values=[50])
+ stream='__tick', values=(50,))
self.bolt._run()
process_tick_mock.assert_called_with(self.bolt,
read_tuple_mock.return_value)
diff --git a/test/pystorm/test_component.py b/test/pystorm/test_component.py
index f7228f4..c508ae1 100644
--- a/test/pystorm/test_component.py
+++ b/test/pystorm/test_component.py
@@ -7,6 +7,7 @@ from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import unittest
+from collections import namedtuple
from io import BytesIO
import simplejson as json
@@ -24,49 +25,48 @@ log = logging.getLogger(__name__)
class ComponentTests(unittest.TestCase):
-
- def test_read_handshake(self):
- handshake_dict = {
- "conf": {
- "topology.message.timeout.secs": 3,
- "topology.tick.tuple.freq.secs": 1,
- "topology.debug": True
- },
- "pidDir": ".",
- "context": {
- "task->component": {
- "1": "example-spout",
- "2": "__acker",
- "3": "example-bolt1",
- "4": "example-bolt2"
- },
- "taskid": 3,
- # Everything below this line is only available in Storm 0.10.0+
- "componentid": "example-bolt1",
- "stream->target->grouping": {
- "default": {
- "example-bolt2": {
- "type": "SHUFFLE"
- }
- }
- },
- "streams": ["default"],
- "stream->outputfields": {"default": ["word"]},
- "source->stream->grouping": {
- "example-spout": {
- "default": {
- "type": "FIELDS",
- "fields": ["word"]
- }
- }
- },
- "source->stream->fields": {
- "example-spout": {
- "default": ["word"]
- }
+ conf = {"topology.message.timeout.secs": 3,
+ "topology.tick.tuple.freq.secs": 1,
+ "topology.debug": True,
+ "topology.name": "foo"}
+ context = {
+ "task->component": {
+ "1": "example-spout",
+ "2": "__acker",
+ "3": "example-bolt1",
+ "4": "example-bolt2"
+ },
+ "taskid": 3,
+ # Everything below this line is only available in Storm 0.11.0+
+ "componentid": "example-bolt1",
+ "stream->target->grouping": {
+ "default": {
+ "example-bolt2": {
+ "type": "SHUFFLE"
+ }
+ }
+ },
+ "streams": ["default"],
+ "stream->outputfields": {"default": ["word"]},
+ "source->stream->grouping": {
+ "example-spout": {
+ "default": {
+ "type": "FIELDS",
+ "fields": ["word"]
}
}
+ },
+ "source->stream->fields": {
+ "example-spout": {
+ "default": ["sentence", "word", "number"]
+ }
}
+ }
+
+ def test_read_handshake(self):
+ handshake_dict = {"conf": self.conf,
+ "pidDir": ".",
+ "context": self.context}
pid_dir = handshake_dict['pidDir']
expected_conf = handshake_dict['conf']
expected_context = handshake_dict['context']
@@ -84,52 +84,18 @@ class ComponentTests(unittest.TestCase):
component.serializer.output_stream.buffer.getvalue())
def test_setup_component(self):
- conf = {"topology.message.timeout.secs": 3,
- "topology.tick.tuple.freq.secs": 1,
- "topology.debug": True,
- "topology.name": "foo"}
- context = {
- "task->component": {
- "1": "example-spout",
- "2": "__acker",
- "3": "example-bolt1",
- "4": "example-bolt2"
- },
- "taskid": 3,
- # Everything below this line is only available in Storm 0.11.0+
- "componentid": "example-bolt1",
- "stream->target->grouping": {
- "default": {
- "example-bolt2": {
- "type": "SHUFFLE"
- }
- }
- },
- "streams": ["default"],
- "stream->outputfields": {"default": ["word"]},
- "source->stream->grouping": {
- "example-spout": {
- "default": {
- "type": "FIELDS",
- "fields": ["word"]
- }
- }
- },
- "source->stream->fields": {
- "example-spout": {
- "default": ["word"]
- }
- }
- }
+ conf = self.conf
component = Component(input_stream=BytesIO(),
output_stream=BytesIO())
- component._setup_component(conf, context)
+ component._setup_component(conf, self.context)
+ self.assertEqual(component._source_tuple_types['example-spout']['default'].__name__,
+ 'Example_SpoutDefaultTuple')
self.assertEqual(component.topology_name, conf['topology.name'])
- self.assertEqual(component.task_id, context['taskid'])
+ self.assertEqual(component.task_id, self.context['taskid'])
self.assertEqual(component.component_name,
- context['task->component'][str(context['taskid'])])
+ self.context['task->component'][str(self.context['taskid'])])
self.assertEqual(component.storm_conf, conf)
- self.assertEqual(component.context, context)
+ self.assertEqual(component.context, self.context)
def test_read_message(self):
inputs = [# Task IDs
@@ -259,7 +225,7 @@ class ComponentTests(unittest.TestCase):
for msg in inputs[::2]:
output = json.loads(msg)
output['component'] = output['comp']
- output['values'] = output['tuple']
+ output['values'] = tuple(output['tuple'])
del output['comp']
del output['tuple']
outputs.append(Tuple(**output))
@@ -272,6 +238,38 @@ class ComponentTests(unittest.TestCase):
tup = component.read_tuple()
self.assertEqual(output, tup)
+ def test_read_tuple_named_fields(self):
+ # This is only valid for bolts, so we only need to test with task IDs
+ # and Tuples
+ inputs = [('{ "id": "-6955786537413359385", "comp": "example-spout", '
+ '"stream": "default", "task": 9, "tuple": ["snow white and '
+ 'the seven dwarfs", "field2", 3]}\n'), 'end\n']
+
+ component = Component(input_stream=BytesIO(''.join(inputs).encode('utf-8')),
+ output_stream=BytesIO())
+ component._setup_component(self.conf, self.context)
+
+ Example_SpoutDefaultTuple = namedtuple('Example_SpoutDefaultTuple',
+ field_names=['sentence', 'word',
+ 'number'])
+
+ outputs = []
+ for msg in inputs[::2]:
+ output = json.loads(msg)
+ output['component'] = output['comp']
+ output['values'] = Example_SpoutDefaultTuple(*output['tuple'])
+ del output['comp']
+ del output['tuple']
+ outputs.append(Tuple(**output))
+
+ for output in outputs:
+ log.info('Checking Tuple for %r', output)
+ tup = component.read_tuple()
+ self.assertEqual(output.values.sentence, tup.values.sentence)
+ self.assertEqual(output.values.word, tup.values.word)
+ self.assertEqual(output.values.number, tup.values.number)
+ self.assertEqual(output, tup)
+
def test_send_message(self):
component = Component(input_stream=BytesIO(), output_stream=BytesIO())
inputs = [{"command": "emit", "id": 4, "stream": "", "task": 9,
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[all]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
importlib-metadata==4.8.3
iniconfig==1.1.1
mock==5.2.0
msgpack-python==0.5.6
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
-e git+https://github.com/pystorm/pystorm.git@111356b63c7a44261fb4d0c827745e793ca8717e#egg=pystorm
pytest==7.0.1
simplejson==3.20.1
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: pystorm
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- mock==5.2.0
- msgpack-python==0.5.6
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- simplejson==3.20.1
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/pystorm
| [
"test/pystorm/test_bolt.py::BoltTests::test_auto_ack_on",
"test/pystorm/test_bolt.py::BoltTests::test_run",
"test/pystorm/test_bolt.py::BatchingBoltTests::test_auto_ack_off",
"test/pystorm/test_bolt.py::BatchingBoltTests::test_auto_ack_on",
"test/pystorm/test_bolt.py::BatchingBoltTests::test_auto_fail_on",
"test/pystorm/test_bolt.py::BatchingBoltTests::test_batching",
"test/pystorm/test_bolt.py::BatchingBoltTests::test_group_key",
"test/pystorm/test_component.py::ComponentTests::test_read_tuple",
"test/pystorm/test_component.py::ComponentTests::test_read_tuple_named_fields",
"test/pystorm/test_component.py::ComponentTests::test_setup_component"
]
| []
| [
"test/pystorm/test_bolt.py::BoltTests::test_ack_id",
"test/pystorm/test_bolt.py::BoltTests::test_ack_tuple",
"test/pystorm/test_bolt.py::BoltTests::test_auto_ack_off",
"test/pystorm/test_bolt.py::BoltTests::test_auto_anchor_off",
"test/pystorm/test_bolt.py::BoltTests::test_auto_anchor_on",
"test/pystorm/test_bolt.py::BoltTests::test_auto_anchor_override",
"test/pystorm/test_bolt.py::BoltTests::test_auto_fail_off",
"test/pystorm/test_bolt.py::BoltTests::test_auto_fail_on",
"test/pystorm/test_bolt.py::BoltTests::test_emit_basic",
"test/pystorm/test_bolt.py::BoltTests::test_emit_direct",
"test/pystorm/test_bolt.py::BoltTests::test_emit_stream_anchors",
"test/pystorm/test_bolt.py::BoltTests::test_fail_id",
"test/pystorm/test_bolt.py::BoltTests::test_fail_tuple",
"test/pystorm/test_bolt.py::BoltTests::test_heartbeat_response",
"test/pystorm/test_bolt.py::BoltTests::test_process_tick",
"test/pystorm/test_bolt.py::BatchingBoltTests::test_auto_fail_off",
"test/pystorm/test_bolt.py::BatchingBoltTests::test_auto_fail_partial",
"test/pystorm/test_bolt.py::BatchingBoltTests::test_heartbeat_response",
"test/pystorm/test_bolt.py::BatchingBoltTests::test_process_tick",
"test/pystorm/test_component.py::ComponentTests::test_log",
"test/pystorm/test_component.py::ComponentTests::test_read_command",
"test/pystorm/test_component.py::ComponentTests::test_read_handshake",
"test/pystorm/test_component.py::ComponentTests::test_read_message",
"test/pystorm/test_component.py::ComponentTests::test_read_message_unicode",
"test/pystorm/test_component.py::ComponentTests::test_read_split_message",
"test/pystorm/test_component.py::ComponentTests::test_read_task_ids",
"test/pystorm/test_component.py::ComponentTests::test_send_message",
"test/pystorm/test_component.py::ComponentTests::test_send_message_unicode"
]
| []
| Apache License 2.0 | 276 | [
"pystorm/component.py"
]
| [
"pystorm/component.py"
]
|
|
scrapy__scrapy-1563 | dd9f777ba725d7a7dbb192302cc52a120005ad64 | 2015-10-29 06:21:42 | 6aa85aee2a274393307ac3e777180fcbdbdc9848 | diff --git a/scrapy/http/request/form.py b/scrapy/http/request/form.py
index a12a2fd07..4a9bd732e 100644
--- a/scrapy/http/request/form.py
+++ b/scrapy/http/request/form.py
@@ -11,6 +11,7 @@ from parsel.selector import create_root_node
import six
from scrapy.http.request import Request
from scrapy.utils.python import to_bytes, is_listlike
+from scrapy.utils.response import get_base_url
class FormRequest(Request):
@@ -44,7 +45,7 @@ class FormRequest(Request):
def _get_form_url(form, url):
if url is None:
- return form.action or form.base_url
+ return urljoin(form.base_url, form.action)
return urljoin(form.base_url, url)
@@ -58,7 +59,7 @@ def _urlencode(seq, enc):
def _get_form(response, formname, formid, formnumber, formxpath):
"""Find the form element """
text = response.body_as_unicode()
- root = create_root_node(text, lxml.html.HTMLParser, base_url=response.url)
+ root = create_root_node(text, lxml.html.HTMLParser, base_url=get_base_url(response))
forms = root.xpath('//form')
if not forms:
raise ValueError("No <form> element found in %s" % response)
| [Bug] Incorrectly picked URL in `scrapy.http.FormRequest.from_response` when there is a `<base>` tag
## Issue Description
Incorrectly picked URL in `scrapy.http.FormRequest.from_response` when there is a `<base>` tag.
## How to Reproduce the Issue & Version Used
```
[pengyu@GLaDOS tmp]$ python2
Python 2.7.10 (default, Sep 7 2015, 13:51:49)
[GCC 5.2.0] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import scrapy
>>> scrapy.__version__
u'1.0.3'
>>> html_body = '''
... <html>
... <head>
... <base href="http://b.com/">
... </head>
... <body>
... <form action="test_form">
... </form>
... </body>
... </html>
... '''
>>> response = scrapy.http.TextResponse(url='http://a.com/', body=html_body)
>>> request = scrapy.http.FormRequest.from_response(response)
>>> request.url
'http://a.com/test_form'
```
## Expected Result
`request.url` should be `'http://b.com/test_form'`
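The discrepancy comes down to which base URL the form `action` gets resolved against. A minimal standard-library illustration (Python 3 `urllib.parse`, not Scrapy itself):
```python
from urllib.parse import urljoin

response_url = 'http://a.com/'  # URL the page was fetched from
base_href = 'http://b.com/'     # href of the document's <base> tag
action = 'test_form'

# Resolving against the response URL reproduces the buggy result.
print(urljoin(response_url, action))  # http://a.com/test_form

# Resolving against the <base> href gives the expected result.
print(urljoin(base_href, action))     # http://b.com/test_form
```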
## Suggested Fix
The issue can be fixed by adjusting a few lines in `scrapy/http/request/form.py` | scrapy/scrapy | diff --git a/tests/test_http_request.py b/tests/test_http_request.py
index ff0941961..60fd855dd 100644
--- a/tests/test_http_request.py
+++ b/tests/test_http_request.py
@@ -801,6 +801,25 @@ class FormRequestTest(RequestTest):
self.assertEqual(fs[b'test2'], [b'val2'])
self.assertEqual(fs[b'button1'], [b''])
+ def test_html_base_form_action(self):
+ response = _buildresponse(
+ """
+ <html>
+ <head>
+ <base href="http://b.com/">
+ </head>
+ <body>
+ <form action="test_form">
+ </form>
+ </body>
+ </html>
+ """,
+ url='http://a.com/'
+ )
+ req = self.request_class.from_response(response)
+ self.assertEqual(req.url, 'http://b.com/test_form')
+
+
def _buildresponse(body, **kwargs):
kwargs.setdefault('body', body)
kwargs.setdefault('url', 'http://example.com')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc libxml2-dev libxslt1-dev zlib1g-dev libffi-dev libssl-dev"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
Automat==24.8.1
cffi==1.17.1
constantly==23.10.4
coverage==7.8.0
cryptography==44.0.2
cssselect==1.3.0
exceptiongroup==1.2.2
execnet==2.1.1
hyperlink==21.0.0
idna==3.10
incremental==24.7.2
iniconfig==2.1.0
jmespath==1.0.1
lxml==5.3.1
packaging==24.2
parsel==1.10.0
pluggy==1.5.0
pyasn1==0.6.1
pyasn1_modules==0.4.2
pycparser==2.22
PyDispatcher==2.0.7
pyOpenSSL==25.0.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
queuelib==1.7.0
-e git+https://github.com/scrapy/scrapy.git@dd9f777ba725d7a7dbb192302cc52a120005ad64#egg=Scrapy
service-identity==24.2.0
six==1.17.0
tomli==2.2.1
Twisted==24.11.0
typing_extensions==4.13.0
w3lib==2.3.1
zope.interface==7.2
| name: scrapy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- automat==24.8.1
- cffi==1.17.1
- constantly==23.10.4
- coverage==7.8.0
- cryptography==44.0.2
- cssselect==1.3.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- hyperlink==21.0.0
- idna==3.10
- incremental==24.7.2
- iniconfig==2.1.0
- jmespath==1.0.1
- lxml==5.3.1
- packaging==24.2
- parsel==1.10.0
- pluggy==1.5.0
- pyasn1==0.6.1
- pyasn1-modules==0.4.2
- pycparser==2.22
- pydispatcher==2.0.7
- pyopenssl==25.0.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- queuelib==1.7.0
- service-identity==24.2.0
- six==1.17.0
- tomli==2.2.1
- twisted==24.11.0
- typing-extensions==4.13.0
- w3lib==2.3.1
- zope-interface==7.2
prefix: /opt/conda/envs/scrapy
| [
"tests/test_http_request.py::FormRequestTest::test_html_base_form_action"
]
| [
"tests/test_http_request.py::FormRequestTest::test_from_response_button_notype",
"tests/test_http_request.py::FormRequestTest::test_from_response_button_novalue",
"tests/test_http_request.py::FormRequestTest::test_from_response_button_submit",
"tests/test_http_request.py::FormRequestTest::test_from_response_checkbox",
"tests/test_http_request.py::FormRequestTest::test_from_response_descendants",
"tests/test_http_request.py::FormRequestTest::test_from_response_dont_click",
"tests/test_http_request.py::FormRequestTest::test_from_response_dont_submit_image_as_input",
"tests/test_http_request.py::FormRequestTest::test_from_response_dont_submit_reset_as_input",
"tests/test_http_request.py::FormRequestTest::test_from_response_formid_exists",
"tests/test_http_request.py::FormRequestTest::test_from_response_formid_notexist",
"tests/test_http_request.py::FormRequestTest::test_from_response_formname_exists",
"tests/test_http_request.py::FormRequestTest::test_from_response_formname_notexist",
"tests/test_http_request.py::FormRequestTest::test_from_response_formname_notexists_fallback_formid",
"tests/test_http_request.py::FormRequestTest::test_from_response_get",
"tests/test_http_request.py::FormRequestTest::test_from_response_input_hidden",
"tests/test_http_request.py::FormRequestTest::test_from_response_input_text",
"tests/test_http_request.py::FormRequestTest::test_from_response_input_textarea",
"tests/test_http_request.py::FormRequestTest::test_from_response_invalid_html5",
"tests/test_http_request.py::FormRequestTest::test_from_response_multiple_clickdata",
"tests/test_http_request.py::FormRequestTest::test_from_response_multiple_forms_clickdata",
"tests/test_http_request.py::FormRequestTest::test_from_response_noformname",
"tests/test_http_request.py::FormRequestTest::test_from_response_nr_index_clickdata",
"tests/test_http_request.py::FormRequestTest::test_from_response_override_clickable",
"tests/test_http_request.py::FormRequestTest::test_from_response_override_params",
"tests/test_http_request.py::FormRequestTest::test_from_response_post",
"tests/test_http_request.py::FormRequestTest::test_from_response_radio",
"tests/test_http_request.py::FormRequestTest::test_from_response_select",
"tests/test_http_request.py::FormRequestTest::test_from_response_submit_first_clickable",
"tests/test_http_request.py::FormRequestTest::test_from_response_submit_not_first_clickable",
"tests/test_http_request.py::FormRequestTest::test_from_response_submit_novalue",
"tests/test_http_request.py::FormRequestTest::test_from_response_unicode_clickdata",
"tests/test_http_request.py::FormRequestTest::test_from_response_xpath"
]
| [
"tests/test_http_request.py::RequestTest::test_ajax_url",
"tests/test_http_request.py::RequestTest::test_body",
"tests/test_http_request.py::RequestTest::test_copy",
"tests/test_http_request.py::RequestTest::test_copy_inherited_classes",
"tests/test_http_request.py::RequestTest::test_eq",
"tests/test_http_request.py::RequestTest::test_headers",
"tests/test_http_request.py::RequestTest::test_immutable_attributes",
"tests/test_http_request.py::RequestTest::test_init",
"tests/test_http_request.py::RequestTest::test_method_always_str",
"tests/test_http_request.py::RequestTest::test_replace",
"tests/test_http_request.py::RequestTest::test_url",
"tests/test_http_request.py::RequestTest::test_url_no_scheme",
"tests/test_http_request.py::RequestTest::test_url_quoting",
"tests/test_http_request.py::FormRequestTest::test_ajax_url",
"tests/test_http_request.py::FormRequestTest::test_body",
"tests/test_http_request.py::FormRequestTest::test_copy",
"tests/test_http_request.py::FormRequestTest::test_copy_inherited_classes",
"tests/test_http_request.py::FormRequestTest::test_custom_encoding",
"tests/test_http_request.py::FormRequestTest::test_empty_formdata",
"tests/test_http_request.py::FormRequestTest::test_eq",
"tests/test_http_request.py::FormRequestTest::test_from_response_ambiguous_clickdata",
"tests/test_http_request.py::FormRequestTest::test_from_response_errors_formnumber",
"tests/test_http_request.py::FormRequestTest::test_from_response_errors_noform",
"tests/test_http_request.py::FormRequestTest::test_from_response_extra_headers",
"tests/test_http_request.py::FormRequestTest::test_from_response_formid_errors_formnumber",
"tests/test_http_request.py::FormRequestTest::test_from_response_formname_errors_formnumber",
"tests/test_http_request.py::FormRequestTest::test_from_response_invalid_nr_index_clickdata",
"tests/test_http_request.py::FormRequestTest::test_from_response_non_matching_clickdata",
"tests/test_http_request.py::FormRequestTest::test_from_response_override_method",
"tests/test_http_request.py::FormRequestTest::test_from_response_override_url",
"tests/test_http_request.py::FormRequestTest::test_headers",
"tests/test_http_request.py::FormRequestTest::test_immutable_attributes",
"tests/test_http_request.py::FormRequestTest::test_init",
"tests/test_http_request.py::FormRequestTest::test_method_always_str",
"tests/test_http_request.py::FormRequestTest::test_multi_key_values",
"tests/test_http_request.py::FormRequestTest::test_replace",
"tests/test_http_request.py::FormRequestTest::test_url",
"tests/test_http_request.py::FormRequestTest::test_url_no_scheme",
"tests/test_http_request.py::FormRequestTest::test_url_quoting",
"tests/test_http_request.py::XmlRpcRequestTest::test_ajax_url",
"tests/test_http_request.py::XmlRpcRequestTest::test_body",
"tests/test_http_request.py::XmlRpcRequestTest::test_copy",
"tests/test_http_request.py::XmlRpcRequestTest::test_copy_inherited_classes",
"tests/test_http_request.py::XmlRpcRequestTest::test_eq",
"tests/test_http_request.py::XmlRpcRequestTest::test_headers",
"tests/test_http_request.py::XmlRpcRequestTest::test_immutable_attributes",
"tests/test_http_request.py::XmlRpcRequestTest::test_init",
"tests/test_http_request.py::XmlRpcRequestTest::test_method_always_str",
"tests/test_http_request.py::XmlRpcRequestTest::test_replace",
"tests/test_http_request.py::XmlRpcRequestTest::test_url",
"tests/test_http_request.py::XmlRpcRequestTest::test_url_no_scheme",
"tests/test_http_request.py::XmlRpcRequestTest::test_url_quoting",
"tests/test_http_request.py::XmlRpcRequestTest::test_xmlrpc_dumps"
]
| []
| BSD 3-Clause "New" or "Revised" License | 279 | [
"scrapy/http/request/form.py"
]
| [
"scrapy/http/request/form.py"
]
|
|
joblib__joblib-262 | aeb018af545e025654176ffe9ae876bda1bddb89 | 2015-10-29 12:40:19 | 40341615cc2600675ce7457d9128fb030f6f89fa | lesteve: > Maybe I'm wrong, but why not adding here a condition "not obj.dtype.hasobject" and keep the previous behavior (using the except) ?
We could do that; I am just slightly worried that this try/except is not a very clean way of hiding failures. A few weird behaviours we recently saw:
* for Python 2.6 with @aabadie's PR, using `with gzip.GzipFile(...) as f` fails when you exit the context manager, meaning that you actually write a valid .z auxiliary file but then still pickle the array inside the master file because you encountered an error. This auxiliary file is never deleted.
* for Python 3, when trying to dump an array with dtype object, you encounter an error quite early on but still write an invalid .z auxiliary file containing just the header. This auxiliary file is never deleted.
* for Python 2.7, when trying to dump an array with dtype object, you don't even encounter an error and you end up writing str(array). You only see the problem at load time.
@aabadie as an aside, just a gentle reminder: it's always better to comment in the "Files changed" tab rather than on individual commits.
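For reference, a tiny self-contained check of the condition being discussed (plain NumPy, not joblib's internals):
```python
import numpy as np

plain = np.arange(5)
mixed = np.asarray([1, 'abc', {'a': 1}], dtype=object)

# dtype.hasobject flags arrays whose buffer holds Python objects;
# such arrays cannot be rebuilt from raw bytes, so the fast .npy/.z
# path must be skipped and regular pickling used instead.
print(plain.dtype.hasobject)  # False
print(mixed.dtype.hasobject)  # True
```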
| diff --git a/joblib/numpy_pickle.py b/joblib/numpy_pickle.py
index 6d727d6..f2d43eb 100644
--- a/joblib/numpy_pickle.py
+++ b/joblib/numpy_pickle.py
@@ -215,7 +215,7 @@ class NumpyPickler(Pickler):
else:
self.file = BytesIO()
# Count the number of npy files that we have created:
- self._npy_counter = 0
+ self._npy_counter = 1
# By default we want a pickle protocol that only changes with
# the major python version and not the minor one
if protocol is None:
@@ -257,8 +257,8 @@ class NumpyPickler(Pickler):
files, rather than pickling them. Of course, this is a
total abuse of the Pickler class.
"""
- if self.np is not None and type(obj) in (self.np.ndarray,
- self.np.matrix, self.np.memmap):
+ if (self.np is not None and type(obj) in
+ (self.np.ndarray, self.np.matrix, self.np.memmap)):
size = obj.size * obj.itemsize
if self.compress and size < self.cache_size * _MEGA:
# When compressing, as we are not writing directly to the
@@ -267,19 +267,21 @@ class NumpyPickler(Pickler):
# Pickling doesn't work with memmaped arrays
obj = self.np.asarray(obj)
return Pickler.save(self, obj)
- self._npy_counter += 1
- try:
- filename = '%s_%02i.npy' % (self._filename,
- self._npy_counter)
- # This converts the array in a container
- obj, filename = self._write_array(obj, filename)
- self._filenames.append(filename)
- except:
- self._npy_counter -= 1
- # XXX: We should have a logging mechanism
- print('Failed to save %s to .npy file:\n%s' % (
+
+ if not obj.dtype.hasobject:
+ try:
+ filename = '%s_%02i.npy' % (self._filename,
+ self._npy_counter)
+ # This converts the array in a container
+ obj, filename = self._write_array(obj, filename)
+ self._filenames.append(filename)
+ self._npy_counter += 1
+ except Exception:
+ # XXX: We should have a logging mechanism
+ print('Failed to save %s to .npy file:\n%s' % (
type(obj),
traceback.format_exc()))
+
return Pickler.save(self, obj)
def close(self):
| joblib.dump bug with too large object-type arrays
issue from
https://github.com/scikit-learn/scikit-learn/issues/4889
The code
```python
import joblib
import os
import numpy as np
from gzip import GzipFile
from io import BytesIO
from urllib2 import urlopen
from os.path import join
from sklearn.datasets import get_data_home
URL10 = ('http://archive.ics.uci.edu/ml/'
'machine-learning-databases/kddcup99-mld/kddcup.data_10_percent.gz')
data_home = get_data_home()
kddcup_dir = join(data_home, "test")
samples_path = join(kddcup_dir, "samples")
os.makedirs(kddcup_dir)
f = BytesIO(urlopen(URL10).read())
file = GzipFile(fileobj=f, mode='r')
X = []
for line in file.readlines():
X.append(line.replace('\n', '').split(','))
file.close()
X = np.asarray(X, dtype=object)
joblib.dump(X, samples_path, compress=9)
X = joblib.load(samples_path)
```
More precisely, it works if X has fewer than 300000 rows or if X is not of dtype `object`:
```python
Y = X[:300000,:] ### works
joblib.dump(Y, samples_path, compress=9)
Y = joblib.load(samples_path)
###
Y = X[:400000,:].astype(str) ### works
joblib.dump(Y, samples_path, compress=9)
Y = joblib.load(samples_path)
###
Y = X[:400000,:] ### doesn't work
joblib.dump(Y, samples_path, compress=9)
Y = joblib.load(samples_path)
```
The error raised by joblib.load is
```
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-32-25fe17420f08> in <module>()
----> 1 Y = joblib.load(samples_path)
/usr/lib/python2.7/dist-packages/joblib/numpy_pickle.pyc in load(filename, mmap_mode)
422
423 try:
--> 424 obj = unpickler.load()
425 finally:
426 if hasattr(unpickler, 'file_handle'):
/usr/lib/python2.7/pickle.pyc in load(self)
856 while 1:
857 key = read(1)
--> 858 dispatch[key](self)
859 except _Stop, stopinst:
860 return stopinst.value
/usr/lib/python2.7/dist-packages/joblib/numpy_pickle.pyc in load_build(self)
288 "but numpy didn't import correctly")
289 nd_array_wrapper = self.stack.pop()
--> 290 array = nd_array_wrapper.read(self)
291 self.stack.append(array)
292
/usr/lib/python2.7/dist-packages/joblib/numpy_pickle.pyc in read(self, unpickler)
158 array = unpickler.np.core.multiarray._reconstruct(*self.init_args)
159 with open(filename, 'rb') as f:
--> 160 data = read_zfile(f)
161 state = self.state + (data,)
162 array.__setstate__(state)
/usr/lib/python2.7/dist-packages/joblib/numpy_pickle.pyc in read_zfile(file_handle)
69 assert len(data) == length, (
70 "Incorrect data length while decompressing %s."
---> 71 "The file could be corrupted." % file_handle)
72 return data
73
AssertionError: Incorrect data length while decompressing <open file '/home/nicolas/scikit_learn_data/test/samples_01.npy.z', mode 'rb' at 0x7efcea714db0>.The file could be corrupted.
```
@lesteve what do you think? | joblib/joblib | diff --git a/joblib/test/test_numpy_pickle.py b/joblib/test/test_numpy_pickle.py
index ab80354..c6b8960 100644
--- a/joblib/test/test_numpy_pickle.py
+++ b/joblib/test/test_numpy_pickle.py
@@ -260,6 +260,7 @@ def test_z_file():
def test_compressed_pickle_dump_and_load():
expected_list = [np.arange(5, dtype=np.int64),
np.arange(5, dtype=np.float64),
+ np.array([1, 'abc', {'a': 1, 'b': 2}]),
# .tostring actually returns bytes and is a
# compatibility alias for .tobytes which was
# added in 1.9.0
@@ -269,17 +270,23 @@ def test_compressed_pickle_dump_and_load():
with tempfile.NamedTemporaryFile(suffix='.gz', dir=env['dir']) as f:
fname = f.name
- try:
- numpy_pickle.dump(expected_list, fname, compress=1)
- result_list = numpy_pickle.load(fname)
- for result, expected in zip(result_list, expected_list):
- if isinstance(expected, np.ndarray):
- nose.tools.assert_equal(result.dtype, expected.dtype)
- np.testing.assert_equal(result, expected)
- else:
- nose.tools.assert_equal(result, expected)
- finally:
- os.remove(fname)
+ # Need to test both code branches (whether array size is greater
+ # or smaller than cache_size)
+ for cache_size in [0, 1e9]:
+ try:
+ dumped_filenames = numpy_pickle.dump(
+ expected_list, fname, compress=1,
+ cache_size=cache_size)
+ result_list = numpy_pickle.load(fname)
+ for result, expected in zip(result_list, expected_list):
+ if isinstance(expected, np.ndarray):
+ nose.tools.assert_equal(result.dtype, expected.dtype)
+ np.testing.assert_equal(result, expected)
+ else:
+ nose.tools.assert_equal(result, expected)
+ finally:
+ for fn in dumped_filenames:
+ os.remove(fn)
def _check_pickle(filename, expected_list):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 1
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "",
"pip_packages": [
"nose",
"coverage",
"numpy>=1.6.1",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/joblib/joblib.git@aeb018af545e025654176ffe9ae876bda1bddb89#egg=joblib
nose==1.3.7
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: joblib
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/joblib
| [
"joblib/test/test_numpy_pickle.py::test_memmap_persistence_mixed_dtypes"
]
| [
"joblib/test/test_numpy_pickle.py::test_joblib_pickle_across_python_versions"
]
| [
"joblib/test/test_numpy_pickle.py::test_value_error",
"joblib/test/test_numpy_pickle.py::test_numpy_persistence",
"joblib/test/test_numpy_pickle.py::test_memmap_persistence",
"joblib/test/test_numpy_pickle.py::test_masked_array_persistence",
"joblib/test/test_numpy_pickle.py::test_z_file",
"joblib/test/test_numpy_pickle.py::test_compressed_pickle_dump_and_load",
"joblib/test/test_numpy_pickle.py::test_numpy_subclass"
]
| []
| BSD 3-Clause "New" or "Revised" License | 280 | [
"joblib/numpy_pickle.py"
]
| [
"joblib/numpy_pickle.py"
]
|
docker__docker-py-832 | 47ab89ec2bd3bddf1221b856ffbaff333edeabb4 | 2015-10-29 15:17:54 | 1ca2bc58f0cf2e2cdda2734395bd3e7ad9b178bf | diff --git a/docker/auth/auth.py b/docker/auth/auth.py
index 2ed894ee..416dd7c4 100644
--- a/docker/auth/auth.py
+++ b/docker/auth/auth.py
@@ -96,7 +96,7 @@ def decode_auth(auth):
auth = auth.encode('ascii')
s = base64.b64decode(auth)
login, pwd = s.split(b':', 1)
- return login.decode('ascii'), pwd.decode('ascii')
+ return login.decode('utf8'), pwd.decode('utf8')
def encode_header(auth):
| decode_auth function does not handle utf-8 logins or password
Hi,
I have found that the function **decode_auth** (line 96, [file](https://github.com/docker/docker-py/blob/master/docker/auth/auth.py)) fails when decoding UTF-8 passwords from the .dockercfg file, which results in **load_config** returning an empty config.
I have checked, and Docker Hub can handle UTF-8 passwords; this code demonstrates it:
```python
# coding=utf-8
from docker import Client
cred = { 'username': <user>, 'password': <utf-8 password> }
c = Client(base_url='unix://var/run/docker.sock')
res = c.pull(repository='<private container>', tag='latest', auth_config=cred)
print(res)
```
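For reference, the failing decode step can be reproduced with just the standard library — a minimal sketch using made-up credentials containing non-ASCII characters:
```python
import base64

# Illustrative credential pair; the accented characters stand in for
# any UTF-8 login/password stored in .dockercfg.
auth = base64.b64encode('sakuyaæ:izayoiæ'.encode('utf-8'))
s = base64.b64decode(auth)
login, pwd = s.split(b':', 1)

try:
    login.decode('ascii')          # what decode_auth used to do
except UnicodeDecodeError as exc:
    print('ascii decode fails:', exc)

print(login.decode('utf8'))        # the fix: 'sakuyaæ'
```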
Thank you | docker/docker-py | diff --git a/tests/unit/auth_test.py b/tests/unit/auth_test.py
index 9f4d439b..67830381 100644
--- a/tests/unit/auth_test.py
+++ b/tests/unit/auth_test.py
@@ -316,3 +316,33 @@ class LoadConfigTest(base.Cleanup, base.BaseTestCase):
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], '[email protected]')
self.assertEqual(cfg.get('auth'), None)
+
+ def test_load_config_custom_config_env_utf8(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+
+ dockercfg_path = os.path.join(folder, 'config.json')
+ registry = 'https://your.private.registry.io'
+ auth_ = base64.b64encode(
+ b'sakuya\xc3\xa6:izayoi\xc3\xa6').decode('ascii')
+ config = {
+ 'auths': {
+ registry: {
+ 'auth': '{0}'.format(auth_),
+ 'email': '[email protected]'
+ }
+ }
+ }
+
+ with open(dockercfg_path, 'w') as f:
+ json.dump(config, f)
+
+ with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
+ cfg = auth.load_config(None)
+ assert registry in cfg
+ self.assertNotEqual(cfg[registry], None)
+ cfg = cfg[registry]
+ self.assertEqual(cfg['username'], b'sakuya\xc3\xa6'.decode('utf8'))
+ self.assertEqual(cfg['password'], b'izayoi\xc3\xa6'.decode('utf8'))
+ self.assertEqual(cfg['email'], '[email protected]')
+ self.assertEqual(cfg.get('auth'), None)
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 1.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi @ file:///croot/certifi_1671487769961/work/certifi
coverage==7.2.7
-e git+https://github.com/docker/docker-py.git@47ab89ec2bd3bddf1221b856ffbaff333edeabb4#egg=docker_py
exceptiongroup==1.2.2
execnet==2.0.2
importlib-metadata==6.7.0
iniconfig==2.0.0
packaging==24.0
pluggy==1.2.0
pytest==7.4.4
pytest-cov==4.1.0
pytest-mock==3.11.1
pytest-xdist==3.5.0
requests==2.5.3
six==1.17.0
tomli==2.0.1
typing_extensions==4.7.1
websocket-client==0.32.0
zipp==3.15.0
| name: docker-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.2.7
- exceptiongroup==1.2.2
- execnet==2.0.2
- importlib-metadata==6.7.0
- iniconfig==2.0.0
- packaging==24.0
- pluggy==1.2.0
- pytest==7.4.4
- pytest-cov==4.1.0
- pytest-mock==3.11.1
- pytest-xdist==3.5.0
- requests==2.5.3
- six==1.17.0
- tomli==2.0.1
- typing-extensions==4.7.1
- websocket-client==0.32.0
- zipp==3.15.0
prefix: /opt/conda/envs/docker-py
| [
"tests/unit/auth_test.py::LoadConfigTest::test_load_config_custom_config_env_utf8"
]
| []
| [
"tests/unit/auth_test.py::RegressionTest::test_803_urlsafe_encode",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_default_explicit_none",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_default_registry",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_fully_explicit",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_hostname_only",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_legacy_config",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_no_match",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_no_path",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_no_path_trailing_slash",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_no_path_wrong_insecure_proto",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_no_path_wrong_secure_proto",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_no_protocol",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_path_wrong_proto",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_registry_and_auth_hub_image",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_registry_and_auth_library_image",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_registry_and_auth_private_registry",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_registry_and_auth_unauthenticated_registry",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_repository_name_hub_image",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_repository_name_hub_library_image",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_repository_name_localhost",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_repository_name_localhost_with_username",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_repository_name_no_dots_but_port",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_repository_name_no_dots_but_port_and_username",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_repository_name_private_registry",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_repository_name_private_registry_with_port",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_repository_name_private_registry_with_username",
"tests/unit/auth_test.py::LoadConfigTest::test_load_config",
"tests/unit/auth_test.py::LoadConfigTest::test_load_config_custom_config_env",
"tests/unit/auth_test.py::LoadConfigTest::test_load_config_custom_config_env_with_auths",
"tests/unit/auth_test.py::LoadConfigTest::test_load_config_no_file",
"tests/unit/auth_test.py::LoadConfigTest::test_load_config_with_random_name"
]
| []
| Apache License 2.0 | 281 | [
"docker/auth/auth.py"
]
| [
"docker/auth/auth.py"
]
|
|
jonathanj__eliottree-40 | 4dae7890294edb4d845b00a8bb310bc08c555352 | 2015-10-30 07:46:22 | 26748c5e640b6d25d71eefad95920c41dab0f8db | diff --git a/eliottree/_cli.py b/eliottree/_cli.py
index 6b0d24e..c333d8c 100644
--- a/eliottree/_cli.py
+++ b/eliottree/_cli.py
@@ -2,36 +2,20 @@ import argparse
import codecs
import json
import sys
-from datetime import datetime
from itertools import chain
from six import PY3
from six.moves import map
-from toolz import compose
from eliottree import (
Tree, filter_by_jmespath, filter_by_uuid, render_task_nodes)
-def _convert_timestamp(task):
- """
- Convert a ``timestamp`` key to a ``datetime``.
- """
- task['timestamp'] = datetime.utcfromtimestamp(task['timestamp'])
- return task
-
-
-def build_task_nodes(files=None, select=None, task_uuid=None,
- human_readable=True):
+def build_task_nodes(files=None, select=None, task_uuid=None):
"""
Build the task nodes given some input data, query criteria and formatting
options.
"""
- def task_transformers():
- if human_readable:
- yield _convert_timestamp
- yield json.loads
-
def filter_funcs():
if select is not None:
for query in select:
@@ -47,8 +31,7 @@ def build_task_nodes(files=None, select=None, task_uuid=None,
files = [codecs.getreader('utf-8')(sys.stdin)]
tree = Tree()
- tasks = map(compose(*task_transformers()),
- chain.from_iterable(files))
+ tasks = map(json.loads, chain.from_iterable(files))
return tree.nodes(tree.merge_tasks(tasks, filter_funcs()))
@@ -65,13 +48,13 @@ def display_task_tree(args):
nodes = build_task_nodes(
files=args.files,
select=args.select,
- task_uuid=args.task_uuid,
- human_readable=args.human_readable)
+ task_uuid=args.task_uuid)
render_task_nodes(
write=write,
nodes=nodes,
ignored_task_keys=set(args.ignored_task_keys) or None,
- field_limit=args.field_limit)
+ field_limit=args.field_limit,
+ human_readable=args.human_readable)
def main():
diff --git a/eliottree/render.py b/eliottree/render.py
index 4876a3e..626683c 100644
--- a/eliottree/render.py
+++ b/eliottree/render.py
@@ -8,20 +8,42 @@ DEFAULT_IGNORED_KEYS = set([
u'message_type'])
-def _format_value(value):
+def _format_value_raw(value):
"""
- Format a value for a task tree.
+ Format a value.
"""
if isinstance(value, datetime):
if PY3:
return value.isoformat(' ')
else:
return value.isoformat(' ').decode('ascii')
- elif isinstance(value, text_type):
+ return None
+
+
+def _format_value_hint(value, hint):
+ """
+ Format a value given a rendering hint.
+ """
+ if hint == u'timestamp':
+ return _format_value_raw(datetime.utcfromtimestamp(value))
+ return None
+
+
+def _format_value(value, field_hint=None, human_readable=False):
+ """
+ Format a value for a task tree.
+ """
+ if isinstance(value, text_type):
return value
elif isinstance(value, binary_type):
# We guess bytes values are UTF-8.
return value.decode('utf-8', 'replace')
+ if human_readable:
+ formatted = _format_value_raw(value)
+ if formatted is None:
+ formatted = _format_value_hint(value, field_hint)
+ if formatted is not None:
+ return formatted
result = repr(value)
if isinstance(result, binary_type):
result = result.decode('utf-8', 'replace')
@@ -48,7 +70,7 @@ def _truncate_value(value, limit):
return value
-def _render_task(write, task, ignored_task_keys, field_limit):
+def _render_task(write, task, ignored_task_keys, field_limit, human_readable):
"""
Render a single ``_TaskNode`` as an ``ASCII`` tree.
@@ -64,6 +86,9 @@ def _render_task(write, task, ignored_task_keys, field_limit):
:type ignored_task_keys: ``set`` of ``text_type``
:param ignored_task_keys: Set of task key names to ignore.
+
+ :type human_readable: ``bool``
+ :param human_readable: Should this be rendered as human-readable?
"""
_write = _indented_write(write)
num_items = len(task)
@@ -78,9 +103,12 @@ def _render_task(write, task, ignored_task_keys, field_limit):
_render_task(write=_write,
task=value,
ignored_task_keys={},
- field_limit=field_limit)
+ field_limit=field_limit,
+ human_readable=human_readable)
else:
- _value = _format_value(value)
+ _value = _format_value(value,
+ field_hint=key,
+ human_readable=human_readable)
if field_limit:
first_line = _truncate_value(_value, field_limit)
else:
@@ -96,7 +124,8 @@ def _render_task(write, task, ignored_task_keys, field_limit):
_write(line + '\n')
-def _render_task_node(write, node, field_limit, ignored_task_keys):
+def _render_task_node(write, node, field_limit, ignored_task_keys,
+ human_readable):
"""
Render a single ``_TaskNode`` as an ``ASCII`` tree.
@@ -112,6 +141,9 @@ def _render_task_node(write, node, field_limit, ignored_task_keys):
:type ignored_task_keys: ``set`` of ``text_type``
:param ignored_task_keys: Set of task key names to ignore.
+
+ :type human_readable: ``bool``
+ :param human_readable: Should this be rendered as human-readable?
"""
_child_write = _indented_write(write)
write(
@@ -120,17 +152,20 @@ def _render_task_node(write, node, field_limit, ignored_task_keys):
write=_child_write,
task=node.task,
field_limit=field_limit,
- ignored_task_keys=ignored_task_keys)
+ ignored_task_keys=ignored_task_keys,
+ human_readable=human_readable)
for child in node.children():
_render_task_node(
write=_child_write,
node=child,
field_limit=field_limit,
- ignored_task_keys=ignored_task_keys)
+ ignored_task_keys=ignored_task_keys,
+ human_readable=human_readable)
-def render_task_nodes(write, nodes, field_limit, ignored_task_keys=None):
+def render_task_nodes(write, nodes, field_limit, ignored_task_keys=None,
+ human_readable=False):
"""
Render a tree of task nodes as an ``ASCII`` tree.
@@ -147,6 +182,9 @@ def render_task_nodes(write, nodes, field_limit, ignored_task_keys=None):
:type ignored_task_keys: ``set`` of ``text_type``
:param ignored_task_keys: Set of task key names to ignore.
+
+ :type human_readable: ``bool``
+ :param human_readable: Should this be rendered as human-readable?
"""
if ignored_task_keys is None:
ignored_task_keys = DEFAULT_IGNORED_KEYS
@@ -156,7 +194,8 @@ def render_task_nodes(write, nodes, field_limit, ignored_task_keys=None):
write=write,
node=node,
field_limit=field_limit,
- ignored_task_keys=ignored_task_keys)
+ ignored_task_keys=ignored_task_keys,
+ human_readable=human_readable)
write('\n')
diff --git a/setup.py b/setup.py
index e54a48d..9767156 100644
--- a/setup.py
+++ b/setup.py
@@ -29,7 +29,6 @@ setup(
install_requires=[
"six>=1.9.0",
"jmespath>=0.7.1",
- "toolz>=0.7.2",
],
extras_require={
"dev": ["pytest>=2.7.1", "testtools>=1.8.0"],
| Human-readable values should only be formatted when rendered instead of modifying the tree data
The problem with modifying the tree data is that it makes it very difficult to write queries against it if eliot-tree is changing it in undisclosed ways to suit its renderer. | jonathanj/eliottree | diff --git a/eliottree/test/test_render.py b/eliottree/test/test_render.py
index 81d3128..81dea43 100644
--- a/eliottree/test/test_render.py
+++ b/eliottree/test/test_render.py
@@ -15,14 +15,14 @@ class FormatValueTests(TestCase):
"""
Tests for ``eliottree.render._format_value``.
"""
- def test_datetime(self):
+ def test_datetime_human_readable(self):
"""
Format ``datetime`` values as ISO8601.
"""
now = datetime(2015, 6, 6, 22, 57, 12)
self.assertThat(
- _format_value(now),
- Equals('2015-06-06 22:57:12'))
+ _format_value(now, human_readable=True),
+ Equals(u'2015-06-06 22:57:12'))
def test_unicode(self):
"""
@@ -59,6 +59,16 @@ class FormatValueTests(TestCase):
_format_value({'a': u('\N{SNOWMAN}')}),
Equals("{'a': u'\\u2603'}"))
+ def test_timestamp_hint(self):
+ """
+ Format "timestamp" hinted data as timestamps.
+ """
+ # datetime(2015, 6, 6, 22, 57, 12)
+ now = 1433631432
+ self.assertThat(
+ _format_value(now, field_hint='timestamp', human_readable=True),
+ Equals(u'2015-06-06 22:57:12'))
+
class RenderTaskNodesTests(TestCase):
"""
@@ -85,6 +95,28 @@ class RenderTaskNodesTests(TestCase):
' +-- app:action@2/succeeded\n'
' `-- timestamp: 1425356800\n\n'))
+ def test_tasks_human_readable(self):
+ """
+ Render two tasks of sequential levels, by default most standard Eliot
+ task keys are ignored, values are formatted to be human readable.
+ """
+ fd = StringIO()
+ tree = Tree()
+ tree.merge_tasks([action_task, action_task_end])
+ render_task_nodes(
+ write=fd.write,
+ nodes=tree.nodes(),
+ field_limit=0,
+ human_readable=True)
+ self.assertThat(
+ fd.getvalue(),
+ Equals(
+ 'f3a32bb3-ea6b-457c-aa99-08a3d0491ab4\n'
+ '+-- app:action@1/started\n'
+ ' `-- timestamp: 2015-03-03 04:26:40\n'
+ ' +-- app:action@2/succeeded\n'
+ ' `-- timestamp: 2015-03-03 04:26:40\n\n'))
+
def test_multiline_field(self):
"""
When no field limit is specified for task values, multiple lines are
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 3
} | 15.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": null,
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/jonathanj/eliottree.git@4dae7890294edb4d845b00a8bb310bc08c555352#egg=eliot_tree
exceptiongroup==1.2.2
iniconfig==2.1.0
jmespath==1.0.1
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
six==1.17.0
testtools==2.7.2
tomli==2.2.1
toolz==1.0.0
| name: eliottree
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- jmespath==1.0.1
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- six==1.17.0
- testtools==2.7.2
- tomli==2.2.1
- toolz==1.0.0
prefix: /opt/conda/envs/eliottree
| [
"eliottree/test/test_render.py::FormatValueTests::test_datetime_human_readable",
"eliottree/test/test_render.py::FormatValueTests::test_timestamp_hint",
"eliottree/test/test_render.py::RenderTaskNodesTests::test_tasks_human_readable"
]
| []
| [
"eliottree/test/test_render.py::FormatValueTests::test_other",
"eliottree/test/test_render.py::FormatValueTests::test_str",
"eliottree/test/test_render.py::FormatValueTests::test_unicode",
"eliottree/test/test_render.py::RenderTaskNodesTests::test_dict_data",
"eliottree/test/test_render.py::RenderTaskNodesTests::test_field_limit",
"eliottree/test/test_render.py::RenderTaskNodesTests::test_ignored_keys",
"eliottree/test/test_render.py::RenderTaskNodesTests::test_multiline_field",
"eliottree/test/test_render.py::RenderTaskNodesTests::test_multiline_field_limit",
"eliottree/test/test_render.py::RenderTaskNodesTests::test_nested",
"eliottree/test/test_render.py::RenderTaskNodesTests::test_task_data",
"eliottree/test/test_render.py::RenderTaskNodesTests::test_tasks"
]
| []
| MIT License | 282 | [
"setup.py",
"eliottree/render.py",
"eliottree/_cli.py"
]
| [
"setup.py",
"eliottree/render.py",
"eliottree/_cli.py"
]
|
|
sprymix__csscompressor-4 | 153ab1bb6cd925dc73a314af74db874a3314010f | 2015-11-01 06:16:44 | bec3e582cb5ab7182a0ca08ba381e491b94ed10c | diff --git a/csscompressor/__init__.py b/csscompressor/__init__.py
index 1b41119..1233cd3 100644
--- a/csscompressor/__init__.py
+++ b/csscompressor/__init__.py
@@ -56,9 +56,12 @@ _space_after_re = re.compile(r'([!{}:;>+\(\[,])\s+')
_semi_re = re.compile(r';+}')
_zero_fmt_spec_re = re.compile(r'''(\s|:|\(|,)(?:0?\.)?0
- (?:px|em|%|in|cm|mm|pc|pt|ex|deg|g?rad|m?s|k?hz)''',
+ (?:px|em|%|in|cm|mm|pc|pt|ex|deg|g?rad|k?hz)''',
re.I | re.X)
+_zero_req_unit_re = re.compile(r'''(\s|:|\(|,)(?:0?\.)?0
+ (m?s)''', re.I | re.X)
+
_bg_pos_re = re.compile(r'''(background-position|webkit-mask-position|transform-origin|
webkit-transform-origin|moz-transform-origin|o-transform-origin|
ms-transform-origin):0(;|})''', re.I | re.X)
@@ -377,6 +380,9 @@ def _compress(css, max_linelen=0):
# Replace 0(px,em,%) with 0.
css = _zero_fmt_spec_re.sub(lambda match: match.group(1) + '0', css)
+ # Replace 0.0(m,ms) or .0(m,ms) with 0(m,ms)
+ css = _zero_req_unit_re.sub(lambda match: match.group(1) + '0' + match.group(2), css)
+
# Replace 0 0 0 0; with 0.
css = _quad_0_re.sub(r':0\1', css)
css = _trip_0_re.sub(r':0\1', css)
| Omitting the unit on a time value is invalid
Input: `csscompressor.compress("transition: background-color 1s linear 0ms;")`
Expected output: `'transition:background-color 1s linear 0ms;'`
Actual output: `'transition:background-color 1s linear 0;'`
According to the [MDN page on \<time>](https://developer.mozilla.org/en-US/docs/Web/CSS/time), omitting the unit is only valid for \<length>, so when I use csscompressor, the declaration containing the 0ms or 0s is ignored by both Firefox and Chrome.
The same applies to `<frequency>`, and probably to a few of the other value types. Omitting the unit used to be valid in CSS2 but breaks in CSS3, unfortunately.
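A minimal sketch of the split the eventual fix makes — one pattern for units a zero may legally drop, and a separate one that keeps the time unit (the regexes here are simplified versions of the ones in the patch above):
```python
import re

# Zero values may drop these units...
zero_fmt_spec = re.compile(r'(\s|:|\(|,)(?:0?\.)?0(?:px|em|%)', re.I)
# ...but time units must be kept in CSS3.
zero_req_unit = re.compile(r'(\s|:|\(|,)(?:0?\.)?0(m?s)', re.I)

css = 'transition: background-color 1s linear 0.0ms;margin:0.0px;'
css = zero_fmt_spec.sub(lambda m: m.group(1) + '0', css)
css = zero_req_unit.sub(lambda m: m.group(1) + '0' + m.group(2), css)
print(css)  # transition: background-color 1s linear 0ms;margin:0;
```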
This is a fairly simple fix (removing the `m?s` from `_zero_fmt_spec_re`), and I'll create a PR and some tests. | sprymix/csscompressor | diff --git a/csscompressor/tests/test_yui.py b/csscompressor/tests/test_yui.py
index a0d6715..5d56d24 100644
--- a/csscompressor/tests/test_yui.py
+++ b/csscompressor/tests/test_yui.py
@@ -1458,7 +1458,7 @@ serve! */"""
"""
- output = """a{margin:0;_padding-top:0;background-position:0 0;padding:0;transition:opacity 0;transition-delay:0;transform:rotate3d(0,0,0);pitch:0;pitch:0}"""
+ output = """a{margin:0;_padding-top:0;background-position:0 0;padding:0;transition:opacity 0s;transition-delay:0ms;transform:rotate3d(0,0,0);pitch:0;pitch:0}"""
self._test(input, output)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/sprymix/csscompressor.git@153ab1bb6cd925dc73a314af74db874a3314010f#egg=csscompressor
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: csscompressor
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/csscompressor
| [
"csscompressor/tests/test_yui.py::TestYUI::test_yui_zeros"
]
| []
| [
"csscompressor/tests/test_yui.py::TestYUI::test_yui_background_position",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_border_none",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_box_model_hack",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_bug2527974",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_bug2527991",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_bug2527998",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_bug2528034",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_charset_media",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_color_keyword",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_color_simple",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_color",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_comment",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_concat_charset",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_dataurl_base64_doublequotes",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_dataurl_base64_eof",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_dataurl_base64_linebreakindata",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_dataurl_base64_noquotes",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_dataurl_base64_singlequotes",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_dataurl_base64_twourls",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_dataurl_dbquote_font",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_dataurl_nonbase64_doublequotes",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_dataurl_nonbase64_noquotes",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_dataurl_noquote_multiline_font",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_dataurl_realdata_doublequotes",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_dataurl_realdata_noquotes",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_dataurl_realdata_singlequotes",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_dataurl_realdata_yuiapp",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_dataurl_singlequote_font",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_decimals",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_dollar_header",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_font_face",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_ie5mac",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_lowercasing",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_media_empty_class",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_media_multi",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_media_test",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_old_ie_filter_matrix",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_opacity_filter",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_opera_pixel_ratio",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_preserve_case",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_preserve_important",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_preserve_new_line",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_preserve_strings",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_pseudo_first",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_pseudo",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_special_comments",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_star_underscore_hacks",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_string_in_comment",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_webkit_transform",
"csscompressor/tests/test_yui.py::TestYUI::test_yui_dataurl_nonbase64_singlequotes"
]
| []
| BSD | 284 | [
"csscompressor/__init__.py"
]
| [
"csscompressor/__init__.py"
]
|
|
tornadoweb__tornado-1576 | 1ecc7386da17df3f1dfd100845355f7211119a62 | 2015-11-02 23:03:13 | c20c44d776d3bd9b2c002db5aaa9e3b5284a3043 | diff --git a/tornado/options.py b/tornado/options.py
index ba16b1a7..bdb5baa0 100644
--- a/tornado/options.py
+++ b/tornado/options.py
@@ -132,8 +132,10 @@ class OptionParser(object):
return name in self._options
def __getitem__(self, name):
- name = self._normalize_name(name)
- return self._options[name].value()
+ return self.__getattr__(name)
+
+ def __setitem__(self, name, value):
+ return self.__setattr__(name, value)
def items(self):
"""A sequence of (name, value) pairs.
| Options should support setitem
From http://stackoverflow.com/questions/33411269/attributeerror-in-python-tornado-to-configure-log-into-a-file/33417289
Options currently overrides getattr, setattr, and getitem. Item and attribute syntax should be symmetric, so we need a setitem override to match setattr. | tornadoweb/tornado | diff --git a/tornado/test/options_test.py b/tornado/test/options_test.py
index c32184bb..2f2384b2 100644
--- a/tornado/test/options_test.py
+++ b/tornado/test/options_test.py
@@ -131,6 +131,12 @@ class OptionsTest(unittest.TestCase):
options = self._sample_options()
self.assertEqual(1, options['a'])
+ def test_setitem(self):
+ options = OptionParser()
+ options.define('foo', default=1, type=int)
+ options['foo'] = 2
+ self.assertEqual(options['foo'], 2)
+
def test_items(self):
options = self._sample_options()
# OptionParsers always define 'help'.
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 1
} | 4.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"futures",
"mock",
"monotonic",
"trollius",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
futures==2.2.0
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
mock==5.2.0
monotonic==1.6
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
six==1.17.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
-e git+https://github.com/tornadoweb/tornado.git@1ecc7386da17df3f1dfd100845355f7211119a62#egg=tornado
trollius==2.1.post2
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: tornado
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- futures==2.2.0
- mock==5.2.0
- monotonic==1.6
- six==1.17.0
- trollius==2.1.post2
prefix: /opt/conda/envs/tornado
| [
"tornado/test/options_test.py::OptionsTest::test_setitem"
]
| []
| [
"tornado/test/options_test.py::OptionsTest::test_as_dict",
"tornado/test/options_test.py::OptionsTest::test_dash_underscore_cli",
"tornado/test/options_test.py::OptionsTest::test_dash_underscore_file",
"tornado/test/options_test.py::OptionsTest::test_dash_underscore_introspection",
"tornado/test/options_test.py::OptionsTest::test_error_redefine",
"tornado/test/options_test.py::OptionsTest::test_getitem",
"tornado/test/options_test.py::OptionsTest::test_group_dict",
"tornado/test/options_test.py::OptionsTest::test_help",
"tornado/test/options_test.py::OptionsTest::test_items",
"tornado/test/options_test.py::OptionsTest::test_iter",
"tornado/test/options_test.py::OptionsTest::test_mock_patch",
"tornado/test/options_test.py::OptionsTest::test_multiple_int",
"tornado/test/options_test.py::OptionsTest::test_multiple_string",
"tornado/test/options_test.py::OptionsTest::test_parse_callbacks",
"tornado/test/options_test.py::OptionsTest::test_parse_command_line",
"tornado/test/options_test.py::OptionsTest::test_parse_config_file",
"tornado/test/options_test.py::OptionsTest::test_setattr",
"tornado/test/options_test.py::OptionsTest::test_setattr_type_check",
"tornado/test/options_test.py::OptionsTest::test_setattr_with_callback",
"tornado/test/options_test.py::OptionsTest::test_subcommand",
"tornado/test/options_test.py::OptionsTest::test_types"
]
| []
| Apache License 2.0 | 286 | [
"tornado/options.py"
]
| [
"tornado/options.py"
]
|
|
sigmavirus24__github3.py-460 | 47487d093c0cbe24b4219a7137bedaab6b882548 | 2015-11-04 01:02:18 | 05ed0c6a02cffc6ddd0e82ce840c464e1c5fd8c4 | diff --git a/github3/github.py b/github3/github.py
index e575408b..4f5d0734 100644
--- a/github3/github.py
+++ b/github3/github.py
@@ -355,7 +355,10 @@ class GitHub(GitHubCore):
}
"""
url = self._build_url('emojis')
- return self._json(self._get(url), 200)
+ data = self._json(self._get(url), 200)
+ del data['ETag']
+ del data['Last-Modified']
+ return data
@requires_basic_auth
def feeds(self):
| GitHub.emojis() return value contains ETag and Last-Modified entries
emojis() returns the return value of _json to the user, which then contains
confusing ETag and Last-Modified entries, caused by commit 8c42a709. Not sure
how you want to fix it, but the easiest would be to delete those entries in
emojis() before returning the result. | sigmavirus24/github3.py | diff --git a/tests/integration/test_github.py b/tests/integration/test_github.py
index d27eb229..fe2726be 100644
--- a/tests/integration/test_github.py
+++ b/tests/integration/test_github.py
@@ -95,6 +95,15 @@ class TestGitHub(IntegrationHelper):
# Asserts that it's a string and looks ilke the URLs we expect to see
assert emojis['+1'].startswith('https://github')
+ def test_emojis_etag(self):
+ """Test the ability to retrieve from /emojis."""
+ cassette_name = self.cassette_name('emojis')
+ with self.recorder.use_cassette(cassette_name):
+ emojis = self.gh.emojis()
+
+ assert 'ETag' not in emojis
+ assert 'Last-Modified' not in emojis
+
def test_feeds(self):
"""Test the ability to retrieve a user's timelime URLs."""
self.basic_login()
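As a framework-free sketch of the cleanup (the dictionary contents are invented placeholders, and `pop` is used here instead of the patch's `del` so a payload missing either key is tolerated):
```
def strip_transport_keys(data, keys=('ETag', 'Last-Modified')):
    # Remove the cache-validation metadata that the JSON helper mixed
    # into the decoded payload; pop() is a no-op for absent keys.
    for key in keys:
        data.pop(key, None)
    return data

emojis = {
    '+1': 'https://github.example/images/emoji/+1.png',  # made-up URL
    'ETag': 'W/"abc123"',
    'Last-Modified': 'Tue, 01 Sep 2015 00:00:00 GMT',
}
print(strip_transport_keys(emojis))
# {'+1': 'https://github.example/images/emoji/+1.png'}
```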
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | betamax==0.9.0
betamax-matchers==0.4.0
certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
exceptiongroup==1.2.2
execnet==2.1.1
-e git+https://github.com/sigmavirus24/github3.py.git@47487d093c0cbe24b4219a7137bedaab6b882548#egg=github3.py
idna==3.10
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
requests==2.32.3
requests-toolbelt==1.0.0
tomli==2.2.1
typing_extensions==4.13.0
uritemplate==4.1.1
uritemplate.py==3.0.2
urllib3==2.3.0
| name: github3.py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- betamax==0.9.0
- betamax-matchers==0.4.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- idna==3.10
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- requests==2.32.3
- requests-toolbelt==1.0.0
- tomli==2.2.1
- typing-extensions==4.13.0
- uritemplate==4.1.1
- uritemplate-py==3.0.2
- urllib3==2.3.0
prefix: /opt/conda/envs/github3.py
| [
"tests/integration/test_github.py::TestGitHub::test_emojis_etag"
]
| [
"tests/integration/test_github.py::TestGitHub::test_update_me"
]
| [
"tests/integration/test_github.py::TestGitHub::test_all_events",
"tests/integration/test_github.py::TestGitHub::test_all_organizations",
"tests/integration/test_github.py::TestGitHub::test_all_repositories",
"tests/integration/test_github.py::TestGitHub::test_all_users",
"tests/integration/test_github.py::TestGitHub::test_authorize",
"tests/integration/test_github.py::TestGitHub::test_create_gist",
"tests/integration/test_github.py::TestGitHub::test_create_issue",
"tests/integration/test_github.py::TestGitHub::test_create_key",
"tests/integration/test_github.py::TestGitHub::test_create_repository",
"tests/integration/test_github.py::TestGitHub::test_emojis",
"tests/integration/test_github.py::TestGitHub::test_feeds",
"tests/integration/test_github.py::TestGitHub::test_followers",
"tests/integration/test_github.py::TestGitHub::test_followers_of",
"tests/integration/test_github.py::TestGitHub::test_gist",
"tests/integration/test_github.py::TestGitHub::test_gitignore_template",
"tests/integration/test_github.py::TestGitHub::test_gitignore_templates",
"tests/integration/test_github.py::TestGitHub::test_is_following",
"tests/integration/test_github.py::TestGitHub::test_is_starred",
"tests/integration/test_github.py::TestGitHub::test_issue",
"tests/integration/test_github.py::TestGitHub::test_me",
"tests/integration/test_github.py::TestGitHub::test_meta",
"tests/integration/test_github.py::TestGitHub::test_non_existent_gitignore_template",
"tests/integration/test_github.py::TestGitHub::test_notifications",
"tests/integration/test_github.py::TestGitHub::test_notifications_all",
"tests/integration/test_github.py::TestGitHub::test_octocat",
"tests/integration/test_github.py::TestGitHub::test_organization",
"tests/integration/test_github.py::TestGitHub::test_pull_request",
"tests/integration/test_github.py::TestGitHub::test_rate_limit",
"tests/integration/test_github.py::TestGitHub::test_repositories",
"tests/integration/test_github.py::TestGitHub::test_repositories_by",
"tests/integration/test_github.py::TestGitHub::test_repository",
"tests/integration/test_github.py::TestGitHub::test_repository_with_id",
"tests/integration/test_github.py::TestGitHub::test_search_code",
"tests/integration/test_github.py::TestGitHub::test_search_code_with_text_match",
"tests/integration/test_github.py::TestGitHub::test_search_issues",
"tests/integration/test_github.py::TestGitHub::test_search_repositories",
"tests/integration/test_github.py::TestGitHub::test_search_repositories_with_text_match",
"tests/integration/test_github.py::TestGitHub::test_search_users",
"tests/integration/test_github.py::TestGitHub::test_search_users_with_text_match",
"tests/integration/test_github.py::TestGitHub::test_user",
"tests/integration/test_github.py::TestGitHub::test_user_teams",
"tests/integration/test_github.py::TestGitHub::test_user_with_id",
"tests/integration/test_github.py::TestGitHub::test_zen"
]
| []
| BSD 3-Clause "New" or "Revised" License | 287 | [
"github3/github.py"
]
| [
"github3/github.py"
]
|
|
infobloxopen__infoblox-client-6 | 3de58f527e14a03dcda5c537a1d82671a1aadb99 | 2015-11-05 13:46:52 | 3de58f527e14a03dcda5c537a1d82671a1aadb99 | diff --git a/infoblox_client/object_manager.py b/infoblox_client/object_manager.py
index d61987c..3a19fdd 100644
--- a/infoblox_client/object_manager.py
+++ b/infoblox_client/object_manager.py
@@ -246,15 +246,15 @@ class InfobloxObjectManager(object):
zone_format=None, ns_group=None, prefix=None,
extattrs=None):
try:
- obj.DNSZone.create(self.connector,
- fqdn=dns_zone,
- view=dns_view,
- extattrs=extattrs,
- zone_format=zone_format,
- ns_group=ns_group,
- prefix=prefix,
- grid_primary=grid_primary,
- grid_secondaries=grid_secondaries)
+ return obj.DNSZone.create(self.connector,
+ fqdn=dns_zone,
+ view=dns_view,
+ extattrs=extattrs,
+ zone_format=zone_format,
+ ns_group=ns_group,
+ prefix=prefix,
+ grid_primary=grid_primary,
+ grid_secondaries=grid_secondaries)
except ib_ex.InfobloxCannotCreateObject:
LOG.warning('Unable to create DNS zone %(dns_zone_fqdn)s '
'for %(dns_view)s',
| Need to return Zone if it was created
Currently, None is returned from create_dns_zone:
https://github.com/infobloxopen/infoblox-client/blob/master/infoblox_client/object_manager.py#L249
It needs to return the created DNSZone object. | infobloxopen/infoblox-client | diff --git a/infoblox_client/tests/unit/test_object_manager.py b/infoblox_client/tests/unit/test_object_manager.py
index b24897d..73d43ed 100644
--- a/infoblox_client/tests/unit/test_object_manager.py
+++ b/infoblox_client/tests/unit/test_object_manager.py
@@ -591,8 +591,9 @@ class ObjectManipulatorTestCase(base.TestCase):
ibom = om.InfobloxObjectManager(connector)
- ibom.create_dns_zone(dns_view_name, fqdn, primary_dns_members,
- secondary_dns_members, zone_format=zone_format)
+ zone = ibom.create_dns_zone(dns_view_name, fqdn, primary_dns_members,
+ secondary_dns_members,
+ zone_format=zone_format)
matcher = PayloadMatcher({'view': dns_view_name,
'fqdn': fqdn})
@@ -610,6 +611,7 @@ class ObjectManipulatorTestCase(base.TestCase):
}
connector.create_object.assert_called_once_with('zone_auth', payload,
mock.ANY)
+ self.assertIsInstance(zone, objects.DNSZone)
def test_create_dns_zone_creates_zone_auth_object(self):
dns_view_name = 'dns-view-name'
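The underlying bug class (calling a factory inside a try/except but discarding its result, so the wrapper always returns None even on success) reproduces in a few lines; here is a minimal sketch with hypothetical names:
```
class Zone(object):
    """Stand-in for an Infoblox-style object with a create() factory."""
    @classmethod
    def create(cls, fqdn):
        return cls()

def create_zone_broken(fqdn):
    try:
        Zone.create(fqdn=fqdn)          # result discarded -> implicit None
    except ValueError:
        return None

def create_zone_fixed(fqdn):
    try:
        return Zone.create(fqdn=fqdn)   # propagate the created object
    except ValueError:
        return None

print(create_zone_broken('example.com'))                   # None
print(isinstance(create_zone_fixed('example.com'), Zone))  # True
```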
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
debtcollector==3.0.0
exceptiongroup==1.2.2
idna==3.10
-e git+https://github.com/infobloxopen/infoblox-client.git@3de58f527e14a03dcda5c537a1d82671a1aadb99#egg=infoblox_client
iniconfig==2.1.0
iso8601==2.1.0
mock==5.2.0
msgpack==1.1.0
netaddr==1.3.0
oslo.config==9.7.1
oslo.context==5.7.1
oslo.i18n==6.5.1
oslo.log==7.1.0
oslo.serialization==5.7.0
oslo.utils==8.2.0
packaging==24.2
pbr==6.1.1
pluggy==1.5.0
psutil==7.0.0
pyparsing==3.2.3
pytest==8.3.5
python-dateutil==2.9.0.post0
PyYAML==6.0.2
requests==2.32.3
rfc3986==2.0.0
six==1.17.0
stevedore==5.4.1
tomli==2.2.1
tzdata==2025.2
urllib3==2.3.0
wrapt==1.17.2
| name: infoblox-client
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- debtcollector==3.0.0
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- iso8601==2.1.0
- mock==5.2.0
- msgpack==1.1.0
- netaddr==1.3.0
- oslo-config==9.7.1
- oslo-context==5.7.1
- oslo-i18n==6.5.1
- oslo-log==7.1.0
- oslo-serialization==5.7.0
- oslo-utils==8.2.0
- packaging==24.2
- pbr==6.1.1
- pluggy==1.5.0
- psutil==7.0.0
- pyparsing==3.2.3
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- pyyaml==6.0.2
- requests==2.32.3
- rfc3986==2.0.0
- six==1.17.0
- stevedore==5.4.1
- tomli==2.2.1
- tzdata==2025.2
- urllib3==2.3.0
- wheel==0.23.0
- wrapt==1.17.2
prefix: /opt/conda/envs/infoblox-client
| [
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_create_dns_zone_with_grid_secondaries"
]
| []
| [
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_bind_names_updates_host_record",
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_bind_names_with_a_record",
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_create_dns_view_creates_view_object",
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_create_dns_zone_creates_zone_auth_object",
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_create_fixed_address_for_given_ip",
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_create_fixed_address_from_cidr",
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_create_fixed_address_from_range",
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_create_host_record_creates_host_record_object",
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_create_host_record_range_create_host_record_object",
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_create_ip_range_creates_range_object",
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_create_net_view_creates_network_view_object",
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_default_net_view_is_never_deleted",
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_delete_all_associated_objects",
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_delete_fixed_address",
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_delete_host_record_deletes_host_record_object",
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_delete_ip_range_deletes_infoblox_object",
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_delete_network_deletes_infoblox_network",
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_delete_network_view_deletes_infoblox_object",
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_delete_object_by_ref",
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_get_member_gets_member_object",
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_get_network_gets_network_object",
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_has_networks",
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_member_is_assigned_as_list_on_network_create",
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_network_exists",
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_object_is_not_created_if_already_exists",
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_restart_services_calls_infoblox_function",
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_throws_network_not_available_on_get_network",
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_update_network_updates_eas_if_not_null",
"infoblox_client/tests/unit/test_object_manager.py::ObjectManipulatorTestCase::test_update_network_updates_object"
]
| []
| Apache License 2.0 | 289 | [
"infoblox_client/object_manager.py"
]
| [
"infoblox_client/object_manager.py"
]
|
|
sigmavirus24__github3.py-466 | dfbab2eb984045dc8f260dc81adf70f86d3c3e37 | 2015-11-07 00:57:43 | 05ed0c6a02cffc6ddd0e82ce840c464e1c5fd8c4 | diff --git a/AUTHORS.rst b/AUTHORS.rst
index 7bd1fec1..7d7615b9 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -106,3 +106,5 @@ Contributors
- Sourav Singh(@souravsingh)
- Matt Chung (@itsmemattchung)
+
+- Chris Thompson (@notyetsecure)
diff --git a/github3/issues/milestone.py b/github3/issues/milestone.py
index e55b9e74..55d86bd1 100644
--- a/github3/issues/milestone.py
+++ b/github3/issues/milestone.py
@@ -26,7 +26,9 @@ class Milestone(GitHubCore):
self.description = mile.get('description')
#: :class:`User <github3.users.User>` object representing the creator
#: of the milestone.
- self.creator = User(mile.get('creator'), self)
+ self.creator = None
+ if mile.get('creator'):
+ self.creator = User(mile.get('creator'), self)
#: Number of issues associated with this milestone which are still
#: open.
self.open_issues = mile.get('open_issues')
| Null Milestone creator causing Repository.issue() to crash
When trying to get an issue that has a Milestone without a creator, github3.py crashes.
I think the fix is simple -- add a guard check before trying to create the User for the milestone.creator (like what is done for getting the milestone from an issue -- it first checks if the milestone is truthy before acting on it).
I'm working on a PR with a regression test and a patch, which I should have up shortly.
Here's a simple test case to reproduce the exception:
```
import github3
repo = github3.repository("Code4HR", "localart")
milestone = repo.milestone(2)
```
Here's the traceback from iPython (with Python 3.5.0):
```
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-50-4132b3c2312f> in <module>()
----> 1 r.milestone(2)
python3.5/site-packages/github3/repos/repo.py in milestone(self, number)
1664 base_url=self._api)
1665 json = self._json(self._get(url), 200)
-> 1666 return Milestone(json, self) if json else None
1667
1668 @requires_auth
python3.5/site-packages/github3/issues/milestone.py in __init__(self, mile, session)
28 #: :class:`User <github3.users.User>` object representing the creator
29 #: of the milestone.
---> 30 self.creator = User(mile.get('creator'), self._session)
31 #: Number of issues associated with this milestone which are still
32 #: open.
python3.5/site-packages/github3/users.py in __init__(self, user, session)
121
122 def __init__(self, user, session=None):
--> 123 super(User, self).__init__(user, session)
124 if not self.type:
125 self.type = 'User'
python3.5/site-packages/github3/models.py in __init__(self, acct, session)
312 #: Tells you what type of account this is
313 self.type = None
--> 314 if acct.get('type'):
315 self.type = acct.get('type')
316 self._api = acct.get('url', '')
AttributeError: 'NoneType' object has no attribute 'get'
``` | sigmavirus24/github3.py | diff --git a/tests/test_issues.py b/tests/test_issues.py
index c36e3267..cdfa422b 100644
--- a/tests/test_issues.py
+++ b/tests/test_issues.py
@@ -121,6 +121,12 @@ class TestMilestone(BaseCase):
'2013-12-31T23:59:59Z')
self.mock_assertions()
+ def test_issue_465(self):
+ json = self.m.as_dict().copy()
+ json['creator'] = None
+ m = Milestone(json)
+ assert m.creator is None
+
class TestIssue(BaseCase):
def __init__(self, methodName='runTest'):
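The guard added by the patch mirrors the truthiness check the issue mentions for the milestone itself. A minimal standalone sketch (the `User` class here is only a stand-in for `github3.users.User`):
```
class User(object):
    def __init__(self, data):
        self.login = data.get('login')

def creator_from(mile):
    # 'creator' may be missing or explicitly null in the API payload;
    # only wrap it in a User when it is actually present.
    creator = None
    if mile.get('creator'):
        creator = User(mile['creator'])
    return creator

print(creator_from({'creator': None}))                        # None
print(creator_from({'creator': {'login': 'octocat'}}).login)  # octocat
```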
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | betamax==0.9.0
betamax-matchers==0.4.0
certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
exceptiongroup==1.2.2
execnet==2.1.1
-e git+https://github.com/sigmavirus24/github3.py.git@dfbab2eb984045dc8f260dc81adf70f86d3c3e37#egg=github3.py
idna==3.10
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
requests==2.32.3
requests-toolbelt==1.0.0
tomli==2.2.1
typing_extensions==4.13.0
uritemplate==4.1.1
uritemplate.py==3.0.2
urllib3==2.3.0
| name: github3.py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- betamax==0.9.0
- betamax-matchers==0.4.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- idna==3.10
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- requests==2.32.3
- requests-toolbelt==1.0.0
- tomli==2.2.1
- typing-extensions==4.13.0
- uritemplate==4.1.1
- uritemplate-py==3.0.2
- urllib3==2.3.0
prefix: /opt/conda/envs/github3.py
| [
"tests/test_issues.py::TestMilestone::test_issue_465"
]
| []
| [
"tests/test_issues.py::TestLabel::test_delete",
"tests/test_issues.py::TestLabel::test_equality",
"tests/test_issues.py::TestLabel::test_repr",
"tests/test_issues.py::TestLabel::test_str",
"tests/test_issues.py::TestLabel::test_update",
"tests/test_issues.py::TestMilestone::test_delete",
"tests/test_issues.py::TestMilestone::test_due_on",
"tests/test_issues.py::TestMilestone::test_id",
"tests/test_issues.py::TestMilestone::test_repr",
"tests/test_issues.py::TestMilestone::test_str",
"tests/test_issues.py::TestMilestone::test_update",
"tests/test_issues.py::TestIssue::test_add_labels",
"tests/test_issues.py::TestIssue::test_assign",
"tests/test_issues.py::TestIssue::test_close",
"tests/test_issues.py::TestIssue::test_comment",
"tests/test_issues.py::TestIssue::test_create_comment",
"tests/test_issues.py::TestIssue::test_edit",
"tests/test_issues.py::TestIssue::test_enterprise",
"tests/test_issues.py::TestIssue::test_equality",
"tests/test_issues.py::TestIssue::test_is_closed",
"tests/test_issues.py::TestIssue::test_issue_137",
"tests/test_issues.py::TestIssue::test_remove_all_labels",
"tests/test_issues.py::TestIssue::test_remove_label",
"tests/test_issues.py::TestIssue::test_reopen",
"tests/test_issues.py::TestIssue::test_replace_labels",
"tests/test_issues.py::TestIssue::test_repr",
"tests/test_issues.py::TestIssueEvent::test_equality",
"tests/test_issues.py::TestIssueEvent::test_repr"
]
| []
| BSD 3-Clause "New" or "Revised" License | 291 | [
"github3/issues/milestone.py",
"AUTHORS.rst"
]
| [
"github3/issues/milestone.py",
"AUTHORS.rst"
]
|
|
zopefoundation__zope.publisher-9 | 57a3cee97207ab4a4f05924a07857d007109e17c | 2015-11-09 08:55:38 | 57a3cee97207ab4a4f05924a07857d007109e17c | diff --git a/src/zope/publisher/http.py b/src/zope/publisher/http.py
index 51ef486..f571f05 100644
--- a/src/zope/publisher/http.py
+++ b/src/zope/publisher/http.py
@@ -721,7 +721,7 @@ class HTTPResponse(BaseResponse):
result.append(
("X-Powered-By", "Zope (www.zope.org), Python (www.python.org)"))
- for key, values in headers.items():
+ for key, values in sorted(headers.items(), key=lambda x: x[0].lower()):
if key.lower() == key:
# only change non-literal header names
key = '-'.join([k.capitalize() for k in key.split('-')])
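A minimal sketch of the ordering rule the `sorted()` call above introduces, shown in isolation (the header names and values are invented):
```
headers = {
    'content-length': ['6'],
    'X-Powered-By': ['Zope'],
    'content-type': ['text/plain'],
}
# Sort case-insensitively by header name so the output is deterministic.
for key, values in sorted(headers.items(), key=lambda x: x[0].lower()):
    print(key, values)
# content-length ['6']
# content-type ['text/plain']
# X-Powered-By ['Zope']
```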
| Response headers are in random order
`HTTPResponse.getHeaders()` returns a list of headers in dictionary-internal order. This causes problems like https://github.com/zopefoundation/zope.app.publication/issues/3. | zopefoundation/zope.publisher | diff --git a/src/zope/publisher/tests/test_browserrequest.py b/src/zope/publisher/tests/test_browserrequest.py
index 36c1233..250f93c 100644
--- a/src/zope/publisher/tests/test_browserrequest.py
+++ b/src/zope/publisher/tests/test_browserrequest.py
@@ -152,10 +152,10 @@ class BrowserTests(HTTPTests):
self.assertEqual(
res,
"Status: 200 Ok\r\n"
+ "X-Powered-By: Zope (www.zope.org), Python (www.python.org)\r\n"
"Content-Length: 6\r\n"
"Content-Type: text/plain;charset=utf-8\r\n"
"X-Content-Type-Warning: guessed from content\r\n"
- "X-Powered-By: Zope (www.zope.org), Python (www.python.org)\r\n"
"\r\n"
"'5', 6")
diff --git a/src/zope/publisher/tests/test_http.py b/src/zope/publisher/tests/test_http.py
index 0772204..9859d6f 100644
--- a/src/zope/publisher/tests/test_http.py
+++ b/src/zope/publisher/tests/test_http.py
@@ -236,7 +236,6 @@ class HTTPTests(unittest.TestCase):
response = request.response
publish(request, handle_errors=False)
headers = response.getHeaders()
- headers.sort()
return (
"Status: %s\r\n" % response.getStatusString()
+
@@ -273,8 +272,8 @@ class HTTPTests(unittest.TestCase):
self.assertEqual(
res,
"Status: 200 Ok\r\n"
- "Content-Length: 6\r\n"
"X-Powered-By: Zope (www.zope.org), Python (www.python.org)\r\n"
+ "Content-Length: 6\r\n"
"\r\n"
"'5', 6")
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 4.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup==1.2.2
execnet==2.1.1
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-gettext==5.0
pytz==2025.2
six==1.17.0
tomli==2.2.1
typing_extensions==4.13.0
zope.browser==3.1
zope.component==6.0
zope.configuration==6.0
zope.contenttype==5.2
zope.deprecation==5.1
zope.event==5.0
zope.exceptions==5.2
zope.hookable==7.0
zope.i18n==5.2
zope.i18nmessageid==7.0
zope.interface==7.2
zope.location==5.1
zope.proxy==6.1
-e git+https://github.com/zopefoundation/zope.publisher.git@57a3cee97207ab4a4f05924a07857d007109e17c#egg=zope.publisher
zope.schema==7.0.1
zope.security==7.3
zope.testing==5.1
| name: zope.publisher
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-gettext==5.0
- pytz==2025.2
- six==1.17.0
- tomli==2.2.1
- typing-extensions==4.13.0
- zope-browser==3.1
- zope-component==6.0
- zope-configuration==6.0
- zope-contenttype==5.2
- zope-deprecation==5.1
- zope-event==5.0
- zope-exceptions==5.2
- zope-hookable==7.0
- zope-i18n==5.2
- zope-i18nmessageid==7.0
- zope-interface==7.2
- zope-location==5.1
- zope-proxy==6.1
- zope-schema==7.0.1
- zope-security==7.3
- zope-testing==5.1
prefix: /opt/conda/envs/zope.publisher
| [
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testTraversalToItem"
]
| [
"src/zope/publisher/tests/test_browserrequest.py::HTTPTests::testCookieErrorToLog",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testCookieErrorToLog",
"src/zope/publisher/tests/test_http.py::HTTPTests::testCookieErrorToLog",
"src/zope/publisher/tests/test_http.py::ConcreteHTTPTests::testCookieErrorToLog",
"src/zope/publisher/tests/test_http.py::TestHTTPResponse::testSetCookie"
]
| [
"src/zope/publisher/tests/test_browserrequest.py::HTTPTests::testBasicAuth",
"src/zope/publisher/tests/test_browserrequest.py::HTTPTests::testCookies",
"src/zope/publisher/tests/test_browserrequest.py::HTTPTests::testCookiesUnicode",
"src/zope/publisher/tests/test_browserrequest.py::HTTPTests::testDeduceServerURL",
"src/zope/publisher/tests/test_browserrequest.py::HTTPTests::testHeaders",
"src/zope/publisher/tests/test_browserrequest.py::HTTPTests::testInterface",
"src/zope/publisher/tests/test_browserrequest.py::HTTPTests::testRedirect",
"src/zope/publisher/tests/test_browserrequest.py::HTTPTests::testRequestEnvironment",
"src/zope/publisher/tests/test_browserrequest.py::HTTPTests::testRequestLocale",
"src/zope/publisher/tests/test_browserrequest.py::HTTPTests::testResponseWriteFaile",
"src/zope/publisher/tests/test_browserrequest.py::HTTPTests::testSetPrincipal",
"src/zope/publisher/tests/test_browserrequest.py::HTTPTests::testTraversalToItem",
"src/zope/publisher/tests/test_browserrequest.py::HTTPTests::testUnicodeURLs",
"src/zope/publisher/tests/test_browserrequest.py::HTTPTests::testUnregisteredStatus",
"src/zope/publisher/tests/test_browserrequest.py::HTTPTests::testUntrustedRedirect",
"src/zope/publisher/tests/test_browserrequest.py::HTTPTests::test_PathTrailingWhitespace",
"src/zope/publisher/tests/test_browserrequest.py::HTTPTests::test_double_dots",
"src/zope/publisher/tests/test_browserrequest.py::HTTPTests::test_getVirtualHostRoot",
"src/zope/publisher/tests/test_browserrequest.py::HTTPTests::test_method",
"src/zope/publisher/tests/test_browserrequest.py::HTTPTests::test_repr",
"src/zope/publisher/tests/test_browserrequest.py::HTTPTests::test_setApplicationNames",
"src/zope/publisher/tests/test_browserrequest.py::HTTPTests::test_setApplicationServer",
"src/zope/publisher/tests/test_browserrequest.py::HTTPTests::test_setVirtualHostRoot",
"src/zope/publisher/tests/test_browserrequest.py::HTTPTests::test_traverse",
"src/zope/publisher/tests/test_browserrequest.py::HTTPTests::test_traverseDuplicateHooks",
"src/zope/publisher/tests/test_browserrequest.py::HTTPTests::test_unacceptable_charset",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testBadPath",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testBadPath2",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testBasicAuth",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testCookies",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testCookiesUnicode",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testDeduceServerURL",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testDefault",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testDefault2",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testDefault3",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testDefault4",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testDefault6",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testDefaultPOST",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testFileUploadPost",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testForm",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testFormAcceptsStarButNotUTF8",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testFormBooleanTypes",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testFormDefaults",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testFormDefaults2",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testFormFieldName",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testFormFieldValue",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testFormFloatTypes",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testFormIntTypes",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testFormLinesTypes",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testFormListRecordTypes",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testFormListTypes",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testFormListTypes2",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testFormLongTypes",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testFormMultipleRecordsTypes",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testFormNoEncodingUsesUTF8",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testFormRecordsTypes",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testFormRequiredTypes",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testFormStringTypes",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testFormTextTypes",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testFormTokensTypes",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testFormTupleRecordTypes",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testFormTupleTypes",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testHeaders",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testInterface",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testIssue394",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testIssue559",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testNoDefault",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testNoneFieldNamePost",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testQueryStringIgnoredForPOST",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testRedirect",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testRequestEnvironment",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testRequestLocale",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testResponseWriteFaile",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testSetPrincipal",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testUnicodeURLs",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testUnregisteredStatus",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::testUntrustedRedirect",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::test_PathTrailingWhitespace",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::test_double_dots",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::test_getVirtualHostRoot",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::test_method",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::test_post_body_not_consumed_unnecessarily",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::test_post_body_not_necessarily",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::test_repr",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::test_setApplicationNames",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::test_setApplicationServer",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::test_setVirtualHostRoot",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::test_traverse",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::test_traverseDuplicateHooks",
"src/zope/publisher/tests/test_browserrequest.py::BrowserTests::test_unacceptable_charset",
"src/zope/publisher/tests/test_browserrequest.py::APITests::testEnvironment",
"src/zope/publisher/tests/test_browserrequest.py::APITests::testGetAndDefaultInMapping",
"src/zope/publisher/tests/test_browserrequest.py::APITests::testHaveCustomTestsForIApplicationRequest",
"src/zope/publisher/tests/test_browserrequest.py::APITests::testHaveCustomTestsForIPublicationRequest",
"src/zope/publisher/tests/test_browserrequest.py::APITests::testHaveCustomTestsForIPublisherRequest",
"src/zope/publisher/tests/test_browserrequest.py::APITests::testHoldCloseAndGetResponse",
"src/zope/publisher/tests/test_browserrequest.py::APITests::testIReadMapping",
"src/zope/publisher/tests/test_browserrequest.py::APITests::testPublicationManagement",
"src/zope/publisher/tests/test_browserrequest.py::APITests::testSkinManagement",
"src/zope/publisher/tests/test_browserrequest.py::APITests::testTraversalStack",
"src/zope/publisher/tests/test_browserrequest.py::APITests::testVerifyIApplicationRequest",
"src/zope/publisher/tests/test_browserrequest.py::APITests::testVerifyIPublicationRequest",
"src/zope/publisher/tests/test_browserrequest.py::APITests::testVerifyIPublisherRequest",
"src/zope/publisher/tests/test_browserrequest.py::APITests::testVerifyISkinnable",
"src/zope/publisher/tests/test_browserrequest.py::APITests::test_IApplicationRequest_bodyStream",
"src/zope/publisher/tests/test_browserrequest.py::APITests::test_IBrowserRequest",
"src/zope/publisher/tests/test_browserrequest.py::APITests::test_IPublicationRequest_getPositionalArguments",
"src/zope/publisher/tests/test_browserrequest.py::APITests::test_IPublisherRequest_processInputs",
"src/zope/publisher/tests/test_browserrequest.py::APITests::test_IPublisherRequest_retry",
"src/zope/publisher/tests/test_browserrequest.py::APITests::test_IPublisherRequest_traverse",
"src/zope/publisher/tests/test_browserrequest.py::APITests::test_ISkinnable",
"src/zope/publisher/tests/test_browserrequest.py::APITests::test___len__",
"src/zope/publisher/tests/test_browserrequest.py::APITests::test_items",
"src/zope/publisher/tests/test_browserrequest.py::APITests::test_keys",
"src/zope/publisher/tests/test_browserrequest.py::APITests::test_values",
"src/zope/publisher/tests/test_browserrequest.py::test_suite",
"src/zope/publisher/tests/test_http.py::HTTPInputStreamTests::testCachingWithContentLength",
"src/zope/publisher/tests/test_http.py::HTTPInputStreamTests::testGetCacheStream",
"src/zope/publisher/tests/test_http.py::HTTPInputStreamTests::testRead",
"src/zope/publisher/tests/test_http.py::HTTPInputStreamTests::testReadLine",
"src/zope/publisher/tests/test_http.py::HTTPInputStreamTests::testReadLines",
"src/zope/publisher/tests/test_http.py::HTTPInputStreamTests::testWorkingWithNonClosingStreams",
"src/zope/publisher/tests/test_http.py::HTTPTests::testBasicAuth",
"src/zope/publisher/tests/test_http.py::HTTPTests::testCookies",
"src/zope/publisher/tests/test_http.py::HTTPTests::testCookiesUnicode",
"src/zope/publisher/tests/test_http.py::HTTPTests::testDeduceServerURL",
"src/zope/publisher/tests/test_http.py::HTTPTests::testHeaders",
"src/zope/publisher/tests/test_http.py::HTTPTests::testInterface",
"src/zope/publisher/tests/test_http.py::HTTPTests::testRedirect",
"src/zope/publisher/tests/test_http.py::HTTPTests::testRequestEnvironment",
"src/zope/publisher/tests/test_http.py::HTTPTests::testRequestLocale",
"src/zope/publisher/tests/test_http.py::HTTPTests::testResponseWriteFaile",
"src/zope/publisher/tests/test_http.py::HTTPTests::testSetPrincipal",
"src/zope/publisher/tests/test_http.py::HTTPTests::testTraversalToItem",
"src/zope/publisher/tests/test_http.py::HTTPTests::testUnicodeURLs",
"src/zope/publisher/tests/test_http.py::HTTPTests::testUnregisteredStatus",
"src/zope/publisher/tests/test_http.py::HTTPTests::testUntrustedRedirect",
"src/zope/publisher/tests/test_http.py::HTTPTests::test_PathTrailingWhitespace",
"src/zope/publisher/tests/test_http.py::HTTPTests::test_double_dots",
"src/zope/publisher/tests/test_http.py::HTTPTests::test_getVirtualHostRoot",
"src/zope/publisher/tests/test_http.py::HTTPTests::test_method",
"src/zope/publisher/tests/test_http.py::HTTPTests::test_repr",
"src/zope/publisher/tests/test_http.py::HTTPTests::test_setApplicationNames",
"src/zope/publisher/tests/test_http.py::HTTPTests::test_setApplicationServer",
"src/zope/publisher/tests/test_http.py::HTTPTests::test_setVirtualHostRoot",
"src/zope/publisher/tests/test_http.py::HTTPTests::test_traverse",
"src/zope/publisher/tests/test_http.py::HTTPTests::test_traverseDuplicateHooks",
"src/zope/publisher/tests/test_http.py::HTTPTests::test_unacceptable_charset",
"src/zope/publisher/tests/test_http.py::ConcreteHTTPTests::testBasicAuth",
"src/zope/publisher/tests/test_http.py::ConcreteHTTPTests::testCookies",
"src/zope/publisher/tests/test_http.py::ConcreteHTTPTests::testCookiesUnicode",
"src/zope/publisher/tests/test_http.py::ConcreteHTTPTests::testDeduceServerURL",
"src/zope/publisher/tests/test_http.py::ConcreteHTTPTests::testHeaders",
"src/zope/publisher/tests/test_http.py::ConcreteHTTPTests::testInterface",
"src/zope/publisher/tests/test_http.py::ConcreteHTTPTests::testRedirect",
"src/zope/publisher/tests/test_http.py::ConcreteHTTPTests::testRequestEnvironment",
"src/zope/publisher/tests/test_http.py::ConcreteHTTPTests::testRequestLocale",
"src/zope/publisher/tests/test_http.py::ConcreteHTTPTests::testResponseWriteFaile",
"src/zope/publisher/tests/test_http.py::ConcreteHTTPTests::testSetPrincipal",
"src/zope/publisher/tests/test_http.py::ConcreteHTTPTests::testTraversalToItem",
"src/zope/publisher/tests/test_http.py::ConcreteHTTPTests::testUnicodeURLs",
"src/zope/publisher/tests/test_http.py::ConcreteHTTPTests::testUnregisteredStatus",
"src/zope/publisher/tests/test_http.py::ConcreteHTTPTests::testUntrustedRedirect",
"src/zope/publisher/tests/test_http.py::ConcreteHTTPTests::test_PathTrailingWhitespace",
"src/zope/publisher/tests/test_http.py::ConcreteHTTPTests::test_double_dots",
"src/zope/publisher/tests/test_http.py::ConcreteHTTPTests::test_getVirtualHostRoot",
"src/zope/publisher/tests/test_http.py::ConcreteHTTPTests::test_method",
"src/zope/publisher/tests/test_http.py::ConcreteHTTPTests::test_non_existing_charset",
"src/zope/publisher/tests/test_http.py::ConcreteHTTPTests::test_repr",
"src/zope/publisher/tests/test_http.py::ConcreteHTTPTests::test_setApplicationNames",
"src/zope/publisher/tests/test_http.py::ConcreteHTTPTests::test_setApplicationServer",
"src/zope/publisher/tests/test_http.py::ConcreteHTTPTests::test_setVirtualHostRoot",
"src/zope/publisher/tests/test_http.py::ConcreteHTTPTests::test_shiftNameToApplication",
"src/zope/publisher/tests/test_http.py::ConcreteHTTPTests::test_traverse",
"src/zope/publisher/tests/test_http.py::ConcreteHTTPTests::test_traverseDuplicateHooks",
"src/zope/publisher/tests/test_http.py::ConcreteHTTPTests::test_unacceptable_charset",
"src/zope/publisher/tests/test_http.py::TestHTTPResponse::testContentLength",
"src/zope/publisher/tests/test_http.py::TestHTTPResponse::testContentType",
"src/zope/publisher/tests/test_http.py::TestHTTPResponse::testInterface",
"src/zope/publisher/tests/test_http.py::TestHTTPResponse::testWrite_noContentLength",
"src/zope/publisher/tests/test_http.py::TestHTTPResponse::test_handleException",
"src/zope/publisher/tests/test_http.py::APITests::testEnvironment",
"src/zope/publisher/tests/test_http.py::APITests::testGetAndDefaultInMapping",
"src/zope/publisher/tests/test_http.py::APITests::testHaveCustomTestsForIApplicationRequest",
"src/zope/publisher/tests/test_http.py::APITests::testHaveCustomTestsForIPublicationRequest",
"src/zope/publisher/tests/test_http.py::APITests::testHaveCustomTestsForIPublisherRequest",
"src/zope/publisher/tests/test_http.py::APITests::testHoldCloseAndGetResponse",
"src/zope/publisher/tests/test_http.py::APITests::testIReadMapping",
"src/zope/publisher/tests/test_http.py::APITests::testPublicationManagement",
"src/zope/publisher/tests/test_http.py::APITests::testSkinManagement",
"src/zope/publisher/tests/test_http.py::APITests::testTraversalStack",
"src/zope/publisher/tests/test_http.py::APITests::testVerifyIApplicationRequest",
"src/zope/publisher/tests/test_http.py::APITests::testVerifyIPublicationRequest",
"src/zope/publisher/tests/test_http.py::APITests::testVerifyIPublisherRequest",
"src/zope/publisher/tests/test_http.py::APITests::test_IApplicationRequest_bodyStream",
"src/zope/publisher/tests/test_http.py::APITests::test_IPublicationRequest_getPositionalArguments",
"src/zope/publisher/tests/test_http.py::APITests::test_IPublisherRequest_processInputs",
"src/zope/publisher/tests/test_http.py::APITests::test_IPublisherRequest_retry",
"src/zope/publisher/tests/test_http.py::APITests::test_IPublisherRequest_traverse",
"src/zope/publisher/tests/test_http.py::APITests::test___len__",
"src/zope/publisher/tests/test_http.py::APITests::test_items",
"src/zope/publisher/tests/test_http.py::APITests::test_keys",
"src/zope/publisher/tests/test_http.py::APITests::test_values",
"src/zope/publisher/tests/test_http.py::test_suite"
]
| []
| Zope Public License 2.1 | 292 | [
"src/zope/publisher/http.py"
]
| [
"src/zope/publisher/http.py"
]
|
|
msgpack__msgpack-python-158 | c8513898e222a91ad7f6520aa8a4a5a1711cdc65 | 2015-11-09 18:54:44 | 0e2021d3a3d1218ca191f4e802df0af3bbfaa51f | diff --git a/.travis.yml b/.travis.yml
index eced353..7695184 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,18 +1,18 @@
sudo: false
-cache:
- directories:
- - $HOME/.cache/pip
+cache: pip
language: python
python:
- - 3.5
+ - 2.7
branches:
only:
- master
env:
- - TOXENV=py27-c,py33-c,py34-c,py35-c
- - TOXENV=py27-pure,py33-pure,py34-pure,py35-pure
+ - TOXENV=py26-c,py27-c
+ - TOXENV=py32-c,py33-c,py34-c
+ - TOXENV=py26-pure,py27-pure
+ - TOXENV=py32-pure,py33-pure,py34-pure
- TOXENV=pypy-pure,pypy3-pure
install:
diff --git a/ChangeLog.rst b/ChangeLog.rst
index f20bb75..35535b4 100644
--- a/ChangeLog.rst
+++ b/ChangeLog.rst
@@ -1,6 +1,6 @@
0.4.7
=====
-:release date: 2016-01-25
+:release date: TBD
Bugs fixed
----------
diff --git a/msgpack/_packer.pyx b/msgpack/_packer.pyx
index 6392655..7c1e53d 100644
--- a/msgpack/_packer.pyx
+++ b/msgpack/_packer.pyx
@@ -63,6 +63,13 @@ cdef class Packer(object):
:param bool use_bin_type:
Use bin type introduced in msgpack spec 2.0 for bytes.
It also enable str8 type for unicode.
+ :param bool strict_types:
+ If set to true, types will be checked to be exact. Derived classes
+ from serializeable types will not be serialized and will be
+ treated as unsupported type and forwarded to default.
+ Additionally tuples will not be serialized as lists.
+ This is useful when trying to implement accurate serialization
+ for python types.
"""
cdef msgpack_packer pk
cdef object _default
@@ -70,6 +77,7 @@ cdef class Packer(object):
cdef object _berrors
cdef char *encoding
cdef char *unicode_errors
+ cdef bint strict_types
cdef bool use_float
cdef bint autoreset
@@ -82,10 +90,12 @@ cdef class Packer(object):
self.pk.length = 0
def __init__(self, default=None, encoding='utf-8', unicode_errors='strict',
- use_single_float=False, bint autoreset=1, bint use_bin_type=0):
+ use_single_float=False, bint autoreset=1, bint use_bin_type=0,
+ bint strict_types=0):
"""
"""
self.use_float = use_single_float
+ self.strict_types = strict_types
self.autoreset = autoreset
self.pk.use_bin_type = use_bin_type
if default is not None:
@@ -121,6 +131,7 @@ cdef class Packer(object):
cdef dict d
cdef size_t L
cdef int default_used = 0
+ cdef bint strict_types = self.strict_types
if nest_limit < 0:
raise PackValueError("recursion limit exceeded.")
@@ -128,12 +139,12 @@ cdef class Packer(object):
while True:
if o is None:
ret = msgpack_pack_nil(&self.pk)
- elif isinstance(o, bool):
+ elif PyBool_Check(o) if strict_types else isinstance(o, bool):
if o:
ret = msgpack_pack_true(&self.pk)
else:
ret = msgpack_pack_false(&self.pk)
- elif PyLong_Check(o):
+ elif PyLong_CheckExact(o) if strict_types else PyLong_Check(o):
# PyInt_Check(long) is True for Python 3.
# So we should test long before int.
try:
@@ -150,17 +161,17 @@ cdef class Packer(object):
continue
else:
raise
- elif PyInt_Check(o):
+ elif PyInt_CheckExact(o) if strict_types else PyInt_Check(o):
longval = o
ret = msgpack_pack_long(&self.pk, longval)
- elif PyFloat_Check(o):
+ elif PyFloat_CheckExact(o) if strict_types else PyFloat_Check(o):
if self.use_float:
fval = o
ret = msgpack_pack_float(&self.pk, fval)
else:
dval = o
ret = msgpack_pack_double(&self.pk, dval)
- elif PyBytes_Check(o):
+ elif PyBytes_CheckExact(o) if strict_types else PyBytes_Check(o):
L = len(o)
if L > (2**32)-1:
raise ValueError("bytes is too large")
@@ -168,17 +179,17 @@ cdef class Packer(object):
ret = msgpack_pack_bin(&self.pk, L)
if ret == 0:
ret = msgpack_pack_raw_body(&self.pk, rawval, L)
- elif PyUnicode_Check(o):
+ elif PyUnicode_CheckExact(o) if strict_types else PyUnicode_Check(o):
if not self.encoding:
raise TypeError("Can't encode unicode string: no encoding is specified")
o = PyUnicode_AsEncodedString(o, self.encoding, self.unicode_errors)
L = len(o)
if L > (2**32)-1:
- raise ValueError("unicode string is too large")
+ raise ValueError("dict is too large")
rawval = o
- ret = msgpack_pack_raw(&self.pk, L)
+ ret = msgpack_pack_raw(&self.pk, len(o))
if ret == 0:
- ret = msgpack_pack_raw_body(&self.pk, rawval, L)
+ ret = msgpack_pack_raw_body(&self.pk, rawval, len(o))
elif PyDict_CheckExact(o):
d = <dict>o
L = len(d)
@@ -191,7 +202,7 @@ cdef class Packer(object):
if ret != 0: break
ret = self._pack(v, nest_limit-1)
if ret != 0: break
- elif PyDict_Check(o):
+ elif not strict_types and PyDict_Check(o):
L = len(o)
if L > (2**32)-1:
raise ValueError("dict is too large")
@@ -202,7 +213,7 @@ cdef class Packer(object):
if ret != 0: break
ret = self._pack(v, nest_limit-1)
if ret != 0: break
- elif isinstance(o, ExtType):
+ elif type(o) is ExtType if strict_types else isinstance(o, ExtType):
# This should be before Tuple because ExtType is namedtuple.
longval = o.code
rawval = o.data
@@ -211,7 +222,7 @@ cdef class Packer(object):
raise ValueError("EXT data is too large")
ret = msgpack_pack_ext(&self.pk, longval, L)
ret = msgpack_pack_raw_body(&self.pk, rawval, L)
- elif PyTuple_Check(o) or PyList_Check(o):
+ elif PyList_CheckExact(o) if strict_types else (PyTuple_Check(o) or PyList_Check(o)):
L = len(o)
if L > (2**32)-1:
raise ValueError("list is too large")
diff --git a/msgpack/_unpacker.pyx b/msgpack/_unpacker.pyx
index 1aefc64..aec3b7d 100644
--- a/msgpack/_unpacker.pyx
+++ b/msgpack/_unpacker.pyx
@@ -209,7 +209,7 @@ cdef class Unpacker(object):
:param int max_buffer_size:
Limits size of data waiting unpacked. 0 means system's INT_MAX (default).
Raises `BufferFull` exception when it is insufficient.
- You shoud set this parameter when unpacking data from untrusted source.
+ You shoud set this parameter when unpacking data from untrasted source.
:param int max_str_len:
Limits max length of str. (default: 2**31-1)
diff --git a/msgpack/_version.py b/msgpack/_version.py
index 37c172d..2c1c96c 100644
--- a/msgpack/_version.py
+++ b/msgpack/_version.py
@@ -1,1 +1,1 @@
-version = (0, 4, 7)
+version = (0, 4, 6)
diff --git a/msgpack/fallback.py b/msgpack/fallback.py
index f682611..11280ed 100644
--- a/msgpack/fallback.py
+++ b/msgpack/fallback.py
@@ -69,6 +69,13 @@ TYPE_EXT = 5
DEFAULT_RECURSE_LIMIT = 511
+def _check_type_strict(obj, t, type=type, tuple=tuple):
+ if type(t) is tuple:
+ return type(obj) in t
+ else:
+ return type(obj) is t
+
+
def unpack(stream, **kwargs):
"""
Unpack an object from `stream`.
@@ -138,7 +145,7 @@ class Unpacker(object):
:param int max_buffer_size:
Limits size of data waiting unpacked. 0 means system's INT_MAX (default).
Raises `BufferFull` exception when it is insufficient.
- You shoud set this parameter when unpacking data from untrusted source.
+ You shoud set this parameter when unpacking data from untrasted source.
:param int max_str_len:
Limits max length of str. (default: 2**31-1)
@@ -609,9 +616,18 @@ class Packer(object):
:param bool use_bin_type:
Use bin type introduced in msgpack spec 2.0 for bytes.
It also enable str8 type for unicode.
+ :param bool strict_types:
+ If set to true, types will be checked to be exact. Derived classes
+ from serializeable types will not be serialized and will be
+ treated as unsupported type and forwarded to default.
+ Additionally tuples will not be serialized as lists.
+ This is useful when trying to implement accurate serialization
+ for python types.
"""
def __init__(self, default=None, encoding='utf-8', unicode_errors='strict',
- use_single_float=False, autoreset=True, use_bin_type=False):
+ use_single_float=False, autoreset=True, use_bin_type=False,
+ strict_types=False):
+ self._strict_types = strict_types
self._use_float = use_single_float
self._autoreset = autoreset
self._use_bin_type = use_bin_type
@@ -623,18 +639,24 @@ class Packer(object):
raise TypeError("default must be callable")
self._default = default
- def _pack(self, obj, nest_limit=DEFAULT_RECURSE_LIMIT, isinstance=isinstance):
+ def _pack(self, obj, nest_limit=DEFAULT_RECURSE_LIMIT,
+ check=isinstance, check_type_strict=_check_type_strict):
default_used = False
+ if self._strict_types:
+ check = check_type_strict
+ list_types = list
+ else:
+ list_types = (list, tuple)
while True:
if nest_limit < 0:
raise PackValueError("recursion limit exceeded")
if obj is None:
return self._buffer.write(b"\xc0")
- if isinstance(obj, bool):
+ if check(obj, bool):
if obj:
return self._buffer.write(b"\xc3")
return self._buffer.write(b"\xc2")
- if isinstance(obj, int_types):
+ if check(obj, int_types):
if 0 <= obj < 0x80:
return self._buffer.write(struct.pack("B", obj))
if -0x20 <= obj < 0:
@@ -660,7 +682,7 @@ class Packer(object):
default_used = True
continue
raise PackValueError("Integer value out of range")
- if self._use_bin_type and isinstance(obj, bytes):
+ if self._use_bin_type and check(obj, bytes):
n = len(obj)
if n <= 0xff:
self._buffer.write(struct.pack('>BB', 0xc4, n))
@@ -671,8 +693,8 @@ class Packer(object):
else:
raise PackValueError("Bytes is too large")
return self._buffer.write(obj)
- if isinstance(obj, (Unicode, bytes)):
- if isinstance(obj, Unicode):
+ if check(obj, (Unicode, bytes)):
+ if check(obj, Unicode):
if self._encoding is None:
raise TypeError(
"Can't encode unicode string: "
@@ -690,11 +712,11 @@ class Packer(object):
else:
raise PackValueError("String is too large")
return self._buffer.write(obj)
- if isinstance(obj, float):
+ if check(obj, float):
if self._use_float:
return self._buffer.write(struct.pack(">Bf", 0xca, obj))
return self._buffer.write(struct.pack(">Bd", 0xcb, obj))
- if isinstance(obj, ExtType):
+ if check(obj, ExtType):
code = obj.code
data = obj.data
assert isinstance(code, int)
@@ -719,13 +741,13 @@ class Packer(object):
self._buffer.write(struct.pack("b", code))
self._buffer.write(data)
return
- if isinstance(obj, (list, tuple)):
+ if check(obj, list_types):
n = len(obj)
self._fb_pack_array_header(n)
for i in xrange(n):
self._pack(obj[i], nest_limit - 1)
return
- if isinstance(obj, dict):
+ if check(obj, dict):
return self._fb_pack_map_pairs(len(obj), dict_iteritems(obj),
nest_limit - 1)
if not default_used and self._default is not None:
diff --git a/tox.ini b/tox.ini
index b6e7a7f..15feb51 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = {py27,py33,py34,py35}-{c,pure},{pypy,pypy3}-pure,py27-x86,py34-x86
+envlist = {py26,py27,py32,py33,py34}-{c,pure},{pypy,pypy3}-pure,py27-x86,py34-x86
[variants:pure]
setenv=
@@ -36,3 +36,4 @@ commands=
python -c 'import sys; print(hex(sys.maxsize))'
python -c 'from msgpack import _packer, _unpacker'
py.test
+
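Before the issue text, a self-contained illustration of the `_check_type_strict` helper added to `msgpack/fallback.py` above — the `bool`/`int` pair is an assumed example, not from the patch, chosen because `bool` subclasses `int`:

```
# Reproduced from the fallback patch: exact type identity instead of
# isinstance's subclass-friendly matching.
def _check_type_strict(obj, t, type=type, tuple=tuple):
    if type(t) is tuple:
        return type(obj) in t
    else:
        return type(obj) is t

assert isinstance(True, int)              # bool passes the isinstance test
assert not _check_type_strict(True, int)  # but fails the strict check
```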
| Serialization of namedtuples
So I saw this referenced in the issue "Allow custom encoding (not character encoding but default function encoding) of unicode strings #73" and was wondering if there's a workaround or any plans to implement a fix in the future.
I'm using version 0.4.6 and as far as I can tell there's no way to define custom serialization of namedtuples (or any object that inherits from the native Python types msgpack-python supports) with the current implementation. The problem is that any class that inherits from tuple somewhere in its class hierarchy will return True for isinstance(obj, (tuple)), which will not give the user's default callable a chance to run. One solution I see (after a brief code review) is as simple as moving the if block that calls the self._default function from the bottom to right after the `if obj is None:` block. | msgpack/msgpack-python | diff --git a/test/test_stricttype.py b/test/test_stricttype.py
new file mode 100644
index 0000000..a20b5eb
--- /dev/null
+++ b/test/test_stricttype.py
@@ -0,0 +1,15 @@
+# coding: utf-8
+
+from collections import namedtuple
+from msgpack import packb, unpackb
+
+
+def test_namedtuple():
+ T = namedtuple('T', "foo bar")
+ def default(o):
+ if isinstance(o, T):
+ return dict(o._asdict())
+ raise TypeError('Unsupported type %s' % (type(o),))
+ packed = packb(T(1, 42), strict_types=True, use_bin_type=True, default=default)
+ unpacked = unpackb(packed, encoding='utf-8')
+ assert unpacked == {'foo': 1, 'bar': 42}
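For comparison with the test above, a minimal sketch of the behaviour the new flag enables; the `Point` type and the `default` body are illustrative assumptions, and the `strict_types` keyword only exists after this patch:

```
from collections import namedtuple
from msgpack import packb, unpackb

Point = namedtuple('Point', 'x y')

def default(obj):
    # Only reached for objects the packer declines to serialize itself.
    if isinstance(obj, Point):
        return {'x': obj.x, 'y': obj.y}
    raise TypeError('Unsupported type %s' % (type(obj),))

# Default behaviour: the namedtuple silently degrades to a plain list.
assert unpackb(packb(Point(1, 2))) == [1, 2]

# With strict_types=True only exact list instances pack as lists, so the
# namedtuple is forwarded to `default` and round-trips as a dict.
packed = packb(Point(1, 2), strict_types=True, use_bin_type=True,
               default=default)
assert unpackb(packed, encoding='utf-8') == {'x': 1, 'y': 2}
```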
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 7
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
-e git+https://github.com/msgpack/msgpack-python.git@c8513898e222a91ad7f6520aa8a4a5a1711cdc65#egg=msgpack_python
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: msgpack-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/msgpack-python
| [
"test/test_stricttype.py::test_namedtuple"
]
| []
| []
| []
| Apache License 2.0 | 293 | [
"ChangeLog.rst",
"msgpack/_unpacker.pyx",
".travis.yml",
"tox.ini",
"msgpack/_version.py",
"msgpack/_packer.pyx",
"msgpack/fallback.py"
]
| [
"ChangeLog.rst",
"msgpack/_unpacker.pyx",
".travis.yml",
"tox.ini",
"msgpack/_version.py",
"msgpack/_packer.pyx",
"msgpack/fallback.py"
]
|
|
craffel__mir_eval-157 | 3a6a8ce53eb7974db52104a834210b505b43f711 | 2015-11-10 21:04:57 | 4a35625bf2bbbd71d15916f634d563b3878ac5d0 | diff --git a/mir_eval/sonify.py b/mir_eval/sonify.py
index 23703e3..66a8319 100644
--- a/mir_eval/sonify.py
+++ b/mir_eval/sonify.py
@@ -125,7 +125,7 @@ def time_frequency(gram, frequencies, times, fs, function=np.sin, length=None):
return output
-def chroma(chromagram, times, fs):
+def chroma(chromagram, times, fs, **kwargs):
"""Reverse synthesis of a chromagram (semitone matrix)
Parameters
@@ -138,6 +138,9 @@ def chroma(chromagram, times, fs):
The start time of each column in the chromagram
fs : int
Sampling rate to synthesize audio data at
+ kwargs
+ Additional keyword arguments to pass to
+ :func:`mir_eval.sonify.time_frequency`
Returns
-------
@@ -165,10 +168,10 @@ def chroma(chromagram, times, fs):
gram *= shepard_weight.reshape(-1, 1)
# Compute frequencies
frequencies = 440.0*(2.0**((notes - 69)/12.0))
- return time_frequency(gram, frequencies, times, fs)
+ return time_frequency(gram, frequencies, times, fs, **kwargs)
-def chords(chord_labels, intervals, fs):
+def chords(chord_labels, intervals, fs, **kwargs):
"""Synthesizes chord labels
Parameters
@@ -179,6 +182,9 @@ def chords(chord_labels, intervals, fs):
Start and end times of each chord label
fs : int
Sampling rate to synthesize at
+ kwargs
+ Additional keyword arguments to pass to
+ :func:`mir_eval.sonify.time_frequency`
Returns
-------
@@ -200,4 +206,4 @@ def chords(chord_labels, intervals, fs):
chromagram = np.array([np.roll(interval_bitmap, root)
for (interval_bitmap, root)
in zip(interval_bitmaps, roots)]).T
- return chroma(chromagram, times, fs)
+ return chroma(chromagram, times, fs, **kwargs)
| sonify.chroma and sonify.chords should pass **kwargs to time_frequency
So that `length` and `function` can be set. | craffel/mir_eval | diff --git a/tests/test_sonify.py b/tests/test_sonify.py
index 03a2ec4..a1975c9 100644
--- a/tests/test_sonify.py
+++ b/tests/test_sonify.py
@@ -36,6 +36,10 @@ def test_chroma():
np.random.standard_normal((12, 1000)),
np.linspace(0, 10, 1000), fs)
assert len(signal) == 10*fs
+ signal = mir_eval.sonify.chroma(
+ np.random.standard_normal((12, 1000)),
+ np.linspace(0, 10, 1000), fs, length=fs*11)
+ assert len(signal) == 11*fs
def test_chords():
@@ -45,3 +49,7 @@ def test_chords():
['C', 'C:maj', 'D:min7', 'E:min', 'C#', 'C', 'C', 'C', 'C', 'C'],
intervals, fs)
assert len(signal) == 10*fs
+ signal = mir_eval.sonify.chords(
+ ['C', 'C:maj', 'D:min7', 'E:min', 'C#', 'C', 'C', 'C', 'C', 'C'],
+ intervals, fs, length=fs*11)
+ assert len(signal) == 11*fs
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 1
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "numpy>=1.7.0 scipy>=0.9.0 future six",
"pip_packages": [
"nose",
"pep8",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
future @ file:///croot/future_1730902796226/work
iniconfig==2.1.0
-e git+https://github.com/craffel/mir_eval.git@3a6a8ce53eb7974db52104a834210b505b43f711#egg=mir_eval
nose==1.3.7
numpy @ file:///croot/numpy_and_numpy_base_1736283260865/work/dist/numpy-2.0.2-cp39-cp39-linux_x86_64.whl#sha256=3387e3e62932fa288bc18e8f445ce19e998b418a65ed2064dd40a054f976a6c7
packaging==24.2
pep8==1.7.1
pluggy==1.5.0
pytest==8.3.5
scipy @ file:///croot/scipy_1733756309941/work/dist/scipy-1.13.1-cp39-cp39-linux_x86_64.whl#sha256=3b247b926209f2d9f719ebae39faf3ff891b2596150ed8f8349adfc3eb19441c
six @ file:///tmp/build/80754af9/six_1644875935023/work
tomli==2.2.1
| name: mir_eval
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- blas=1.0=openblas
- ca-certificates=2025.2.25=h06a4308_0
- future=1.0.0=py39h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgfortran-ng=11.2.0=h00389a5_1
- libgfortran5=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libopenblas=0.3.21=h043d6bf_0
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- numpy=2.0.2=py39heeff2f4_0
- numpy-base=2.0.2=py39h8a23956_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- pybind11-abi=4=hd3eb1b0_1
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- scipy=1.13.1=py39heeff2f4_1
- setuptools=72.1.0=py39h06a4308_0
- six=1.16.0=pyhd3eb1b0_1
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- nose==1.3.7
- packaging==24.2
- pep8==1.7.1
- pluggy==1.5.0
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/mir_eval
| [
"tests/test_sonify.py::test_chroma"
]
| [
"tests/test_sonify.py::test_clicks",
"tests/test_sonify.py::test_chords"
]
| [
"tests/test_sonify.py::test_time_frequency"
]
| []
| MIT License | 294 | [
"mir_eval/sonify.py"
]
| [
"mir_eval/sonify.py"
]
|
|
tomerfiliba__plumbum-243 | 0db72571329ef8f8167b93c7cd9d737dfb37ebbf | 2015-11-11 06:35:45 | d24e80c5364a777a2393c3acb24a7a7220f023fc | diff --git a/plumbum/cli/progress.py b/plumbum/cli/progress.py
index 3ca68b2..080c80a 100644
--- a/plumbum/cli/progress.py
+++ b/plumbum/cli/progress.py
@@ -103,9 +103,9 @@ class ProgressBase(six.ABC):
pass
@classmethod
- def range(cls, *value, **kargs):
+ def range(cls, value, **kargs):
"""Fast shortcut to create a range based progress bar, assumes work done in body"""
- return cls(range(*value), body=True, **kargs)
+ return cls(range(value), value, body=True, **kargs)
@classmethod
def wrap(cls, iterator, length=None, **kargs):
diff --git a/plumbum/commands/base.py b/plumbum/commands/base.py
index f20ee15..900f0ce 100644
--- a/plumbum/commands/base.py
+++ b/plumbum/commands/base.py
@@ -5,6 +5,7 @@ from plumbum.commands.processes import run_proc, iter_lines
from plumbum.lib import six
from tempfile import TemporaryFile
from subprocess import PIPE, Popen
+from types import MethodType
class RedirectionError(Exception):
@@ -296,6 +297,16 @@ class Pipeline(BaseCommand):
dstproc.returncode = rc_src or rc_dst
return dstproc.returncode
dstproc.wait = wait2
+
+ dstproc_verify = dstproc.verify
+ def verify(proc, retcode, timeout, stdout, stderr):
+ #TODO: right now it's impossible to specify different expected
+ # return codes for different stages of the pipeline, but we
+ # should make that possible.
+ proc.srcproc.verify(retcode, timeout, stdout, stderr)
+ dstproc_verify(retcode, timeout, stdout, stderr)
+ dstproc.verify = MethodType(verify, dstproc)
+
return dstproc
class BaseRedirection(BaseCommand):
diff --git a/plumbum/commands/processes.py b/plumbum/commands/processes.py
index 2129dde..06a5dca 100644
--- a/plumbum/commands/processes.py
+++ b/plumbum/commands/processes.py
@@ -20,18 +20,7 @@ except ImportError:
# utility functions
#===================================================================================================
def _check_process(proc, retcode, timeout, stdout, stderr):
- if getattr(proc, "_timed_out", False):
- raise ProcessTimedOut("Process did not terminate within %s seconds" % (timeout,),
- getattr(proc, "argv", None))
-
- if retcode is not None:
- if hasattr(retcode, "__contains__"):
- if proc.returncode not in retcode:
- raise ProcessExecutionError(getattr(proc, "argv", None), proc.returncode,
- stdout, stderr)
- elif proc.returncode != retcode:
- raise ProcessExecutionError(getattr(proc, "argv", None), proc.returncode,
- stdout, stderr)
+ proc.verify(retcode, timeout, stdout, stderr)
return proc.returncode, stdout, stderr
def _iter_lines(proc, decode, linesize):
diff --git a/plumbum/machines/base.py b/plumbum/machines/base.py
index 109f4f3..a384e4d 100644
--- a/plumbum/machines/base.py
+++ b/plumbum/machines/base.py
@@ -1,4 +1,21 @@
from plumbum.commands.processes import CommandNotFound
+from plumbum.commands.processes import ProcessExecutionError
+from plumbum.commands.processes import ProcessTimedOut
+
+class PopenAddons(object):
+ def verify(self, retcode, timeout, stdout, stderr):
+ if getattr(self, "_timed_out", False):
+ raise ProcessTimedOut("Process did not terminate within %s seconds" % (timeout,),
+ getattr(self, "argv", None))
+
+ if retcode is not None:
+ if hasattr(retcode, "__contains__"):
+ if self.returncode not in retcode:
+ raise ProcessExecutionError(getattr(self, "argv", None), self.returncode,
+ stdout, stderr)
+ elif self.returncode != retcode:
+ raise ProcessExecutionError(getattr(self, "argv", None), self.returncode,
+ stdout, stderr)
class BaseMachine(object):
diff --git a/plumbum/machines/local.py b/plumbum/machines/local.py
index e821c25..0cd64e5 100644
--- a/plumbum/machines/local.py
+++ b/plumbum/machines/local.py
@@ -16,6 +16,7 @@ from plumbum.lib import ProcInfo, IS_WIN32, six, StaticProperty
from plumbum.commands.daemons import win32_daemonize, posix_daemonize
from plumbum.commands.processes import iter_lines
from plumbum.machines.base import BaseMachine
+from plumbum.machines.base import PopenAddons
from plumbum.machines.env import BaseEnv
if sys.version_info >= (3, 2):
@@ -31,11 +32,12 @@ else:
from subprocess import Popen, PIPE
has_new_subprocess = False
-class IterablePopen(Popen):
+class IterablePopen(Popen, PopenAddons):
iter_lines = iter_lines
def __iter__(self):
return self.iter_lines()
+
logger = logging.getLogger("plumbum.local")
diff --git a/plumbum/machines/paramiko_machine.py b/plumbum/machines/paramiko_machine.py
index 43c5ed1..8f45ad8 100644
--- a/plumbum/machines/paramiko_machine.py
+++ b/plumbum/machines/paramiko_machine.py
@@ -2,6 +2,7 @@ import logging
import errno
import stat
import socket
+from plumbum.machines.base import PopenAddons
from plumbum.machines.remote import BaseRemoteMachine
from plumbum.machines.session import ShellSession
from plumbum.lib import _setdoc, six
@@ -23,7 +24,7 @@ except ImportError:
logger = logging.getLogger("plumbum.paramiko")
-class ParamikoPopen(object):
+class ParamikoPopen(PopenAddons):
def __init__(self, argv, stdin, stdout, stderr, encoding, stdin_file = None,
stdout_file = None, stderr_file = None):
self.argv = argv
diff --git a/plumbum/machines/session.py b/plumbum/machines/session.py
index 3767294..43b4b37 100644
--- a/plumbum/machines/session.py
+++ b/plumbum/machines/session.py
@@ -4,6 +4,7 @@ import logging
import threading
from plumbum.commands import BaseCommand, run_proc
from plumbum.lib import six
+from plumbum.machines.base import PopenAddons
class ShellSessionError(Exception):
@@ -53,7 +54,7 @@ class MarkedPipe(object):
return line
-class SessionPopen(object):
+class SessionPopen(PopenAddons):
"""A shell-session-based ``Popen``-like object (has the following attributes: ``stdin``,
``stdout``, ``stderr``, ``returncode``)"""
def __init__(self, argv, isatty, stdin, stdout, stderr, encoding):
| ProcessExecutionError is misleading when piped commands fail
Relates to #145
```
Type "help", "copyright", "credits" or "license" for more information.
>>> from plumbum.cmd import cat, head
>>> from plumbum import FG
>>> (cat['/dev/urndom'] | head['-c', '10']) & FG()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/nail/home/josnyder/consul-backups/env/local/lib/python2.7/site-packages/plumbum/commands/modifiers.py", line 143, in __rand__
cmd(retcode = self.retcode, stdin = None, stdout = None, stderr = None)
File "/nail/home/josnyder/consul-backups/env/local/lib/python2.7/site-packages/plumbum/commands/base.py", line 89, in __call__
return self.run(args, **kwargs)[1]
File "/nail/home/josnyder/consul-backups/env/local/lib/python2.7/site-packages/plumbum/commands/base.py", line 219, in run
return p.run()
File "/nail/home/josnyder/consul-backups/env/local/lib/python2.7/site-packages/plumbum/commands/base.py", line 181, in runner
return run_proc(p, retcode, timeout)
File "/nail/home/josnyder/consul-backups/env/local/lib/python2.7/site-packages/plumbum/commands/processes.py", line 217, in run_proc
return _check_process(proc, retcode, timeout, stdout, stderr)
File "/nail/home/josnyder/consul-backups/env/local/lib/python2.7/site-packages/plumbum/commands/processes.py", line 34, in _check_process
stdout, stderr)
plumbum.commands.processes.ProcessExecutionError: Command line: ['/usr/bin/head', '-c', '10']
Exit code: 1
```
In this example, the process that failed is the cat process. The exception message is misleading because it implies that the head process failed. | tomerfiliba/plumbum | diff --git a/tests/test_local.py b/tests/test_local.py
index d5bd458..a1fadcd 100644
--- a/tests/test_local.py
+++ b/tests/test_local.py
@@ -7,6 +7,7 @@ from plumbum import (local, LocalPath, FG, BG, TF, RETCODE, ERROUT,
CommandNotFound, ProcessExecutionError, ProcessTimedOut)
from plumbum.lib import six, IS_WIN32
from plumbum.fs.atomic import AtomicFile, AtomicCounterFile, PidFile
+from plumbum.machines.local import LocalCommand
from plumbum.path import RelativePath
import plumbum
from plumbum._testtools import (skipIf, skip_on_windows,
@@ -295,13 +296,26 @@ class LocalMachineTest(unittest.TestCase):
def test_run(self):
from plumbum.cmd import ls, grep
- rc, out, err = (ls | grep["non_exist1N9"]).run(retcode = 1)
+ rc, out, err = (ls | grep["non_exist1N9"]).run(retcode = (0, 1))
self.assertEqual(rc, 1)
def test_timeout(self):
from plumbum.cmd import sleep
self.assertRaises(ProcessTimedOut, sleep, 10, timeout = 5)
+ @skip_on_windows
+ def test_fair_error_attribution(self):
+ # use LocalCommand directly for predictable argv
+ false = LocalCommand('false')
+ true = LocalCommand('true')
+ try:
+ (false | true) & FG
+ except ProcessExecutionError as e:
+ self.assertEqual(e.argv, ['false'])
+ else:
+ self.fail("Expected a ProcessExecutionError")
+
+
@skip_on_windows
def test_iter_lines_timeout(self):
from plumbum.cmd import ping
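A hedged sketch of the behaviour the new `verify` hook establishes — command paths are resolved by `local`, so the exact `argv` value shown below is an assumption:

```
from plumbum import local, FG, ProcessExecutionError

false, true = local['false'], local['true']
try:
    (false | true) & FG
except ProcessExecutionError as exc:
    # Before the patch exc.argv named the last stage of the pipeline;
    # the source process is now verified first, so the command that
    # actually failed is the one reported.
    print(exc.argv)  # e.g. ['/bin/false']
```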
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 7
} | 1.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": [
"dev-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
bcrypt==4.0.1
certifi==2021.5.30
cffi==1.15.1
cryptography==40.0.2
importlib-metadata==4.8.3
iniconfig==1.1.1
packaging==21.3
paramiko==3.5.1
pluggy==1.0.0
-e git+https://github.com/tomerfiliba/plumbum.git@0db72571329ef8f8167b93c7cd9d737dfb37ebbf#egg=plumbum
py==1.11.0
pycparser==2.21
PyNaCl==1.5.0
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: plumbum
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- bcrypt==4.0.1
- cffi==1.15.1
- cryptography==40.0.2
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- paramiko==3.5.1
- pluggy==1.0.0
- py==1.11.0
- pycparser==2.21
- pynacl==1.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/plumbum
| [
"tests/test_local.py::LocalMachineTest::test_fair_error_attribution"
]
| [
"tests/test_local.py::LocalMachineTest::test_arg_expansion",
"tests/test_local.py::LocalMachineTest::test_as_user",
"tests/test_local.py::LocalMachineTest::test_atomic_counter",
"tests/test_local.py::LocalMachineTest::test_atomic_file2",
"tests/test_local.py::LocalMachineTest::test_bound_env",
"tests/test_local.py::LocalMachineTest::test_cwd",
"tests/test_local.py::LocalMachineTest::test_env",
"tests/test_local.py::LocalMachineTest::test_imports",
"tests/test_local.py::LocalMachineTest::test_iter_lines_error",
"tests/test_local.py::LocalMachineTest::test_iter_lines_timeout",
"tests/test_local.py::LocalMachineTest::test_list_processes",
"tests/test_local.py::LocalMachineTest::test_local",
"tests/test_local.py::LocalMachineTest::test_mixing_chdir",
"tests/test_local.py::LocalMachineTest::test_modifiers",
"tests/test_local.py::LocalMachineTest::test_path",
"tests/test_local.py::LocalMachineTest::test_pgrep",
"tests/test_local.py::LocalMachineTest::test_pid_file",
"tests/test_local.py::LocalMachineTest::test_pipeline_failure",
"tests/test_local.py::LocalMachineTest::test_piping",
"tests/test_local.py::LocalMachineTest::test_popen",
"tests/test_local.py::LocalMachineTest::test_quoting",
"tests/test_local.py::LocalMachineTest::test_redirection",
"tests/test_local.py::LocalMachineTest::test_run",
"tests/test_local.py::LocalMachineTest::test_session",
"tests/test_local.py::LocalMachineTest::test_timeout"
]
| [
"tests/test_local.py::LocalPathTest::test_chown",
"tests/test_local.py::LocalPathTest::test_compare_pathlib",
"tests/test_local.py::LocalPathTest::test_dirname",
"tests/test_local.py::LocalPathTest::test_name",
"tests/test_local.py::LocalPathTest::test_newname",
"tests/test_local.py::LocalPathTest::test_parts",
"tests/test_local.py::LocalPathTest::test_read_write",
"tests/test_local.py::LocalPathTest::test_relative_to",
"tests/test_local.py::LocalPathTest::test_root_drive",
"tests/test_local.py::LocalPathTest::test_split",
"tests/test_local.py::LocalPathTest::test_stem",
"tests/test_local.py::LocalPathTest::test_suffix",
"tests/test_local.py::LocalPathTest::test_suffix_expected",
"tests/test_local.py::LocalPathTest::test_uri",
"tests/test_local.py::LocalMachineTest::test_atomic_counter2",
"tests/test_local.py::LocalMachineTest::test_atomic_file",
"tests/test_local.py::LocalMachineTest::test_contains",
"tests/test_local.py::LocalMachineTest::test_direct_open_tmpdir",
"tests/test_local.py::LocalMachineTest::test_get",
"tests/test_local.py::LocalMachineTest::test_getattr",
"tests/test_local.py::LocalMachineTest::test_issue_139",
"tests/test_local.py::LocalMachineTest::test_links",
"tests/test_local.py::LocalMachineTest::test_local_daemon",
"tests/test_local.py::LocalMachineTest::test_nesting_lists_as_argv",
"tests/test_local.py::LocalMachineTest::test_read_write",
"tests/test_local.py::LocalMachineTest::test_shadowed_by_dir",
"tests/test_local.py::LocalMachineTest::test_tempdir"
]
| []
| MIT License | 295 | [
"plumbum/machines/local.py",
"plumbum/commands/processes.py",
"plumbum/machines/session.py",
"plumbum/machines/base.py",
"plumbum/cli/progress.py",
"plumbum/commands/base.py",
"plumbum/machines/paramiko_machine.py"
]
| [
"plumbum/machines/local.py",
"plumbum/commands/processes.py",
"plumbum/machines/session.py",
"plumbum/machines/base.py",
"plumbum/cli/progress.py",
"plumbum/commands/base.py",
"plumbum/machines/paramiko_machine.py"
]
|
|
refnx__refnx-15 | 828e06645c2d04138505c4b22b1348e4a92ffa82 | 2015-11-12 03:41:18 | 568a56132fe0cd8418cff41ffedfc276bdb99af4 | diff --git a/refnx/dataset/data1d.py b/refnx/dataset/data1d.py
index 11737c90..cc231aca 100644
--- a/refnx/dataset/data1d.py
+++ b/refnx/dataset/data1d.py
@@ -1,5 +1,5 @@
""""
- A basic representation of a 1D dataset
+A basic representation of a 1D dataset
"""
from __future__ import division
@@ -12,25 +12,24 @@ import refnx.util.nsplice as nsplice
class Data1D(object):
"""
A basic representation of a 1D dataset.
- """
+ Parameters
+ ----------
+ data_tuple : tuple of np.ndarray, optional
+ Tuple containing the data. The tuple should have between 2 and 4
+ members.
+ data_tuple[0] - x
+ data_tuple[1] - y
+ data_tuple[2] - standard deviation of y, y_sd
+ data_tuple[3] - standard deviation of x, x_sd
+
+ `data_tuple` must be at least two long, `x` and `y`.
+ If the tuple is at least 3 long then the third member is `y_sd`.
+ If the tuple is 4 long then the fourth member is `x_sd`.
+ All arrays must have the same shape.
+ """
def __init__(self, data_tuple=None, curvefitter=None):
- """
- Parameters
- ----------
- dataTuple : tuple of np.ndarray, optional
- Tuple containing the data. The tuple should have between 2 and 4
- members.
- data_tuple[0] - x
- data_tuple[1] - y
- data_tuple[2] - standard deviation of y, y_sd
- data_tuple[3] - standard deviation of x, x_sd
-
- `data_tuple` must be at least two long, `x` and `y`.
- If the tuple is at least 3 long then the third member is `y_sd`.
- If the tuple is 4 long then the fourth member is `x_sd`.
- All arrays must have the same shape.
- """
+
self.filename = None
self.fit = None
self.params = None
diff --git a/refnx/dataset/reflectdataset.py b/refnx/dataset/reflectdataset.py
index e6b4fad9..a18958d2 100644
--- a/refnx/dataset/reflectdataset.py
+++ b/refnx/dataset/reflectdataset.py
@@ -52,8 +52,9 @@ class ReflectDataset(Data1D):
Parameters
----------
- f : file-like
- The file to save the data to.
+ f : str or file-like
+ The file to write the spectrum to, or a str that specifies the file
+ name
"""
s = string.Template(self._template_ref_xml)
self.time = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
@@ -66,7 +67,17 @@ class ReflectDataset(Data1D):
self._xdataSD = repr(self.x_sd.tolist()).strip(',[]')
thefile = s.safe_substitute(self.__dict__)
- f.write(thefile)
+
+ auto_fh = None
+ g = f
+ if not hasattr(f, 'write'):
+ auto_fh = open(f, 'wb')
+ g = auto_fh
+
+ g.write(thefile.encode('utf-8'))
+
+ if auto_fh is not None:
+ auto_fh.close()
def load(self, f):
"""
@@ -75,13 +86,14 @@ class ReflectDataset(Data1D):
Parameters
----------
f : str or file-like
- File to load reflectivity data from.
+ The file to load the spectrum from, or a str that specifies the file
+ name
"""
- own_fh = None
+ auto_fh = None
g = f
if not hasattr(f, 'read'):
- own_fh = open(f, 'rb')
- g = own_fh
+ auto_fh = open(f, 'rb')
+ g = auto_fh
try:
tree = ET.ElementTree()
tree.parse(g)
@@ -103,5 +115,5 @@ class ReflectDataset(Data1D):
g.seek(0)
super(ReflectDataset, self).load(g)
finally:
- if own_fh is not None:
- own_fh.close()
\ No newline at end of file
+ if auto_fh is not None:
+ auto_fh.close()
diff --git a/refnx/reduce/event.py b/refnx/reduce/event.py
index 038341dc..1718255d 100644
--- a/refnx/reduce/event.py
+++ b/refnx/reduce/event.py
@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
"""
-unpack streaming file
-@author: andrew
+Unpack event files
"""
import numpy as np
@@ -70,7 +69,7 @@ def process_event_stream(events, frame_bins, t_bins, y_bins, x_bins):
return detector, localframe_bins
-def events(f, endoflastevent=127, max_frames=np.inf):
+def events(f, end_last_event=127, max_frames=np.inf):
"""
Unpacks event data from packedbinary format for the ANSTO Platypus
instrument
@@ -78,27 +77,29 @@ def events(f, endoflastevent=127, max_frames=np.inf):
Parameters
----------
- f : file
- The file to read the data from.
- endoflastevent : uint
- The file position to start the read from. The data starts from byte
- 127.
+ f : file-like or str
+ The file to read the data from. If `f` is not file-like then f is
+ assumed to be a path pointing to the event file.
+ end_last_event : uint
+ The reading of event data starts from `end_last_event + 1`. The default
+ of 127 corresponds to a file header that is 128 bytes long.
max_frames : int
- Stop reading the event file when you get to this many frames.
+ Stop reading the event file when have read this many frames.
Returns
-------
- (f_events, t_events, y_events, x_events), endoflastevent:
+ (f_events, t_events, y_events, x_events), end_last_event:
x_events, y_events, t_events and f_events are numpy arrays containing
- the events. endoflastevent is a byte offset to the end of the last
+ the events. end_last_event is a byte offset to the end of the last
successful event read from the file. Use this value to extract more
events from the same file at a future date.
"""
- if not f:
- return None
+ fi = f
+ auto_f = None
+ if not hasattr(fi, 'read'):
+ auto_f = open(f, 'rb')
+ fi = auto_f
- state = 0
- event_ended = 0
frame_number = -1
dt = 0
t = 0
@@ -110,7 +111,7 @@ def events(f, endoflastevent=127, max_frames=np.inf):
t_events = np.array((), dtype='uint32')
f_events = np.array((), dtype='int32')
- BUFSIZE = 32768
+ bufsize = 32768
while True and frame_number < max_frames:
x_neutrons = []
@@ -118,10 +119,10 @@ def events(f, endoflastevent=127, max_frames=np.inf):
t_neutrons = []
f_neutrons = []
- f.seek(endoflastevent + 1)
- buf = f.read(BUFSIZE)
+ fi.seek(end_last_event + 1)
+ buf = fi.read(bufsize)
- filepos = endoflastevent + 1
+ filepos = end_last_event + 1
if not len(buf):
break
@@ -142,7 +143,7 @@ def events(f, endoflastevent=127, max_frames=np.inf):
state += 1
else:
if state == 2:
- y = y | ((c & 0xF) * 64)
+ y |= (c & 0xF) * 64
if y & 0x200:
y = -(0x100000000 - (y | 0xFFFFFC00))
@@ -153,14 +154,14 @@ def events(f, endoflastevent=127, max_frames=np.inf):
if state == 2:
dt = c >> 4
else:
- dt |= (c) << (2 + 6 * (state - 3))
+ dt |= c << 2 + 6 * (state - 3)
if not event_ended:
state += 1
else:
- #print "got to state", state, event_ended, x, y, frame_number, t, dt
+ # print "got to state", state, event_ended, x, y, frame_number, t, dt
state = 0
- endoflastevent = filepos + i
+ end_last_event = filepos + i
if x == 0 and y == 0 and dt == 0xFFFFFFFF:
t = 0
frame_number += 1
@@ -181,5 +182,9 @@ def events(f, endoflastevent=127, max_frames=np.inf):
t_events = np.append(t_events, t_neutrons)
f_events = np.append(f_events, f_neutrons)
- t_events = t_events // 1000
- return (f_events, t_events, y_events, x_events), endoflastevent
\ No newline at end of file
+ t_events //= 1000
+
+ if auto_f:
+ auto_f.close()
+
+ return (f_events, t_events, y_events, x_events), end_last_event
diff --git a/refnx/reduce/platypusnexus.py b/refnx/reduce/platypusnexus.py
index 51026ba3..e85c6cce 100644
--- a/refnx/reduce/platypusnexus.py
+++ b/refnx/reduce/platypusnexus.py
@@ -886,20 +886,21 @@ class PlatypusNexus(object):
Parameters
----------
- f : file-like object
- The file to write the spectrum to
+ f : file-like or str
+ The file to write the spectrum to, or a str that specifies the file
+ name
scanpoint : int
- Which scanpoint to write.
+ Which scanpoint to write
"""
if self.processed_spectrum is None:
return False
m_lambda = self.processed_spectrum['m_lambda'][scanpoint]
m_spec = self.processed_spectrum['m_spec'][scanpoint]
- m_spec_sd = self.processed_spectrum['m_spec'][scanpoint]
- m_lambda_sd = self.processed_spectrum['m_lambda_sd'][scanpoint]
+ m_spec_sd = self.processed_spectrum['m_spec_sd'][scanpoint]
+ m_lambda_fwhm = self.processed_spectrum['m_lambda_fwhm'][scanpoint]
- stacked_data = np.c_[m_lambda, m_spec, m_spec_sd, m_lambda_sd]
+ stacked_data = np.c_[m_lambda, m_spec, m_spec_sd, m_lambda_fwhm]
np.savetxt(f, stacked_data, delimiter='\t')
return True
@@ -911,12 +912,12 @@ class PlatypusNexus(object):
Parameters
----------
- f : file-like object
- The file to write the spectrum to
+ f : file-like or str
+ The file to write the spectrum to, or a str that specifies the file
+ name
scanpoint : int
- Which scanpoint to write.
+ Which scanpoint to write
"""
-
spectrum_template = """<?xml version="1.0"?>
<REFroot xmlns="">
<REFentry time="$time">
@@ -930,7 +931,6 @@ class PlatypusNexus(object):
</REFdata>
</REFentry>
</REFroot>"""
-
if self.processed_spectrum is None:
return
@@ -941,26 +941,37 @@ class PlatypusNexus(object):
m_lambda = self.processed_spectrum['m_lambda']
m_spec = self.processed_spectrum['m_spec']
- m_spec_sd = self.processed_spectrum['m_spec']
- m_lambda_sd = self.processed_spectrum['m_lambda_sd']
+ m_spec_sd = self.processed_spectrum['m_spec_sd']
+ m_lambda_fwhm = self.processed_spectrum['m_lambda_fwhm']
# sort the data
sorted = np.argsort(self.m_lambda[0])
r = m_spec[:, sorted]
l = m_lambda[:, sorted]
- dl = m_lambda_sd[:, sorted]
+ dl = m_lambda_fwhm [:, sorted]
dr = m_spec_sd[:, sorted]
d['n_spectra'] = self.processed_spectrum['n_spectra']
d['runnumber'] = 'PLP{:07d}'.format(self.cat.datafile_number)
- d['r'] = string.translate(repr(r[scanpoint].tolist()), None, ',[]')
- d['dr'] = string.translate(repr(dr[scanpoint].tolist()), None, ',[]')
- d['l'] = string.translate(repr(l[scanpoint].tolist()), None, ',[]')
- d['dl'] = string.translate(repr(dl[scanpoint].tolist()), None, ',[]')
+ d['r'] = repr(r[scanpoint].tolist()).strip(',[]')
+ d['dr'] = repr(dr[scanpoint].tolist()).strip(',[]')
+ d['l'] = repr(l[scanpoint].tolist()).strip(',[]')
+ d['dl'] = repr(dl[scanpoint].tolist()).strip(',[]')
thefile = s.safe_substitute(d)
- f.write(thefile)
- f.truncate()
+
+ g = f
+ auto_fh = None
+
+ if not hasattr(f, 'write'):
+ auto_fh = open(f, 'wb')
+ g = auto_fh
+
+ g.write(thefile.encode('utf-8'))
+ g.truncate()
+
+ if auto_fh is not None:
+ auto_fh.close()
return True
@@ -968,8 +979,8 @@ class PlatypusNexus(object):
def spectrum(self):
return (self.processed_spectrum['m_lambda'],
self.processed_spectrum['m_spec'],
- self.processed_spectrum['m_spec'],
- self.processed_spectrum['m_lambda_sd'])
+ self.processed_spectrum['m_spec_sd'],
+ self.processed_spectrum['m_lambda_fwhm'])
def create_detector_norm(h5norm, x_min, x_max):
diff --git a/refnx/reduce/reduce.py b/refnx/reduce/reduce.py
index 3f9b7f21..b487a5a5 100644
--- a/refnx/reduce/reduce.py
+++ b/refnx/reduce/reduce.py
@@ -402,7 +402,7 @@ class ReducePlatypus(object):
with open(fname, 'wb') as f:
dataset.save(f)
fname = 'PLP{0:07d}_{1}.xml'.format(self.datafile_number, i)
- with open(fname, 'w') as f:
+ with open(fname, 'wb') as f:
dataset.save_xml(f)
reduction['fname'] = fnames
@@ -481,7 +481,7 @@ def reduce_stitch(reflect_list, direct_list, norm_file_num=None,
with open(fname, 'wb') as f:
combined_dataset.save(f)
fname = 'c_PLP{0:07d}.xml'.format(reflect_list[0])
- with open(fname, 'w') as f:
+ with open(fname, 'wb') as f:
combined_dataset.save_xml(f)
return combined_dataset, fname
diff --git a/refnx/util/general.py b/refnx/util/general.py
index 122e55fc..66fd567a 100644
--- a/refnx/util/general.py
+++ b/refnx/util/general.py
@@ -1,4 +1,7 @@
#!/usr/bin/python
+"""
+Functions for various calculations related to reflectometry
+"""
from __future__ import division
import numpy as np
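To make the rewritten `Data1D` docstring concrete, a small usage sketch — the array values are arbitrary and only chosen to satisfy the equal-shape requirement:

```
import numpy as np
from refnx.dataset.data1d import Data1D

x = np.linspace(0.01, 0.5, 50)
y = x ** -4.0
y_sd = 0.05 * y   # standard deviation of y
x_sd = 0.02 * x   # standard deviation of x

data = Data1D(data_tuple=(x, y, y_sd, x_sd))
```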
| PlatypusNexus.write_spectrum_xml doesn't work.
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-7-1d0132abf492> in <module>()
----> 1 f.write_spectrum_xml('PLP0024619.spec')
C:\Miniconda3\envs\dev3\lib\site-packages\refnx\reduce\platypusnexus.py in write_spectrum_xml(self, f, scanpoint)
953 d['runnumber'] = 'PLP{:07d}'.format(self.cat.datafile_number)
954
--> 955 d['r'] = string.translate(repr(r[scanpoint].tolist()), None, ',[]')
956 d['dr'] = string.translate(repr(dr[scanpoint].tolist()), None, ',[]')
957 d['l'] = string.translate(repr(l[scanpoint].tolist()), None, ',[]')
AttributeError: 'module' object has no attribute 'translate | refnx/refnx | diff --git a/refnx/dataset/test/test_reflectdataset.py b/refnx/dataset/test/test_reflectdataset.py
index 2bfbab6e..36cfa16f 100644
--- a/refnx/dataset/test/test_reflectdataset.py
+++ b/refnx/dataset/test/test_reflectdataset.py
@@ -10,8 +10,15 @@ path = os.path.dirname(os.path.abspath(__file__))
class TestReflectDataset(unittest.TestCase):
def setUp(self):
- pass
-
+ data = ReflectDataset()
+
+ x1 = np.linspace(0, 10, 5)
+ y1 = 2 * x1
+ e1 = np.ones_like(x1)
+ dx1 = np.ones_like(x1)
+ data.add_data((x1, y1, e1, dx1))
+ self.data = data
+
def test_load(self):
# test reflectivity calculation with values generated from Motofit
dataset = ReflectDataset()
@@ -77,6 +84,10 @@ class TestReflectDataset(unittest.TestCase):
assert_(data.npoints==13)
+ def test_save_xml(self):
+ self.data.save_xml('test.xml')
+ with open('test.xml', 'wb') as f:
+ self.data.save_xml(f)
if __name__ == '__main__':
unittest.main()
\ No newline at end of file
diff --git a/refnx/reduce/test/test_event.py b/refnx/reduce/test/test_event.py
index 072b0815..25257484 100644
--- a/refnx/reduce/test/test_event.py
+++ b/refnx/reduce/test/test_event.py
@@ -32,6 +32,13 @@ class TestEvent(unittest.TestCase):
max_f = np.max(f)
assert_equal(9, max_f)
+ def test_open_with_path(self):
+ # give the event reader a file path
+ event_list, fpos = event.events(self.event_file_path, max_frames=10)
+ f, t, y, x = event_list
+ max_f = np.max(f)
+ assert_equal(9, max_f)
+
def test_values(self):
# We know the values of all the events in the file from another program
# test that a set of random events are correct.
diff --git a/refnx/reduce/test/test_platypusnexus.py b/refnx/reduce/test/test_platypusnexus.py
index 706121ae..62bc03ab 100644
--- a/refnx/reduce/test/test_platypusnexus.py
+++ b/refnx/reduce/test/test_platypusnexus.py
@@ -144,6 +144,22 @@ class TestPlatypusNexus(unittest.TestCase):
assert_array_less(res, np.ones_like(res) * 0.08)
assert_array_less(np.ones_like(res) * 0.07, res)
+ def test_save_spectrum(self):
+ # test saving spectrum
+ self.f113.process()
+
+ # can save the spectra by supplying a filename
+ self.f113.write_spectrum_xml('test.xml')
+ self.f113.write_spectrum_dat('test.dat')
+
+ # can save by supplying file handle:
+ with open('test.xml', 'wb') as f:
+ self.f113.write_spectrum_xml(f)
+
+ # can save by supplying file handle:
+ with open('test.dat', 'wb') as f:
+ self.f113.write_spectrum_xml(f)
+
if __name__ == '__main__':
unittest.main()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 6
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "numpy>=1.16.0 scipy>=1.0.0 emcee>=2.2.1 six>=1.11.0 uncertainties>=3.0.1 pandas>=0.23.4 pytest>=3.6.0 h5py>=2.8.0 xlrd>=1.1.0 ptemcee>=1.0.0",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asteval==0.9.26
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
emcee @ file:///home/conda/feedstock_root/build_artifacts/emcee_1713796893786/work
future==0.18.2
h5py @ file:///tmp/build/80754af9/h5py_1593454121459/work
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
lmfit==1.0.3
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
numpy @ file:///tmp/build/80754af9/numpy_and_numpy_base_1603483703303/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pandas==1.1.5
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
ptemcee==1.0.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
python-dateutil @ file:///tmp/build/80754af9/python-dateutil_1626374649649/work
pytz==2021.3
-e git+https://github.com/refnx/refnx.git@828e06645c2d04138505c4b22b1348e4a92ffa82#egg=refnx
scipy @ file:///tmp/build/80754af9/scipy_1597686635649/work
six @ file:///tmp/build/80754af9/six_1644875935023/work
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
uncertainties @ file:///home/conda/feedstock_root/build_artifacts/uncertainties_1720452225073/work
xlrd @ file:///croot/xlrd_1685030938141/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: refnx
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- blas=1.0=openblas
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- emcee=3.1.6=pyhd8ed1ab_0
- future=0.18.2=py36_1
- h5py=2.10.0=py36hd6299e0_1
- hdf5=1.10.6=hb1b8bf9_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgfortran-ng=7.5.0=ha8ba4b0_17
- libgfortran4=7.5.0=ha8ba4b0_17
- libgomp=11.2.0=h1234567_1
- libopenblas=0.3.18=hf726d26_0
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- numpy=1.19.2=py36h6163131_0
- numpy-base=1.19.2=py36h75fe3a5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pandas=1.1.5=py36ha9443f7_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- ptemcee=1.0.0=py_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- python-dateutil=2.8.2=pyhd3eb1b0_0
- pytz=2021.3=pyhd3eb1b0_0
- readline=8.2=h5eee18b_0
- scipy=1.5.2=py36habc2bb6_0
- setuptools=58.0.4=py36h06a4308_0
- six=1.16.0=pyhd3eb1b0_1
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- uncertainties=3.2.2=pyhd8ed1ab_1
- wheel=0.37.1=pyhd3eb1b0_0
- xlrd=2.0.1=pyhd3eb1b0_1
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- asteval==0.9.26
- lmfit==1.0.3
prefix: /opt/conda/envs/refnx
| [
"refnx/dataset/test/test_reflectdataset.py::TestReflectDataset::test_save_xml",
"refnx/reduce/test/test_event.py::TestEvent::test_open_with_path"
]
| [
"refnx/reduce/test/test_platypusnexus.py::TestPlatypusNexus::test_calculate_bins",
"refnx/reduce/test/test_platypusnexus.py::TestPlatypusNexus::test_event",
"refnx/reduce/test/test_platypusnexus.py::TestPlatypusNexus::test_reduction_runs",
"refnx/reduce/test/test_platypusnexus.py::TestPlatypusNexus::test_save_spectrum"
]
| [
"refnx/dataset/test/test_reflectdataset.py::TestReflectDataset::test_add_data",
"refnx/dataset/test/test_reflectdataset.py::TestReflectDataset::test_load",
"refnx/reduce/test/test_event.py::TestEvent::test_max_frames",
"refnx/reduce/test/test_event.py::TestEvent::test_num_events",
"refnx/reduce/test/test_event.py::TestEvent::test_process_event_stream",
"refnx/reduce/test/test_event.py::TestEvent::test_values",
"refnx/reduce/test/test_platypusnexus.py::TestPlatypusNexus::test_background_subtract",
"refnx/reduce/test/test_platypusnexus.py::TestPlatypusNexus::test_background_subtract_line",
"refnx/reduce/test/test_platypusnexus.py::TestPlatypusNexus::test_chod",
"refnx/reduce/test/test_platypusnexus.py::TestPlatypusNexus::test_find_specular_ridge",
"refnx/reduce/test/test_platypusnexus.py::TestPlatypusNexus::test_multiple_acquisitions",
"refnx/reduce/test/test_platypusnexus.py::TestPlatypusNexus::test_phase_angle"
]
| []
| BSD 3-Clause "New" or "Revised" License | 296 | [
"refnx/reduce/event.py",
"refnx/reduce/reduce.py",
"refnx/dataset/reflectdataset.py",
"refnx/dataset/data1d.py",
"refnx/reduce/platypusnexus.py",
"refnx/util/general.py"
]
| [
"refnx/reduce/event.py",
"refnx/reduce/reduce.py",
"refnx/dataset/reflectdataset.py",
"refnx/dataset/data1d.py",
"refnx/reduce/platypusnexus.py",
"refnx/util/general.py"
]
|
|
falconry__falcon-651 | a8154de497b3ec5d6e5579026e74e9073e353819 | 2015-11-13 11:56:13 | b78ffaac7c412d3b3d6cd3c70dd05024d79d2cce | kgriffs: I think this makes sense. It is a breaking change, but one that should be easy for developers to accommodate. A couple testing suggestions inline. | diff --git a/falcon/api.py b/falcon/api.py
index cdc2c66..1a6b944 100644
--- a/falcon/api.py
+++ b/falcon/api.py
@@ -178,7 +178,8 @@ class API(object):
# e.g. a 404.
responder, params, resource = self._get_responder(req)
- self._call_rsrc_mw(middleware_stack, req, resp, resource)
+ self._call_rsrc_mw(middleware_stack, req, resp, resource,
+ params)
responder(req, resp, **params)
self._call_resp_mw(middleware_stack, req, resp, resource)
@@ -537,13 +538,13 @@ class API(object):
# Put executed component on the stack
stack.append(component) # keep track from outside
- def _call_rsrc_mw(self, stack, req, resp, resource):
+ def _call_rsrc_mw(self, stack, req, resp, resource, params):
"""Run process_resource middleware methods."""
for component in self._middleware:
_, process_resource, _ = component
if process_resource is not None:
- process_resource(req, resp, resource)
+ process_resource(req, resp, resource, params)
def _call_resp_mw(self, stack, req, resp, resource):
"""Run process_response middleware."""
| Pass params to process_resource
This is needed for feature parity with global *before* hooks. We can use a shim to avoid breaking existing middleware. | falconry/falcon | diff --git a/tests/test_httpstatus.py b/tests/test_httpstatus.py
index 3569e6b..f525be1 100644
--- a/tests/test_httpstatus.py
+++ b/tests/test_httpstatus.py
@@ -166,7 +166,7 @@ class TestHTTPStatusWithGlobalHooks(testing.TestBase):
def test_raise_status_in_process_resource(self):
""" Make sure we can raise status from middleware process resource """
class TestMiddleware:
- def process_resource(self, req, resp, resource):
+ def process_resource(self, req, resp, resource, params):
raise HTTPStatus(falcon.HTTP_200,
headers={"X-Failed": "False"},
body="Pass")
diff --git a/tests/test_middlewares.py b/tests/test_middlewares.py
index 226e57f..519852a 100644
--- a/tests/test_middlewares.py
+++ b/tests/test_middlewares.py
@@ -1,3 +1,5 @@
+import json
+
import falcon
import falcon.testing as testing
from datetime import datetime
@@ -11,7 +13,7 @@ class RequestTimeMiddleware(object):
global context
context['start_time'] = datetime.utcnow()
- def process_resource(self, req, resp, resource):
+ def process_resource(self, req, resp, resource, params):
global context
context['mid_time'] = datetime.utcnow()
@@ -34,7 +36,7 @@ class ExecutedFirstMiddleware(object):
context['executed_methods'].append(
'{0}.{1}'.format(self.__class__.__name__, 'process_request'))
- def process_resource(self, req, resp, resource):
+ def process_resource(self, req, resp, resource, params):
global context
context['executed_methods'].append(
'{0}.{1}'.format(self.__class__.__name__, 'process_resource'))
@@ -55,9 +57,17 @@ class RemoveBasePathMiddleware(object):
req.path = req.path.replace('/base_path', '', 1)
+class AccessParamsMiddleware(object):
+
+ def process_resource(self, req, resp, resource, params):
+ global context
+ params['added'] = True
+ context['params'] = params
+
+
class MiddlewareClassResource(object):
- def on_get(self, req, resp):
+ def on_get(self, req, resp, **kwargs):
resp.status = falcon.HTTP_200
resp.body = {'status': 'ok'}
@@ -368,3 +378,23 @@ class TestRemoveBasePathMiddleware(TestMiddleware):
self.assertEqual(self.srmock.status, falcon.HTTP_200)
self.simulate_request('/base_pathIncorrect/sub_path')
self.assertEqual(self.srmock.status, falcon.HTTP_404)
+
+
+class TestResourceMiddleware(TestMiddleware):
+
+ def test_can_access_resource_params(self):
+ """Test that params can be accessed from within process_resource"""
+ global context
+
+ class Resource:
+ def on_get(self, req, resp, **params):
+ resp.data = json.dumps(params)
+
+ self.api = falcon.API(middleware=AccessParamsMiddleware())
+ self.api.add_route('/path/{id}', Resource())
+ resp = self.simulate_request('/path/22')
+
+ self.assertIn('params', context)
+ self.assertTrue(context['params'])
+ self.assertEqual(context['params']['id'], '22')
+ self.assertEqual(json.loads(resp[0]), {"added": True, "id": "22"})
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"coverage",
"ddt",
"pyyaml",
"requests",
"testtools",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///croot/attrs_1668696182826/work
certifi @ file:///croot/certifi_1671487769961/work/certifi
charset-normalizer==3.4.1
coverage==7.2.7
ddt==1.7.2
-e git+https://github.com/falconry/falcon.git@a8154de497b3ec5d6e5579026e74e9073e353819#egg=falcon
flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core
idna==3.10
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
nose==1.3.7
packaging @ file:///croot/packaging_1671697413597/work
pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pytest==7.1.2
python-mimeparse==1.6.0
PyYAML==6.0.1
requests==2.31.0
six==1.17.0
testtools==2.7.1
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
typing_extensions @ file:///croot/typing_extensions_1669924550328/work
urllib3==2.0.7
zipp @ file:///croot/zipp_1672387121353/work
| name: falcon
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=22.1.0=py37h06a4308_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- flit-core=3.6.0=pyhd3eb1b0_0
- importlib-metadata=4.11.3=py37h06a4308_0
- importlib_metadata=4.11.3=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=22.0=py37h06a4308_0
- pip=22.3.1=py37h06a4308_0
- pluggy=1.0.0=py37h06a4308_1
- py=1.11.0=pyhd3eb1b0_0
- pytest=7.1.2=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py37h06a4308_0
- typing_extensions=4.4.0=py37h06a4308_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zipp=3.11.0=py37h06a4308_0
- zlib=1.2.13=h5eee18b_1
- pip:
- charset-normalizer==3.4.1
- coverage==7.2.7
- ddt==1.7.2
- idna==3.10
- nose==1.3.7
- python-mimeparse==1.6.0
- pyyaml==6.0.1
- requests==2.31.0
- six==1.17.0
- testtools==2.7.1
- urllib3==2.0.7
prefix: /opt/conda/envs/falcon
| [
"tests/test_httpstatus.py::TestHTTPStatusWithGlobalHooks::test_raise_status_in_process_resource",
"tests/test_middlewares.py::TestRequestTimeMiddleware::test_log_get_request",
"tests/test_middlewares.py::TestSeveralMiddlewares::test_generate_trans_id_and_time_with_request",
"tests/test_middlewares.py::TestSeveralMiddlewares::test_middleware_execution_order",
"tests/test_middlewares.py::TestSeveralMiddlewares::test_order_mw_executed_when_exception_in_resp",
"tests/test_middlewares.py::TestSeveralMiddlewares::test_order_mw_executed_when_exception_in_rsrc",
"tests/test_middlewares.py::TestResourceMiddleware::test_can_access_resource_params"
]
| []
| [
"tests/test_httpstatus.py::TestHTTPStatus::test_raise_status_empty_body",
"tests/test_httpstatus.py::TestHTTPStatus::test_raise_status_in_before_hook",
"tests/test_httpstatus.py::TestHTTPStatus::test_raise_status_in_responder",
"tests/test_httpstatus.py::TestHTTPStatus::test_raise_status_runs_after_hooks",
"tests/test_httpstatus.py::TestHTTPStatus::test_raise_status_survives_after_hooks",
"tests/test_httpstatus.py::TestHTTPStatusWithGlobalHooks::test_raise_status_in_before_hook",
"tests/test_httpstatus.py::TestHTTPStatusWithGlobalHooks::test_raise_status_in_process_request",
"tests/test_httpstatus.py::TestHTTPStatusWithGlobalHooks::test_raise_status_runs_after_hooks",
"tests/test_httpstatus.py::TestHTTPStatusWithGlobalHooks::test_raise_status_runs_process_response",
"tests/test_httpstatus.py::TestHTTPStatusWithGlobalHooks::test_raise_status_survives_after_hooks",
"tests/test_middlewares.py::TestRequestTimeMiddleware::test_add_invalid_middleware",
"tests/test_middlewares.py::TestRequestTimeMiddleware::test_response_middleware_raises_exception",
"tests/test_middlewares.py::TestTransactionIdMiddleware::test_generate_trans_id_with_request",
"tests/test_middlewares.py::TestSeveralMiddlewares::test_inner_mw_throw_exception",
"tests/test_middlewares.py::TestSeveralMiddlewares::test_inner_mw_with_ex_handler_throw_exception",
"tests/test_middlewares.py::TestSeveralMiddlewares::test_order_mw_executed_when_exception_in_req",
"tests/test_middlewares.py::TestSeveralMiddlewares::test_outer_mw_with_ex_handler_throw_exception",
"tests/test_middlewares.py::TestRemoveBasePathMiddleware::test_base_path_is_removed_before_routing"
]
| []
| Apache License 2.0 | 298 | [
"falcon/api.py"
]
| [
"falcon/api.py"
]
|
Stranger6667__pyanyapi-33 | 433dc8263c6c346d772120ad6f402714eb95c72a | 2015-11-15 11:30:16 | aebee636ad26f387850a6c8ab820ce4aac3f9adb | codecov-io: ## [Current coverage][1] is `100.00%`
> Merging **#33** into **master** will not affect coverage as of [`13e366f`][3]
```diff
@@ master #33 diff @@
======================================
Files 7 7
Stmts 348 351 +3
Branches 36 38 +2
Methods 0 0
======================================
+ Hit 348 351 +3
Partial 0 0
Missed 0 0
```
> Review entire [Coverage Diff][4] as of [`13e366f`][3]
[1]: https://codecov.io/github/Stranger6667/pyanyapi?ref=13e366f7c2915fc8f7f7e7c1396ac9772f6942e8
[2]: https://codecov.io/github/Stranger6667/pyanyapi/features/suggestions?ref=13e366f7c2915fc8f7f7e7c1396ac9772f6942e8
[3]: https://codecov.io/github/Stranger6667/pyanyapi/commit/13e366f7c2915fc8f7f7e7c1396ac9772f6942e8
[4]: https://codecov.io/github/Stranger6667/pyanyapi/compare/b1f0bc477bb850dc39c4646ff359e59d764a9f77...13e366f7c2915fc8f7f7e7c1396ac9772f6942e8
> Powered by [Codecov](https://codecov.io). Updated on successful CI builds. | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 016a9b7..ecfdb61 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,7 @@ History
----------------
* Fixed `lxml` installation on PyPy (#34).
+* Add support for subparsers (#32).
0.5.3 - 30.10.2015
----------------
diff --git a/README.rst b/README.rst
index ad508c4..7600418 100644
--- a/README.rst
+++ b/README.rst
@@ -105,6 +105,36 @@ list. Here comes "base-children" setup style.
>>> api.test
['123 ', ' 234']
+There is another option to interact with sub-elements. Sub parsers!
+
+.. code:: python
+
+ from pyanyapi import HTMLParser
+
+
+ class SubParser(HTMLParser):
+ settings = {
+ 'href': 'string(//@href)',
+ 'text': 'string(//text())'
+ }
+
+
+ class Parser(HTMLParser):
+ settings = {
+ 'elem': {
+ 'base': './/a',
+ 'parser': SubParser
+ }
+ }
+
+ >>> api = Parser().parse('<html><body><a href='#test'>test</body></html>')
+ >>> api.elem[0].href
+ #test
+ >>> api.elem[0].text
+ test
+
+Also you can pass sub parsers as classes or like instances.
+
Settings inheritance
~~~~~~~~~~~~~~~~~~~~
diff --git a/pyanyapi/interfaces.py b/pyanyapi/interfaces.py
index 121f684..53734a7 100644
--- a/pyanyapi/interfaces.py
+++ b/pyanyapi/interfaces.py
@@ -141,6 +141,12 @@ class XPathInterface(BaseInterface):
child_query = settings.get('children')
if child_query:
return [self.maybe_strip(''.join(element.xpath(child_query))) for element in result]
+ sub_parser = settings.get('parser')
+ if sub_parser:
+ return [
+ (sub_parser() if callable(sub_parser) else sub_parser).parse(etree.tostring(element))
+ for element in result
+ ]
return result
return self.parse(settings)
| Improve children syntax
As we discussed =D
Add support for parsing the children of every parent element.
Example settings:
```python
class IMDBParser(HTMLParser):
settings = {
'films_blocks': {
'base': '//div[@class="media"]',
'children': {
'title': '/h4',
'link': '/span/a/@href'
}
}
}
```
Example usage:
```
parsed_elements.films_blocks[0].title
```
or something like a dictionary of children on the Element object.
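For reference, the shape that was eventually merged (see the patch above) attaches a whole sub-parser instead of a flat `children` mapping. A minimal sketch modeled on the README example from the diff; the XPath expressions here are illustrative:

```python
from pyanyapi import HTMLParser


class FilmBlockParser(HTMLParser):
    # runs against each serialized <div class="media"> element
    settings = {
        'title': 'string(//h4/text())',
        'link': 'string(//a/@href)',
    }


class IMDBParser(HTMLParser):
    settings = {
        'films_blocks': {
            'base': '//div[@class="media"]',
            'parser': FilmBlockParser,  # a class or an instance both work
        }
    }


html = '<div class="media"><h4>A Film</h4><a href="/film/1"></a></div>'
api = IMDBParser().parse(html)
print(api.films_blocks[0].title)  # A Film
print(api.films_blocks[0].link)   # /film/1
```

Each element matched by `base` is serialized with `etree.tostring` and re-parsed by the sub-parser, so the sub-parser's XPath queries run relative to that element alone.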
| Stranger6667/pyanyapi | diff --git a/tests/conftest.py b/tests/conftest.py
index 06f9542..d127796 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -4,7 +4,7 @@ import sys
import pytest
-from pyanyapi import JSONParser, RegExpParser, CombinedParser, interface_property, interface_method
+from pyanyapi import JSONParser, RegExpParser, CombinedParser, interface_property, interface_method, HTMLParser
class EmptyValuesParser(CombinedParser):
@@ -74,6 +74,13 @@ class ChildParser(ParentParser):
}
+class SubParser(HTMLParser):
+ settings = {
+ 'href': 'string(//@href)',
+ 'text': 'string(//text())'
+ }
+
+
class SimpleParser(RegExpParser):
settings = {
'test': '\d+.\d+',
diff --git a/tests/test_parsers.py b/tests/test_parsers.py
index e13a167..163b42c 100644
--- a/tests/test_parsers.py
+++ b/tests/test_parsers.py
@@ -4,12 +4,21 @@ import re
import pytest
from ._compat import patch
-from .conftest import ChildParser, SimpleParser, lxml_is_supported, lxml_is_not_supported
-from pyanyapi import XMLObjectifyParser, XMLParser, JSONParser, YAMLParser, RegExpParser, AJAXParser, CSVParser
+from .conftest import ChildParser, SubParser, SimpleParser, lxml_is_supported, lxml_is_not_supported
+from pyanyapi import (
+ XMLObjectifyParser,
+ XMLParser,
+ JSONParser,
+ YAMLParser,
+ RegExpParser,
+ AJAXParser,
+ CSVParser,
+ HTMLParser,
+)
from pyanyapi.exceptions import ResponseParseError
-HTML_CONTENT = "<html><body><a href='#test'></body></html>"
+HTML_CONTENT = "<html><body><a href='#test'>test</body></html>"
XML_CONTENT = '''<?xml version="1.0" encoding="UTF-8"?>
<response>
<id>32e9a4a2</id>
@@ -289,3 +298,21 @@ def test_csv_parser_error():
parsed = CSVParser({'test': '1:1'}).parse(123)
with pytest.raises(ResponseParseError):
parsed.test
+
+
+@lxml_is_supported
[email protected]('sub_parser', (SubParser, SubParser()))
+def test_children(sub_parser):
+
+ class Parser(HTMLParser):
+ settings = {
+ 'elem': {
+ 'base': './/a',
+ 'parser': sub_parser
+ }
+ }
+
+ api = Parser().parse(HTML_CONTENT)
+ sub_api = api.elem[0]
+ assert sub_api.href == '#test'
+ assert sub_api.text == 'test'
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 3
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [],
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
importlib-metadata==4.8.3
iniconfig==1.1.1
lxml==5.3.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
-e git+https://github.com/Stranger6667/pyanyapi.git@433dc8263c6c346d772120ad6f402714eb95c72a#egg=pyanyapi
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
PyYAML==3.11
tomli==1.2.3
typing_extensions==4.1.1
ujson==4.3.0
zipp==3.6.0
| name: pyanyapi
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- lxml==5.3.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pyyaml==3.11
- tomli==1.2.3
- typing-extensions==4.1.1
- ujson==4.3.0
- zipp==3.6.0
prefix: /opt/conda/envs/pyanyapi
| [
"tests/test_parsers.py::test_children[SubParser]",
"tests/test_parsers.py::test_children[sub_parser1]"
]
| []
| [
"tests/test_parsers.py::test_xml_objectify_parser",
"tests/test_parsers.py::test_xml_objectify_parser_error",
"tests/test_parsers.py::test_xml_parser_error",
"tests/test_parsers.py::test_yaml_parser_error",
"tests/test_parsers.py::test_xml_parsed[settings0]",
"tests/test_parsers.py::test_xml_parsed[settings1]",
"tests/test_parsers.py::test_xml_simple_settings",
"tests/test_parsers.py::test_json_parsed",
"tests/test_parsers.py::test_multiple_parser_join",
"tests/test_parsers.py::test_multiply_parsers_declaration",
"tests/test_parsers.py::test_empty_values[{\"container\":{\"test\":\"value\"}}-test-value]",
"tests/test_parsers.py::test_empty_values[{\"container\":{\"test\":\"value\"}}-second-None]",
"tests/test_parsers.py::test_empty_values[{\"container\":{\"fail\":[1]}}-second-None]",
"tests/test_parsers.py::test_empty_values[{\"container\":[[1],[],[3]]}-third-expected3]",
"tests/test_parsers.py::test_empty_values[{\"container\":null}-null-None]",
"tests/test_parsers.py::test_empty_values[{\"container\":[1,2]}-test-1,2]",
"tests/test_parsers.py::test_attributes",
"tests/test_parsers.py::test_efficient_parsing",
"tests/test_parsers.py::test_simple_config_xml_parser",
"tests/test_parsers.py::test_simple_config_json_parser",
"tests/test_parsers.py::test_settings_inheritance",
"tests/test_parsers.py::test_complex_config",
"tests/test_parsers.py::test_json_parse",
"tests/test_parsers.py::test_json_value_error_parse",
"tests/test_parsers.py::test_regexp_parse",
"tests/test_parsers.py::test_yaml_parse",
"tests/test_parsers.py::test_ajax_parser",
"tests/test_parsers.py::test_ajax_parser_cache",
"tests/test_parsers.py::test_ajax_parser_invalid_settings",
"tests/test_parsers.py::test_parse_memoization",
"tests/test_parsers.py::test_regexp_settings",
"tests/test_parsers.py::test_parse_all",
"tests/test_parsers.py::test_parse_all_combined_parser",
"tests/test_parsers.py::test_parse_csv",
"tests/test_parsers.py::test_parse_csv_custom_delimiter",
"tests/test_parsers.py::test_csv_parser_error"
]
| []
| MIT License | 299 | [
"README.rst",
"CHANGELOG.md",
"pyanyapi/interfaces.py"
]
| [
"README.rst",
"CHANGELOG.md",
"pyanyapi/interfaces.py"
]
|
docker__docker-py-854 | 9ebecb5991303d55fe208114a1de422650c4dcb2 | 2015-11-16 19:50:27 | 2f2d50d0c7be5882b150f6ff3bae31d469720e5b | GordonTheTurtle: Please sign your commits following these rules:
https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work
The easiest way to do this is to amend the last commit:
~~~console
$ git clone -b "840-add-exception-utility-method-for-create-host-config" git@github.com:lots0logs/docker-py.git somewhere
$ cd somewhere
$ git rebase -i HEAD~4
editor opens
change each 'pick' to 'edit'
save the file and quit
$ git commit --amend -s --no-edit
$ git rebase --continue # and repeat the amend for each commit
$ git push -f
~~~
Amending updates the existing PR. You **DO NOT** need to open a new one.
lots0logs: @shin- @dnephin
aanand: LGTM
dnephin: LGTM, but I think it needs a rebase.
It might be that the GitHub "branch protection" is preventing me from merging this because I'm not an owner on this repo.
GordonTheTurtle: Please sign your commits following these rules:
https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work
The easiest way to do this is to amend the last commit:
~~~console
$ git clone -b "840-add-exception-utility-method-for-create-host-config" git@github.com:lots0logs/docker-py.git somewhere
$ cd somewhere
$ git rebase -i HEAD~5
editor opens
change each 'pick' to 'edit'
save the file and quit
$ git commit --amend -s --no-edit
$ git rebase --continue # and repeat the amend for each commit
$ git push -f
~~~
Amending updates the existing PR. You **DO NOT** need to open a new one.
lots0logs: @dnephin | diff --git a/docker/utils/utils.py b/docker/utils/utils.py
index 560ee8e2..9c4bb477 100644
--- a/docker/utils/utils.py
+++ b/docker/utils/utils.py
@@ -236,7 +236,7 @@ def convert_port_bindings(port_bindings):
for k, v in six.iteritems(port_bindings):
key = str(k)
if '/' not in key:
- key = key + '/tcp'
+ key += '/tcp'
if isinstance(v, list):
result[key] = [_convert_port_binding(binding) for binding in v]
else:
@@ -434,7 +434,7 @@ def parse_bytes(s):
s = 0
else:
if s[-2:-1].isalpha() and s[-1].isalpha():
- if (s[-1] == "b" or s[-1] == "B"):
+ if s[-1] == "b" or s[-1] == "B":
s = s[:-1]
units = BYTE_UNITS
suffix = s[-1].lower()
@@ -467,16 +467,32 @@ def parse_bytes(s):
return s
-def create_host_config(
- binds=None, port_bindings=None, lxc_conf=None, publish_all_ports=False,
- links=None, privileged=False, dns=None, dns_search=None, volumes_from=None,
- network_mode=None, restart_policy=None, cap_add=None, cap_drop=None,
- devices=None, extra_hosts=None, read_only=None, pid_mode=None,
- ipc_mode=None, security_opt=None, ulimits=None, log_config=None,
- mem_limit=None, memswap_limit=None, mem_swappiness=None,
- cgroup_parent=None, group_add=None, cpu_quota=None, cpu_period=None,
- oom_kill_disable=False, version=None
-):
+def host_config_type_error(param, param_value, expected):
+ error_msg = 'Invalid type for {0} param: expected {1} but found {2}'
+ return TypeError(error_msg.format(param, expected, type(param_value)))
+
+
+def host_config_version_error(param, version, less_than=True):
+ operator = '<' if less_than else '>'
+ error_msg = '{0} param is not supported in API versions {1} {2}'
+ return errors.InvalidVersion(error_msg.format(param, operator, version))
+
+
+def host_config_value_error(param, param_value):
+ error_msg = 'Invalid value for {0} param: {1}'
+ return ValueError(error_msg.format(param, param_value))
+
+
+def create_host_config(binds=None, port_bindings=None, lxc_conf=None,
+ publish_all_ports=False, links=None, privileged=False,
+ dns=None, dns_search=None, volumes_from=None,
+ network_mode=None, restart_policy=None, cap_add=None,
+ cap_drop=None, devices=None, extra_hosts=None,
+ read_only=None, pid_mode=None, ipc_mode=None,
+ security_opt=None, ulimits=None, log_config=None,
+ mem_limit=None, memswap_limit=None, mem_swappiness=None,
+ cgroup_parent=None, group_add=None, cpu_quota=None,
+ cpu_period=None, oom_kill_disable=False, version=None):
host_config = {}
@@ -496,24 +512,21 @@ def create_host_config(
if memswap_limit is not None:
if isinstance(memswap_limit, six.string_types):
memswap_limit = parse_bytes(memswap_limit)
+
host_config['MemorySwap'] = memswap_limit
if mem_swappiness is not None:
if version_lt(version, '1.20'):
- raise errors.InvalidVersion(
- 'mem_swappiness param not supported for API version < 1.20'
- )
+ raise host_config_version_error('mem_swappiness', '1.20')
if not isinstance(mem_swappiness, int):
- raise TypeError(
- 'Invalid type for mem_swappiness param: expected int but'
- ' found {0}'.format(type(mem_swappiness))
+ raise host_config_type_error(
+ 'mem_swappiness', mem_swappiness, 'int'
)
+
host_config['MemorySwappiness'] = mem_swappiness
if pid_mode not in (None, 'host'):
- raise errors.DockerException(
- 'Invalid value for pid param: {0}'.format(pid_mode)
- )
+ raise host_config_value_error('pid_mode', pid_mode)
elif pid_mode:
host_config['PidMode'] = pid_mode
@@ -524,10 +537,9 @@ def create_host_config(
host_config['Privileged'] = privileged
if oom_kill_disable:
- if version_lt(version, '1.19'):
- raise errors.InvalidVersion(
- 'oom_kill_disable param not supported for API version < 1.19'
- )
+ if version_lt(version, '1.20'):
+ raise host_config_version_error('oom_kill_disable', '1.19')
+
host_config['OomKillDisable'] = oom_kill_disable
if publish_all_ports:
@@ -545,6 +557,11 @@ def create_host_config(
host_config['NetworkMode'] = 'default'
if restart_policy:
+ if not isinstance(restart_policy, dict):
+ raise host_config_type_error(
+ 'restart_policy', restart_policy, 'dict'
+ )
+
host_config['RestartPolicy'] = restart_policy
if cap_add:
@@ -558,9 +575,8 @@ def create_host_config(
if group_add:
if version_lt(version, '1.20'):
- raise errors.InvalidVersion(
- 'group_add param not supported for API version < 1.20'
- )
+ raise host_config_version_error('group_add', '1.20')
+
host_config['GroupAdd'] = [six.text_type(grp) for grp in group_add]
if dns is not None:
@@ -568,24 +584,21 @@ def create_host_config(
if security_opt is not None:
if not isinstance(security_opt, list):
- raise errors.DockerException(
- 'Invalid type for security_opt param: expected list but found'
- ' {0}'.format(type(security_opt))
- )
+ raise host_config_type_error('security_opt', security_opt, 'list')
+
host_config['SecurityOpt'] = security_opt
if volumes_from is not None:
if isinstance(volumes_from, six.string_types):
volumes_from = volumes_from.split(',')
+
host_config['VolumesFrom'] = volumes_from
if binds is not None:
host_config['Binds'] = convert_volume_binds(binds)
if port_bindings is not None:
- host_config['PortBindings'] = convert_port_bindings(
- port_bindings
- )
+ host_config['PortBindings'] = convert_port_bindings(port_bindings)
if extra_hosts is not None:
if isinstance(extra_hosts, dict):
@@ -600,9 +613,7 @@ def create_host_config(
if isinstance(links, dict):
links = six.iteritems(links)
- formatted_links = [
- '{0}:{1}'.format(k, v) for k, v in sorted(links)
- ]
+ formatted_links = ['{0}:{1}'.format(k, v) for k, v in sorted(links)]
host_config['Links'] = formatted_links
@@ -620,10 +631,7 @@ def create_host_config(
if ulimits is not None:
if not isinstance(ulimits, list):
- raise errors.DockerException(
- 'Invalid type for ulimits param: expected list but found'
- ' {0}'.format(type(ulimits))
- )
+ raise host_config_type_error('ulimits', ulimits, 'list')
host_config['Ulimits'] = []
for l in ulimits:
if not isinstance(l, Ulimit):
@@ -633,35 +641,27 @@ def create_host_config(
if log_config is not None:
if not isinstance(log_config, LogConfig):
if not isinstance(log_config, dict):
- raise errors.DockerException(
- 'Invalid type for log_config param: expected LogConfig but'
- ' found {0}'.format(type(log_config))
+ raise host_config_type_error(
+ 'log_config', log_config, 'LogConfig'
)
log_config = LogConfig(**log_config)
+
host_config['LogConfig'] = log_config
if cpu_quota:
if not isinstance(cpu_quota, int):
- raise TypeError(
- 'Invalid type for cpu_quota param: expected int but'
- ' found {0}'.format(type(cpu_quota))
- )
+ raise host_config_type_error('cpu_quota', cpu_quota, 'int')
if version_lt(version, '1.19'):
- raise errors.InvalidVersion(
- 'cpu_quota param not supported for API version < 1.19'
- )
+ raise host_config_version_error('cpu_quota', '1.19')
+
host_config['CpuQuota'] = cpu_quota
if cpu_period:
if not isinstance(cpu_period, int):
- raise TypeError(
- 'Invalid type for cpu_period param: expected int but'
- ' found {0}'.format(type(cpu_period))
- )
+ raise host_config_type_error('cpu_period', cpu_period, 'int')
if version_lt(version, '1.19'):
- raise errors.InvalidVersion(
- 'cpu_period param not supported for API version < 1.19'
- )
+ raise host_config_version_error('cpu_period', '1.19')
+
host_config['CpuPeriod'] = cpu_period
return host_config
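Condensed from the diff above: the utility is one small exception factory per error family, so each validation site in `create_host_config` collapses to a one-liner. The `mem_swappiness` value below is deliberately wrong to show the message:

```python
def host_config_type_error(param, param_value, expected):
    error_msg = 'Invalid type for {0} param: expected {1} but found {2}'
    return TypeError(error_msg.format(param, expected, type(param_value)))


def host_config_value_error(param, param_value):
    error_msg = 'Invalid value for {0} param: {1}'
    return ValueError(error_msg.format(param, param_value))


mem_swappiness = '40'  # wrong type on purpose
try:
    if not isinstance(mem_swappiness, int):
        raise host_config_type_error('mem_swappiness', mem_swappiness, 'int')
except TypeError as exc:
    print(exc)
```

Because the factories return exceptions rather than raising them, each caller keeps an explicit `raise` at the call site, which also makes the helpers easy to unit-test in isolation.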
| [enhancement] Add utility method to utils.py for raising errors
PR incoming... | docker/docker-py | diff --git a/tests/integration/container_test.py b/tests/integration/container_test.py
index 03965146..79840a1b 100644
--- a/tests/integration/container_test.py
+++ b/tests/integration/container_test.py
@@ -364,6 +364,13 @@ class CreateContainerTest(helpers.BaseTestCase):
host_config = inspect['HostConfig']
self.assertIn('MemorySwappiness', host_config)
+ def test_create_host_config_exception_raising(self):
+ self.assertRaises(TypeError,
+ self.client.create_host_config, mem_swappiness='40')
+
+ self.assertRaises(ValueError,
+ self.client.create_host_config, pid_mode='40')
+
class VolumeBindTest(helpers.BaseTestCase):
def setUp(self):
diff --git a/tests/unit/api_test.py b/tests/unit/api_test.py
index 62d64e8a..23fd1913 100644
--- a/tests/unit/api_test.py
+++ b/tests/unit/api_test.py
@@ -314,8 +314,7 @@ class DockerApiTest(DockerClientTest):
self.assertIn('SecurityOpt', result)
self.assertEqual(result['SecurityOpt'], security_opt)
self.assertRaises(
- docker.errors.DockerException, self.client.create_host_config,
- security_opt='wrong'
+ TypeError, self.client.create_host_config, security_opt='wrong'
)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 1
} | 1.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"flake8"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi @ file:///croot/certifi_1671487769961/work/certifi
coverage==7.2.7
-e git+https://github.com/docker/docker-py.git@9ebecb5991303d55fe208114a1de422650c4dcb2#egg=docker_py
exceptiongroup==1.2.2
flake8==5.0.4
importlib-metadata==4.2.0
iniconfig==2.0.0
mccabe==0.7.0
packaging==24.0
pluggy==1.2.0
pycodestyle==2.9.1
pyflakes==2.5.0
pytest==7.4.4
pytest-cov==4.1.0
requests==2.5.3
six==1.17.0
tomli==2.0.1
typing_extensions==4.7.1
websocket-client==0.32.0
zipp==3.15.0
| name: docker-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.2.7
- exceptiongroup==1.2.2
- flake8==5.0.4
- importlib-metadata==4.2.0
- iniconfig==2.0.0
- mccabe==0.7.0
- packaging==24.0
- pluggy==1.2.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pytest==7.4.4
- pytest-cov==4.1.0
- requests==2.5.3
- six==1.17.0
- tomli==2.0.1
- typing-extensions==4.7.1
- websocket-client==0.32.0
- zipp==3.15.0
prefix: /opt/conda/envs/docker-py
| [
"tests/unit/api_test.py::DockerApiTest::test_create_host_config_secopt"
]
| []
| [
"tests/unit/api_test.py::DockerApiTest::test_auto_retrieve_server_version",
"tests/unit/api_test.py::DockerApiTest::test_ctor",
"tests/unit/api_test.py::DockerApiTest::test_events",
"tests/unit/api_test.py::DockerApiTest::test_events_with_filters",
"tests/unit/api_test.py::DockerApiTest::test_events_with_since_until",
"tests/unit/api_test.py::DockerApiTest::test_info",
"tests/unit/api_test.py::DockerApiTest::test_remove_link",
"tests/unit/api_test.py::DockerApiTest::test_retrieve_server_version",
"tests/unit/api_test.py::DockerApiTest::test_search",
"tests/unit/api_test.py::DockerApiTest::test_url_compatibility_http",
"tests/unit/api_test.py::DockerApiTest::test_url_compatibility_http_unix_triple_slash",
"tests/unit/api_test.py::DockerApiTest::test_url_compatibility_tcp",
"tests/unit/api_test.py::DockerApiTest::test_url_compatibility_unix",
"tests/unit/api_test.py::DockerApiTest::test_url_compatibility_unix_triple_slash",
"tests/unit/api_test.py::DockerApiTest::test_url_invalid_resource",
"tests/unit/api_test.py::DockerApiTest::test_url_no_resource",
"tests/unit/api_test.py::DockerApiTest::test_url_unversioned_api",
"tests/unit/api_test.py::DockerApiTest::test_url_valid_resource",
"tests/unit/api_test.py::DockerApiTest::test_version",
"tests/unit/api_test.py::DockerApiTest::test_version_no_api_version",
"tests/unit/api_test.py::StreamTest::test_early_stream_response"
]
| []
| Apache License 2.0 | 300 | [
"docker/utils/utils.py"
]
| [
"docker/utils/utils.py"
]
|
rackerlabs__lambda-uploader-29 | abf7e64f20294e7f6f44169ccb496f61018667b7 | 2015-11-18 14:32:02 | c40923a6982a0a3d4fd41b135a4f9b7e97b74f90 | diff --git a/README.md b/README.md
index 13e2baa..b3654f3 100644
--- a/README.md
+++ b/README.md
@@ -47,6 +47,11 @@ To specify an alternative profile that has been defined in `~/.aws/credentials`
lambda-uploader --profile=alternative-profile
```
+To specify an alternative, prexisting virtualenv use the `--virtualenv` parameter.
+```shell
+lambda-uploader --virtualenv=~/.virtualenv/my_custom_virtualenv
+```
+
If you would prefer to upload another way you can tell the uploader to ignore the upload.
This will create a package and leave it in the project directory.
```shell
diff --git a/README.rst b/README.rst
index ad7b199..db1a8b3 100644
--- a/README.rst
+++ b/README.rst
@@ -63,6 +63,12 @@ To specify an alternative profile that has been defined in
lambda-uploader --profile=alternative-profile
+To specify an alternative, prexisting virtualenv use the ``--virtualenv`` parameter.
+
+.. code:: shell
+
+ lambda-uploader --virtualenv=~/.virtualenv/my_custom_virtualenv
+
If you would prefer to upload another way you can tell the uploader to
ignore the upload. This will create a package and leave it in the
project directory.
diff --git a/lambda_uploader/package.py b/lambda_uploader/package.py
index 083077f..8bbbf54 100644
--- a/lambda_uploader/package.py
+++ b/lambda_uploader/package.py
@@ -26,28 +26,38 @@ TEMP_WORKSPACE_NAME = ".lamba_uploader_temp"
ZIPFILE_NAME = 'lambda_function.zip'
-def build_package(path, requirements):
- pkg = Package(path)
+def build_package(path, requirements, virtualenv=None):
+ pkg = Package(path, virtualenv)
pkg.clean_workspace()
pkg.clean_zipfile()
pkg.prepare_workspace()
- pkg.install_requirements(requirements)
+ if virtualenv:
+ if not os.path.isdir(virtualenv):
+ raise Exception("supplied virtualenv %s not found" % virtualenv)
+ LOG.info("Using existing virtualenv found in %s" % virtualenv)
+ else:
+ LOG.info('Building new virtualenv and installing requirements')
+ pkg.prepare_virtualenv()
+ pkg.install_requirements(requirements)
pkg.package()
return pkg
class Package(object):
- def __init__(self, path):
+ def __init__(self, path, virtualenv=None):
self._path = path
self._temp_workspace = os.path.join(path,
TEMP_WORKSPACE_NAME)
self.zip_file = os.path.join(path, ZIPFILE_NAME)
- self._pkg_venv = os.path.join(self._temp_workspace, 'venv')
- self._venv_pip = 'bin/pip'
- if sys.platform == 'win32' or sys.platform == 'cygwin':
- self._venv_pip = 'Scripts\pip.exe'
+ if virtualenv:
+ self._pkg_venv = virtualenv
+ else:
+ self._pkg_venv = os.path.join(self._temp_workspace, 'venv')
+ self._venv_pip = 'bin/pip'
+ if sys.platform == 'win32' or sys.platform == 'cygwin':
+ self._venv_pip = 'Scripts\pip.exe'
def clean_workspace(self):
if os.path.isdir(self._temp_workspace):
@@ -61,6 +71,7 @@ class Package(object):
# Setup temporary workspace
os.mkdir(self._temp_workspace)
+ def prepare_virtualenv(self):
proc = Popen(["virtualenv", self._pkg_venv], stdout=PIPE, stderr=PIPE)
stdout, stderr = proc.communicate()
LOG.debug("Virtualenv stdout: %s" % stdout)
diff --git a/lambda_uploader/shell.py b/lambda_uploader/shell.py
index 027685f..5b57967 100644
--- a/lambda_uploader/shell.py
+++ b/lambda_uploader/shell.py
@@ -50,7 +50,7 @@ def _execute(args):
cfg = config.Config(pth, args.config, role=args.role)
_print('Building Package')
- pkg = package.build_package(pth, cfg.requirements)
+ pkg = package.build_package(pth, cfg.requirements, args.virtualenv)
if not args.no_clean:
pkg.clean_workspace()
@@ -97,6 +97,9 @@ def main(arv=None):
action='store_const',
help='publish an upload to an immutable version',
const=True)
+ parser.add_argument('--virtualenv', '-e',
+ help='use specified virtualenv instead of making one',
+ default=None)
parser.add_argument('--role', dest='role',
default=getenv('LAMBDA_UPLOADER_ROLE'),
help=('IAM role to assign the lambda function, '
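The heart of the change is a single branch in `build_package`: reuse a prebuilt virtualenv when a path is supplied, otherwise build one and install the requirements. A condensed sketch of that control flow, with logging elided (`Package` is the class defined alongside it in `lambda_uploader/package.py`):

```python
import os

def build_package(path, requirements, virtualenv=None):
    pkg = Package(path, virtualenv)
    pkg.clean_workspace()
    pkg.clean_zipfile()
    pkg.prepare_workspace()
    if virtualenv:
        # reuse a virtualenv built elsewhere, e.g. on an Amazon Linux box
        if not os.path.isdir(virtualenv):
            raise Exception("supplied virtualenv %s not found" % virtualenv)
    else:
        # default path: build a fresh venv and pip-install the requirements
        pkg.prepare_virtualenv()
        pkg.install_requirements(requirements)
    pkg.package()
    return pkg
```

On the command line this becomes `lambda-uploader --virtualenv=~/.virtualenv/my_custom_virtualenv`, as the README change documents.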
| Allow a virtualenv for another platform to be included
Instead of building a virtualenv with requirements.txt, allow an existing virtualenv to be used. This would allow us to build a virtualenv for Amazon Linux, and then still deploy lambda functions using lambda-uploader from client machines or other automation that may not be the same architecture as what Lambda runs on.
This is needed for me because PyPI's cryptography package uses different shared libraries on Ubuntu (my desktop) and Amazon Linux (where the function will run). | rackerlabs/lambda-uploader | diff --git a/test/test_package.py b/test/test_package.py
index 88cc9f6..1a64572 100644
--- a/test/test_package.py
+++ b/test/test_package.py
@@ -39,6 +39,7 @@ def test_prepare_workspace():
pkg = package.Package(TESTING_TEMP_DIR)
pkg.prepare_workspace()
+ pkg.prepare_virtualenv()
assert path.isdir(temp_workspace)
assert path.isdir(path.join(temp_workspace, 'venv'))
if sys.platform == 'win32' or sys.platform == 'cygwin':
@@ -63,6 +64,11 @@ def test_install_requirements():
assert path.isdir(path.join(site_packages, '_pytest'))
+def test_existing_virtualenv():
+ pkg = package.Package(TESTING_TEMP_DIR, 'abc')
+ assert pkg._pkg_venv == 'abc'
+
+
def test_package():
pkg = package.Package(TESTING_TEMP_DIR)
pkg.package()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 4
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[all]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": [
"tests/pksetup_data/pksetupunit1/requirements.txt",
"tests/pksetup_data/pksetupunit2/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | boto3==1.1.4
botocore==1.2.11
certifi @ file:///croot/certifi_1671487769961/work/certifi
coverage==7.2.7
distlib==0.3.9
docutils==0.20.1
exceptiongroup==1.2.2
execnet==2.0.2
filelock==3.12.2
futures==2.2.0
importlib-metadata==6.7.0
iniconfig==2.0.0
jmespath==0.10.0
-e git+https://github.com/rackerlabs/lambda-uploader.git@abf7e64f20294e7f6f44169ccb496f61018667b7#egg=lambda_uploader
packaging==24.0
platformdirs==4.0.0
pluggy==1.2.0
pytest==7.4.4
pytest-asyncio==0.21.2
pytest-cov==4.1.0
pytest-mock==3.11.1
pytest-xdist==3.5.0
python-dateutil==2.9.0.post0
six==1.17.0
tomli==2.0.1
typing_extensions==4.7.1
virtualenv==20.26.6
zipp==3.15.0
| name: lambda-uploader
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- boto3==1.1.4
- botocore==1.2.11
- coverage==7.2.7
- distlib==0.3.9
- docutils==0.20.1
- exceptiongroup==1.2.2
- execnet==2.0.2
- filelock==3.12.2
- futures==2.2.0
- importlib-metadata==6.7.0
- iniconfig==2.0.0
- jmespath==0.10.0
- packaging==24.0
- platformdirs==4.0.0
- pluggy==1.2.0
- pytest==7.4.4
- pytest-asyncio==0.21.2
- pytest-cov==4.1.0
- pytest-mock==3.11.1
- pytest-xdist==3.5.0
- python-dateutil==2.9.0.post0
- six==1.17.0
- tomli==2.0.1
- typing-extensions==4.7.1
- virtualenv==20.26.6
- zipp==3.15.0
prefix: /opt/conda/envs/lambda-uploader
| [
"test/test_package.py::test_prepare_workspace",
"test/test_package.py::test_existing_virtualenv"
]
| [
"test/test_package.py::test_install_requirements"
]
| [
"test/test_package.py::test_package_zip_location",
"test/test_package.py::test_package_clean_workspace",
"test/test_package.py::test_package"
]
| []
| Apache License 2.0 | 302 | [
"README.rst",
"lambda_uploader/package.py",
"README.md",
"lambda_uploader/shell.py"
]
| [
"README.rst",
"lambda_uploader/package.py",
"README.md",
"lambda_uploader/shell.py"
]
|
|
ARMmbed__yotta-586 | 852c1e498fbb12938fa28aa388cf2b2b650508fe | 2015-11-19 17:32:49 | 852c1e498fbb12938fa28aa388cf2b2b650508fe | diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000..0150ea4
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,5 @@
+[run]
+parallel=True
+concurrency=multiprocessing
+include=./yotta/*
+
diff --git a/tox.ini b/tox.ini
index 20af1fb..06075d3 100644
--- a/tox.ini
+++ b/tox.ini
@@ -6,14 +6,16 @@ deps=
cython
pylint
coverage
-setenv=
- COVERAGE_PROCESS_START = {toxinidir}/.coveragerc
+passenv=
+ SSH_AUTH_SOCK
commands=
pip install .
- coverage erase
- coverage run --parallel-mode setup.py test
- coverage combine
- coverage report --include="yotta/*"
+ python setup.py test
+ # disable coverage for now: subprocesses aren't being combined correctly
+ # coverage erase
+ # coverage run --parallel-mode setup.py test
+ # coverage combine
+ # coverage report --include="yotta/*"
py27: pylint ./yotta
py33: pylint ./yotta
py34: pylint ./yotta
diff --git a/yotta/lib/validate.py b/yotta/lib/validate.py
index 1ff5b8e..ed38de8 100644
--- a/yotta/lib/validate.py
+++ b/yotta/lib/validate.py
@@ -18,10 +18,12 @@ import pack
Source_Dir_Regex = re.compile('^[a-z0-9_-]*$')
Source_Dir_Invalid_Regex = re.compile('[^a-z0-9_-]*')
-Component_Name_Regex = re.compile('^[a-z0-9-]*$')
Component_Name_Replace_With_Dash = re.compile('[^a-z0-9]+')
Looks_Like_An_Email = re.compile('^[^@]+@[^@]+\.[^@]+$')
+Component_Name_Regex = r'^[a-z]+[a-z0-9-]*$'
+Target_Name_Regex = r'^[a-z]+[a-z0-9+-]*$'
+
# return an error string describing the validation failure, or None if there is
# no error
def sourceDirValidationError(dirname, component_name):
@@ -41,10 +43,15 @@ def sourceDirValidationError(dirname, component_name):
return None
def componentNameValidationError(component_name):
- if not Component_Name_Regex.match(component_name):
+ if not re.match(Component_Name_Regex, component_name):
return 'Module name "%s" is invalid - must contain only lowercase a-z, 0-9 and hyphen, with no spaces.' % component_name
return None
+def targetNameValidationError(target_name):
+ if not re.match(Target_Name_Regex, target_name):
+ return 'Module name "%s" is invalid - must contain only lowercase a-z, 0-9 and hyphen, with no spaces.' % target_name
+ return None
+
def componentNameCoerced(component_name):
return Component_Name_Replace_With_Dash.sub('-', component_name.lower())
@@ -67,6 +74,18 @@ def currentDirectoryModule():
return None
return c
+def currentDirectoryTarget():
+ try:
+ t = target.Target(os.getcwd())
+ except pack.InvalidDescription as e:
+ logging.error(e)
+ return None
+ if not t:
+ logging.error(str(t.error))
+ logging.error('The current directory does not contain a valid target.')
+ return None
+ return t
+
def currentDirectoryModuleOrTarget():
wd = os.getcwd()
errors = []
diff --git a/yotta/main.py b/yotta/main.py
index f1d6055..7147d01 100644
--- a/yotta/main.py
+++ b/yotta/main.py
@@ -25,7 +25,7 @@ from .lib import detect
import yotta.lib.globalconf as globalconf
# hook to support coverage information when yotta runs itself during tests:
-if 'COVERAGE_PROCESS_START' is os.environ:
+if 'COVERAGE_PROCESS_START' in os.environ:
import coverage
coverage.process_startup()
@@ -92,7 +92,7 @@ def main():
description='Build software using re-usable components.\n'+
'For more detailed help on each subcommand, run: yotta <subcommand> --help'
)
- subparser = parser.add_subparsers(metavar='<subcommand>')
+ subparser = parser.add_subparsers(dest='subcommand_name', metavar='<subcommand>')
parser.add_argument('--version', nargs=0, action=FastVersionAction,
help='display the version'
@@ -145,7 +145,8 @@ def main():
'Search for open-source modules and targets that have been published '+
'to the yotta registry (with yotta publish). See help for `yotta '+
'install` for installing modules, and for `yotta target` for '+
- 'switching targets.'
+ 'switching targets.',
+ 'Search for published modules and targets'
)
addParser('init', 'init', 'Create a new module.')
addParser('install', 'install',
@@ -164,8 +165,20 @@ def main():
'Build the current module.'
)
addParser('version', 'version', 'Bump the module version, or (with no arguments) display the current version.')
- addParser('link', 'link', 'Symlink a module.')
- addParser('link-target', 'link_target', 'Symlink a target.')
+ addParser('link', 'link',
+ 'Symlink a module to be used in another module. Use "yotta link" '+
+ '(with no arguments) to link the current module globally. Or use '+
+ '"yotta link module-name" To use a module that was previously linked '+
+ 'globally in the current module.',
+ 'Symlink a module'
+ )
+ addParser('link-target', 'link_target',
+ 'Symlink a target to be used in another module. Use "yotta link-target" '+
+ '(with no arguments) to link the current target globally. Or use '+
+ '"yotta link-target target-name" To use a target that was previously linked '+
+ 'globally in the current module.',
+ 'Symlink a target'
+ )
addParser('update', 'update', 'Update dependencies for the current module, or a specific module.')
addParser('target', 'target', 'Set or display the target device.')
addParser('debug', 'debug', 'Attach a debugger to the current target. Requires target support.')
@@ -186,7 +199,12 @@ def main():
addParser('list', 'list', 'List the dependencies of the current module, or the inherited targets of the current target.')
addParser('outdated', 'outdated', 'Display information about dependencies which have newer versions available.')
addParser('uninstall', 'uninstall', 'Remove a specific dependency of the current module, both from module.json and from disk.')
- addParser('remove', 'remove', 'Remove the downloaded version of a dependency, or un-link a linked module.')
+ addParser('remove', 'remove',
+ 'Remove the downloaded version of a dependency module or target, or '+
+ 'un-link a linked module or target (see yotta link --help for details '+
+ 'of linking). This command does not modify your module.json file.',
+ 'Remove or unlink a dependency without removing it from module.json.'
+ )
addParser('owners', 'owners', 'Add/remove/display the owners of a module or target.')
addParser('licenses', 'licenses', 'List the licenses of the current module and its dependencies.')
addParser('clean', 'clean', 'Remove files created by yotta and the build.')
@@ -195,16 +213,17 @@ def main():
# short synonyms, subparser.choices is a dictionary, so use update() to
# merge in the keys from another dictionary
short_commands = {
- 'up':subparser.choices['update'],
- 'in':subparser.choices['install'],
- 'ln':subparser.choices['link'],
- 'v':subparser.choices['version'],
- 'ls':subparser.choices['list'],
- 'rm':subparser.choices['remove'],
- 'unlink':subparser.choices['remove'],
- 'owner':subparser.choices['owners'],
- 'lics':subparser.choices['licenses'],
- 'who':subparser.choices['whoami']
+ 'up':subparser.choices['update'],
+ 'in':subparser.choices['install'],
+ 'ln':subparser.choices['link'],
+ 'v':subparser.choices['version'],
+ 'ls':subparser.choices['list'],
+ 'rm':subparser.choices['remove'],
+ 'unlink':subparser.choices['remove'],
+ 'unlink-target':subparser.choices['remove'],
+ 'owner':subparser.choices['owners'],
+ 'lics':subparser.choices['licenses'],
+ 'who':subparser.choices['whoami']
}
subparser.choices.update(short_commands)
diff --git a/yotta/remove.py b/yotta/remove.py
index e734003..c961616 100644
--- a/yotta/remove.py
+++ b/yotta/remove.py
@@ -14,22 +14,58 @@ from .lib import validate
def addOptions(parser):
- parser.add_argument('component',
- help='Name of the dependency to remove'
+ parser.add_argument('module', default=None, nargs='?', metavar='<module>',
+ help='Name of the module to remove. If omitted the current module '+
+ 'or target will be removed from the global linking directory.'
)
def execCommand(args, following_args):
- err = validate.componentNameValidationError(args.component)
- if err:
- logging.error(err)
- return 1
- c = validate.currentDirectoryModule()
- if not c:
+ module_or_target = 'module'
+ if 'target' in args.subcommand_name:
+ module_or_target = 'target'
+ if args.module is not None:
+ return removeDependency(args, module_or_target)
+ else:
+ return removeGlobally(module_or_target)
+
+def rmLinkOrDirectory(path, nonexistent_warning):
+ if not os.path.exists(path):
+ logging.warning(nonexistent_warning)
return 1
- path = os.path.join(c.modulesPath(), args.component)
if fsutils.isLink(path):
fsutils.rmF(path)
else:
fsutils.rmRf(path)
+ return 0
+
+def removeGlobally(module_or_target):
+ # folders, , get places to install things, internal
+ from .lib import folders
+ if module_or_target == 'module':
+ global_dir = folders.globalInstallDirectory()
+ p = validate.currentDirectoryModule()
+ else:
+ global_dir = folders.globalTargetInstallDirectory()
+ p = validate.currentDirectoryTarget()
+ if p is None:
+ return 1
+ path = os.path.join(global_dir, p.getName())
+ return rmLinkOrDirectory(path, ('%s is not linked globally' % p.getName()))
+
+def removeDependency(args, module_or_target):
+ c = validate.currentDirectoryModule()
+ if not c:
+ return 1
+ if module_or_target == 'module':
+ subdir = c.modulesPath()
+ err = validate.componentNameValidationError(args.module)
+ else:
+ subdir = c.targetsPath()
+ err = validate.targetNameValidationError(args.module)
+ if err:
+ logging.error(err)
+ return 1
+ path = os.path.join(subdir, args.module)
+ return rmLinkOrDirectory(path, '%s %s not found' % (('dependency', 'target')[module_or_target=='target'], args.module))
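The dispatch at the top of the new `execCommand` above reduces to two decisions: what to remove (module vs. target, inferred from the subcommand name) and where to remove it from (a named dependency vs. the global link directory). Condensed from the diff, with `removeDependency` and `removeGlobally` being the helpers it defines:

```python
def execCommand(args, following_args):
    module_or_target = 'module'
    if 'target' in args.subcommand_name:   # e.g. 'unlink-target'
        module_or_target = 'target'
    if args.module is not None:
        # remove (or unlink) a named dependency of the current module
        return removeDependency(args, module_or_target)
    # no name given: unlink the current module/target from the global dir
    return removeGlobally(module_or_target)
```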
| c:\ytexe yt link does not install module
## Problem
Adding a local library module to an executable module requires 3 steps:
`c:\ytlib yt link`
`c:\ytexe yt link ytlib`
If you run `yt ls` at this point, there is no indication that `ytlib` is linked into the project; you must run either `yt install` or `yt build` to get `ytlib` added to your module.json file:
`c:\ytexe yt install ytlib`
At this point you can see `ytlib` in the `yt ls` command output.
### Example
I'm a developer working locally. I want to add simplelog to my project to help me debug, and maybe tweak a few pretty-print settings in simplelog for fun. So I create the yotta executable `ytexe` and clone the simplelog repo to my local machine, so that they are both in my root directory. Now, to add simplelog to my project, I must run:
`C:\simplelog yt link`
`C:\ytexe yt link simplelog`
At this point I have added simplelog calls to my code and run `yt build`, which fails because the simplelog module has not been added to my module.json, nor does it show up in the `yt ls` output.
I must run `yt install simplelog`.
This may be expected behaviour, and maybe we don't want to change it, but at the very least it makes for a bad user experience.
## Solution
1) have a `yt link <absolute file path>` command that takes care of all these steps
or
2) have `c:\ytexe yt link ytlib` add the module to the module.json, or otherwise make it obvious that it is added to the project; currently there is no feedback.
or
3) when a user runs `c:\ytexe yt link ytlib`, give them a feedback message telling them they need to `yt install ytlib` to finish adding the module. I think this solution is the least optimal because it requires 3 steps instead of 1, but it is the minimum required for user interaction. | ARMmbed/yotta | diff --git a/yotta/test/cli/build.py b/yotta/test/cli/build.py
index bbd0dbb..3ff37bf 100644
--- a/yotta/test/cli/build.py
+++ b/yotta/test/cli/build.py
@@ -6,17 +6,14 @@
# standard library modules, , ,
import unittest
-import os
-import tempfile
import subprocess
import copy
import re
import datetime
# internal modules:
-from yotta.lib.fsutils import mkDirP, rmRf
-from yotta.lib.detect import systemDefaultTarget
from . import cli
+from . import util
Test_Complex = {
'module.json': '''{
@@ -87,59 +84,7 @@ int main(){
'''
}
-
-Test_Trivial_Lib = {
-'module.json':'''{
- "name": "test-trivial-lib",
- "version": "0.0.2",
- "description": "Module to test trivial lib compilation",
- "licenses": [
- {
- "url": "https://spdx.org/licenses/Apache-2.0",
- "type": "Apache-2.0"
- }
- ],
- "dependencies": {
- }
-}''',
-
-'test-trivial-lib/lib.h': '''
-int foo();
-''',
-
-'source/lib.c':'''
-#include "test-trivial-lib/lib.h"
-
-int foo(){
- return 7;
-}
-'''
-}
-
-Test_Trivial_Exe = {
-'module.json':'''{
- "name": "test-trivial-exe",
- "version": "0.0.2",
- "description": "Module to test trivial exe compilation",
- "licenses": [
- {
- "url": "https://spdx.org/licenses/Apache-2.0",
- "type": "Apache-2.0"
- }
- ],
- "dependencies": {
- },
- "bin":"./source"
-}''',
-
-'source/lib.c':'''
-int main(){
- return 0;
-}
-'''
-}
-
-Test_Build_Info = copy.copy(Test_Trivial_Exe)
+Test_Build_Info = copy.copy(util.Test_Trivial_Exe)
Test_Build_Info['source/lib.c'] = '''
#include "stdio.h"
#include YOTTA_BUILD_INFO_HEADER
@@ -202,84 +147,66 @@ int foo(){
'test/g/a/a/b/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }'
}
-def isWindows():
- # can't run tests that hit github without an authn token
- return os.name == 'nt'
-
class TestCLIBuild(unittest.TestCase):
- def writeTestFiles(self, files, add_space_in_path=False):
- test_dir = tempfile.mkdtemp()
- if add_space_in_path:
- test_dir = test_dir + ' spaces in path'
-
- for path, contents in files.items():
- path_dir, file_name = os.path.split(path)
- path_dir = os.path.join(test_dir, path_dir)
- mkDirP(path_dir)
- with open(os.path.join(path_dir, file_name), 'w') as f:
- f.write(contents)
- return test_dir
-
-
- @unittest.skipIf(isWindows(), "can't build natively on windows yet")
+ @unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet")
def test_buildTrivialLib(self):
- test_dir = self.writeTestFiles(Test_Trivial_Lib)
+ test_dir = util.writeTestFiles(util.Test_Trivial_Lib)
- stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], test_dir)
+ stdout = self.runCheckCommand(['--target', util.nativeTarget(), 'build'], test_dir)
- rmRf(test_dir)
+ util.rmRf(test_dir)
- @unittest.skipIf(isWindows(), "can't build natively on windows yet")
+ @unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet")
def test_buildTrivialExe(self):
- test_dir = self.writeTestFiles(Test_Trivial_Exe)
+ test_dir = util.writeTestFiles(util.Test_Trivial_Exe)
- stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], test_dir)
+ stdout = self.runCheckCommand(['--target', util.nativeTarget(), 'build'], test_dir)
- rmRf(test_dir)
+ util.rmRf(test_dir)
- @unittest.skipIf(isWindows(), "can't build natively on windows yet")
+ @unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet")
def test_buildComplex(self):
- test_dir = self.writeTestFiles(Test_Complex)
+ test_dir = util.writeTestFiles(Test_Complex)
- stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], test_dir)
+ stdout = self.runCheckCommand(['--target', util.nativeTarget(), 'build'], test_dir)
- rmRf(test_dir)
+ util.rmRf(test_dir)
- @unittest.skipIf(isWindows(), "can't build natively on windows yet")
+ @unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet")
def test_buildComplexSpaceInPath(self):
- test_dir = self.writeTestFiles(Test_Complex, True)
+ test_dir = util.writeTestFiles(Test_Complex, True)
- stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], test_dir)
+ stdout = self.runCheckCommand(['--target', util.nativeTarget(), 'build'], test_dir)
- rmRf(test_dir)
+ util.rmRf(test_dir)
- @unittest.skipIf(isWindows(), "can't build natively on windows yet")
+ @unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet")
def test_buildTests(self):
- test_dir = self.writeTestFiles(Test_Tests, True)
- stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], test_dir)
- stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'test'], test_dir)
+ test_dir = util.writeTestFiles(Test_Tests, True)
+ stdout = self.runCheckCommand(['--target', util.nativeTarget(), 'build'], test_dir)
+ stdout = self.runCheckCommand(['--target', util.nativeTarget(), 'test'], test_dir)
self.assertIn('test-a', stdout)
self.assertIn('test-c', stdout)
self.assertIn('test-d', stdout)
self.assertIn('test-e', stdout)
self.assertIn('test-f', stdout)
self.assertIn('test-g', stdout)
- rmRf(test_dir)
+ util.rmRf(test_dir)
- @unittest.skipIf(isWindows(), "can't build natively on windows yet")
+ @unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet")
def test_buildInfo(self):
- test_dir = self.writeTestFiles(Test_Build_Info, True)
+ test_dir = util.writeTestFiles(Test_Build_Info, True)
# commit all the test files to git so that the VCS build info gets
# defined:
subprocess.check_call(['git', 'init', '-q'], cwd=test_dir)
subprocess.check_call(['git', 'add', '.'], cwd=test_dir)
subprocess.check_call(['git', 'commit', '-m', 'test build info automated commit', '-q'], cwd=test_dir)
- self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], test_dir)
+ self.runCheckCommand(['--target', util.nativeTarget(), 'build'], test_dir)
build_time = datetime.datetime.utcnow()
- output = subprocess.check_output(['./build/' + systemDefaultTarget().split(',')[0] + '/source/test-trivial-exe'], cwd=test_dir).decode()
+ output = subprocess.check_output(['./build/' + util.nativeTarget().split(',')[0] + '/source/test-trivial-exe'], cwd=test_dir).decode()
self.assertIn('vcs clean: 1', output)
# check build timestamp
diff --git a/yotta/test/cli/cli.py b/yotta/test/cli/cli.py
index 3017aa2..541cedb 100644
--- a/yotta/test/cli/cli.py
+++ b/yotta/test/cli/cli.py
@@ -24,6 +24,10 @@ def run(arguments, cwd='.'):
stdin = subprocess.PIPE
)
out, err = child.communicate()
+ # no command should ever produce a traceback:
+ if 'traceback' in (out.decode('utf-8')+err.decode('utf-8')).lower():
+ print(out+err)
+ assert(False)
return out.decode('utf-8'), err.decode('utf-8'), child.returncode
diff --git a/yotta/test/cli/link.py b/yotta/test/cli/link.py
new file mode 100644
index 0000000..eddb5c5
--- /dev/null
+++ b/yotta/test/cli/link.py
@@ -0,0 +1,112 @@
+#!/usr/bin/env python
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0
+# See LICENSE file for details.
+
+
+# standard library modules, , ,
+import unittest
+import os
+import tempfile
+
+# internal modules:
+from yotta.lib.folders import globalInstallDirectory
+
+from . import cli
+from . import util
+
+Test_Target = 'x86-linux-native'
+
+class TestCLILink(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.prefix_dir = tempfile.mkdtemp()
+ os.environ['YOTTA_PREFIX'] = cls.prefix_dir
+
+ @classmethod
+ def tearDownClass(cls):
+ util.rmRf(cls.prefix_dir)
+ cls.prefix_dir = None
+
+ def testLink(self):
+ linked_in_module = util.writeTestFiles(util.Test_Trivial_Lib, True)
+
+ stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'link'], cwd=linked_in_module)
+ self.assertEqual(statuscode, 0)
+ self.assertTrue(os.path.exists(os.path.join(globalInstallDirectory(), 'test-trivial-lib')))
+
+ test_module = util.writeTestFiles(util.Test_Testing_Trivial_Lib_Dep, True)
+ stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'list'], cwd=test_module)
+ self.assertIn('missing', stdout+stderr)
+
+ stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'link', 'test-trivial-lib'], cwd=test_module)
+ self.assertEqual(statuscode, 0)
+ self.assertNotIn('broken', stdout+stderr)
+
+ stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'list'], cwd=test_module)
+ self.assertNotIn('missing', stdout+stderr)
+
+ util.rmRf(test_module)
+ util.rmRf(linked_in_module)
+
+ @unittest.skipIf(not util.canBuildNatively(), "can't build natively on this platform yet")
+ def testLinkedBuild(self):
+ linked_in_module = util.writeTestFiles(util.Test_Trivial_Lib, True)
+ test_module = util.writeTestFiles(util.Test_Testing_Trivial_Lib_Dep, True)
+
+ stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'link'], cwd=linked_in_module)
+ self.assertEqual(statuscode, 0)
+ stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'link', 'test-trivial-lib'], cwd=test_module)
+ self.assertEqual(statuscode, 0)
+ stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'build'], cwd=test_module)
+ self.assertEqual(statuscode, 0)
+
+ util.rmRf(test_module)
+ util.rmRf(linked_in_module)
+
+ @unittest.skipIf(not util.canBuildNatively(), "can't build natively on this platform yet")
+ def testLinkedReBuild(self):
+ # test that changing which module is linked triggers a re-build
+ linked_in_module_1 = util.writeTestFiles(util.Test_Trivial_Lib, True)
+ linked_in_module_2 = util.writeTestFiles(util.Test_Trivial_Lib, True)
+ test_module = util.writeTestFiles(util.Test_Testing_Trivial_Lib_Dep, True)
+
+ stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'link'], cwd=linked_in_module_1)
+ self.assertEqual(statuscode, 0)
+ stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'link', 'test-trivial-lib'], cwd=test_module)
+ self.assertEqual(statuscode, 0)
+ stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'build'], cwd=test_module)
+ self.assertEqual(statuscode, 0)
+
+ # check that rebuild is no-op
+ stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'build'], cwd=test_module)
+ self.assertIn('no work to do', stdout+stderr)
+ self.assertEqual(statuscode, 0)
+
+ stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'link'], cwd=linked_in_module_2)
+ self.assertEqual(statuscode, 0)
+
+ stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'build'], cwd=test_module)
+ self.assertNotIn('no work to do', stdout+stderr)
+ self.assertEqual(statuscode, 0)
+
+ util.rmRf(test_module)
+ util.rmRf(linked_in_module_1)
+ util.rmRf(linked_in_module_2)
+
+ @unittest.skipIf(not util.canBuildNatively(), "can't build natively on this platform yet")
+ def testTargetLinkedBuild(self):
+ linked_in_target = util.writeTestFiles(util.getNativeTargetDescription(), True)
+ test_module = util.writeTestFiles(util.Test_Testing_Trivial_Lib_Dep_Preinstalled, True)
+
+ stdout, stderr, statuscode = cli.run(['-t', 'test-native-target', '--plain', 'link-target'], cwd=linked_in_target)
+ self.assertEqual(statuscode, 0)
+ stdout, stderr, statuscode = cli.run(['-t', 'test-native-target', '--plain', 'link-target', 'test-native-target'], cwd=test_module)
+ self.assertEqual(statuscode, 0)
+ stdout, stderr, statuscode = cli.run(['-t', 'test-native-target', '--plain', 'build'], cwd=test_module)
+ self.assertEqual(statuscode, 0)
+
+ util.rmRf(test_module)
+ util.rmRf(linked_in_target)
+
diff --git a/yotta/test/cli/outdated.py b/yotta/test/cli/outdated.py
index 15fbed4..be8eb4d 100644
--- a/yotta/test/cli/outdated.py
+++ b/yotta/test/cli/outdated.py
@@ -6,11 +6,9 @@
# standard library modules, , ,
import unittest
-import os
-import tempfile
# internal modules:
-from yotta.lib.fsutils import mkDirP, rmRf
+from . import util
from . import cli
Test_Outdated = {
@@ -42,30 +40,17 @@ int foo(){
}
class TestCLIOutdated(unittest.TestCase):
- def writeTestFiles(self, files, add_space_in_path=False):
- test_dir = tempfile.mkdtemp()
- if add_space_in_path:
- test_dir = test_dir + ' spaces in path'
-
- for path, contents in files.items():
- path_dir, file_name = os.path.split(path)
- path_dir = os.path.join(test_dir, path_dir)
- mkDirP(path_dir)
- with open(os.path.join(path_dir, file_name), 'w') as f:
- f.write(contents)
- return test_dir
-
def test_outdated(self):
- path = self.writeTestFiles(Test_Outdated, True)
+ path = util.writeTestFiles(Test_Outdated, True)
stdout, stderr, statuscode = cli.run(['-t', 'x86-linux-native', 'outdated'], cwd=path)
self.assertNotEqual(statuscode, 0)
self.assertIn('test-testing-dummy', stdout + stderr)
- rmRf(path)
+ util.rmRf(path)
def test_notOutdated(self):
- path = self.writeTestFiles(Test_Outdated, True)
+ path = util.writeTestFiles(Test_Outdated, True)
stdout, stderr, statuscode = cli.run(['-t', 'x86-linux-native', 'up'], cwd=path)
self.assertEqual(statuscode, 0)
@@ -74,4 +59,4 @@ class TestCLIOutdated(unittest.TestCase):
self.assertEqual(statuscode, 0)
self.assertNotIn('test-testing-dummy', stdout + stderr)
- rmRf(path)
+ util.rmRf(path)
diff --git a/yotta/test/cli/test.py b/yotta/test/cli/test.py
index 6a243d6..ccec431 100644
--- a/yotta/test/cli/test.py
+++ b/yotta/test/cli/test.py
@@ -6,15 +6,12 @@
# standard library modules, , ,
import unittest
-import os
-import tempfile
import copy
# internal modules:
-from yotta.lib.fsutils import mkDirP, rmRf
from yotta.lib.detect import systemDefaultTarget
from . import cli
-
+from . import util
Test_Tests = {
'module.json':'''{
@@ -103,26 +100,10 @@ Test_Fitler_NotFound['module.json'] = '''{
}
}'''
-def isWindows():
- return os.name == 'nt'
-
class TestCLITest(unittest.TestCase):
- def writeTestFiles(self, files, add_space_in_path=False):
- test_dir = tempfile.mkdtemp()
- if add_space_in_path:
- test_dir = test_dir + ' spaces in path'
-
- for path, contents in files.items():
- path_dir, file_name = os.path.split(path)
- path_dir = os.path.join(test_dir, path_dir)
- mkDirP(path_dir)
- with open(os.path.join(path_dir, file_name), 'w') as f:
- f.write(contents)
- return test_dir
-
- @unittest.skipIf(isWindows(), "can't build natively on windows yet")
+ @unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet")
def test_tests(self):
- test_dir = self.writeTestFiles(Test_Tests, True)
+ test_dir = util.writeTestFiles(Test_Tests, True)
output = self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], test_dir)
output = self.runCheckCommand(['--target', systemDefaultTarget(), 'test'], test_dir)
self.assertIn('test-a passed', output)
@@ -131,17 +112,17 @@ class TestCLITest(unittest.TestCase):
self.assertIn('test-e passed', output)
self.assertIn('test-f passed', output)
self.assertIn('test-g passed', output)
- rmRf(test_dir)
+ util.rmRf(test_dir)
- @unittest.skipIf(isWindows(), "can't build natively on windows yet")
+ @unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet")
def test_testOutputFilterPassing(self):
- test_dir = self.writeTestFiles(Test_Fitler_Pass, True)
+ test_dir = util.writeTestFiles(Test_Fitler_Pass, True)
stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'test'], test_dir)
- rmRf(test_dir)
+ util.rmRf(test_dir)
- @unittest.skipIf(isWindows(), "can't build natively on windows yet")
+ @unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet")
def test_testOutputFilterFailing(self):
- test_dir = self.writeTestFiles(Test_Fitler_Fail, True)
+ test_dir = util.writeTestFiles(Test_Fitler_Fail, True)
stdout, stderr, statuscode = cli.run(['--target', systemDefaultTarget(), 'test'], cwd=test_dir)
if statuscode == 0:
print(stdout)
@@ -153,17 +134,17 @@ class TestCLITest(unittest.TestCase):
self.assertIn('test-f failed', '%s %s' % (stdout, stderr))
self.assertIn('test-g failed', '%s %s' % (stdout, stderr))
self.assertNotEqual(statuscode, 0)
- rmRf(test_dir)
+ util.rmRf(test_dir)
- @unittest.skipIf(isWindows(), "can't build natively on windows yet")
+ @unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet")
def test_testOutputFilterNotFound(self):
- test_dir = self.writeTestFiles(Test_Fitler_NotFound, True)
+ test_dir = util.writeTestFiles(Test_Fitler_NotFound, True)
stdout, stderr, statuscode = cli.run(['--target', systemDefaultTarget(), 'test'], cwd=test_dir)
if statuscode == 0:
print(stdout)
print(stderr)
self.assertNotEqual(statuscode, 0)
- rmRf(test_dir)
+ util.rmRf(test_dir)
def runCheckCommand(self, args, test_dir):
stdout, stderr, statuscode = cli.run(args, cwd=test_dir)
diff --git a/yotta/test/cli/unlink.py b/yotta/test/cli/unlink.py
new file mode 100644
index 0000000..ff6eda6
--- /dev/null
+++ b/yotta/test/cli/unlink.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0
+# See LICENSE file for details.
+
+
+# standard library modules, , ,
+import unittest
+import tempfile
+import os
+
+# internal modules:
+from . import cli
+from . import util
+
+Test_Target = 'x86-linux-native'
+
+class TestCLIUnLink(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.prefix_dir = tempfile.mkdtemp()
+ os.environ['YOTTA_PREFIX'] = cls.prefix_dir
+
+ @classmethod
+ def tearDownClass(cls):
+ util.rmRf(cls.prefix_dir)
+ cls.prefix_dir = None
+
+ def testUnlinkNonexistentModule(self):
+ test_module = util.writeTestFiles(util.Test_Testing_Trivial_Lib_Dep, True)
+ stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'unlink', 'doesnotexist'], cwd=test_module)
+ self.assertNotEqual(statuscode, 0)
+ util.rmRf(test_module)
+
+ def testUnlinkNonexistentTarget(self):
+ test_module = util.writeTestFiles(util.Test_Testing_Trivial_Lib_Dep, True)
+ stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'unlink-target', 'doesnotexist'], cwd=test_module)
+ self.assertNotEqual(statuscode, 0)
+ util.rmRf(test_module)
+
+ def testUnlinkNotLinkedModuleGlobally(self):
+ test_module = util.writeTestFiles(util.Test_Testing_Trivial_Lib_Dep, True)
+ stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'unlink'], cwd=test_module)
+ self.assertNotEqual(statuscode, 0)
+ util.rmRf(test_module)
+
+ def testUnlinkNotLinkedTargetGlobally(self):
+ test_target = util.writeTestFiles(util.getNativeTargetDescription(), True)
+ stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'unlink'], cwd=test_target)
+ self.assertNotEqual(statuscode, 0)
+ util.rmRf(test_target)
+
+ def testUnlinkModuleGlobally(self):
+ test_module = util.writeTestFiles(util.Test_Testing_Trivial_Lib_Dep, True)
+ stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'link'], cwd=test_module)
+ self.assertEqual(statuscode, 0)
+ stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'unlink'], cwd=test_module)
+ self.assertEqual(statuscode, 0)
+ util.rmRf(test_module)
+
+ def testUnlinkTargetGlobally(self):
+ test_target = util.writeTestFiles(util.getNativeTargetDescription(), True)
+ stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'link-target'], cwd=test_target)
+ self.assertEqual(statuscode, 0)
+ stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'unlink-target'], cwd=test_target)
+ self.assertEqual(statuscode, 0)
+ util.rmRf(test_target)
+
+ def testUnlinkModule(self):
+ linked_in_module = util.writeTestFiles(util.Test_Trivial_Lib, True)
+ test_module = util.writeTestFiles(util.Test_Testing_Trivial_Lib_Dep, True)
+
+ stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'link'], cwd=linked_in_module)
+ self.assertEqual(statuscode, 0)
+ stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'link', 'test-trivial-lib'], cwd=test_module)
+ self.assertEqual(statuscode, 0)
+ self.assertTrue(os.path.exists(os.path.join(test_module, 'yotta_modules', 'test-trivial-lib')))
+ stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'unlink', 'test-trivial-lib'], cwd=test_module)
+ self.assertEqual(statuscode, 0)
+ self.assertTrue(not os.path.exists(os.path.join(test_module, 'yotta_modules', 'test-trivial-lib')))
+
+ util.rmRf(test_module)
+ util.rmRf(linked_in_module)
+
+ @unittest.skipIf(not util.canBuildNatively(), "can't build natively on this platform yet")
+ def testUnlinkTarget(self):
+ linked_in_target = util.writeTestFiles(util.getNativeTargetDescription(), True)
+ test_module = util.writeTestFiles(util.Test_Testing_Trivial_Lib_Dep_Preinstalled, True)
+
+ stdout, stderr, statuscode = cli.run(['-t', 'test-native-target', '--plain', 'link-target'], cwd=linked_in_target)
+ self.assertEqual(statuscode, 0)
+ stdout, stderr, statuscode = cli.run(['-t', 'test-native-target', '--plain', 'link-target', 'test-native-target'], cwd=test_module)
+ self.assertEqual(statuscode, 0)
+ self.assertTrue(os.path.exists(os.path.join(test_module, 'yotta_targets', 'test-native-target')))
+ stdout, stderr, statuscode = cli.run(['-t', 'test-native-target', '--plain', 'unlink-target', 'test-native-target'], cwd=test_module)
+ self.assertEqual(statuscode, 0)
+ self.assertTrue(not os.path.exists(os.path.join(test_module, 'yotta_targets', 'test-native-target')))
+
+ util.rmRf(test_module)
+ util.rmRf(linked_in_target)
+
+
diff --git a/yotta/test/cli/update.py b/yotta/test/cli/update.py
index 4906fab..8581689 100644
--- a/yotta/test/cli/update.py
+++ b/yotta/test/cli/update.py
@@ -6,12 +6,10 @@
# standard library modules, , ,
import unittest
-import os
-import tempfile
# internal modules:
-from yotta.lib.fsutils import mkDirP, rmRf
from . import cli
+from . import util
Test_Outdated = {
'module.json':'''{
@@ -42,39 +40,26 @@ int foo(){
}
class TestCLIUpdate(unittest.TestCase):
- def writeTestFiles(self, files, add_space_in_path=False):
- test_dir = tempfile.mkdtemp()
- if add_space_in_path:
- test_dir = test_dir + ' spaces in path'
-
- for path, contents in files.items():
- path_dir, file_name = os.path.split(path)
- path_dir = os.path.join(test_dir, path_dir)
- mkDirP(path_dir)
- with open(os.path.join(path_dir, file_name), 'w') as f:
- f.write(contents)
- return test_dir
-
def test_update(self):
- path = self.writeTestFiles(Test_Outdated, True)
+ path = util.writeTestFiles(Test_Outdated, True)
stdout, stderr, statuscode = cli.run(['-t', 'x86-linux-native', 'update'], cwd=path)
self.assertEqual(statuscode, 0)
self.assertIn('download test-testing-dummy', stdout + stderr)
- rmRf(path)
+ util.rmRf(path)
def test_updateExplicit(self):
- path = self.writeTestFiles(Test_Outdated, True)
+ path = util.writeTestFiles(Test_Outdated, True)
stdout, stderr, statuscode = cli.run(['-t', 'x86-linux-native', 'update', 'test-testing-dummy'], cwd=path)
self.assertEqual(statuscode, 0)
self.assertIn('download test-testing-dummy', stdout + stderr)
- rmRf(path)
+ util.rmRf(path)
def test_updateNothing(self):
- path = self.writeTestFiles(Test_Outdated, True)
+ path = util.writeTestFiles(Test_Outdated, True)
stdout, stderr, statuscode = cli.run(['-t', 'x86-linux-native', 'up'], cwd=path)
self.assertEqual(statuscode, 0)
@@ -84,4 +69,4 @@ class TestCLIUpdate(unittest.TestCase):
self.assertEqual(statuscode, 0)
self.assertNotIn('download test-testing-dummy', stdout + stderr)
- rmRf(path)
+ util.rmRf(path)
diff --git a/yotta/test/cli/util.py b/yotta/test/cli/util.py
new file mode 100644
index 0000000..553619b
--- /dev/null
+++ b/yotta/test/cli/util.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python
+# Copyright 2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0
+# See LICENSE file for details.
+
+# standard library modules, , ,
+import tempfile
+import os
+import copy
+
+# internal modules:
+import yotta.lib.fsutils as fsutils
+from yotta.lib.detect import systemDefaultTarget
+
+# some simple example module definitions that can be re-used by multiple tests:
+Test_Trivial_Lib = {
+'module.json':'''{
+ "name": "test-trivial-lib",
+ "version": "1.0.0",
+ "description": "Module to test trivial lib compilation",
+ "license": "Apache-2.0",
+ "dependencies": {
+ }
+}''',
+
+'test-trivial-lib/lib.h': '''
+int foo();
+''',
+
+'source/lib.c':'''
+#include "test-trivial-lib/lib.h"
+int foo(){ return 7; }
+'''
+}
+
+Test_Trivial_Exe = {
+'module.json':'''{
+ "name": "test-trivial-exe",
+ "version": "1.0.0",
+ "description": "Module to test trivial exe compilation",
+ "license": "Apache-2.0",
+ "dependencies": {
+ },
+ "bin":"./source"
+}''',
+
+'source/lib.c':'''
+int main(){ return 0; }
+'''
+}
+
+Test_Testing_Trivial_Lib_Dep = {
+'module.json':'''{
+ "name": "test-simple-module",
+ "version": "1.0.0",
+ "description": "a simple test module",
+ "author": "Someone Somewhere <[email protected]>",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "test-trivial-lib": "^1.0.0"
+ }
+}
+''',
+
+'test-simple-module/simple.h': '''
+int simple();
+''',
+
+'source/lib.c':'''
+#include "test-simple-module/simple.h"
+int simple(){ return 123; }
+'''
+}
+
+Test_Testing_Trivial_Lib_Dep_Preinstalled = copy.copy(Test_Testing_Trivial_Lib_Dep)
+for k, v in Test_Trivial_Lib.items():
+ Test_Testing_Trivial_Lib_Dep_Preinstalled['yotta_modules/test-trivial-lib/' + k] = v
+
+
+def getNativeTargetDescription():
+ # actually returns a trivial target which inherits from the native target
+ native_target = nativeTarget()
+ if ',' in native_target:
+ native_target = native_target[:native_target.find(',')]
+ return {
+ 'target.json':'''{
+ "name": "test-native-target",
+ "version": "1.0.0",
+ "license": "Apache-2.0",
+ "inherits": {
+ "%s": "*"
+ }
+ }
+ ''' % native_target
+ }
+
+
+def writeTestFiles(files, add_space_in_path=False):
+ ''' write a dictionary of filename:contents into a new temporary directory
+ '''
+ test_dir = tempfile.mkdtemp()
+ if add_space_in_path:
+ test_dir = test_dir + ' spaces in path'
+
+ for path, contents in files.items():
+ path_dir, file_name = os.path.split(path)
+ path_dir = os.path.join(test_dir, path_dir)
+ fsutils.mkDirP(path_dir)
+ with open(os.path.join(path_dir, file_name), 'w') as f:
+ f.write(contents)
+ return test_dir
+
+def isWindows():
+ # can't run tests that hit github without an authn token
+ return os.name == 'nt'
+
+def canBuildNatively():
+ return not isWindows()
+
+def nativeTarget():
+ assert(canBuildNatively())
+ return systemDefaultTarget()
+
+#expose rmRf for convenience
+rmRf = fsutils.rmRf
diff --git a/yotta/test/config.py b/yotta/test/config.py
index 8c7b417..8c67192 100644
--- a/yotta/test/config.py
+++ b/yotta/test/config.py
@@ -7,12 +7,11 @@
import unittest
import copy
import os
-import tempfile
import logging
# internal modules:
-from yotta.lib.fsutils import mkDirP, rmRf
from yotta.lib import validate
+from .cli import util
logging.basicConfig(
level=logging.ERROR
@@ -78,19 +77,6 @@ Test_Module_Config_Ignored['module.json'] = '''{
}'''
class ConfigTest(unittest.TestCase):
- def writeTestFiles(self, files, add_space_in_path=False):
- test_dir = tempfile.mkdtemp()
- if add_space_in_path:
- test_dir = test_dir + ' spaces in path'
-
- for path, contents in files.items():
- path_dir, file_name = os.path.split(path)
- path_dir = os.path.join(test_dir, path_dir)
- mkDirP(path_dir)
- with open(os.path.join(path_dir, file_name), 'w') as f:
- f.write(contents)
- return test_dir
-
def setUp(self):
self.restore_cwd = os.getcwd()
@@ -98,7 +84,7 @@ class ConfigTest(unittest.TestCase):
os.chdir(self.restore_cwd)
def test_targetConfigMerge(self):
- test_dir = self.writeTestFiles(Test_Target_Config_Merge, True)
+ test_dir = util.writeTestFiles(Test_Target_Config_Merge, True)
os.chdir(test_dir)
c = validate.currentDirectoryModule()
@@ -118,10 +104,10 @@ class ConfigTest(unittest.TestCase):
self.assertEqual(merged_config['bar']['d'], "def")
os.chdir(self.restore_cwd)
- rmRf(test_dir)
+ util.rmRf(test_dir)
def test_targetAppConfigMerge(self):
- test_dir = self.writeTestFiles(Test_Target_Config_Merge_App, True)
+ test_dir = util.writeTestFiles(Test_Target_Config_Merge_App, True)
os.chdir(test_dir)
c = validate.currentDirectoryModule()
@@ -144,10 +130,10 @@ class ConfigTest(unittest.TestCase):
self.assertEqual(merged_config['new'], 123)
os.chdir(self.restore_cwd)
- rmRf(test_dir)
+ util.rmRf(test_dir)
def test_moduleConfigIgnored(self):
- test_dir = self.writeTestFiles(Test_Module_Config_Ignored, True)
+ test_dir = util.writeTestFiles(Test_Module_Config_Ignored, True)
os.chdir(test_dir)
c = validate.currentDirectoryModule()
@@ -157,5 +143,5 @@ class ConfigTest(unittest.TestCase):
self.assertNotIn("new", merged_config)
os.chdir(self.restore_cwd)
- rmRf(test_dir)
+ util.rmRf(test_dir)
diff --git a/yotta/test/ignores.py b/yotta/test/ignores.py
index 3a5f8e9..16832a8 100644
--- a/yotta/test/ignores.py
+++ b/yotta/test/ignores.py
@@ -8,13 +8,12 @@
# standard library modules, , ,
import unittest
import os
-import tempfile
# internal modules:
-from yotta.lib.fsutils import mkDirP, rmRf
from yotta.lib.detect import systemDefaultTarget
from yotta.lib import component
from .cli import cli
+from .cli import util
Test_Files = {
'.yotta_ignore': '''
@@ -115,24 +114,14 @@ def isWindows():
# can't run tests that hit github without an authn token
return os.name == 'nt'
-def writeTestFiles(files):
- test_dir = tempfile.mkdtemp()
- for path, contents in files.items():
- path_dir, file_name = os.path.split(path)
- path_dir = os.path.join(test_dir, path_dir)
- mkDirP(path_dir)
- with open(os.path.join(path_dir, file_name), 'w') as f:
- f.write(contents)
- return test_dir
-
class TestPackIgnores(unittest.TestCase):
@classmethod
def setUpClass(cls):
- cls.test_dir = writeTestFiles(Test_Files)
+ cls.test_dir = util.writeTestFiles(Test_Files)
@classmethod
def tearDownClass(cls):
- rmRf(cls.test_dir)
+ util.rmRf(cls.test_dir)
def test_absolute_ignores(self):
c = component.Component(self.test_dir)
@@ -158,7 +147,7 @@ class TestPackIgnores(unittest.TestCase):
self.assertTrue(c.ignores('test/someothertest/alsoignored.c'))
def test_default_ignores(self):
- default_test_dir = writeTestFiles(Default_Test_Files)
+ default_test_dir = util.writeTestFiles(Default_Test_Files)
c = component.Component(default_test_dir)
self.assertTrue(c.ignores('.something.c.swp'))
self.assertTrue(c.ignores('.something.c~'))
@@ -173,7 +162,7 @@ class TestPackIgnores(unittest.TestCase):
self.assertTrue(c.ignores('build'))
self.assertTrue(c.ignores('.yotta.json'))
- rmRf(default_test_dir)
+ util.rmRf(default_test_dir)
def test_comments(self):
c = component.Component(self.test_dir)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 4
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc cmake ninja-build"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | argcomplete==0.9.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
colorama==0.3.9
cryptography==44.0.2
Deprecated==1.2.18
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
future==1.0.0
hgapi==1.7.4
idna==3.10
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
intelhex==2.3.0
intervaltree==3.1.0
Jinja2==2.11.3
jsonpointer==2.0
jsonschema==2.6.0
MarkupSafe==3.0.2
mbed_test_wrapper==0.0.3
packaging @ file:///croot/packaging_1734472117206/work
pathlib==1.0.1
pluggy @ file:///croot/pluggy_1733169602837/work
project-generator-definitions==0.2.46
project_generator==0.8.17
pycparser==2.22
pyelftools==0.23
PyGithub==1.54.1
PyJWT==1.7.1
pyocd==0.15.0
pytest @ file:///croot/pytest_1738938843180/work
pyusb==1.3.1
PyYAML==3.13
requests==2.32.3
semantic-version==2.10.0
six==1.17.0
sortedcontainers==2.4.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
urllib3==2.3.0
valinor==0.0.15
websocket-client==1.8.0
wrapt==1.17.2
xmltodict==0.14.2
-e git+https://github.com/ARMmbed/yotta.git@852c1e498fbb12938fa28aa388cf2b2b650508fe#egg=yotta
| name: yotta
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- argcomplete==0.9.0
- argparse==1.4.0
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- colorama==0.3.9
- cryptography==44.0.2
- deprecated==1.2.18
- future==1.0.0
- hgapi==1.7.4
- idna==3.10
- intelhex==2.3.0
- intervaltree==3.1.0
- jinja2==2.11.3
- jsonpointer==2.0
- jsonschema==2.6.0
- markupsafe==3.0.2
- mbed-test-wrapper==0.0.3
- pathlib==1.0.1
- project-generator==0.8.17
- project-generator-definitions==0.2.46
- pycparser==2.22
- pyelftools==0.23
- pygithub==1.54.1
- pyjwt==1.7.1
- pyocd==0.15.0
- pyusb==1.3.1
- pyyaml==3.13
- requests==2.32.3
- semantic-version==2.10.0
- six==1.17.0
- sortedcontainers==2.4.0
- urllib3==2.3.0
- valinor==0.0.15
- websocket-client==1.8.0
- wrapt==1.17.2
- xmltodict==0.14.2
prefix: /opt/conda/envs/yotta
| [
"yotta/test/cli/unlink.py::TestCLIUnLink::testUnlinkModuleGlobally",
"yotta/test/cli/unlink.py::TestCLIUnLink::testUnlinkNonexistentModule",
"yotta/test/cli/unlink.py::TestCLIUnLink::testUnlinkTarget",
"yotta/test/cli/unlink.py::TestCLIUnLink::testUnlinkTargetGlobally"
]
| [
"yotta/test/cli/build.py::TestCLIBuild::test_buildComplex",
"yotta/test/cli/build.py::TestCLIBuild::test_buildComplexSpaceInPath",
"yotta/test/cli/build.py::TestCLIBuild::test_buildInfo",
"yotta/test/cli/build.py::TestCLIBuild::test_buildTests",
"yotta/test/cli/build.py::TestCLIBuild::test_buildTrivialExe",
"yotta/test/cli/build.py::TestCLIBuild::test_buildTrivialLib",
"yotta/test/cli/link.py::TestCLILink::testLink",
"yotta/test/cli/link.py::TestCLILink::testLinkedBuild",
"yotta/test/cli/link.py::TestCLILink::testLinkedReBuild",
"yotta/test/cli/link.py::TestCLILink::testTargetLinkedBuild",
"yotta/test/cli/outdated.py::TestCLIOutdated::test_notOutdated",
"yotta/test/cli/outdated.py::TestCLIOutdated::test_outdated",
"yotta/test/cli/test.py::TestCLITest::test_testOutputFilterFailing",
"yotta/test/cli/test.py::TestCLITest::test_testOutputFilterNotFound",
"yotta/test/cli/test.py::TestCLITest::test_testOutputFilterPassing",
"yotta/test/cli/test.py::TestCLITest::test_tests",
"yotta/test/cli/update.py::TestCLIUpdate::test_update",
"yotta/test/cli/update.py::TestCLIUpdate::test_updateExplicit",
"yotta/test/cli/update.py::TestCLIUpdate::test_updateNothing",
"yotta/test/config.py::ConfigTest::test_moduleConfigIgnored",
"yotta/test/config.py::ConfigTest::test_targetAppConfigMerge",
"yotta/test/config.py::ConfigTest::test_targetConfigMerge",
"yotta/test/ignores.py::TestPackIgnores::test_build",
"yotta/test/ignores.py::TestPackIgnores::test_test"
]
| [
"yotta/test/cli/unlink.py::TestCLIUnLink::testUnlinkModule",
"yotta/test/cli/unlink.py::TestCLIUnLink::testUnlinkNonexistentTarget",
"yotta/test/cli/unlink.py::TestCLIUnLink::testUnlinkNotLinkedModuleGlobally",
"yotta/test/cli/unlink.py::TestCLIUnLink::testUnlinkNotLinkedTargetGlobally",
"yotta/test/ignores.py::TestPackIgnores::test_absolute_ignores",
"yotta/test/ignores.py::TestPackIgnores::test_comments",
"yotta/test/ignores.py::TestPackIgnores::test_default_ignores",
"yotta/test/ignores.py::TestPackIgnores::test_glob_ignores",
"yotta/test/ignores.py::TestPackIgnores::test_relative_ignores"
]
| []
| Apache License 2.0 | 304 | [
"yotta/lib/validate.py",
"tox.ini",
".coveragerc",
"yotta/remove.py",
"yotta/main.py"
]
| [
"yotta/lib/validate.py",
"tox.ini",
".coveragerc",
"yotta/remove.py",
"yotta/main.py"
]
|
|
rackerlabs__lambda-uploader-35 | c40923a6982a0a3d4fd41b135a4f9b7e97b74f90 | 2015-11-20 15:06:36 | c40923a6982a0a3d4fd41b135a4f9b7e97b74f90 | diff --git a/README.md b/README.md
index 332bb35..bf0a3bb 100644
--- a/README.md
+++ b/README.md
@@ -29,11 +29,7 @@ Example lambda.json file:
"handler": "function.lambda_handler",
"role": "arn:aws:iam::00000000000:role/lambda_basic_execution",
"requirements": ["pygithub"],
- "ignore": [
- "circle.yml",
- ".git",
- "*.pyc"
- ],
+ "ignore": ["circle.yml"],
"timeout": 30,
"memory": 512
}
@@ -57,6 +53,11 @@ To specify an alternative, prexisting virtualenv use the `--virtualenv` paramete
lambda-uploader --virtualenv=~/.virtualenv/my_custom_virtualenv
```
+To omit using a virtualenv use the `--no-virtualenv` parameter.
+```shell
+lambda-uploader --no-virtualenv
+```
+
If you would prefer to upload another way you can tell the uploader to ignore the upload.
This will create a package and leave it in the project directory.
```shell
diff --git a/README.rst b/README.rst
index eaa1676..a9bf569 100644
--- a/README.rst
+++ b/README.rst
@@ -42,11 +42,7 @@ Example lambda.json file:
"handler": "function.lambda_handler",
"role": "arn:aws:iam::00000000000:role/lambda_basic_execution",
"requirements": ["pygithub"],
- "ignore": [
- "circle.yml",
- ".git",
- "*.pyc"
- ],
+ "ignore": ["circle.yml"],
"timeout": 30,
"memory": 512
}
@@ -75,6 +71,12 @@ To specify an alternative, prexisting virtualenv use the
lambda-uploader --virtualenv=~/.virtualenv/my_custom_virtualenv
+To omit using a virtualenv use the ``--no-virtualenv`` parameter.
+
+.. code:: shell
+
+ lambda-uploader --no-virtualenv
+
If you would prefer to upload another way you can tell the uploader to
ignore the upload. This will create a package and leave it in the
project directory.
diff --git a/example/lambda.json b/example/lambda.json
index bdf786a..7148a5b 100644
--- a/example/lambda.json
+++ b/example/lambda.json
@@ -5,11 +5,7 @@
"handler": "function.lambda_handler",
"role": "arn:aws:iam::00000000000:role/lambda_basic_execution",
"requirements": ["Jinja2==2.8"],
- "ignore": [
- "circle.yml",
- ".git",
- "*.pyc"
- ],
+ "ignore": ["circle.yml"],
"timeout": 30,
"memory": 512
}
diff --git a/lambda_uploader/package.py b/lambda_uploader/package.py
index 1bb0089..ccbe79e 100644
--- a/lambda_uploader/package.py
+++ b/lambda_uploader/package.py
@@ -27,37 +27,24 @@ ZIPFILE_NAME = 'lambda_function.zip'
def build_package(path, requirements, virtualenv=None, ignore=[]):
- pkg = Package(path, virtualenv)
+ pkg = Package(path, virtualenv, requirements)
pkg.clean_workspace()
pkg.clean_zipfile()
pkg.prepare_workspace()
- if virtualenv:
- if not os.path.isdir(virtualenv):
- raise Exception("supplied virtualenv %s not found" % virtualenv)
- LOG.info("Using existing virtualenv found in %s" % virtualenv)
- else:
- LOG.info('Building new virtualenv and installing requirements')
- pkg.prepare_virtualenv()
- pkg.install_requirements(requirements)
+ pkg.prepare_virtualenv()
pkg.package(ignore)
return pkg
class Package(object):
- def __init__(self, path, virtualenv=None):
+ def __init__(self, path, virtualenv=None, requirements=[]):
self._path = path
self._temp_workspace = os.path.join(path,
TEMP_WORKSPACE_NAME)
self.zip_file = os.path.join(path, ZIPFILE_NAME)
-
- if virtualenv:
- self._pkg_venv = virtualenv
- else:
- self._pkg_venv = os.path.join(self._temp_workspace, 'venv')
- self._venv_pip = 'bin/pip'
- if sys.platform == 'win32' or sys.platform == 'cygwin':
- self._venv_pip = 'Scripts\pip.exe'
+ self._virtualenv = virtualenv
+ self._requirements = requirements
def clean_workspace(self):
if os.path.isdir(self._temp_workspace):
@@ -72,21 +59,59 @@ class Package(object):
os.mkdir(self._temp_workspace)
def prepare_virtualenv(self):
- proc = Popen(["virtualenv", self._pkg_venv], stdout=PIPE, stderr=PIPE)
- stdout, stderr = proc.communicate()
- LOG.debug("Virtualenv stdout: %s" % stdout)
- LOG.debug("Virtualenv stderr: %s" % stderr)
+ requirements_exist = \
+ self._requirements or os.path.isfile("requirements.txt")
+ if self._virtualenv and self._virtualenv is not False:
+ if not os.path.isdir(self._virtualenv):
+ raise Exception("virtualenv %s not found" % self._virtualenv)
+ LOG.info("Using existing virtualenv at %s" % self._virtualenv)
+
+ # use supplied virtualenv path
+ self._pkg_venv = self._virtualenv
+ elif self._virtualenv is None and requirements_exist:
+ LOG.info('Building new virtualenv and installing requirements')
+ self.build_new_virtualenv()
+ self.install_requirements()
+ elif self._virtualenv is None and not requirements_exist:
+ LOG.info('No requirements found, so no virtualenv will be made')
+ self._pkg_venv = False
+ elif self._virtualenv is False:
+ LOG.info('Virtualenv has been omitted by supplied flag')
+ self._pkg_venv = False
+ else:
+ raise Exception('Cannot determine what to do about virtualenv')
- if proc.returncode is not 0:
- raise Exception('virtualenv returned unsuccessfully')
+ def build_new_virtualenv(self):
+ if self._virtualenv is None:
+ # virtualenv was "None" which means "do default"
+ self._pkg_venv = os.path.join(self._temp_workspace, 'venv')
+ self._venv_pip = 'bin/pip'
+ if sys.platform == 'win32' or sys.platform == 'cygwin':
+ self._venv_pip = 'Scripts\pip.exe'
+
+ proc = Popen(["virtualenv", self._pkg_venv],
+ stdout=PIPE, stderr=PIPE)
+ stdout, stderr = proc.communicate()
+ LOG.debug("Virtualenv stdout: %s" % stdout)
+ LOG.debug("Virtualenv stderr: %s" % stderr)
+
+ if proc.returncode is not 0:
+ raise Exception('virtualenv returned unsuccessfully')
+
+ else:
+ raise Exception('cannot build a new virtualenv when asked to omit')
+
+ def install_requirements(self):
+ if not hasattr(self, '_pkg_venv'):
+ err = 'Must call build_new_virtualenv before install_requirements'
+ raise Exception(err)
- def install_requirements(self, requirements):
cmd = None
- if requirements:
+ if self._requirements:
LOG.debug("Installing requirements found %s in config"
- % requirements)
+ % self._requirements)
cmd = [os.path.join(self._pkg_venv, self._venv_pip),
- 'install'] + requirements
+ 'install'] + self._requirements
elif os.path.isfile("requirements.txt"):
# Pip install
@@ -109,18 +134,19 @@ class Package(object):
# Copy site packages into package base
LOG.info('Copying site packages')
- site_packages = 'lib/python2.7/site-packages'
- lib64_site_packages = 'lib64/python2.7/site-packages'
- if sys.platform == 'win32' or sys.platform == 'cygwin':
- lib64_site_packages = 'lib64\\site-packages'
- site_packages = 'lib\\site-packages'
-
- utils.copy_tree(os.path.join(self._pkg_venv, site_packages),
- package)
- lib64_path = os.path.join(self._pkg_venv, lib64_site_packages)
- if not os.path.islink(lib64_path):
- LOG.info('Copying lib64 site packages')
- utils.copy_tree(lib64_path, package)
+ if hasattr(self, '_pkg_venv') and self._pkg_venv:
+ site_packages = 'lib/python2.7/site-packages'
+ lib64_site_packages = 'lib64/python2.7/site-packages'
+ if sys.platform == 'win32' or sys.platform == 'cygwin':
+ lib64_site_packages = 'lib64\\site-packages'
+ site_packages = 'lib\\site-packages'
+
+ utils.copy_tree(os.path.join(self._pkg_venv, site_packages),
+ package)
+ lib64_path = os.path.join(self._pkg_venv, lib64_site_packages)
+ if not os.path.islink(lib64_path):
+ LOG.info('Copying lib64 site packages')
+ utils.copy_tree(lib64_path, package)
# Append the temp workspace to the ignore list
ignore.append("^%s/*" % self._temp_workspace)
diff --git a/lambda_uploader/shell.py b/lambda_uploader/shell.py
index c36fe21..a62bdb5 100644
--- a/lambda_uploader/shell.py
+++ b/lambda_uploader/shell.py
@@ -49,9 +49,19 @@ def _execute(args):
cfg = config.Config(pth, args.config, role=args.role)
+ if args.no_virtualenv:
+ # specified flag to omit entirely
+ venv = False
+ elif args.virtualenv:
+ # specified a custom virtualenv
+ venv = args.virtualenv
+ else:
+ # build and include virtualenv, the default
+ venv = None
+
_print('Building Package')
pkg = package.build_package(pth, cfg.requirements,
- args.virtualenv, cfg.ignore)
+ venv, cfg.ignore)
if not args.no_clean:
pkg.clean_workspace()
@@ -101,6 +111,10 @@ def main(arv=None):
parser.add_argument('--virtualenv', '-e',
help='use specified virtualenv instead of making one',
default=None)
+ parser.add_argument('--no-virtualenv', dest='no_virtualenv',
+ action='store_const',
+ help='do not create or include a virtualenv at all',
+ const=True)
parser.add_argument('--role', dest='role',
default=getenv('LAMBDA_UPLOADER_ROLE'),
help=('IAM role to assign the lambda function, '
| Option to zip and upload only the folder contents
Right now, uploading grabs a bunch of stuff I don't need (git, PyGitHub, etc.). This creates a fairly large zip file and, hence, larger Lambda storage usage. Not being strong with Python, I'm guessing the extra stuff relates to using virtualenv. I don't need all those dependencies, and would prefer to pip install into the local directory. Could this be added as a new switch? Or is there something I'm missing and some other way to skip them?
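
For reference, the patch above wires this up as a `--no-virtualenv` flag. As a rough sketch only (argparse wiring simplified; `resolve_virtualenv` is a hypothetical helper name, not part of the codebase), the flag can map onto the three virtualenv modes used in the `shell.py` hunk:

```python
# Minimal sketch, assuming the three modes from the shell.py hunk above:
# False = omit virtualenv, a path = reuse existing, None = build fresh.
import argparse

def resolve_virtualenv(args):
    """Hypothetical helper: map CLI flags onto a virtualenv mode."""
    if args.no_virtualenv:
        return False            # skip virtualenv/site-packages entirely
    if args.virtualenv:
        return args.virtualenv  # reuse a pre-existing virtualenv
    return None                 # default: build one, install requirements

parser = argparse.ArgumentParser()
parser.add_argument('--virtualenv', '-e', default=None)
parser.add_argument('--no-virtualenv', dest='no_virtualenv',
                    action='store_true')

print(resolve_virtualenv(parser.parse_args(['--no-virtualenv'])))  # False
```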
Not being strong with Python, I'm guessing the extra stuff relates to using virtualenv. I don't need all those dependencies, and would prefer to pip install to the local directory. Could this be added as a new switch? Or is there something I'm missing and there's some other way for me to skip them? | rackerlabs/lambda-uploader | diff --git a/test/test_package.py b/test/test_package.py
index 1a64572..a293d4f 100644
--- a/test/test_package.py
+++ b/test/test_package.py
@@ -1,5 +1,6 @@
import os
import sys
+import pytest
from shutil import rmtree
from os import path
@@ -53,9 +54,9 @@ def test_install_requirements():
temp_workspace = path.join(TESTING_TEMP_DIR,
package.TEMP_WORKSPACE_NAME)
- pkg = package.Package(TESTING_TEMP_DIR)
- # pkg.prepare_workspace()
- pkg.install_requirements(reqs)
+ pkg = package.Package(TESTING_TEMP_DIR, requirements=reqs)
+ pkg.prepare_virtualenv()
+
site_packages = path.join(temp_workspace,
'venv/lib/python2.7/site-packages')
if sys.platform == 'win32' or sys.platform == 'cygwin':
@@ -64,9 +65,39 @@ def test_install_requirements():
assert path.isdir(path.join(site_packages, '_pytest'))
+def test_default_virtualenv():
+ temp_workspace = path.join(TESTING_TEMP_DIR,
+ package.TEMP_WORKSPACE_NAME)
+ pkg = package.Package(TESTING_TEMP_DIR)
+ pkg.prepare_virtualenv()
+ # ensure we picked a real venv path if using default behavior
+ assert pkg._pkg_venv == ("%s/venv" % temp_workspace)
+
+
def test_existing_virtualenv():
+ venv_dir = "virtualenv_test"
+ temp_virtualenv = path.join(TESTING_TEMP_DIR, venv_dir)
+ os.mkdir(temp_virtualenv)
+
+ pkg = package.Package(TESTING_TEMP_DIR, temp_virtualenv)
+ pkg.prepare_virtualenv()
+
+ assert pkg._pkg_venv == temp_virtualenv
+
+
+def test_bad_existing_virtualenv():
pkg = package.Package(TESTING_TEMP_DIR, 'abc')
- assert pkg._pkg_venv == 'abc'
+ with pytest.raises(Exception):
+ pkg.prepare_virtualenv()
+
+
+def test_omit_virtualenv():
+ pkg = package.Package(TESTING_TEMP_DIR, False)
+ pkg.prepare_virtualenv()
+ assert pkg._pkg_venv is False
+
+ with pytest.raises(Exception):
+ pkg.build_new_virtualenv()
def test_package():
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 3,
"test_score": 3
},
"num_modified_files": 5
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[all]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": [
"tests/pksetup_data/pksetupunit1/requirements.txt",
"tests/pksetup_data/pksetupunit2/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | boto3==1.1.4
botocore==1.2.11
certifi @ file:///croot/certifi_1671487769961/work/certifi
coverage==7.2.7
distlib==0.3.9
docutils==0.20.1
exceptiongroup==1.2.2
execnet==2.0.2
filelock==3.12.2
futures==2.2.0
importlib-metadata==6.7.0
iniconfig==2.0.0
jmespath==0.10.0
-e git+https://github.com/rackerlabs/lambda-uploader.git@c40923a6982a0a3d4fd41b135a4f9b7e97b74f90#egg=lambda_uploader
packaging==24.0
platformdirs==4.0.0
pluggy==1.2.0
pytest==7.4.4
pytest-asyncio==0.21.2
pytest-cov==4.1.0
pytest-mock==3.11.1
pytest-xdist==3.5.0
python-dateutil==2.9.0.post0
six==1.17.0
tomli==2.0.1
typing_extensions==4.7.1
virtualenv==20.26.6
zipp==3.15.0
| name: lambda-uploader
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- boto3==1.1.4
- botocore==1.2.11
- coverage==7.2.7
- distlib==0.3.9
- docutils==0.20.1
- exceptiongroup==1.2.2
- execnet==2.0.2
- filelock==3.12.2
- futures==2.2.0
- importlib-metadata==6.7.0
- iniconfig==2.0.0
- jmespath==0.10.0
- packaging==24.0
- platformdirs==4.0.0
- pluggy==1.2.0
- pytest==7.4.4
- pytest-asyncio==0.21.2
- pytest-cov==4.1.0
- pytest-mock==3.11.1
- pytest-xdist==3.5.0
- python-dateutil==2.9.0.post0
- six==1.17.0
- tomli==2.0.1
- typing-extensions==4.7.1
- virtualenv==20.26.6
- zipp==3.15.0
prefix: /opt/conda/envs/lambda-uploader
| [
"test/test_package.py::test_omit_virtualenv"
]
| [
"test/test_package.py::test_install_requirements",
"test/test_package.py::test_bad_existing_virtualenv"
]
| [
"test/test_package.py::test_package_zip_location",
"test/test_package.py::test_package_clean_workspace",
"test/test_package.py::test_prepare_workspace",
"test/test_package.py::test_default_virtualenv",
"test/test_package.py::test_existing_virtualenv",
"test/test_package.py::test_package"
]
| []
| Apache License 2.0 | 305 | [
"README.rst",
"lambda_uploader/shell.py",
"example/lambda.json",
"README.md",
"lambda_uploader/package.py"
]
| [
"README.rst",
"lambda_uploader/shell.py",
"example/lambda.json",
"README.md",
"lambda_uploader/package.py"
]
|
|
mozilla__puente-47 | 4208bed33250cd3c9d8dacdc73dbc274ce21c85d | 2015-11-20 21:37:58 | 4208bed33250cd3c9d8dacdc73dbc274ce21c85d | diff --git a/README.rst b/README.rst
index eab331f..8bafb63 100644
--- a/README.rst
+++ b/README.rst
@@ -12,6 +12,8 @@ using Jinja2 templates.
* merge command that merges new strings from a ``.pot`` file into locale ``.po``
files
* code to collapse whitespace for Jinja2's trans block
+* add pgettext and npgettext to template environment and they correctly
+ escape things and work the same way as Jinja2's newstyle gettext
* configured using Django settings
* solid documentation
* solid tests
diff --git a/docs/goals.rst b/docs/goals.rst
index b26bd8e..b57553a 100644
--- a/docs/goals.rst
+++ b/docs/goals.rst
@@ -33,7 +33,9 @@ Puente does three nice things:
1. makes it easy to migrate from Tower to something you can use with Django 1.8
2. collapses whitespace in Jinja2 trans blocks
-3. pulls bits from Django settings to configure extraction (e.g. Jinja2
+3. adds pgettext and npgettext to template environment that work like Jinja2's
+ newstyle gettext
+4. pulls bits from Django settings to configure extraction (e.g. Jinja2
extensions)
If you don't care about any of those things, go use Babel's pybabel command and
@@ -75,9 +77,11 @@ What's different between Tower and Puente?
indentation of the HTML template. That stinks because translators have to go
through and fix all the translations.
-3. Tower had a bunch of code to support msgctxt in extraction and gettext
- calls, but Puente relies on Django's pgettext functions and Babel's
- msgctxt support and that works super.
+3. Tower had a bunch of code to support msgctxt in extraction and gettext calls,
+ but Puente relies on Django's pgettext functions and Babel's msgctxt support
+ and that works super... except in Jinja2 templates. Puente adds pgettext and
+ npgettext to the template environment and they work just like Jinja2's
+ newstyle gettext.
4. Tower had its own gettext and ngettext that marked output as safe, but Puente
drops that because it's unneeded if you're using Jinja2's newstyle gettext
@@ -120,7 +124,11 @@ We need to do the following before we can end Puente:
https://github.com/mitsuhiko/jinja2/issues/504
-2. Puente's extract command should work more like Babel's pybabel extract
+2. IN PROGRESS: Jinja2 needs to support pgettext/npgettext in templates.
+
+ https://github.com/mitsuhiko/jinja2/issues/441
+
+3. Puente's extract command should work more like Babel's pybabel extract
command.
The way forward is to phase Puente out for pybabel. In order to make that
@@ -128,11 +136,10 @@ We need to do the following before we can end Puente:
This should probably be broken up into more steps as we discover differences.
-3. Ditch Puente's merge for pybabel's update?
+4. Ditch Puente's merge for pybabel's update?
-4. Need a nice way to use Django settings for pybabel configuration. For
+5. Need a nice way to use Django settings for pybabel configuration. For
example, I'd rather not have to define the list of Jinja2 extensions to use
in two places.
-5. Is there anything else?
-
+6. Is there anything else?
diff --git a/docs/installation.rst b/docs/installation.rst
index 4e9996c..83ba771 100644
--- a/docs/installation.rst
+++ b/docs/installation.rst
@@ -342,6 +342,24 @@ Note that ``BASE_DIR`` is the path to the project root. It's in the
}
+Templates
+=========
+
+We hope you're using Jinja2's newstyle gettext and ``autoescape = True``. If
+that's the case, then these docs will help:
+
+* `Jinja2 template i18n docs <http://jinja.pocoo.org/docs/dev/templates/#i18n>`_
+* `Jinja2 template newstyle docs <http://jinja.pocoo.org/docs/dev/extensions/#newstyle-gettext>`_
+
+Further, Puente adds support for ``pgettext`` and ``npgettext`` in templates::
+
+ {{ pgettext("some context", "message string") }}
+ {{ npgettext("some context", "singular message", "plural message", 5) }}
+
+
+FIXME: Expand on this and talk about escaping and ``|safe``.
+
+
Extract and merge usage
=======================
diff --git a/docs/migratingfromtower.rst b/docs/migratingfromtower.rst
index 891c07b..04dc570 100644
--- a/docs/migratingfromtower.rst
+++ b/docs/migratingfromtower.rst
@@ -182,6 +182,7 @@ to do something like the following to switch to Puente.
('**.js', 'javascript')
]
}
+ STANDALONE_DOMAINS = ['django']
The equivalent Puente configuration is something like this:
diff --git a/puente/commands.py b/puente/commands.py
index 24f07fe..16e5939 100644
--- a/puente/commands.py
+++ b/puente/commands.py
@@ -1,4 +1,5 @@
import os
+import tempfile
from subprocess import PIPE, Popen, call
from tempfile import TemporaryFile
@@ -11,6 +12,9 @@ from django.core.management.base import CommandError
from puente.utils import monkeypatch_i18n
+DEFAULT_DOMAIN_VALUE = 'all'
+
+
def generate_options_map():
"""Generate an ``options_map` to pass to ``extract_from_dir``
@@ -64,15 +68,16 @@ def generate_options_map():
)
-def extract_command(outputdir, domain_methods, text_domain, keywords,
- comment_tags, base_dir, project, version,
- msgid_bugs_address):
+def extract_command(domain, outputdir, domain_methods, standalone_domains,
+ text_domain, keywords, comment_tags, base_dir,
+ project, version, msgid_bugs_address):
"""Extracts strings into .pot files
:arg domain: domains to generate strings for or 'all' for all domains
:arg outputdir: output dir for .pot files; usually
locale/templates/LC_MESSAGES/
:arg domain_methods: DOMAIN_METHODS setting
+ :arg standalone_domains: STANDALONE_DOMAINS setting
:arg text_domain: TEXT_DOMAIN settings
:arg keywords: KEYWORDS setting
:arg comment_tags: COMMENT_TAGS setting
@@ -91,7 +96,11 @@ def extract_command(outputdir, domain_methods, text_domain, keywords,
print('Creating output dir %s ...' % outputdir)
os.makedirs(outputdir)
- domains = domain_methods.keys()
+ # Figure out what domains to extract
+ if domain == DEFAULT_DOMAIN_VALUE:
+ domains = domain_methods.keys()
+ else:
+ domains = [domain]
def callback(filename, method, options):
if method != 'ignore':
@@ -126,15 +135,49 @@ def extract_command(outputdir, domain_methods, text_domain, keywords,
with open(os.path.join(outputdir, '%s.pot' % domain), 'wb') as fp:
write_po(fp, catalog, width=80)
+ not_standalone_domains = [
+ dom for dom in domains
+ if dom not in standalone_domains
+ ]
+
+ pot_files = []
+ for dom in not_standalone_domains:
+ pot_files.append(os.path.join(outputdir, '%s.pot' % dom))
+
+ if len(pot_files) > 1:
+ pot_file = text_domain + '.pot'
+ print('Concatenating the non-standalone domains into %s' % pot_file)
+
+ final_out = os.path.join(outputdir, pot_file)
+
+ # We add final_out back on because msgcat will combine all
+ # specified files. We'll redirect everything back in to
+ # final_out in a minute.
+ pot_files.append(final_out)
+
+ meltingpot = tempfile.TemporaryFile()
+ p1 = Popen(['msgcat'] + pot_files, stdout=meltingpot)
+ p1.communicate()
+ meltingpot.seek(0)
+
+ # w+ truncates the file first
+ with open(final_out, 'w+') as final:
+ final.write(meltingpot.read())
+
+ meltingpot.close()
+
+ for dom in not_standalone_domains:
+ os.remove(os.path.join(outputdir, '%s.pot' % dom))
+
print('Done')
-def merge_command(create, base_dir, domain_methods, languages):
+def merge_command(create, base_dir, standalone_domains, languages):
"""
:arg create: whether or not to create directories if they don't
exist
:arg base_dir: BASE_DIR setting
- :arg domain_methods: DOMAIN_METHODS setting
+ :arg standalone_domains: STANDALONE_DOMAINS setting
:arg languages: LANGUAGES setting
"""
@@ -166,9 +209,8 @@ def merge_command(create, base_dir, domain_methods, languages):
if not os.path.exists(d):
os.makedirs(d)
- domains = domain_methods.keys()
- for domain in domains:
- print 'Merging %s strings to each locale...' % domain
+ for domain in standalone_domains:
+ print('Merging %s strings to each locale...' % domain)
domain_pot = os.path.join(locale_dir, 'templates', 'LC_MESSAGES',
'%s.pot' % domain)
if not os.path.isfile(domain_pot):
diff --git a/puente/ext.py b/puente/ext.py
index 8f3e7a7..ea6714b 100644
--- a/puente/ext.py
+++ b/puente/ext.py
@@ -1,8 +1,28 @@
+from django.utils.translation import pgettext as pgettext_real, npgettext as npgettext_real
+
from jinja2.ext import InternationalizationExtension
+from jinja2.utils import contextfunction, Markup
from puente.utils import collapse_whitespace
+@contextfunction
+def pgettext(__context, context, message, **variables):
+ rv = pgettext_real(context, message)
+ if __context.eval_ctx.autoescape:
+ rv = Markup(rv)
+ return rv % variables
+
+
+@contextfunction
+def npgettext(__context, context, singular, plural, number, **variables):
+ variables.setdefault('num', number)
+ rv = npgettext_real(context, singular, plural, number)
+ if __context.eval_ctx.autoescape:
+ rv = Markup(rv)
+ return rv % variables
+
+
class PuenteI18nExtension(InternationalizationExtension):
"""Provides whitespace collapsing trans behavior
@@ -12,6 +32,11 @@ class PuenteI18nExtension(InternationalizationExtension):
Jinja2 templates.
"""
+ def __init__(self, environment):
+ super(PuenteI18nExtension, self).__init__(environment)
+ environment.globals['pgettext'] = pgettext
+ environment.globals['npgettext'] = npgettext
+
def _parse_block(self, parser, allow_pluralize):
parse_block = InternationalizationExtension._parse_block
ref, buffer = parse_block(self, parser, allow_pluralize)
diff --git a/puente/management/commands/extract.py b/puente/management/commands/extract.py
index c688034..de8a5c2 100644
--- a/puente/management/commands/extract.py
+++ b/puente/management/commands/extract.py
@@ -3,7 +3,7 @@ from optparse import make_option
from django.core.management.base import BaseCommand
-from puente.commands import extract_command
+from puente.commands import DEFAULT_DOMAIN_VALUE, extract_command
from puente.settings import get_setting
@@ -11,6 +11,15 @@ class Command(BaseCommand):
help = 'Extracts strings for translation.'
option_list = BaseCommand.option_list + (
+ make_option(
+ '--domain', '-d', default=DEFAULT_DOMAIN_VALUE,
+ dest='domain',
+ help=(
+ 'The domain of the message files. If "all" '
+ 'everything will be extracted and combined into '
+ '%s.pot. (default: %%default).' % get_setting('TEXT_DOMAIN')
+ )
+ ),
make_option(
'--output-dir', '-o',
default=os.path.join(get_setting('BASE_DIR'), 'locale',
@@ -20,15 +29,17 @@ class Command(BaseCommand):
'The directory where extracted files will be placed. '
'(Default: %default)'
)
- ),
+ )
)
def handle(self, *args, **options):
return extract_command(
# Command line arguments
+ domain=options.get('domain'),
outputdir=options.get('outputdir'),
# From settings.py
domain_methods=get_setting('DOMAIN_METHODS'),
+ standalone_domains=get_setting('STANDALONE_DOMAINS'),
text_domain=get_setting('TEXT_DOMAIN'),
keywords=get_setting('KEYWORDS'),
comment_tags=get_setting('COMMENT_TAGS'),
diff --git a/puente/management/commands/merge.py b/puente/management/commands/merge.py
index 74de7d9..b34c2e4 100644
--- a/puente/management/commands/merge.py
+++ b/puente/management/commands/merge.py
@@ -38,7 +38,7 @@ class Command(BaseCommand):
return merge_command(
create=options.get('create'),
base_dir=get_setting('BASE_DIR'),
- domain_methods=get_setting('DOMAIN_METHODS'),
+ standalone_domains=get_setting('STANDALONE_DOMAINS'),
languages=getattr(settings, 'LANGUAGES', [])
)
diff --git a/puente/settings.py b/puente/settings.py
index 55609ca..edf5321 100644
--- a/puente/settings.py
+++ b/puente/settings.py
@@ -7,6 +7,12 @@ TEXT_DOMAIN = 'django'
# Keywords indicating gettext calls
KEYWORDS = generate_keywords()
+# By default, all the domains you specify will be merged into one big django.po
+# file. If you want to separate a domain from the main .po file, specify it in
+# this list. Make sure to include TEXT_DOMAIN in this list, even if you have
+# other .po files you're generating.
+STANDALONE_DOMAINS = [TEXT_DOMAIN]
+
# Prefixes that indicate a comment tag intended for localizers
COMMENT_TAGS = ['L10n:', 'L10N:', 'l10n:', 'l10N:', 'Translators:']
| pgettext for templates
Templates have `_` (alias for gettext), `gettext` and `ngettext` available. These are handled by Jinja2 contextfunctions which call the underlying installed gettext/ngettext callables, get the strings back, and then wrap them in `Markup`.
https://github.com/mitsuhiko/jinja2/blob/master/jinja2/ext.py#L135
Yay! That's great!
However, there's no pgettext and no way that I can see to pass in a msgctxt.
This issue covers figuring out what to do about that. At the moment, I'm leaning towards adding it as a global to our `PuenteI18nExtension` for the short-term.
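A minimal sketch of such a global (mirroring the existing gettext/ngettext wrappers, and assuming Django's `pgettext` is importable) could look like:

```python
from django.utils.translation import pgettext as pgettext_real
from jinja2.utils import contextfunction, Markup


@contextfunction
def pgettext(__context, context, message, **variables):
    # Look up the translation under the given msgctxt, then mark the result
    # safe when autoescaping is on, like Jinja2's own gettext wrapper does.
    rv = pgettext_real(context, message)
    if __context.eval_ctx.autoescape:
        rv = Markup(rv)
    return rv % variables
```

Templates could then call `{{ pgettext("month abbreviation", "May") }}` the same way they call `gettext` today.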
Long-term, we should submit a PR for this issue: https://github.com/mitsuhiko/jinja2/issues/441 | mozilla/puente | diff --git a/tests/test_ext.py b/tests/test_ext.py
index c6ebc92..aa912f3 100644
--- a/tests/test_ext.py
+++ b/tests/test_ext.py
@@ -107,6 +107,74 @@ class TestPuenteI18nExtension:
)
assert render(tmpl) == '<b>multiple <i>bar</i></b>'
+ def test_pgettext(self):
+ tmpl = '{{ pgettext("context", "message") }}'
+ assert render(tmpl) == 'message'
+
+ def test_pgettext_is_safe(self):
+ tmpl = '{{ pgettext("context", "<b>foo</b>") }}'
+ assert render(tmpl) == '<b>foo</b>'
+
+ def test_pgettext_variable_value_notsafe(self):
+ tmpl = '{{ pgettext("context", "<b>%(foo)s</b>", foo="<i>bar</i>") }}'
+ assert render(tmpl) == '<b><i>bar</i></b>'
+
+ def test_pgettext_variable_value_marked_safe_is_safe(self):
+ tmpl = '{{ pgettext("context", "<b>%(foo)s</b>", foo="<i>bar</i>"|safe) }}'
+ assert render(tmpl) == '<b><i>bar</i></b>'
+
+ def test_pgettext_variable_values_autoescape_false(self):
+ tmpl = (
+ '{% autoescape False %}'
+ '{{ pgettext("context", "<b>%(foo)s</b>", foo="<i>bar</i>") }}'
+ '{% endautoescape %}'
+ )
+ assert render(tmpl) == '<b><i>bar</i></b>'
+
+ def test_npgettext(self):
+ tmpl = '{{ npgettext("context", "sing", "plur", 1) }}'
+ assert render(tmpl) == "sing"
+ tmpl = '{{ npgettext("context", "sing", "plur", 2) }}'
+ assert render(tmpl) == "plur"
+
+ def test_npgettext_is_safe(self):
+ tmpl = '{{ npgettext("context", "<b>sing</b>", "<b>plur</b>", 1) }}'
+ assert render(tmpl) == "<b>sing</b>"
+ tmpl = '{{ npgettext("context", "<b>sing</b>", "<b>plur</b>", 2) }}'
+ assert render(tmpl) == "<b>plur</b>"
+
+ def test_npgettext_variable_num(self):
+ tmpl = '{{ npgettext("context", "<b>sing %(num)s</b>", "<b>plur %(num)s</b>", 1) }}'
+ assert render(tmpl) == "<b>sing 1</b>"
+ tmpl = '{{ npgettext("context", "<b>sing %(num)s</b>", "<b>plur %(num)s</b>", 2) }}'
+ assert render(tmpl) == "<b>plur 2</b>"
+
+ def test_npgettext_variable_values_notsafe(self):
+ tmpl = '{{ npgettext("context", "<b>sing %(foo)s</b>", "<b>plur %(foo)s</b>", 1, foo="<i>bar</i>") }}'
+ assert render(tmpl) == '<b>sing <i>bar</i></b>'
+ tmpl = '{{ npgettext("context", "<b>sing %(foo)s</b>", "<b>plur %(foo)s</b>", 2, foo="<i>bar</i>") }}'
+ assert render(tmpl) == '<b>plur <i>bar</i></b>'
+
+ def test_npgettext_variable_value_marked_safe_is_safe(self):
+ tmpl = '{{ npgettext("context", "<b>sing %(foo)s</b>", "<b>plur %(foo)s</b>", 1, foo="<i>bar</i>"|safe) }}'
+ assert render(tmpl) == '<b>sing <i>bar</i></b>'
+ tmpl = '{{ npgettext("context", "<b>sing %(foo)s</b>", "<b>plur %(foo)s</b>", 2, foo="<i>bar</i>"|safe) }}'
+ assert render(tmpl) == '<b>plur <i>bar</i></b>'
+
+ def test_npgettext_variable_values_autoescape_false(self):
+ tmpl = (
+ '{% autoescape False %}'
+ '{{ npgettext("context", "<b>sing %(foo)s</b>", "<b>plur %(foo)s</b>", 1, foo="<i>bar</i>") }}'
+ '{% endautoescape %}'
+ )
+ assert render(tmpl) == '<b>sing <i>bar</i></b>'
+ tmpl = (
+ '{% autoescape False %}'
+ '{{ npgettext("context", "<b>sing %(foo)s</b>", "<b>plur %(foo)s</b>", 2, foo="<i>bar</i>") }}'
+ '{% endautoescape %}'
+ )
+ assert render(tmpl) == '<b>plur <i>bar</i></b>'
+
def test_trans(self):
tmpl = '<div>{% trans %}puente rules!{% endtrans %}</div>'
assert render(tmpl) == '<div>puente rules!</div>'
diff --git a/tests/test_extract.py b/tests/test_extract.py
index bc91f54..0a5a4c5 100644
--- a/tests/test_extract.py
+++ b/tests/test_extract.py
@@ -41,6 +41,7 @@ class TestExtractCommand:
# Extract
extract_command(
+ domain='all',
outputdir=str(tmpdir),
domain_methods={
'django': [
@@ -48,6 +49,7 @@ class TestExtractCommand:
('*.html', 'jinja2'),
]
},
+ standalone_domains=puente_settings.STANDALONE_DOMAINS,
text_domain=puente_settings.TEXT_DOMAIN,
keywords=puente_settings.KEYWORDS,
comment_tags=puente_settings.COMMENT_TAGS,
@@ -81,6 +83,7 @@ class TestExtractCommand:
def test_header(self, tmpdir):
# Extract
extract_command(
+ domain='all',
outputdir=str(tmpdir),
domain_methods={
'django': [
@@ -88,6 +91,7 @@ class TestExtractCommand:
('*.html', 'jinja2'),
]
},
+ standalone_domains=puente_settings.STANDALONE_DOMAINS,
text_domain=puente_settings.TEXT_DOMAIN,
keywords=puente_settings.KEYWORDS,
comment_tags=puente_settings.COMMENT_TAGS,
@@ -139,6 +143,7 @@ class TestExtractCommand:
# Extract
extract_command(
+ domain='all',
outputdir=str(tmpdir),
domain_methods={
'django': [
@@ -146,6 +151,7 @@ class TestExtractCommand:
('*.html', 'jinja2'),
]
},
+ standalone_domains=puente_settings.STANDALONE_DOMAINS,
text_domain=puente_settings.TEXT_DOMAIN,
keywords=puente_settings.KEYWORDS,
comment_tags=puente_settings.COMMENT_TAGS,
@@ -187,6 +193,7 @@ class TestExtractCommand:
# Extract
extract_command(
+ domain='all',
outputdir=str(tmpdir),
domain_methods={
'django': [
@@ -194,6 +201,7 @@ class TestExtractCommand:
('*.html', 'jinja2'),
]
},
+ standalone_domains=puente_settings.STANDALONE_DOMAINS,
text_domain=puente_settings.TEXT_DOMAIN,
keywords=puente_settings.KEYWORDS,
comment_tags=puente_settings.COMMENT_TAGS,
@@ -237,6 +245,7 @@ class TestExtractCommand:
# Extract
extract_command(
+ domain='all',
outputdir=str(tmpdir),
domain_methods={
'django': [
@@ -244,6 +253,7 @@ class TestExtractCommand:
('*.html', 'jinja2'),
]
},
+ standalone_domains=puente_settings.STANDALONE_DOMAINS,
text_domain=puente_settings.TEXT_DOMAIN,
keywords=puente_settings.KEYWORDS,
comment_tags=puente_settings.COMMENT_TAGS,
diff --git a/tests/test_merge.py b/tests/test_merge.py
index 29e1456..0278ba1 100644
--- a/tests/test_merge.py
+++ b/tests/test_merge.py
@@ -8,6 +8,7 @@ from django.core.management import CommandError
from django.test import TestCase
from puente.commands import merge_command
+from puente.settings import get_setting
class TestManageMerge(TestCase):
@@ -66,12 +67,7 @@ class TestMergecommand:
merge_command(
create=True,
base_dir=str(tmpdir),
- domain_methods={
- 'django': [
- ('*.py', 'python'),
- ('*.html', 'jinja2'),
- ]
- },
+ standalone_domains=get_setting('STANDALONE_DOMAINS'),
languages=['de', 'en-US', 'fr']
)
@@ -84,11 +80,6 @@ class TestMergecommand:
merge_command(
create=True,
base_dir=str(tmpdir),
- domain_methods={
- 'django': [
- ('*.py', 'python'),
- ('*.html', 'jinja2'),
- ]
- },
+ standalone_domains=get_setting('STANDALONE_DOMAINS'),
languages=['de', 'en-US', 'fr']
)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 9
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-pythonpath",
"pytest-django"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt",
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
asgiref==3.8.1
attrs==25.3.0
babel==2.17.0
backports.tarfile==1.2.0
build==1.2.2.post1
cachetools==5.5.2
certifi==2025.1.31
cffi==1.17.1
chardet==5.2.0
charset-normalizer==3.4.1
check-manifest==0.50
colorama==0.4.6
cryptography==44.0.2
distlib==0.3.9
Django==4.2.20
django-jinja==2.11.0
docutils==0.21.2
filelock==3.18.0
id==1.5.0
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig==2.1.0
jaraco.classes==3.4.0
jaraco.context==6.0.1
jaraco.functools==4.1.0
jeepney==0.9.0
Jinja2==3.1.6
keyring==25.6.0
markdown-it-py==3.0.0
MarkupSafe==3.0.2
mdurl==0.1.2
more-itertools==10.6.0
nh3==0.2.21
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
-e git+https://github.com/mozilla/puente.git@4208bed33250cd3c9d8dacdc73dbc274ce21c85d#egg=puente
py==1.11.0
pycparser==2.22
Pygments==2.19.1
pyproject-api==1.9.0
pyproject_hooks==1.2.0
pytest==6.2.5
pytest-django==4.5.2
pytest-pythonpath==0.7.4
readme_renderer==44.0
requests==2.32.3
requests-toolbelt==1.0.0
rfc3986==2.0.0
rich==14.0.0
SecretStorage==3.3.3
snowballstemmer==2.2.0
Sphinx==7.4.7
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
sqlparse==0.5.3
swebench_matterhorn @ file:///swebench_matterhorn
toml==0.10.2
tomli==2.2.1
tox==4.25.0
twine==6.1.0
typing_extensions==4.13.0
urllib3==2.3.0
virtualenv==20.29.3
zipp==3.21.0
| name: puente
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- asgiref==3.8.1
- attrs==25.3.0
- babel==2.17.0
- backports-tarfile==1.2.0
- build==1.2.2.post1
- cachetools==5.5.2
- certifi==2025.1.31
- cffi==1.17.1
- chardet==5.2.0
- charset-normalizer==3.4.1
- check-manifest==0.50
- colorama==0.4.6
- cryptography==44.0.2
- distlib==0.3.9
- django==4.2.20
- django-jinja==2.11.0
- docutils==0.21.2
- filelock==3.18.0
- id==1.5.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- jaraco-classes==3.4.0
- jaraco-context==6.0.1
- jaraco-functools==4.1.0
- jeepney==0.9.0
- jinja2==3.1.6
- keyring==25.6.0
- markdown-it-py==3.0.0
- markupsafe==3.0.2
- mdurl==0.1.2
- more-itertools==10.6.0
- nh3==0.2.21
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- py==1.11.0
- pycparser==2.22
- pygments==2.19.1
- pyproject-api==1.9.0
- pyproject-hooks==1.2.0
- pytest==6.2.5
- pytest-django==4.5.2
- pytest-pythonpath==0.7.4
- readme-renderer==44.0
- requests==2.32.3
- requests-toolbelt==1.0.0
- rfc3986==2.0.0
- rich==14.0.0
- secretstorage==3.3.3
- snowballstemmer==2.2.0
- sphinx==7.4.7
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- sqlparse==0.5.3
- swebench-matterhorn==0.0.0
- toml==0.10.2
- tomli==2.2.1
- tox==4.25.0
- twine==6.1.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/puente
| [
"tests/test_merge.py::TestMergecommand::test_missing_pot_file"
]
| [
"tests/test_extract.py::TestManageExtract::test_help",
"tests/test_merge.py::TestManageMerge::test_help",
"tests/test_ext.py::TestPuenteI18nExtension::test_gettext",
"tests/test_ext.py::TestPuenteI18nExtension::test_gettext_is_safe",
"tests/test_ext.py::TestPuenteI18nExtension::test_gettext_variable_values_notsafe",
"tests/test_ext.py::TestPuenteI18nExtension::test_gettext_variable_values_autoescape_false",
"tests/test_ext.py::TestPuenteI18nExtension::test_gettext_variable_values_marked_safe_are_safe",
"tests/test_ext.py::TestPuenteI18nExtension::test_gettext_format_notsafe",
"tests/test_ext.py::TestPuenteI18nExtension::test_gettext_format_autoescape_false",
"tests/test_ext.py::TestPuenteI18nExtension::test_ngettext",
"tests/test_ext.py::TestPuenteI18nExtension::test_ngettext_is_safe",
"tests/test_ext.py::TestPuenteI18nExtension::test_ngettext_variable_num",
"tests/test_ext.py::TestPuenteI18nExtension::test_ngettext_variable_values_notsafe",
"tests/test_ext.py::TestPuenteI18nExtension::test_ngettext_variable_value_marked_safe_is_safe",
"tests/test_ext.py::TestPuenteI18nExtension::test_ngettext_variable_values_autoescape_false",
"tests/test_ext.py::TestPuenteI18nExtension::test_pgettext",
"tests/test_ext.py::TestPuenteI18nExtension::test_pgettext_is_safe",
"tests/test_ext.py::TestPuenteI18nExtension::test_pgettext_variable_value_notsafe",
"tests/test_ext.py::TestPuenteI18nExtension::test_pgettext_variable_value_marked_safe_is_safe",
"tests/test_ext.py::TestPuenteI18nExtension::test_pgettext_variable_values_autoescape_false",
"tests/test_ext.py::TestPuenteI18nExtension::test_npgettext",
"tests/test_ext.py::TestPuenteI18nExtension::test_npgettext_is_safe",
"tests/test_ext.py::TestPuenteI18nExtension::test_npgettext_variable_num",
"tests/test_ext.py::TestPuenteI18nExtension::test_npgettext_variable_values_notsafe",
"tests/test_ext.py::TestPuenteI18nExtension::test_npgettext_variable_value_marked_safe_is_safe",
"tests/test_ext.py::TestPuenteI18nExtension::test_npgettext_variable_values_autoescape_false",
"tests/test_ext.py::TestPuenteI18nExtension::test_trans",
"tests/test_ext.py::TestPuenteI18nExtension::test_trans_whitespace",
"tests/test_ext.py::TestPuenteI18nExtension::test_trans_plural",
"tests/test_ext.py::TestPuenteI18nExtension::test_trans_interpolation",
"tests/test_ext.py::TestPuenteI18nExtension::test_trans_interpolation_with_autoescape_off",
"tests/test_ext.py::TestPuenteI18nExtension::test_trans_interpolation_and_safe",
"tests/test_ext.py::TestPuenteI18nExtension::test_trans_interpolation_and_safe_with_autoescape_off",
"tests/test_extract.py::TestExtractCommand::test_basic_extraction",
"tests/test_extract.py::TestExtractCommand::test_header",
"tests/test_extract.py::TestExtractCommand::test_whitespace_collapsing",
"tests/test_extract.py::TestExtractCommand::test_context",
"tests/test_extract.py::TestExtractCommand::test_plurals",
"tests/test_merge.py::TestMergecommand::test_basic"
]
| []
| []
| BSD 3-Clause "New" or "Revised" License | 306 | [
"README.rst",
"puente/settings.py",
"docs/installation.rst",
"docs/migratingfromtower.rst",
"puente/ext.py",
"puente/management/commands/merge.py",
"puente/management/commands/extract.py",
"docs/goals.rst",
"puente/commands.py"
]
| [
"README.rst",
"puente/settings.py",
"docs/installation.rst",
"docs/migratingfromtower.rst",
"puente/ext.py",
"puente/management/commands/merge.py",
"puente/management/commands/extract.py",
"docs/goals.rst",
"puente/commands.py"
]
|
|
twisted__tubes-32 | 188aad3c9caf5d07e894d1fda6fb5f41b8bfc41a | 2015-11-22 00:11:15 | 188aad3c9caf5d07e894d1fda6fb5f41b8bfc41a | diff --git a/.travis.yml b/.travis.yml
index 04f9ff1..6944375 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -5,7 +5,6 @@ env:
- secure: "CvFj8Df5OiDRrW7EsTGhkltdmNlYerx9hH/tSKxiNFVDBUUFaTN7rUr7kWcOKchzerGwk7zjZ4SRXyoSCs+Srht6GZxWHkNROwKpp5Xvf5clbLXbp7GO1X/L5rLgrXpGwtkhgNuHx0X2IUCDHUQAUSumPgZcNFu3emgVxEqabN0="
matrix:
- TOX_ENV=lint
- - TOX_ENV=py26
- TOX_ENV=py27
- TOX_ENV=pypy
- TOX_ENV=docs
diff --git a/tubes/fan.py b/tubes/fan.py
index 6322c99..67ad05f 100644
--- a/tubes/fan.py
+++ b/tubes/fan.py
@@ -355,7 +355,7 @@ class _OutDrain(object):
self._pause = None
if p is not None:
p.unpause()
- self.fount = fount
+ beginFlowingFrom(self, fount)
def receive(self, item):
diff --git a/tubes/routing.py b/tubes/routing.py
index 7682fe5..725d868 100644
--- a/tubes/routing.py
+++ b/tubes/routing.py
@@ -1,4 +1,4 @@
-# -*- test-case-name: tubes.test.test_fan -*-
+# -*- test-case-name: tubes.test.test_routing -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
@@ -8,87 +8,97 @@ an appropriate output, stripping the addressing information off.
Use like so::
- from tubes.routing import Router, Routed, to
+ from tubes.tube import receiver, series
+ from tubes.routing import Router, to
- aRouter = Router(int)
+ aRouter = Router()
+ evens = aRouter.newRoute()
+ odds = aRouter.newRoute()
- evens, evenFount = aRouter.newRoute()
- odds, oddFount = aRouter.newRoute()
+ @receiver()
+ def evenOdd(item):
+ if (item % 2) == 0:
+ yield to(evens, item)
+ else:
+ yield to(odds, item)
- @tube
- class EvenOdd(object):
- outputType = Routed(int)
- def received(self, item):
- if (item % 2) == 0:
- yield to(evens, item)
- else:
- yield to(odds, item)
+ numbers.flowTo(series(evenOdd, aRouter.drain))
- numbers.flowTo(aRouter)
+Assuming C{numbers} is a fount of counting integers, this creates two founts:
+C{evens} and C{odds}, whose outputs are even and odd integers, respectively.
+Note that C{evenOdd} also uses C{evens} and C{odds} as I{addresses}; the first
+argument to L{to} says I{where} the value will go.
-This creates a fount in evenFount and oddFount, which each have an outputType
-of "int".
-
-Why do this rather than just having C{EvenOdd} just call methods directly based
+Why do this rather than just having C{evenOdd} just call methods directly based
on whether a number is even or odd?
By using a L{Router}, flow control relationships are automatically preserved by
-the same mechanism that tubes usually use. The distinct drains of evenFount
-and oddFount can both be independently paused, and the pause state will be
+the same mechanism that tubes usually use. The distinct drains of C{evens} and
+C{odds} can both independently pause their founts, and the pause state will be
propagated to the "numbers" fount. If you want to send on outputs to multiple
drains which may have complex flow-control interrelationships, you can't do
that by calling the C{receive} method directly since any one of those methods
-might reentrantly pause you.
+might reentrantly pause its fount.
"""
-from .tube import tube, receiver
+from zope.interface import implementer
+
+from .tube import receiver, series
+from .itube import IDrain
from .fan import Out
+from .kit import beginFlowingFrom
if 0:
- from zope.interface.interfaces import IInterface
- IInterface
+ from zope.interface.interfaces import ISpecification
+ ISpecification
+__all__ = [
+ "Router",
+ "Routed",
+ "to",
+]
class Routed(object):
"""
- A L{Routed} is an interface describing another interface that has been
- wrapped in a C{to}. As such, it is an incomplete implementation of
- L{IInterface}.
+ A L{Routed} is a specification describing another specification that has
+ been wrapped in a C{to}. As such, it is an incomplete implementation of
+ L{ISpecification}.
"""
- def __init__(self, interface=None):
+ def __init__(self, specification=None):
"""
- Derive a L{Routed} version of C{interface}.
+ Derive a L{Routed} version of C{specification}.
- @param interface: the interface that will be provided by the C{what}
- attribute of providers of this interface.
- @type interface: L{IInterface}
+ @param specification: the specification that will be provided by the
+ C{what} attribute of providers of this specification.
+ @type specification: L{ISpecification}
"""
- self.interface = interface
+ self.specification = specification
def isOrExtends(self, other):
"""
Is this L{Routed} substitutable for the given specification?
- @param other: Another L{Routed} or interface.
- @type other: L{IInterface}
+ @param other: Another L{Routed} or specification.
+ @type other: L{ISpecification}
@return: L{True} if so, L{False} if not.
"""
if not isinstance(other, Routed):
return False
- if self.interface is None or other.interface is None:
+ if self.specification is None or other.specification is None:
return True
- return self.interface.isOrExtends(other.interface)
+ return self.specification.isOrExtends(other.specification)
def providedBy(self, instance):
"""
Is this L{Routed} provided by a particular value?
- @param instance: an object which may or may not provide this interface.
+ @param instance: an object which may or may not provide this
+ specification.
@type instance: L{object}
@return: L{True} if so, L{False} if not.
@@ -96,9 +106,27 @@ class Routed(object):
"""
if not isinstance(instance, _To):
return False
- if self.interface is None:
+ if self.specification is None:
return True
- return self.interface.providedBy(instance._what)
+ return self.specification.providedBy(instance._what)
+
+
+ def __eq__(self, other):
+ """
+ Routed(X) compares equal to Routed(X).
+ """
+ if not isinstance(other, Routed):
+ return NotImplemented
+ return self.specification == other.specification
+
+
+ def __ne__(self, other):
+ """
+ Routed(X) compares unequal to Routed(Y).
+ """
+ if not isinstance(other, Routed):
+ return NotImplemented
+ return self.specification != other.specification
@@ -119,6 +147,13 @@ class _To(object):
self._what = what
+ def __repr__(self):
+ """
+ @return: an explanatory string.
+ """
+ return "to({!r}, {!r})".format(self._where, self._what)
+
+
def to(where, what):
"""
@@ -138,7 +173,6 @@ def to(where, what):
-@tube
class Router(object):
"""
A drain with multiple founts that consumes L{Routed}C{(IX)} from its input
@@ -154,10 +188,21 @@ class Router(object):
def __init__(self, outputType=None):
self._out = Out()
self._outputType = outputType
+ @implementer(IDrain)
+ class NullDrain(object):
+ inputType = outputType
+ fount = None
+ def flowingFrom(self, fount):
+ beginFlowingFrom(self, fount)
+ def receive(self, item):
+ pass
+ def flowStopped(self, reason):
+ pass
+ self.newRoute().flowTo(NullDrain())
self.drain = self._out.drain
- def newRoute(self):
+ def newRoute(self, name=None):
"""
Create a new route.
@@ -168,13 +213,18 @@ class Router(object):
to L{Router.drain} should be a L{to} constructed with a value returned
from this method as the "where" parameter.
+ @param name: Give the route a name for debugging purposes.
+ @type name: native L{str}
+
@return: L{IFount}
"""
@receiver(inputType=Routed(self._outputType),
- outputType=self._outputType)
+ outputType=self._outputType,
+ name=name)
def received(item):
- if isinstance(item, to):
- if item._where is fount:
- yield item._what
- fount = self._out.newFount().flowTo(received)
+ if not isinstance(item, _To):
+ raise TypeError("{0} is not routed".format(item))
+ if item._where is fount:
+ yield item._what
+ fount = self._out.newFount().flowTo(series(received))
return fount
diff --git a/tubes/tube.py b/tubes/tube.py
index fe23b49..39de857 100644
--- a/tubes/tube.py
+++ b/tubes/tube.py
@@ -123,17 +123,20 @@ class _Tubule(object):
"""
A tube created for the C{@tube} decorator.
"""
- def __init__(self, inputType, outputType, received):
+ def __init__(self, inputType, outputType, received, name):
"""
@param inputType: An interface for the input type.
@param outputType: an interface for the output type.
@param received: a callable to implement C{received}.
+
+ @param name: a string describing this L{_Tubule}.
"""
self.inputType = inputType
self.outputType = outputType
self.received = received
+ self._name = name
def started(self):
@@ -156,8 +159,15 @@ class _Tubule(object):
return ()
+ def __repr__(self):
+ """
+ @return: this L{_Tubule}'s name.
+ """
+ return self._name
-def receiver(inputType=None, outputType=None):
+
+
+def receiver(inputType=None, outputType=None, name=None):
"""
Decorator for a stateless function which receives inputs.
@@ -171,12 +181,16 @@ def receiver(inputType=None, outputType=None):
@param outputType: The C{outputType} attribute of the resulting L{ITube}.
+    @param name: a name describing the tubule, shown as its C{repr}.
+ @type name: native L{str}
+
@return: a stateless tube with the decorated method as its C{received}
method.
@rtype: L{ITube}
"""
def decorator(decoratee):
- return _Tubule(inputType, outputType, decoratee)
+ return _Tubule(inputType, outputType, decoratee,
+ name if name is not None else decoratee.__name__)
return decorator
| tubes Router is broken
Besides the broken docstring for the Router reported in #21, the Router also generally appears not to work; or am I missing something simple here?
See my attempt at creating unit tests for Router:
https://github.com/david415/tubes/tree/add_routing_tests.0
https://github.com/david415/tubes/blob/add_routing_tests.0/tubes/test/test_routing.py
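A hypothetical minimal reproduction suggests that merely creating a route is enough to trigger the failure:

```python
# Hypothetical minimal reproduction: newRoute() hands the bare @receiver
# tubule straight to flowTo() without wrapping it in series(), and the
# tubule has no flowingFrom() method.
from tubes.routing import Router

router = Router()
route = router.newRoute()  # AttributeError, as in the traceback below
```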
Concretely, when the Router's newRoute() method is called, this results in:
```
$ trial tubes.test.test_routing
tubes.test.test_routing
TestBasicRouter
test_basic_router ... [ERROR]
===============================================================================
[ERROR]
Traceback (most recent call last):
File "/home/user/tubes/tubes/test/test_routing.py", line 64, in test_basic_router
evenOddTube.addRoutes()
File "/home/user/tubes/tubes/test/test_routing.py", line 41, in addRoutes
self.evenRoute = self.newRoute()
File "/home/user/tubes/tubes/routing.py", line 179, in newRoute
fount = self._out.newFount().flowTo(received)
File "/home/user/tubes/tubes/fan.py", line 253, in flowTo
return beginFlowingTo(self, drain)
File "/home/user/tubes/tubes/kit.py", line 104, in beginFlowingTo
return drain.flowingFrom(fount)
exceptions.AttributeError: '_Tubule' object has no attribute 'flowingFrom'
tubes.test.test_routing.TestBasicRouter.test_basic_router
-------------------------------------------------------------------------------
Ran 1 tests in 0.018s
FAILED (errors=1)
```
| twisted/tubes | diff --git a/tubes/test/test_routing.py b/tubes/test/test_routing.py
new file mode 100644
index 0000000..f0b4225
--- /dev/null
+++ b/tubes/test/test_routing.py
@@ -0,0 +1,117 @@
+# -*- test-case-name: tubes.test.test_routing -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Tests for L{tubes.routing}.
+"""
+
+from unittest import TestCase
+
+from ..routing import Router, to, Routed
+from ..tube import series, receiver
+from .util import FakeFount, FakeDrain, IFakeOutput, IFakeInput
+
+if 0:
+ # Names used by PyDoctor.
+ from ..itube import IFount
+ IFount
+
+
+
+class RouterTests(TestCase):
+ """
+ Tests for L{Router}.
+ """
+
+ def test_twoRoutes(self):
+ """
+ The L{IFount} feeding into a L{Router} may yield L{to} each route
+ returned from L{Router.newRoute}.
+ """
+ @receiver()
+ def chooser(item):
+ if item % 2:
+ yield to(odd, item)
+ else:
+ yield to(even, item)
+ router = Router()
+ even = router.newRoute("even")
+ evens = FakeDrain()
+ even.flowTo(evens)
+ odd = router.newRoute("odd")
+ odds = FakeDrain()
+ odd.flowTo(odds)
+ ff = FakeFount()
+ routeDrain = series(chooser, router.drain)
+ ff.flowTo(routeDrain)
+ for x in range(10):
+ ff.drain.receive(x)
+ self.assertEqual(odds.received, [1, 3, 5, 7, 9])
+ self.assertEqual(evens.received, [0, 2, 4, 6, 8])
+
+
+ def test_routeRepr(self):
+ """
+ It's useful to C{repr} a route for debugging purposes; if we give it a
+ name, its C{repr} will contain that name.
+ """
+ router = Router()
+ route = router.newRoute("hello")
+ self.assertTrue("hello" in repr(route))
+
+
+ def test_defaultTypeChecking(self):
+ """
+ L{Router}'s drain accepts only L{Routed} objects; if no other type is
+ specified, L{Routed}C{(None)}.
+ """
+ router = Router()
+ ff = FakeFount(IFakeOutput)
+ self.assertEqual(router.drain.inputType, Routed(None))
+ self.assertRaises(TypeError, ff.flowTo, router.drain)
+ self.assertEqual(router.newRoute().outputType, None)
+
+
+ def test_specifiedTypeChecking(self):
+ """
+ The C{outputType} argument to L{Router}'s constructor specifies the
+ type of output that its routes will provide, and also the routed type
+ required as an input.
+ """
+ router = Router(IFakeInput)
+ incorrect = FakeDrain(IFakeOutput)
+ correct = FakeDrain(IFakeInput)
+ self.assertEqual(router.drain.inputType, Routed(IFakeInput))
+ self.assertEqual(router.newRoute().outputType, IFakeInput)
+ self.assertRaises(TypeError, router.newRoute().flowTo, incorrect)
+ self.assertEqual(router.newRoute().flowTo(correct), None)
+ correctFount = FakeFount(Routed(IFakeInput))
+ incorrectFount = FakeFount(Routed(IFakeOutput))
+ self.assertRaises(TypeError, incorrectFount.flowTo, router.drain)
+ self.assertEquals(None, correctFount.flowTo(router.drain))
+
+
+
+class RoutedTests(TestCase):
+ """
+ Tests for L{Routed}.
+ """
+
+ def test_eq(self):
+ """
+ C{==} on L{Routed} is L{True} for equivalent ones, L{False} otherwise.
+ """
+ self.assertEqual(True, Routed(IFakeInput) == Routed(IFakeInput))
+ self.assertEqual(False, Routed(IFakeInput) == Routed(IFakeOutput))
+ self.assertEqual(False, Routed() == 7)
+
+
+ def test_ne(self):
+ """
+ C{==} on L{Routed} is L{False} for equivalent ones, L{True} otherwise.
+ """
+ self.assertEqual(False, Routed(IFakeInput) != Routed(IFakeInput))
+ self.assertEqual(True, Routed(IFakeInput) != Routed(IFakeOutput))
+ self.assertEqual(True, Routed() != 7)
+
diff --git a/tubes/test/util.py b/tubes/test/util.py
index dd36224..0ebf41b 100644
--- a/tubes/test/util.py
+++ b/tubes/test/util.py
@@ -77,13 +77,12 @@ class FakeDrain(object):
@type stopped: L{list}
"""
- inputType = None
-
fount = None
- def __init__(self):
+ def __init__(self, inputType=None):
self.received = []
self.stopped = []
+ self.inputType = inputType
def flowingFrom(self, fount):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 4
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
Automat==24.8.1
characteristic==14.3.0
constantly==23.10.4
coverage==7.8.0
exceptiongroup==1.2.2
hyperlink==21.0.0
idna==3.10
incremental==24.7.2
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-cov==6.0.0
six==1.17.0
tomli==2.2.1
-e git+https://github.com/twisted/tubes.git@188aad3c9caf5d07e894d1fda6fb5f41b8bfc41a#egg=Tubes
Twisted==24.11.0
typing_extensions==4.13.0
zope.interface==7.2
| name: tubes
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- automat==24.8.1
- characteristic==14.3.0
- constantly==23.10.4
- coverage==7.8.0
- exceptiongroup==1.2.2
- hyperlink==21.0.0
- idna==3.10
- incremental==24.7.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-cov==6.0.0
- six==1.17.0
- tomli==2.2.1
- twisted==24.11.0
- typing-extensions==4.13.0
- zope-interface==7.2
prefix: /opt/conda/envs/tubes
| [
"tubes/test/test_routing.py::RouterTests::test_defaultTypeChecking",
"tubes/test/test_routing.py::RouterTests::test_routeRepr",
"tubes/test/test_routing.py::RouterTests::test_specifiedTypeChecking",
"tubes/test/test_routing.py::RouterTests::test_twoRoutes",
"tubes/test/test_routing.py::RoutedTests::test_eq",
"tubes/test/test_routing.py::RoutedTests::test_ne"
]
| []
| []
| []
| MIT License | 308 | [
"tubes/routing.py",
".travis.yml",
"tubes/fan.py",
"tubes/tube.py"
]
| [
"tubes/routing.py",
".travis.yml",
"tubes/fan.py",
"tubes/tube.py"
]
|
|
joblib__joblib-277 | 484405ccea3cbcbd95e3cf241f15bf3eeb1aa8b6 | 2015-11-23 15:25:34 | 40341615cc2600675ce7457d9128fb030f6f89fa | diff --git a/joblib/hashing.py b/joblib/hashing.py
index f8a9ee6..93bc5e3 100644
--- a/joblib/hashing.py
+++ b/joblib/hashing.py
@@ -59,7 +59,8 @@ class Hasher(Pickler):
try:
self.dump(obj)
except pickle.PicklingError as e:
- warnings.warn('PicklingError while hashing %r: %r' % (obj, e))
+ e.args += ('PicklingError while hashing %r: %r' % (obj, e),)
+ raise
dumps = self.stream.getvalue()
self._hash.update(dumps)
if return_digest:
| Surprising behaviour when one of the cached function arguments is not picklable
As reported by @arthurmensch: when one of the arguments is not picklable, the cached function result will only depend on the hash of the arguments that come before the non-picklable one.
A simple snippet to show the problem:
```python
import joblib
mem = joblib.Memory('/tmp/joblib')
@mem.cache()
def f(a, b):
return b
non_picklable = lambda: None
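# Both calls below print 'first': hashing the arguments only warns on the
# unpicklable lambda, so the second call hits the cache entry created by
# the first one, despite the differing value of 'b'.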
print(f(non_picklable, 'first'))
print(f(non_picklable, 'second'))
```
Output:
```
/home/lesteve/dev/joblib/joblib/hashing.py:62: UserWarning: PicklingError while hashing {'b': 'first', 'a': <function <lambda> at 0x7f400d7ec8c8>}: PicklingError("Can't pickle <function <lambda> at 0x7f400d7ec8c8>: it's not found as __main__.<lambda>",)
warnings.warn('PicklingError while hashing %r: %r' % (obj, e))
________________________________________________________________________________
[Memory] Calling __main__--tmp-test_hash_non_picklable_arguments.f...
f(<function <lambda> at 0x7f400d7ec8c8>, 'first')
________________________________________________________________f - 0.0s, 0.0min
first
/home/lesteve/dev/joblib/joblib/hashing.py:62: UserWarning: PicklingError while hashing {'b': 'second', 'a': <function <lambda> at 0x7f400d7ec8c8>}: PicklingError("Can't pickle <function <lambda> at 0x7f400d7ec8c8>: it's not found as __main__.<lambda>",)
warnings.warn('PicklingError while hashing %r: %r' % (obj, e))
first
```
Why not just raise an exception in this case rather than returning a result with a warning that is almost certain to be ignored by the user? @GaelVaroquaux @ogrisel. | joblib/joblib | diff --git a/joblib/test/test_hashing.py b/joblib/test/test_hashing.py
index f0ce0eb..88407d0 100644
--- a/joblib/test/test_hashing.py
+++ b/joblib/test/test_hashing.py
@@ -23,6 +23,7 @@ from nose.tools import assert_equal
from joblib.hashing import hash, PY3
from joblib.func_inspect import filter_args
from joblib.memory import Memory
+from joblib.testing import assert_raises_regex
from joblib.test.test_memory import env as test_memory_env
from joblib.test.test_memory import setup_module as test_memory_setup_func
@@ -429,3 +430,12 @@ def test_hashes_stay_the_same_with_numpy_objects():
for to_hash, expected in zip(to_hash_list, expected_list):
yield assert_equal, hash(to_hash), expected
+
+
+def test_hashing_pickling_error():
+ def non_picklable():
+ return 42
+
+ assert_raises_regex(pickle.PicklingError,
+ 'PicklingError while hashing',
+ hash, non_picklable)
diff --git a/joblib/testing.py b/joblib/testing.py
index 8555a5a..e5cbae5 100644
--- a/joblib/testing.py
+++ b/joblib/testing.py
@@ -5,7 +5,7 @@ Helper for testing.
import sys
import warnings
import os.path
-
+import re
def warnings_to_stdout():
""" Redirect all warnings to stdout.
@@ -17,3 +17,30 @@ def warnings_to_stdout():
warnings.showwarning = showwarning
#warnings.simplefilter('always')
+
+
+try:
+ from nose.tools import assert_raises_regex
+except ImportError:
+ # For Python 2.7
+ try:
+ from nose.tools import assert_raises_regexp as assert_raises_regex
+ except ImportError:
+ # for Python 2.6
+ def assert_raises_regex(expected_exception, expected_regexp,
+ callable_obj=None, *args, **kwargs):
+ """Helper function to check for message patterns in exceptions"""
+
+ not_raised = False
+ try:
+ callable_obj(*args, **kwargs)
+ not_raised = True
+ except Exception as e:
+ error_message = str(e)
+ if not re.compile(expected_regexp).search(error_message):
+ raise AssertionError("Error message should match pattern "
+ "%r. %r does not." %
+ (expected_regexp, error_message))
+ if not_raised:
+ raise AssertionError("Should have raised %r" %
+ expected_exception(expected_regexp))
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "",
"pip_packages": [
"nose",
"coverage",
"numpy>=1.6.1",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/joblib/joblib.git@484405ccea3cbcbd95e3cf241f15bf3eeb1aa8b6#egg=joblib
nose==1.3.7
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: joblib
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/joblib
| [
"joblib/test/test_hashing.py::test_hashing_pickling_error"
]
| []
| [
"joblib/test/test_hashing.py::test_memory_setup_func",
"joblib/test/test_hashing.py::test_memory_teardown_func",
"joblib/test/test_hashing.py::test_hash_methods",
"joblib/test/test_hashing.py::test_numpy_datetime_array",
"joblib/test/test_hashing.py::test_hash_numpy_noncontiguous",
"joblib/test/test_hashing.py::test_hash_numpy_performance",
"joblib/test/test_hashing.py::test_bound_methods_hash",
"joblib/test/test_hashing.py::test_bound_cached_methods_hash",
"joblib/test/test_hashing.py::test_hash_object_dtype",
"joblib/test/test_hashing.py::test_numpy_scalar",
"joblib/test/test_hashing.py::test_dict_hash",
"joblib/test/test_hashing.py::test_set_hash",
"joblib/test/test_hashing.py::test_string",
"joblib/test/test_hashing.py::test_dtype"
]
| []
| BSD 3-Clause "New" or "Revised" License | 310 | [
"joblib/hashing.py"
]
| [
"joblib/hashing.py"
]
|
|
mozilla__puente-52 | a7d648b09a9b28feafdec48492aa1722d5add9ff | 2015-11-23 22:58:31 | f78d702e0d1376425d8d613a6573a896fc8d11a1 | diff --git a/puente/commands.py b/puente/commands.py
index a36d035..c21cdd3 100644
--- a/puente/commands.py
+++ b/puente/commands.py
@@ -129,10 +129,11 @@ def extract_command(outputdir, domain_methods, text_domain, keywords,
print('Done')
-def merge_command(create, base_dir, domain_methods, languages):
+def merge_command(create, backup, base_dir, domain_methods, languages):
"""
:arg create: whether or not to create directories if they don't
exist
+ :arg backup: whether or not to create backup .po files
:arg base_dir: BASE_DIR setting
:arg domain_methods: DOMAIN_METHODS setting
:arg languages: LANGUAGES setting
@@ -213,6 +214,7 @@ def merge_command(create, base_dir, domain_methods, languages):
'msgmerge',
'--update',
'--width=200',
+ '--backup=%s' % ('simple' if backup else 'off'),
domain_po,
'-'
]
diff --git a/puente/management/commands/merge.py b/puente/management/commands/merge.py
index 74de7d9..acf9b26 100644
--- a/puente/management/commands/merge.py
+++ b/puente/management/commands/merge.py
@@ -32,11 +32,17 @@ class Command(BaseCommand):
action='store_true', dest='create', default=False,
help='Create locale subdirectories'
),
+ make_option(
+ '-b', '--backup',
+ action='store_true', dest='backup', default=False,
+ help='Create backup files of .po files'
+ ),
)
def handle(self, *args, **options):
return merge_command(
create=options.get('create'),
+ backup=options.get('backup'),
base_dir=get_setting('BASE_DIR'),
domain_methods=get_setting('DOMAIN_METHODS'),
languages=getattr(settings, 'LANGUAGES', [])
| Add option to not create merge backup
According to https://www.gnu.org/software/gettext/manual/html_node/msgmerge-Invocation.html, that should be passed as `--backup=off` to the `msgmerge` tool. | mozilla/puente | diff --git a/tests/test_merge.py b/tests/test_merge.py
index 29e1456..60b46f1 100644
--- a/tests/test_merge.py
+++ b/tests/test_merge.py
@@ -65,6 +65,7 @@ class TestMergecommand:
merge_command(
create=True,
+ backup=True,
base_dir=str(tmpdir),
domain_methods={
'django': [
@@ -83,6 +84,7 @@ class TestMergecommand:
with pytest.raises(CommandError):
merge_command(
create=True,
+ backup=True,
base_dir=str(tmpdir),
domain_methods={
'django': [
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 2
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-pythonpath",
"pytest-django"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt",
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
asgiref==3.8.1
attrs==25.3.0
babel==2.17.0
backports.tarfile==1.2.0
build==1.2.2.post1
cachetools==5.5.2
certifi==2025.1.31
cffi==1.17.1
chardet==5.2.0
charset-normalizer==3.4.1
check-manifest==0.50
colorama==0.4.6
cryptography==44.0.2
distlib==0.3.9
Django==4.2.20
django-jinja==2.11.0
docutils==0.21.2
filelock==3.18.0
id==1.5.0
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig==2.1.0
jaraco.classes==3.4.0
jaraco.context==6.0.1
jaraco.functools==4.1.0
jeepney==0.9.0
Jinja2==3.1.6
keyring==25.6.0
markdown-it-py==3.0.0
MarkupSafe==3.0.2
mdurl==0.1.2
more-itertools==10.6.0
nh3==0.2.21
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
-e git+https://github.com/mozilla/puente.git@a7d648b09a9b28feafdec48492aa1722d5add9ff#egg=puente
py==1.11.0
pycparser==2.22
Pygments==2.19.1
pyproject-api==1.9.0
pyproject_hooks==1.2.0
pytest==6.2.5
pytest-django==4.5.2
pytest-pythonpath==0.7.4
readme_renderer==44.0
requests==2.32.3
requests-toolbelt==1.0.0
rfc3986==2.0.0
rich==14.0.0
SecretStorage==3.3.3
snowballstemmer==2.2.0
Sphinx==7.4.7
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
sqlparse==0.5.3
swebench_matterhorn @ file:///swebench_matterhorn
toml==0.10.2
tomli==2.2.1
tox==4.25.0
twine==6.1.0
typing_extensions==4.13.0
urllib3==2.3.0
virtualenv==20.29.3
zipp==3.21.0
| name: puente
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- asgiref==3.8.1
- attrs==25.3.0
- babel==2.17.0
- backports-tarfile==1.2.0
- build==1.2.2.post1
- cachetools==5.5.2
- certifi==2025.1.31
- cffi==1.17.1
- chardet==5.2.0
- charset-normalizer==3.4.1
- check-manifest==0.50
- colorama==0.4.6
- cryptography==44.0.2
- distlib==0.3.9
- django==4.2.20
- django-jinja==2.11.0
- docutils==0.21.2
- filelock==3.18.0
- id==1.5.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- jaraco-classes==3.4.0
- jaraco-context==6.0.1
- jaraco-functools==4.1.0
- jeepney==0.9.0
- jinja2==3.1.6
- keyring==25.6.0
- markdown-it-py==3.0.0
- markupsafe==3.0.2
- mdurl==0.1.2
- more-itertools==10.6.0
- nh3==0.2.21
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- py==1.11.0
- pycparser==2.22
- pygments==2.19.1
- pyproject-api==1.9.0
- pyproject-hooks==1.2.0
- pytest==6.2.5
- pytest-django==4.5.2
- pytest-pythonpath==0.7.4
- readme-renderer==44.0
- requests==2.32.3
- requests-toolbelt==1.0.0
- rfc3986==2.0.0
- rich==14.0.0
- secretstorage==3.3.3
- snowballstemmer==2.2.0
- sphinx==7.4.7
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- sqlparse==0.5.3
- swebench-matterhorn==0.0.0
- toml==0.10.2
- tomli==2.2.1
- tox==4.25.0
- twine==6.1.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/puente
| [
"tests/test_merge.py::TestMergecommand::test_missing_pot_file"
]
| [
"tests/test_merge.py::TestManageMerge::test_help",
"tests/test_merge.py::TestMergecommand::test_basic"
]
| []
| []
| BSD 3-Clause "New" or "Revised" License | 311 | [
"puente/management/commands/merge.py",
"puente/commands.py"
]
| [
"puente/management/commands/merge.py",
"puente/commands.py"
]
|
|
docker__docker-py-861 | 1ca2bc58f0cf2e2cdda2734395bd3e7ad9b178bf | 2015-11-24 02:33:41 | 1ca2bc58f0cf2e2cdda2734395bd3e7ad9b178bf | diff --git a/docker/api/image.py b/docker/api/image.py
index f891e210..8493b38d 100644
--- a/docker/api/image.py
+++ b/docker/api/image.py
@@ -158,8 +158,6 @@ class ImageApiMixin(object):
if not tag:
repository, tag = utils.parse_repository_tag(repository)
registry, repo_name = auth.resolve_repository_name(repository)
- if repo_name.count(":") == 1:
- repository, tag = repository.rsplit(":", 1)
params = {
'tag': tag,
@@ -174,7 +172,8 @@ class ImageApiMixin(object):
log.debug('Looking for auth config')
if not self._auth_configs:
log.debug(
- "No auth config in memory - loading from filesystem")
+ "No auth config in memory - loading from filesystem"
+ )
self._auth_configs = auth.load_config()
authcfg = auth.resolve_authconfig(self._auth_configs, registry)
# Do not fail here if no authentication exists for this
diff --git a/docker/auth/auth.py b/docker/auth/auth.py
index 416dd7c4..f771dedd 100644
--- a/docker/auth/auth.py
+++ b/docker/auth/auth.py
@@ -16,11 +16,9 @@ import base64
import json
import logging
import os
-import warnings
import six
-from .. import constants
from .. import errors
INDEX_NAME = 'index.docker.io'
@@ -31,31 +29,29 @@ LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
log = logging.getLogger(__name__)
-def resolve_repository_name(repo_name, insecure=False):
- if insecure:
- warnings.warn(
- constants.INSECURE_REGISTRY_DEPRECATION_WARNING.format(
- 'resolve_repository_name()'
- ), DeprecationWarning
- )
-
+def resolve_repository_name(repo_name):
if '://' in repo_name:
raise errors.InvalidRepository(
- 'Repository name cannot contain a scheme ({0})'.format(repo_name))
- parts = repo_name.split('/', 1)
- if '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost':
- # This is a docker index repo (ex: foo/bar or ubuntu)
- return INDEX_NAME, repo_name
- if len(parts) < 2:
- raise errors.InvalidRepository(
- 'Invalid repository name ({0})'.format(repo_name))
+ 'Repository name cannot contain a scheme ({0})'.format(repo_name)
+ )
- if 'index.docker.io' in parts[0]:
+ index_name, remote_name = split_repo_name(repo_name)
+ if index_name[0] == '-' or index_name[-1] == '-':
raise errors.InvalidRepository(
- 'Invalid repository name, try "{0}" instead'.format(parts[1])
+ 'Invalid index name ({0}). Cannot begin or end with a'
+ ' hyphen.'.format(index_name)
)
+ return index_name, remote_name
+
- return parts[0], parts[1]
+def split_repo_name(repo_name):
+ parts = repo_name.split('/', 1)
+ if len(parts) == 1 or (
+ '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'
+ ):
+ # This is a docker index repo (ex: username/foobar or ubuntu)
+ return INDEX_NAME, repo_name
+ return tuple(parts)
def resolve_authconfig(authconfig, registry=None):
diff --git a/docker/utils/utils.py b/docker/utils/utils.py
index 366f8696..560ee8e2 100644
--- a/docker/utils/utils.py
+++ b/docker/utils/utils.py
@@ -283,16 +283,14 @@ def convert_volume_binds(binds):
return result
-def parse_repository_tag(repo):
- column_index = repo.rfind(':')
- if column_index < 0:
- return repo, None
- tag = repo[column_index + 1:]
- slash_index = tag.find('/')
- if slash_index < 0:
- return repo[:column_index], tag
-
- return repo, None
+def parse_repository_tag(repo_name):
+ parts = repo_name.rsplit('@', 1)
+ if len(parts) == 2:
+ return tuple(parts)
+ parts = repo_name.rsplit(':', 1)
+ if len(parts) == 2 and '/' not in parts[1]:
+ return tuple(parts)
+ return repo_name, None
# Based on utils.go:ParseHost http://tinyurl.com/nkahcfh
| Can't pull images with `.` in the name
Docker images that have a `.` in their name cannot be pulled with docker-py. This is a result of:
https://github.com/docker/docker-py/blob/master/docker/auth/auth.py#L46 | docker/docker-py | diff --git a/tests/unit/auth_test.py b/tests/unit/auth_test.py
index 67830381..8e0b1d43 100644
--- a/tests/unit/auth_test.py
+++ b/tests/unit/auth_test.py
@@ -9,6 +9,7 @@ import shutil
import tempfile
from docker import auth
+from docker import errors
from .. import base
@@ -29,25 +30,31 @@ class RegressionTest(base.BaseTestCase):
assert b'_' in encoded
-class ResolveAuthTest(base.BaseTestCase):
- auth_config = {
- 'https://index.docker.io/v1/': {'auth': 'indexuser'},
- 'my.registry.net': {'auth': 'privateuser'},
- 'http://legacy.registry.url/v1/': {'auth': 'legacyauth'}
- }
-
+class ResolveRepositoryNameTest(base.BaseTestCase):
def test_resolve_repository_name_hub_library_image(self):
self.assertEqual(
auth.resolve_repository_name('image'),
('index.docker.io', 'image'),
)
+ def test_resolve_repository_name_dotted_hub_library_image(self):
+ self.assertEqual(
+ auth.resolve_repository_name('image.valid'),
+ ('index.docker.io', 'image.valid')
+ )
+
def test_resolve_repository_name_hub_image(self):
self.assertEqual(
auth.resolve_repository_name('username/image'),
('index.docker.io', 'username/image'),
)
+ def test_explicit_hub_index_library_image(self):
+ self.assertEqual(
+ auth.resolve_repository_name('index.docker.io/image'),
+ ('index.docker.io', 'image')
+ )
+
def test_resolve_repository_name_private_registry(self):
self.assertEqual(
auth.resolve_repository_name('my.registry.net/image'),
@@ -90,6 +97,20 @@ class ResolveAuthTest(base.BaseTestCase):
('localhost', 'username/image'),
)
+ def test_invalid_index_name(self):
+ self.assertRaises(
+ errors.InvalidRepository,
+ lambda: auth.resolve_repository_name('-gecko.com/image')
+ )
+
+
+class ResolveAuthTest(base.BaseTestCase):
+ auth_config = {
+ 'https://index.docker.io/v1/': {'auth': 'indexuser'},
+ 'my.registry.net': {'auth': 'privateuser'},
+ 'http://legacy.registry.url/v1/': {'auth': 'legacyauth'}
+ }
+
def test_resolve_authconfig_hostname_only(self):
self.assertEqual(
auth.resolve_authconfig(self.auth_config, 'my.registry.net'),
diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py
index 3c9f6e2f..57ad4435 100644
--- a/tests/unit/utils_test.py
+++ b/tests/unit/utils_test.py
@@ -352,23 +352,55 @@ class ParseHostTest(base.BaseTestCase):
assert parse_host(val, 'win32') == tcp_port
+class ParseRepositoryTagTest(base.BaseTestCase):
+ sha = 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
+
+ def test_index_image_no_tag(self):
+ self.assertEqual(
+ parse_repository_tag("root"), ("root", None)
+ )
+
+ def test_index_image_tag(self):
+ self.assertEqual(
+ parse_repository_tag("root:tag"), ("root", "tag")
+ )
+
+ def test_index_user_image_no_tag(self):
+ self.assertEqual(
+ parse_repository_tag("user/repo"), ("user/repo", None)
+ )
+
+ def test_index_user_image_tag(self):
+ self.assertEqual(
+ parse_repository_tag("user/repo:tag"), ("user/repo", "tag")
+ )
+
+ def test_private_reg_image_no_tag(self):
+ self.assertEqual(
+ parse_repository_tag("url:5000/repo"), ("url:5000/repo", None)
+ )
+
+ def test_private_reg_image_tag(self):
+ self.assertEqual(
+ parse_repository_tag("url:5000/repo:tag"), ("url:5000/repo", "tag")
+ )
+
+ def test_index_image_sha(self):
+ self.assertEqual(
+ parse_repository_tag("root@sha256:{0}".format(self.sha)),
+ ("root", "sha256:{0}".format(self.sha))
+ )
+
+ def test_private_reg_image_sha(self):
+ self.assertEqual(
+ parse_repository_tag("url:5000/repo@sha256:{0}".format(self.sha)),
+ ("url:5000/repo", "sha256:{0}".format(self.sha))
+ )
+
+
class UtilsTest(base.BaseTestCase):
longMessage = True
- def test_parse_repository_tag(self):
- self.assertEqual(parse_repository_tag("root"),
- ("root", None))
- self.assertEqual(parse_repository_tag("root:tag"),
- ("root", "tag"))
- self.assertEqual(parse_repository_tag("user/repo"),
- ("user/repo", None))
- self.assertEqual(parse_repository_tag("user/repo:tag"),
- ("user/repo", "tag"))
- self.assertEqual(parse_repository_tag("url:5000/repo"),
- ("url:5000/repo", None))
- self.assertEqual(parse_repository_tag("url:5000/repo:tag"),
- ("url:5000/repo", "tag"))
-
def test_parse_bytes(self):
self.assertEqual(parse_bytes("512MB"), (536870912))
self.assertEqual(parse_bytes("512M"), (536870912))
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 3,
"test_score": 3
},
"num_modified_files": 3
} | 1.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"flake8"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
-e git+https://github.com/docker/docker-py.git@1ca2bc58f0cf2e2cdda2734395bd3e7ad9b178bf#egg=docker_py
exceptiongroup==1.2.2
flake8==7.2.0
iniconfig==2.1.0
mccabe==0.7.0
packaging==24.2
pluggy==1.5.0
pycodestyle==2.13.0
pyflakes==3.3.2
pytest==8.3.5
pytest-cov==6.0.0
requests==2.5.3
six==1.17.0
tomli==2.2.1
websocket_client==0.32.0
| name: docker-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- flake8==7.2.0
- iniconfig==2.1.0
- mccabe==0.7.0
- packaging==24.2
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.2
- pytest==8.3.5
- pytest-cov==6.0.0
- requests==2.5.3
- six==1.17.0
- tomli==2.2.1
- websocket-client==0.32.0
prefix: /opt/conda/envs/docker-py
| [
"tests/unit/auth_test.py::ResolveRepositoryNameTest::test_explicit_hub_index_library_image",
"tests/unit/auth_test.py::ResolveRepositoryNameTest::test_invalid_index_name",
"tests/unit/auth_test.py::ResolveRepositoryNameTest::test_resolve_repository_name_dotted_hub_library_image",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_image_sha",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_private_reg_image_sha"
]
| []
| [
"tests/unit/auth_test.py::RegressionTest::test_803_urlsafe_encode",
"tests/unit/auth_test.py::ResolveRepositoryNameTest::test_resolve_repository_name_hub_image",
"tests/unit/auth_test.py::ResolveRepositoryNameTest::test_resolve_repository_name_hub_library_image",
"tests/unit/auth_test.py::ResolveRepositoryNameTest::test_resolve_repository_name_localhost",
"tests/unit/auth_test.py::ResolveRepositoryNameTest::test_resolve_repository_name_localhost_with_username",
"tests/unit/auth_test.py::ResolveRepositoryNameTest::test_resolve_repository_name_no_dots_but_port",
"tests/unit/auth_test.py::ResolveRepositoryNameTest::test_resolve_repository_name_no_dots_but_port_and_username",
"tests/unit/auth_test.py::ResolveRepositoryNameTest::test_resolve_repository_name_private_registry",
"tests/unit/auth_test.py::ResolveRepositoryNameTest::test_resolve_repository_name_private_registry_with_port",
"tests/unit/auth_test.py::ResolveRepositoryNameTest::test_resolve_repository_name_private_registry_with_username",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_default_explicit_none",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_default_registry",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_fully_explicit",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_hostname_only",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_legacy_config",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_no_match",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_no_path",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_no_path_trailing_slash",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_no_path_wrong_insecure_proto",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_no_path_wrong_secure_proto",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_no_protocol",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_path_wrong_proto",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_registry_and_auth_hub_image",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_registry_and_auth_library_image",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_registry_and_auth_private_registry",
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_registry_and_auth_unauthenticated_registry",
"tests/unit/auth_test.py::LoadConfigTest::test_load_config",
"tests/unit/auth_test.py::LoadConfigTest::test_load_config_custom_config_env",
"tests/unit/auth_test.py::LoadConfigTest::test_load_config_custom_config_env_utf8",
"tests/unit/auth_test.py::LoadConfigTest::test_load_config_custom_config_env_with_auths",
"tests/unit/auth_test.py::LoadConfigTest::test_load_config_no_file",
"tests/unit/auth_test.py::LoadConfigTest::test_load_config_with_random_name",
"tests/unit/utils_test.py::HostConfigTest::test_create_host_config_invalid_cpu_cfs_types",
"tests/unit/utils_test.py::HostConfigTest::test_create_host_config_no_options",
"tests/unit/utils_test.py::HostConfigTest::test_create_host_config_no_options_newer_api_version",
"tests/unit/utils_test.py::HostConfigTest::test_create_host_config_with_cpu_period",
"tests/unit/utils_test.py::HostConfigTest::test_create_host_config_with_cpu_quota",
"tests/unit/utils_test.py::HostConfigTest::test_create_host_config_with_oom_kill_disable",
"tests/unit/utils_test.py::UlimitTest::test_create_host_config_dict_ulimit",
"tests/unit/utils_test.py::UlimitTest::test_create_host_config_dict_ulimit_capitals",
"tests/unit/utils_test.py::UlimitTest::test_create_host_config_obj_ulimit",
"tests/unit/utils_test.py::UlimitTest::test_ulimit_invalid_type",
"tests/unit/utils_test.py::LogConfigTest::test_create_host_config_dict_logconfig",
"tests/unit/utils_test.py::LogConfigTest::test_create_host_config_obj_logconfig",
"tests/unit/utils_test.py::LogConfigTest::test_logconfig_invalid_config_type",
"tests/unit/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_empty",
"tests/unit/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_no_cert_path",
"tests/unit/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_tls",
"tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_compact",
"tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_complete",
"tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_empty",
"tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_list",
"tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_no_mode",
"tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_unicode_bytes_input",
"tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_unicode_unicode_input",
"tests/unit/utils_test.py::ParseEnvFileTest::test_parse_env_file_commented_line",
"tests/unit/utils_test.py::ParseEnvFileTest::test_parse_env_file_invalid_line",
"tests/unit/utils_test.py::ParseEnvFileTest::test_parse_env_file_proper",
"tests/unit/utils_test.py::ParseHostTest::test_parse_host",
"tests/unit/utils_test.py::ParseHostTest::test_parse_host_empty_value",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_image_no_tag",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_image_tag",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_user_image_no_tag",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_user_image_tag",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_private_reg_image_no_tag",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_private_reg_image_tag",
"tests/unit/utils_test.py::UtilsTest::test_convert_filters",
"tests/unit/utils_test.py::UtilsTest::test_decode_json_header",
"tests/unit/utils_test.py::UtilsTest::test_parse_bytes",
"tests/unit/utils_test.py::SplitCommandTest::test_split_command_with_unicode",
"tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_matching_internal_port_ranges",
"tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_matching_internal_ports",
"tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_nonmatching_internal_port_ranges",
"tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_nonmatching_internal_ports",
"tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_one_port",
"tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_port_range",
"tests/unit/utils_test.py::PortsTest::test_host_only_with_colon",
"tests/unit/utils_test.py::PortsTest::test_non_matching_length_port_ranges",
"tests/unit/utils_test.py::PortsTest::test_port_and_range_invalid",
"tests/unit/utils_test.py::PortsTest::test_port_only_with_colon",
"tests/unit/utils_test.py::PortsTest::test_split_port_invalid",
"tests/unit/utils_test.py::PortsTest::test_split_port_no_host_port",
"tests/unit/utils_test.py::PortsTest::test_split_port_range_no_host_port",
"tests/unit/utils_test.py::PortsTest::test_split_port_range_with_host_ip_no_port",
"tests/unit/utils_test.py::PortsTest::test_split_port_range_with_host_port",
"tests/unit/utils_test.py::PortsTest::test_split_port_range_with_protocol",
"tests/unit/utils_test.py::PortsTest::test_split_port_with_host_ip",
"tests/unit/utils_test.py::PortsTest::test_split_port_with_host_ip_no_port",
"tests/unit/utils_test.py::PortsTest::test_split_port_with_host_port",
"tests/unit/utils_test.py::PortsTest::test_split_port_with_protocol",
"tests/unit/utils_test.py::ExcludePathsTest::test_directory",
"tests/unit/utils_test.py::ExcludePathsTest::test_directory_with_single_exception",
"tests/unit/utils_test.py::ExcludePathsTest::test_directory_with_subdir_exception",
"tests/unit/utils_test.py::ExcludePathsTest::test_directory_with_trailing_slash",
"tests/unit/utils_test.py::ExcludePathsTest::test_directory_with_wildcard_exception",
"tests/unit/utils_test.py::ExcludePathsTest::test_exclude_custom_dockerfile",
"tests/unit/utils_test.py::ExcludePathsTest::test_exclude_dockerfile_dockerignore",
"tests/unit/utils_test.py::ExcludePathsTest::test_no_dupes",
"tests/unit/utils_test.py::ExcludePathsTest::test_no_excludes",
"tests/unit/utils_test.py::ExcludePathsTest::test_question_mark",
"tests/unit/utils_test.py::ExcludePathsTest::test_single_filename",
"tests/unit/utils_test.py::ExcludePathsTest::test_single_filename_trailing_slash",
"tests/unit/utils_test.py::ExcludePathsTest::test_single_subdir_single_filename",
"tests/unit/utils_test.py::ExcludePathsTest::test_single_subdir_wildcard_filename",
"tests/unit/utils_test.py::ExcludePathsTest::test_subdirectory",
"tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_exclude",
"tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_filename_end",
"tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_filename_start",
"tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_subdir_single_filename",
"tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_subdir_wildcard_filename",
"tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_with_exception",
"tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_with_wildcard_exception",
"tests/unit/utils_test.py::TarTest::test_tar_with_directory_symlinks",
"tests/unit/utils_test.py::TarTest::test_tar_with_empty_directory",
"tests/unit/utils_test.py::TarTest::test_tar_with_excludes",
"tests/unit/utils_test.py::TarTest::test_tar_with_file_symlinks"
]
| []
| Apache License 2.0 | 313 | [
"docker/auth/auth.py",
"docker/utils/utils.py",
"docker/api/image.py"
]
| [
"docker/auth/auth.py",
"docker/utils/utils.py",
"docker/api/image.py"
]
|
|
docker__docker-py-863 | 28864df27b2cf289478d5fa9d5ca27a9f0daa9a8 | 2015-11-24 12:06:48 | 2f2d50d0c7be5882b150f6ff3bae31d469720e5b | aanand: Nice. I agree with @kanzure's comments, plus it'd be good to document the logic of `should_include`.
thomasboyt: @aanand cool, added some docs/comments!
aanand: Thanks! I think this can be squashed to one commit. | diff --git a/docker/utils/utils.py b/docker/utils/utils.py
index 9c4bb477..762b39a4 100644
--- a/docker/utils/utils.py
+++ b/docker/utils/utils.py
@@ -107,38 +107,68 @@ def exclude_paths(root, patterns, dockerfile=None):
    exclude_patterns = list(set(patterns) - set(exceptions))
-    all_paths = get_paths(root)
-
-    # Remove all paths that are matched by any exclusion pattern
-    paths = [
-        p for p in all_paths
-        if not any(match_path(p, pattern) for pattern in exclude_patterns)
-    ]
-
-    # Add back the set of paths that are matched by any inclusion pattern.
-    # Include parent dirs - if we add back 'foo/bar', add 'foo' as well
-    for p in all_paths:
-        if any(match_path(p, pattern) for pattern in include_patterns):
-            components = p.split('/')
-            paths += [
-                '/'.join(components[:end])
-                for end in range(1, len(components) + 1)
-            ]
    return set(paths)
-def get_paths(root):
+def should_include(path, exclude_patterns, include_patterns):
+    """
+    Given a path, a list of exclude patterns, and a list of inclusion patterns:
+
+    1. Returns True if the path doesn't match any exclusion pattern
+    2. Returns False if the path matches an exclusion pattern and doesn't match
+       an inclusion pattern
+    3. Returns True if the path matches an exclusion pattern and matches an
+       inclusion pattern
+    """
+    for pattern in exclude_patterns:
+        if match_path(path, pattern):
+            for pattern in include_patterns:
+                if match_path(path, pattern):
+                    return True
+            return False
+    return True
+
+
+def get_paths(root, exclude_patterns, include_patterns, has_exceptions=False):
    paths = []
-    for parent, dirs, files in os.walk(root, followlinks=False):
+    for parent, dirs, files in os.walk(root, topdown=True, followlinks=False):
        parent = os.path.relpath(parent, root)
        if parent == '.':
            parent = ''
+
+        # If exception rules exist, we can't skip recursing into ignored
+        # directories, as we need to look for exceptions in them.
+        #
+        # It may be possible to optimize this further for exception patterns
+        # that *couldn't* match within ignored directories.
+        #
+        # This matches the current docker logic (as of 2015-11-24):
+        # https://github.com/docker/docker/blob/37ba67bf636b34dc5c0c0265d62a089d0492088f/pkg/archive/archive.go#L555-L557
+
+        if not has_exceptions:
+
+            # Remove excluded patterns from the list of directories to traverse
+            # by mutating the dirs we're iterating over.
+            # This looks strange, but is considered the correct way to skip
+            # traversal. See https://docs.python.org/2/library/os.html#os.walk
+
+            dirs[:] = [d for d in dirs if
+                       should_include(os.path.join(parent, d),
+                                      exclude_patterns, include_patterns)]
+
+        for path in dirs:
-            paths.append(os.path.join(parent, path))
+            if should_include(os.path.join(parent, path),
+                              exclude_patterns, include_patterns):
+                paths.append(os.path.join(parent, path))
+
+        for path in files:
-            paths.append(os.path.join(parent, path))
+            if should_include(os.path.join(parent, path),
+                              exclude_patterns, include_patterns):
+                paths.append(os.path.join(parent, path))
    return paths
| dockerignore implementation is relatively slow compared to Docker's implementation
I ran into an issue in a project where my builds, run through `docker-compose`, seemed to be taking an awfully long time (around 60 seconds) during the context build/upload stage. `strace` showed that a ton of time was being spent `stat()`ing files that were matched by my `.dockerignore` rules, which I found curious.
Oddly, when I simply used `docker build` to build the container, I didn't have this issue, and the context build/upload took about 3-5 seconds. I couldn't figure out what was going wrong, so I investigated `docker-py` and found that almost all of my execution time was spent in [this `get_paths` call](https://github.com/docker/docker-py/blob/master/docker/utils/utils.py#L110).
It appears that the difference in execution time is because docker-py's implementation of dockerignore/tar exclusion is far slower than Docker's:
Docker's implementation of the dockerignore exclusion algorithm (seen [here](https://github.com/docker/docker/blob/master/pkg/archive/archive.go#L518)) walks through each folder, but [does not descend into a directory if it matches an exclusion pattern](https://github.com/docker/docker/blob/master/pkg/archive/archive.go#L556). Meanwhile, docker-py first gets an array of *every single file in the context folder* and then applies a filter to the array. This seems to be what is causing the massive difference in execution time when I build my project: docker-py is iterating over thousands of files that Docker correctly ignores.
I started on a fix, using what I believe are the same rules as Docker's algorithm: https://github.com/thomasboyt/docker-py/commit/9f302f6721bb8492140cf5b218a80d62a2b62e19
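The core of that approach, as a rough sketch (illustration only: `fnmatch` stands in for docker-py's `match_path`, and `!` exception rules are ignored entirely):

```python
import fnmatch
import os


def get_paths_pruned(root, exclude_patterns):
    """Walk ``root``, skipping excluded directories instead of filtering later."""
    paths = []
    for parent, dirs, files in os.walk(root, topdown=True):
        rel = os.path.relpath(parent, root)
        rel = '' if rel == '.' else rel
        # Pruning ``dirs`` in place tells os.walk not to descend into the
        # removed directories; this is where the time savings come from.
        dirs[:] = [
            d for d in dirs
            if not any(fnmatch.fnmatch(os.path.join(rel, d), pat)
                       for pat in exclude_patterns)
        ]
        for name in dirs + files:
            path = os.path.join(rel, name)
            if not any(fnmatch.fnmatch(path, pat) for pat in exclude_patterns):
                paths.append(path)
    return paths
```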
This runs just as fast as Docker's implementation, but doesn't fully implement exception rules (e.g. `!foo`), leading it to fail a few tests. Before I go through and add this feature, I wanted to confirm that I'm on the right path (and that no one else has a better solution/algorithm to apply). | docker/docker-py | diff --git a/tests/integration/build_test.py b/tests/integration/build_test.py
index 011ddc3e..26164ae0 100644
--- a/tests/integration/build_test.py
+++ b/tests/integration/build_test.py
@@ -65,6 +65,7 @@ class BuildTest(helpers.BaseTestCase):
'ignored',
'Dockerfile',
'.dockerignore',
+ '!ignored/subdir/excepted-file',
'', # empty line
]))
@@ -76,6 +77,9 @@ class BuildTest(helpers.BaseTestCase):
with open(os.path.join(subdir, 'file'), 'w') as f:
f.write("this file should be ignored")
+ with open(os.path.join(subdir, 'excepted-file'), 'w') as f:
+ f.write("this file should not be ignored")
+
tag = 'docker-py-test-build-with-dockerignore'
stream = self.client.build(
path=base_dir,
@@ -84,7 +88,7 @@ class BuildTest(helpers.BaseTestCase):
for chunk in stream:
pass
- c = self.client.create_container(tag, ['ls', '-1A', '/test'])
+ c = self.client.create_container(tag, ['find', '/test', '-type', 'f'])
self.client.start(c)
self.client.wait(c)
logs = self.client.logs(c)
@@ -93,8 +97,9 @@ class BuildTest(helpers.BaseTestCase):
logs = logs.decode('utf-8')
self.assertEqual(
- list(filter(None, logs.split('\n'))),
- ['not-ignored'],
+ sorted(list(filter(None, logs.split('\n')))),
+ sorted(['/test/ignored/subdir/excepted-file',
+ '/test/not-ignored']),
)
@requires_api_version('1.21')
diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py
index 57ad4435..a68e1e78 100644
--- a/tests/unit/utils_test.py
+++ b/tests/unit/utils_test.py
@@ -671,17 +671,17 @@ class ExcludePathsTest(base.BaseTestCase):
def test_directory_with_single_exception(self):
assert self.exclude(['foo', '!foo/bar/a.py']) == self.all_paths - set([
- 'foo/a.py', 'foo/b.py',
+ 'foo/a.py', 'foo/b.py', 'foo', 'foo/bar'
])
def test_directory_with_subdir_exception(self):
assert self.exclude(['foo', '!foo/bar']) == self.all_paths - set([
- 'foo/a.py', 'foo/b.py',
+ 'foo/a.py', 'foo/b.py', 'foo'
])
def test_directory_with_wildcard_exception(self):
assert self.exclude(['foo', '!foo/*.py']) == self.all_paths - set([
- 'foo/bar', 'foo/bar/a.py',
+ 'foo/bar', 'foo/bar/a.py', 'foo'
])
def test_subdirectory(self):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 1
} | 1.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
-e git+https://github.com/docker/docker-py.git@28864df27b2cf289478d5fa9d5ca27a9f0daa9a8#egg=docker_py
exceptiongroup==1.2.2
execnet==2.1.1
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
requests==2.5.3
six==1.17.0
tomli==2.2.1
typing_extensions==4.13.0
websocket_client==0.32.0
| name: docker-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- requests==2.5.3
- six==1.17.0
- tomli==2.2.1
- typing-extensions==4.13.0
- websocket-client==0.32.0
prefix: /opt/conda/envs/docker-py
| [
"tests/unit/utils_test.py::ExcludePathsTest::test_directory_with_single_exception",
"tests/unit/utils_test.py::ExcludePathsTest::test_directory_with_subdir_exception",
"tests/unit/utils_test.py::ExcludePathsTest::test_directory_with_wildcard_exception"
]
| []
| [
"tests/unit/utils_test.py::HostConfigTest::test_create_host_config_invalid_cpu_cfs_types",
"tests/unit/utils_test.py::HostConfigTest::test_create_host_config_no_options",
"tests/unit/utils_test.py::HostConfigTest::test_create_host_config_no_options_newer_api_version",
"tests/unit/utils_test.py::HostConfigTest::test_create_host_config_with_cpu_period",
"tests/unit/utils_test.py::HostConfigTest::test_create_host_config_with_cpu_quota",
"tests/unit/utils_test.py::HostConfigTest::test_create_host_config_with_oom_kill_disable",
"tests/unit/utils_test.py::UlimitTest::test_create_host_config_dict_ulimit",
"tests/unit/utils_test.py::UlimitTest::test_create_host_config_dict_ulimit_capitals",
"tests/unit/utils_test.py::UlimitTest::test_create_host_config_obj_ulimit",
"tests/unit/utils_test.py::UlimitTest::test_ulimit_invalid_type",
"tests/unit/utils_test.py::LogConfigTest::test_create_host_config_dict_logconfig",
"tests/unit/utils_test.py::LogConfigTest::test_create_host_config_obj_logconfig",
"tests/unit/utils_test.py::LogConfigTest::test_logconfig_invalid_config_type",
"tests/unit/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_empty",
"tests/unit/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_no_cert_path",
"tests/unit/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_tls",
"tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_compact",
"tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_complete",
"tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_empty",
"tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_list",
"tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_no_mode",
"tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_unicode_bytes_input",
"tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_unicode_unicode_input",
"tests/unit/utils_test.py::ParseEnvFileTest::test_parse_env_file_commented_line",
"tests/unit/utils_test.py::ParseEnvFileTest::test_parse_env_file_invalid_line",
"tests/unit/utils_test.py::ParseEnvFileTest::test_parse_env_file_proper",
"tests/unit/utils_test.py::ParseHostTest::test_parse_host",
"tests/unit/utils_test.py::ParseHostTest::test_parse_host_empty_value",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_image_no_tag",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_image_sha",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_image_tag",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_user_image_no_tag",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_user_image_tag",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_private_reg_image_no_tag",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_private_reg_image_sha",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_private_reg_image_tag",
"tests/unit/utils_test.py::UtilsTest::test_convert_filters",
"tests/unit/utils_test.py::UtilsTest::test_decode_json_header",
"tests/unit/utils_test.py::UtilsTest::test_parse_bytes",
"tests/unit/utils_test.py::SplitCommandTest::test_split_command_with_unicode",
"tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_matching_internal_port_ranges",
"tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_matching_internal_ports",
"tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_nonmatching_internal_port_ranges",
"tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_nonmatching_internal_ports",
"tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_one_port",
"tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_port_range",
"tests/unit/utils_test.py::PortsTest::test_host_only_with_colon",
"tests/unit/utils_test.py::PortsTest::test_non_matching_length_port_ranges",
"tests/unit/utils_test.py::PortsTest::test_port_and_range_invalid",
"tests/unit/utils_test.py::PortsTest::test_port_only_with_colon",
"tests/unit/utils_test.py::PortsTest::test_split_port_invalid",
"tests/unit/utils_test.py::PortsTest::test_split_port_no_host_port",
"tests/unit/utils_test.py::PortsTest::test_split_port_range_no_host_port",
"tests/unit/utils_test.py::PortsTest::test_split_port_range_with_host_ip_no_port",
"tests/unit/utils_test.py::PortsTest::test_split_port_range_with_host_port",
"tests/unit/utils_test.py::PortsTest::test_split_port_range_with_protocol",
"tests/unit/utils_test.py::PortsTest::test_split_port_with_host_ip",
"tests/unit/utils_test.py::PortsTest::test_split_port_with_host_ip_no_port",
"tests/unit/utils_test.py::PortsTest::test_split_port_with_host_port",
"tests/unit/utils_test.py::PortsTest::test_split_port_with_protocol",
"tests/unit/utils_test.py::ExcludePathsTest::test_directory",
"tests/unit/utils_test.py::ExcludePathsTest::test_directory_with_trailing_slash",
"tests/unit/utils_test.py::ExcludePathsTest::test_exclude_custom_dockerfile",
"tests/unit/utils_test.py::ExcludePathsTest::test_exclude_dockerfile_dockerignore",
"tests/unit/utils_test.py::ExcludePathsTest::test_no_dupes",
"tests/unit/utils_test.py::ExcludePathsTest::test_no_excludes",
"tests/unit/utils_test.py::ExcludePathsTest::test_question_mark",
"tests/unit/utils_test.py::ExcludePathsTest::test_single_filename",
"tests/unit/utils_test.py::ExcludePathsTest::test_single_filename_trailing_slash",
"tests/unit/utils_test.py::ExcludePathsTest::test_single_subdir_single_filename",
"tests/unit/utils_test.py::ExcludePathsTest::test_single_subdir_wildcard_filename",
"tests/unit/utils_test.py::ExcludePathsTest::test_subdirectory",
"tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_exclude",
"tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_filename_end",
"tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_filename_start",
"tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_subdir_single_filename",
"tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_subdir_wildcard_filename",
"tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_with_exception",
"tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_with_wildcard_exception",
"tests/unit/utils_test.py::TarTest::test_tar_with_directory_symlinks",
"tests/unit/utils_test.py::TarTest::test_tar_with_empty_directory",
"tests/unit/utils_test.py::TarTest::test_tar_with_excludes",
"tests/unit/utils_test.py::TarTest::test_tar_with_file_symlinks"
]
| []
| Apache License 2.0 | 314 | [
"docker/utils/utils.py"
]
| [
"docker/utils/utils.py"
]
|
getlogbook__logbook-176 | f4d4d9309d0a0ce097cfa52f0f3dad6280d7f2e3 | 2015-11-26 23:39:36 | bb0f4fbeec318a140780b1ac8781599474cf2666 | diff --git a/logbook/compat.py b/logbook/compat.py
index c3896db..b65ac00 100644
--- a/logbook/compat.py
+++ b/logbook/compat.py
@@ -9,12 +9,13 @@
:copyright: (c) 2010 by Armin Ronacher, Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
-import sys
+import collections
import logging
+import sys
import warnings
-import logbook
from datetime import date, datetime
+import logbook
from logbook.helpers import u, string_types, iteritems
_epoch_ord = date(1970, 1, 1).toordinal()
@@ -63,8 +64,12 @@ class redirected_logging(object):
class LoggingCompatRecord(logbook.LogRecord):
def _format_message(self, msg, *args, **kwargs):
-        assert not kwargs
-        return msg % tuple(args)
+        if kwargs:
+            assert not args
+            return msg % kwargs
+        else:
+            assert not kwargs
+            return msg % tuple(args)
class RedirectLoggingHandler(logging.Handler):
@@ -124,10 +129,17 @@ class RedirectLoggingHandler(logging.Handler):
def convert_record(self, old_record):
"""Converts an old logging record into a logbook log record."""
+        args = old_record.args
+        kwargs = None
+
+        # Logging allows passing a mapping object, in which case args will be a mapping.
+        if isinstance(args, collections.Mapping):
+            kwargs = args
+            args = None
        record = LoggingCompatRecord(old_record.name,
                                     self.convert_level(old_record.levelno),
-                                    old_record.msg, old_record.args,
-                                    None, old_record.exc_info,
+                                    old_record.msg, args,
+                                    kwargs, old_record.exc_info,
                                     self.find_extra(old_record),
                                     self.find_caller(old_record))
record.time = self.convert_time(old_record.created)
| Exception in LoggingCompatRecord for mapping keys
Example:
```
logger = logging.getLogger("test")
logger.setLevel("DEBUG")
logger.addHandler(RedirectLoggingHandler())
with logbook.StderrHandler():
logger.debug("test map %(name)s", {"name": "mapname"})
# raise exception in LoggingCompatRecord:
#Traceback (most recent call last):
# File "D:\bin\Python34\lib\site-packages\logbook\base.py", line 515, in message
# return self._format_message(self.msg, *self.args, **self.kwargs)
# File "D:\bin\Python34\lib\site-packages\logbook\compat.py", line 66, in _format_message
# return msg % tuple(args)
# TypeError: format requires a mapping
```
Quoting from `logging/__init__.py`:
```python
#
# The following statement allows passing of a dictionary as a sole
# argument, so that you can do something like
# logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
# Suggested by Stefan Behnel.
# Note that without the test for args[0], we get a problem because
# during formatting, we test to see if the arg is present using
# 'if self.args:'. If the event being logged is e.g. 'Value is %d'
# and if the passed arg fails 'if self.args:' then no formatting
# is done. For example, logger.warning('Value is %d', 0) would log
# 'Value is %d' instead of 'Value is 0'.
# For the use case of passing a dictionary, this should not be a
# problem.
# Issue #21172: a request was made to relax the isinstance check
# to hasattr(args[0], '__getitem__'). However, the docs on string
# formatting still seem to suggest a mapping object is required.
# Thus, while not removing the isinstance check, it does now look
# for collections.Mapping rather than, as before, dict.
if (args and len(args) == 1 and isinstance(args[0], collections.Mapping)
        and args[0]):
    args = args[0]
```
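For reference, the formatting behavior at issue can be shown in plain Python, with no logging or logbook involved (the old `_format_message` always did `msg % tuple(args)`):

```python
template = 'test map %(name)s'

print(template % {'name': 'mapname'})  # -> test map mapname

try:
    template % ('mapname',)  # what `msg % tuple(args)` effectively did
except TypeError as exc:
    print(exc)  # -> format requires a mapping
```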
| getlogbook/logbook | diff --git a/tests/test_logging_compat.py b/tests/test_logging_compat.py
index 48dfebe..31fdd40 100644
--- a/tests/test_logging_compat.py
+++ b/tests/test_logging_compat.py
@@ -36,8 +36,11 @@ def test_basic_compat(request, set_root_logger_level):
logger.warn('This is from the old %s', 'system')
logger.error('This is from the old system')
logger.critical('This is from the old system')
+ logger.error('This is a %(what)s %(where)s', {'what': 'mapping', 'where': 'test'})
assert ('WARNING: %s: This is from the old system' %
name) in captured.getvalue()
+ assert ('ERROR: %s: This is a mapping test' %
+ name) in captured.getvalue()
if set_root_logger_level:
assert handler.records[0].level == logbook.DEBUG
else:
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 1
} | 0.12 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[all]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | async-timeout==5.0.1
Cython==3.0.12
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
execnet==2.1.1
greenlet==3.1.1
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.1.6
-e git+https://github.com/getlogbook/logbook.git@f4d4d9309d0a0ce097cfa52f0f3dad6280d7f2e3#egg=Logbook
MarkupSafe==3.0.2
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
pyzmq==26.3.0
redis==5.2.1
SQLAlchemy==2.0.40
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
typing_extensions==4.13.0
| name: logbook
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- async-timeout==5.0.1
- cython==3.0.12
- execnet==2.1.1
- greenlet==3.1.1
- jinja2==3.1.6
- markupsafe==3.0.2
- pyzmq==26.3.0
- redis==5.2.1
- sqlalchemy==2.0.40
- typing-extensions==4.13.0
prefix: /opt/conda/envs/logbook
| [
"tests/test_logging_compat.py::test_basic_compat[True]",
"tests/test_logging_compat.py::test_basic_compat[False]"
]
| []
| [
"tests/test_logging_compat.py::test_redirect_logbook",
"tests/test_logging_compat.py::test_warning_redirections"
]
| []
| BSD License | 315 | [
"logbook/compat.py"
]
| [
"logbook/compat.py"
]
|
|
mapbox__mapbox-sdk-py-76 | ab45a8e1a40b5ecbe9e6c59002883e291856dcc9 | 2015-11-30 13:29:16 | 06728ffc30fba83003e9c76645ecec3eec1c63de | diff --git a/.travis.yml b/.travis.yml
index d99e41d..e323672 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -15,6 +15,7 @@ env:
install:
- pip install -U pip --cache-dir $HOME/.pip-cache
- pip install -e .[test] --cache-dir $HOME/.pip-cache
+ - if [[ $TRAVIS_PYTHON_VERSION == 2.6 ]]; then pip install ordereddict; fi
script:
- py.test --cov mapbox --cov-report term-missing
- py.test --doctest-glob='*.md' docs/*.md
diff --git a/README.rst b/README.rst
index 61fc790..e15a4c9 100644
--- a/README.rst
+++ b/README.rst
@@ -31,6 +31,11 @@ Services
- Forward (place names ⇢ longitude, latitude)
- Reverse (longitude, latitude ⇢ place names)
+- `Static Maps <https://www.mapbox.com/developers/api/static/>`__
+
+ - Generate standalone images from existing Mapbox mapids
+ - Render with GeoJSON overlays
+
- `Surface <https://www.mapbox.com/developers/api/surface/>`__
- Interpolates values along lines. Useful for elevation traces.
@@ -138,6 +143,20 @@ which returns::
See ``import mapbox; help(mapbox.Distance)`` for more detailed usage.
+Static Maps
+-----------
+Static maps are standalone images that can be displayed on web and mobile devices without the aid of a mapping library or API. Static maps can display GeoJSON overlays, and `simplestyle-spec <https://github.com/mapbox/simplestyle-spec>`_ styles will be respected and rendered.
+
+.. code:: python
+
+ from mapbox import Static
+ res = Static().image('mapbox.satellite',
+ lon=-61.7, lat=12.1, z=12,
+ features=list_of_points)
+
+ with open('map.png', 'wb') as output:
+ output.write(res.content)
+
Surface
-------
diff --git a/mapbox/__init__.py b/mapbox/__init__.py
index 5743280..5decf79 100644
--- a/mapbox/__init__.py
+++ b/mapbox/__init__.py
@@ -7,3 +7,4 @@ from .services.distance import Distance
from .services.geocoding import Geocoder, InvalidPlaceTypeError
from .services.surface import Surface
from .services.uploads import Uploader
+from .services.static import Static
diff --git a/mapbox/services/static.py b/mapbox/services/static.py
new file mode 100644
index 0000000..8e9febe
--- /dev/null
+++ b/mapbox/services/static.py
@@ -0,0 +1,58 @@
+import json
+
+from uritemplate import URITemplate
+
+from mapbox.services.base import Service
+
+
+class Static(Service):
+
+    def __init__(self, access_token=None):
+        self.baseuri = 'https://api.mapbox.com/v4'
+        self.session = self.get_session(access_token)
+
+    def image(self, mapid, lon=None, lat=None, z=None, features=None,
+              width=600, height=600, image_format='png256'):
+
+        if lon and lat and z:
+            auto = False
+        else:
+            auto = True
+
+        values = dict(
+            mapid=mapid,
+            lon=str(lon),
+            lat=str(lat),
+            z=str(z),
+            width=str(width),
+            height=str(height),
+            format=image_format)
+
+        if features:
+            values['overlay'] = json.dumps({'type': 'FeatureCollection',
+                                            'features': features})
+
+            if len(values['overlay']) > 4087:  # limit is 4096 minus the 'geojson()'
+                raise ValueError("geojson is too large for the static maps API, "
+                                 "must be less than 4096 characters")
+
+            if auto:
+                uri = URITemplate(
+                    '%s/{mapid}/geojson({overlay})/auto/{width}x{height}.{format}' %
+                    self.baseuri).expand(**values)
+            else:
+                uri = URITemplate(
+                    '%s/{mapid}/geojson({overlay})/{lon},{lat},{z}/{width}x{height}.{format}' %
+                    self.baseuri).expand(**values)
+        else:
+            if auto:
+                raise ValueError("Must provide features if lat, lon, z are None")
+
+            # No overlay
+            uri = URITemplate(
+                '%s/{mapid}/{lon},{lat},{z}/{width}x{height}.{format}' %
+                self.baseuri).expand(**values)
+
+        res = self.session.get(uri)
+        self.handle_http_error(res)
+        return res
| Static Map API | mapbox/mapbox-sdk-py | diff --git a/tests/test_staticmaps.py b/tests/test_staticmaps.py
new file mode 100644
index 0000000..548ab2f
--- /dev/null
+++ b/tests/test_staticmaps.py
@@ -0,0 +1,99 @@
+import json
+
+try:
+ from urllib import quote
+except ImportError:
+ # python 3
+ from urllib.parse import quote
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ # python 2.6
+ from ordereddict import OrderedDict
+
+import pytest
+import responses
+
+import mapbox
+
+
[email protected]
+def points():
+ points = [
+ OrderedDict(
+ type="Feature",
+ properties=OrderedDict(title="point1"),
+ geometry=OrderedDict(
+ type="Point",
+ coordinates=[-61.7, 12.1])),
+ OrderedDict(
+ type="Feature",
+ properties=OrderedDict(title="point2"),
+ geometry=OrderedDict(
+ type="Point",
+ coordinates=[-61.6, 12.0]))]
+
+ return points
+
+
[email protected]
+def test_staticmap_lonlatz_only():
+
+ responses.add(
+ responses.GET,
+ 'https://api.mapbox.com/v4/mapbox.satellite/-61.7,12.1,12/600x600.png256?access_token=pk.test',
+ match_querystring=True,
+ body='png123',
+ status=200,
+ content_type='image/png')
+
+ res = mapbox.Static(access_token='pk.test').image('mapbox.satellite', -61.7, 12.1, 12)
+ assert res.status_code == 200
+
+
[email protected]
+def test_staticmap_lonlatz_features(points):
+
+ overlay = json.dumps({'type': 'FeatureCollection',
+ 'features': points})
+ overlay = quote(overlay)
+ url = ('https://api.mapbox.com/v4/mapbox.satellite/geojson({0})/'
+ '-61.7,12.1,12/600x600.png256?access_token=pk.test'.format(overlay))
+
+ responses.add(
+ responses.GET, url,
+ match_querystring=True,
+ body='png123',
+ status=200,
+ content_type='image/png')
+
+ res = mapbox.Static(access_token='pk.test').image('mapbox.satellite',
+ -61.7, 12.1, 12,
+ points)
+ assert res.status_code == 200
+
[email protected]
+def test_staticmap_auto_features(points):
+
+ overlay = json.dumps({'type': 'FeatureCollection',
+ 'features': points})
+ overlay = quote(overlay)
+ url = ('https://api.mapbox.com/v4/mapbox.satellite/geojson({0})/'
+ 'auto/600x600.png256?access_token=pk.test'.format(overlay))
+
+ responses.add(
+ responses.GET, url,
+ match_querystring=True,
+ body='png123',
+ status=200,
+ content_type='image/png')
+
+ res = mapbox.Static(access_token='pk.test').image('mapbox.satellite',
+ features=points)
+ assert res.status_code == 200
+
+
+def test_staticmap_auto_nofeatures(points):
+ with pytest.raises(ValueError):
+ mapbox.Static(access_token='pk.test').image('mapbox.satellite')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 3,
"test_score": 1
},
"num_modified_files": 3
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | boto3==1.37.23
botocore==1.37.23
cachetools==5.5.2
certifi==2025.1.31
chardet==5.2.0
charset-normalizer==3.4.1
click==8.1.8
click-plugins==1.1.1
cligj==0.7.2
colorama==0.4.6
coverage==7.8.0
coveralls==4.0.1
distlib==0.3.9
docopt==0.6.2
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
filelock==3.18.0
idna==3.10
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
jmespath==1.0.1
-e git+https://github.com/mapbox/mapbox-sdk-py.git@ab45a8e1a40b5ecbe9e6c59002883e291856dcc9#egg=mapbox
packaging @ file:///croot/packaging_1734472117206/work
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
pyproject-api==1.9.0
pytest @ file:///croot/pytest_1738938843180/work
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
PyYAML==6.0.2
requests==2.32.3
responses==0.25.7
s3transfer==0.11.4
six==1.17.0
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
uritemplate==4.1.1
uritemplate.py==3.0.2
urllib3==1.26.20
virtualenv==20.29.3
| name: mapbox-sdk-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- boto3==1.37.23
- botocore==1.37.23
- cachetools==5.5.2
- certifi==2025.1.31
- chardet==5.2.0
- charset-normalizer==3.4.1
- click==8.1.8
- click-plugins==1.1.1
- cligj==0.7.2
- colorama==0.4.6
- coverage==7.8.0
- coveralls==4.0.1
- distlib==0.3.9
- docopt==0.6.2
- filelock==3.18.0
- idna==3.10
- jmespath==1.0.1
- platformdirs==4.3.7
- pyproject-api==1.9.0
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- pyyaml==6.0.2
- requests==2.32.3
- responses==0.25.7
- s3transfer==0.11.4
- six==1.17.0
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- uritemplate==4.1.1
- uritemplate-py==3.0.2
- urllib3==1.26.20
- virtualenv==20.29.3
prefix: /opt/conda/envs/mapbox-sdk-py
| [
"tests/test_staticmaps.py::test_staticmap_lonlatz_only",
"tests/test_staticmaps.py::test_staticmap_lonlatz_features",
"tests/test_staticmaps.py::test_staticmap_auto_features",
"tests/test_staticmaps.py::test_staticmap_auto_nofeatures"
]
| []
| []
| []
| MIT License | 316 | [
"README.rst",
".travis.yml",
"mapbox/__init__.py",
"mapbox/services/static.py"
]
| [
"README.rst",
".travis.yml",
"mapbox/__init__.py",
"mapbox/services/static.py"
]
|
|
falconry__falcon-664 | 3a6ce66edb68261f66bd74f2f0f756900da78225 | 2015-11-30 18:10:25 | b78ffaac7c412d3b3d6cd3c70dd05024d79d2cce | diff --git a/doc/api/errors.rst b/doc/api/errors.rst
index 26a5960..6cabdfa 100644
--- a/doc/api/errors.rst
+++ b/doc/api/errors.rst
@@ -36,5 +36,5 @@ Predefined Errors
HTTPBadRequest, HTTPUnauthorized, HTTPForbidden, HTTPNotFound,
HTTPMethodNotAllowed, HTTPNotAcceptable, HTTPConflict,
HTTPLengthRequired, HTTPPreconditionFailed, HTTPUnsupportedMediaType,
- HTTPRangeNotSatisfiable, HTTPInternalServerError, HTTPBadGateway,
- HTTPServiceUnavailable
+ HTTPRangeNotSatisfiable, HTTPUnprocessableEntity, HTTPInternalServerError,
+ HTTPBadGateway, HTTPServiceUnavailable
diff --git a/doc/api/status.rst b/doc/api/status.rst
index 9e72add..0aa6c68 100644
--- a/doc/api/status.rst
+++ b/doc/api/status.rst
@@ -95,6 +95,7 @@ string objects that must be created when preparing responses.
HTTP_REQUESTED_RANGE_NOT_SATISFIABLE = HTTP_416
HTTP_EXPECTATION_FAILED = HTTP_417
HTTP_IM_A_TEAPOT = HTTP_418
+ HTTP_UNPROCESSABLE_ENTITY = HTTP_422
HTTP_UPGRADE_REQUIRED = HTTP_426
HTTP_PRECONDITION_REQUIRED = HTTP_428
HTTP_TOO_MANY_REQUESTS = HTTP_429
@@ -119,6 +120,7 @@ string objects that must be created when preparing responses.
HTTP_416 = '416 Range Not Satisfiable'
HTTP_417 = '417 Expectation Failed'
HTTP_418 = "418 I'm a teapot"
+ HTTP_422 = "422 Unprocessable Entity"
HTTP_426 = '426 Upgrade Required'
HTTP_428 = '428 Precondition Required'
HTTP_429 = '429 Too Many Requests'
diff --git a/falcon/errors.py b/falcon/errors.py
index 49f2747..7f75b3e 100644
--- a/falcon/errors.py
+++ b/falcon/errors.py
@@ -306,6 +306,24 @@ class HTTPRangeNotSatisfiable(NoRepresentation, HTTPError):
headers=headers)
+class HTTPUnprocessableEntity(HTTPError):
+    """422 Unprocessable Entity.
+
+    The request was well-formed but was unable to be followed due to semantic
+    errors. See also: http://www.ietf.org/rfc/rfc4918.
+
+    Args:
+        title (str): Error title (e.g., 'Missing title field').
+        description (str): Human-friendly description of the error, along with
+            a helpful suggestion or two.
+        kwargs (optional): Same as for ``HTTPError``.
+    """
+
+    def __init__(self, title, description, **kwargs):
+        super(HTTPUnprocessableEntity, self).__init__(status.HTTP_422, title,
+                                                      description, **kwargs)
+
+
class HTTPInternalServerError(HTTPError):
"""500 Internal Server Error.
diff --git a/falcon/status_codes.py b/falcon/status_codes.py
index f0c87a4..c27c602 100644
--- a/falcon/status_codes.py
+++ b/falcon/status_codes.py
@@ -90,6 +90,8 @@ HTTP_417 = '417 Expectation Failed'
HTTP_EXPECTATION_FAILED = HTTP_417
HTTP_418 = "418 I'm a teapot"
HTTP_IM_A_TEAPOT = HTTP_418
+HTTP_422 = "422 Unprocessable Entity"
+HTTP_UNPROCESSABLE_ENTITY = HTTP_422
HTTP_426 = '426 Upgrade Required'
HTTP_UPGRADE_REQUIRED = HTTP_426
HTTP_428 = '428 Precondition Required'
| Adding HTTP_422, Unprocessable Entity
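For illustration, the kind of handler this enables (a sketch: the resource and field names are hypothetical, but the `(title, description)` constructor signature matches the class added by this patch):

```python
import falcon


class OrderResource(object):

    def on_post(self, req, resp):
        doc = req.context.get('doc', {})
        # The body parsed fine, but the data is semantically invalid -> 422
        if doc.get('quantity', 0) < 0:
            raise falcon.HTTPUnprocessableEntity(
                'Invalid quantity',
                'quantity must be a non-negative integer.')
        resp.status = falcon.HTTP_201
```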
Falcon should support this response out of the box; the sketch above shows the intended usage. | falconry/falcon | diff --git a/tests/test_httperror.py b/tests/test_httperror.py
index 1a99ccb..e48d4b0 100644
--- a/tests/test_httperror.py
+++ b/tests/test_httperror.py
@@ -676,5 +676,6 @@ class TestHTTPError(testing.TestBase):
self._misc_test(falcon.HTTPPreconditionFailed, falcon.HTTP_412)
self._misc_test(falcon.HTTPUnsupportedMediaType, falcon.HTTP_415,
needs_title=False)
+ self._misc_test(falcon.HTTPUnprocessableEntity, falcon.HTTP_422)
self._misc_test(falcon.HTTPInternalServerError, falcon.HTTP_500)
self._misc_test(falcon.HTTPBadGateway, falcon.HTTP_502)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 4
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"coverage",
"ddt",
"pyyaml",
"requests",
"testtools",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"tools/test-requires"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
ddt==1.7.2
exceptiongroup==1.2.2
-e git+https://github.com/falconry/falcon.git@3a6ce66edb68261f66bd74f2f0f756900da78225#egg=falcon
idna==3.10
iniconfig==2.1.0
nose==1.3.7
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
python-mimeparse==2.0.0
PyYAML==6.0.2
requests==2.32.3
six==1.17.0
testtools==2.7.2
tomli==2.2.1
urllib3==2.3.0
| name: falcon
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- ddt==1.7.2
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- nose==1.3.7
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- python-mimeparse==2.0.0
- pyyaml==6.0.2
- requests==2.32.3
- six==1.17.0
- testtools==2.7.2
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/falcon
| [
"tests/test_httperror.py::TestHTTPError::test_misc"
]
| []
| [
"tests/test_httperror.py::TestHTTPError::test_401",
"tests/test_httperror.py::TestHTTPError::test_404_with_body",
"tests/test_httperror.py::TestHTTPError::test_404_without_body",
"tests/test_httperror.py::TestHTTPError::test_405_with_body",
"tests/test_httperror.py::TestHTTPError::test_405_without_body",
"tests/test_httperror.py::TestHTTPError::test_405_without_body_with_extra_headers",
"tests/test_httperror.py::TestHTTPError::test_405_without_body_with_extra_headers_double_check",
"tests/test_httperror.py::TestHTTPError::test_411",
"tests/test_httperror.py::TestHTTPError::test_413",
"tests/test_httperror.py::TestHTTPError::test_416",
"tests/test_httperror.py::TestHTTPError::test_503_datetime_retry_after",
"tests/test_httperror.py::TestHTTPError::test_503_integer_retry_after",
"tests/test_httperror.py::TestHTTPError::test_base_class",
"tests/test_httperror.py::TestHTTPError::test_client_does_not_accept_anything",
"tests/test_httperror.py::TestHTTPError::test_client_does_not_accept_json_or_xml",
"tests/test_httperror.py::TestHTTPError::test_custom_error_serializer",
"tests/test_httperror.py::TestHTTPError::test_epic_fail_json",
"tests/test_httperror.py::TestHTTPError::test_epic_fail_xml_1_text_xml",
"tests/test_httperror.py::TestHTTPError::test_epic_fail_xml_2_application_xml",
"tests/test_httperror.py::TestHTTPError::test_epic_fail_xml_3_application_vnd_company_system_project_resource_xml_v_1_1",
"tests/test_httperror.py::TestHTTPError::test_epic_fail_xml_4_application_atom_xml",
"tests/test_httperror.py::TestHTTPError::test_forbidden_1_application_json",
"tests/test_httperror.py::TestHTTPError::test_forbidden_2_application_vnd_company_system_project_resource_json_v_1_1",
"tests/test_httperror.py::TestHTTPError::test_forbidden_3_application_json_patch_json",
"tests/test_httperror.py::TestHTTPError::test_invalid_header",
"tests/test_httperror.py::TestHTTPError::test_invalid_param",
"tests/test_httperror.py::TestHTTPError::test_missing_header",
"tests/test_httperror.py::TestHTTPError::test_missing_param",
"tests/test_httperror.py::TestHTTPError::test_no_description_json",
"tests/test_httperror.py::TestHTTPError::test_no_description_xml",
"tests/test_httperror.py::TestHTTPError::test_temporary_413_datetime_retry_after",
"tests/test_httperror.py::TestHTTPError::test_temporary_413_integer_retry_after",
"tests/test_httperror.py::TestHTTPError::test_unicode_json",
"tests/test_httperror.py::TestHTTPError::test_unicode_xml"
]
| []
| Apache License 2.0 | 317 | [
"doc/api/status.rst",
"falcon/errors.py",
"falcon/status_codes.py",
"doc/api/errors.rst"
]
| [
"doc/api/status.rst",
"falcon/errors.py",
"falcon/status_codes.py",
"doc/api/errors.rst"
]
|
|
juju-solutions__charms.benchmark-3 | df2acf8736cce39d905990fd5008d6afa57863c3 | 2015-12-01 12:31:43 | df2acf8736cce39d905990fd5008d6afa57863c3 | tvansteenburgh: Unrelated to your change, but I don't think the `in_relation_hook()` guard on __init__.py:133 is correct. Benchmark.start() is usually called in an action, not a relation, and we want the action_uuid to be set on the relation regardless of when Benchmark.start() is called. | diff --git a/charms/benchmark/__init__.py b/charms/benchmark/__init__.py
index 9a5458e..f0c15c5 100644
--- a/charms/benchmark/__init__.py
+++ b/charms/benchmark/__init__.py
@@ -130,14 +130,14 @@ class Benchmark(object):
        charm_dir = os.environ.get('CHARM_DIR')
        action_uuid = os.environ.get('JUJU_ACTION_UUID')
-        if in_relation_hook() and charm_dir and action_uuid:
+        if charm_dir and action_uuid:
            """
            If the cabs-collector charm is installed, take a snapshot
            of the current profile data.
            """
            # Do profile data collection immediately on this unit
            if os.path.exists(COLLECT_PROFILE_DATA):
-                subprocess.check_output([COLLECT_PROFILE_DATA])
+                subprocess.check_output([COLLECT_PROFILE_DATA, action_uuid])
            with open(
                os.path.join(
| Action UUID needs to be passed explicitly to the collect-profile-data script | juju-solutions/charms.benchmark | diff --git a/tests/test_charms-benchmark.py b/tests/test_charms-benchmark.py
index 3b78034..8528993 100644
--- a/tests/test_charms-benchmark.py
+++ b/tests/test_charms-benchmark.py
@@ -146,7 +146,7 @@ class TestBenchmark(TestCase):
COLLECT_PROFILE_DATA = '/usr/local/bin/collect-profile-data'
exists.assert_any_call(COLLECT_PROFILE_DATA)
- check_output.assert_any_call([COLLECT_PROFILE_DATA])
+ check_output.assert_any_call([COLLECT_PROFILE_DATA, 'my_action'])
@mock.patch('charms.benchmark.action_set')
def test_benchmark_finish(self, action_set):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 1
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"coverage",
"testtools",
"pep8",
"mock",
"cherrypy",
"pyyaml",
"six",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"test-requires.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | autocommand==2.2.2
backports.tarfile==1.2.0
charmhelpers==1.2.1
-e git+https://github.com/juju-solutions/charms.benchmark.git@df2acf8736cce39d905990fd5008d6afa57863c3#egg=charms.benchmark
cheroot==10.0.1
CherryPy==18.10.0
coverage==7.8.0
exceptiongroup==1.2.2
iniconfig==2.1.0
jaraco.collections==5.1.0
jaraco.context==6.0.1
jaraco.functools==4.1.0
jaraco.text==4.0.0
Jinja2==3.1.6
MarkupSafe==3.0.2
mock==5.2.0
more-itertools==10.6.0
netaddr==1.3.0
nose==1.3.7
packaging==24.2
pbr==6.1.1
pep8==1.7.1
pluggy==1.5.0
portend==3.2.0
pytest==8.3.5
python-dateutil==2.9.0.post0
PyYAML==6.0.2
six==1.17.0
tempora==5.8.0
testtools==2.7.2
tomli==2.2.1
zc.lockfile==3.0.post1
| name: charms.benchmark
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- autocommand==2.2.2
- backports-tarfile==1.2.0
- charmhelpers==1.2.1
- cheroot==10.0.1
- cherrypy==18.10.0
- coverage==7.8.0
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- jaraco-collections==5.1.0
- jaraco-context==6.0.1
- jaraco-functools==4.1.0
- jaraco-text==4.0.0
- jinja2==3.1.6
- markupsafe==3.0.2
- mock==5.2.0
- more-itertools==10.6.0
- netaddr==1.3.0
- nose==1.3.7
- packaging==24.2
- pbr==6.1.1
- pep8==1.7.1
- pluggy==1.5.0
- portend==3.2.0
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- pyyaml==6.0.2
- six==1.17.0
- tempora==5.8.0
- testtools==2.7.2
- tomli==2.2.1
- zc-lockfile==3.0.post1
prefix: /opt/conda/envs/charms.benchmark
| [
"tests/test_charms-benchmark.py::TestBenchmark::test_benchmark_start"
]
| []
| [
"tests/test_charms-benchmark.py::TestBenchmark::test_benchmark_finish",
"tests/test_charms-benchmark.py::TestBenchmark::test_benchmark_finish_oserror",
"tests/test_charms-benchmark.py::TestBenchmark::test_benchmark_init",
"tests/test_charms-benchmark.py::TestBenchmark::test_benchmark_meta",
"tests/test_charms-benchmark.py::TestBenchmark::test_benchmark_set_composite_score",
"tests/test_charms-benchmark.py::TestBenchmark::test_benchmark_start_oserror",
"tests/test_charms-benchmark.py::TestBenchmark::test_set_data"
]
| []
| null | 318 | [
"charms/benchmark/__init__.py"
]
| [
"charms/benchmark/__init__.py"
]
|
pre-commit__pre-commit-310 | 6b005cff0d5d4f579be5dbb97102c4fee3b4e39f | 2015-12-01 16:34:13 | c1c3f3b571adcd0cf5a8cea7d9d80574c2572c02 | diff --git a/pre_commit/error_handler.py b/pre_commit/error_handler.py
index c8d2bfc..60038f4 100644
--- a/pre_commit/error_handler.py
+++ b/pre_commit/error_handler.py
@@ -7,7 +7,9 @@ import io
import os.path
import traceback
+from pre_commit import five
from pre_commit.errors import FatalError
+from pre_commit.output import sys_stdout_write_wrapper
from pre_commit.store import Store
@@ -16,15 +18,15 @@ class PreCommitSystemExit(SystemExit):
pass
-def _log_and_exit(msg, exc, formatted, print_fn=print):
- error_msg = '{0}: {1}: {2}'.format(msg, type(exc).__name__, exc)
- print_fn(error_msg)
- print_fn('Check the log at ~/.pre-commit/pre-commit.log')
+def _log_and_exit(msg, exc, formatted, write_fn=sys_stdout_write_wrapper):
+ error_msg = '{0}: {1}: {2}\n'.format(msg, type(exc).__name__, exc)
+ write_fn(error_msg)
+ write_fn('Check the log at ~/.pre-commit/pre-commit.log\n')
store = Store()
store.require_created()
- with io.open(os.path.join(store.directory, 'pre-commit.log'), 'w') as log:
- log.write(error_msg + '\n')
- log.write(formatted + '\n')
+ with io.open(os.path.join(store.directory, 'pre-commit.log'), 'wb') as log:
+ log.write(five.to_bytes(error_msg))
+ log.write(five.to_bytes(formatted) + b'\n')
raise PreCommitSystemExit(1)
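To see why the patch above opens the log in binary mode and writes bytes, here is a minimal standalone sketch (the message and file name are illustrative, and the `encode` call only approximates what `five.to_bytes` does for text):

```python
# Writing the formatted error as UTF-8 bytes sidesteps the implicit ascii
# codec that Python 2 falls back to when stdout is redirected (no tty).
error_msg = u'An unexpected error has occurred: ValueError: \u2603\n'
with open('pre-commit.log', 'wb') as log:   # binary mode, as in the patch
    log.write(error_msg.encode('UTF-8'))    # roughly what five.to_bytes does
```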
| Non-ascii prints in error handler without tty cause stacktrace
```
23:00:13 style runtests: commands[0] | pre-commit run --all-files
23:00:13 [INFO] Installing environment for [email protected]:mirrors/pre-commit/mirrors-jshint.
23:00:13 [INFO] Once installed this environment will be reused.
23:00:13 [INFO] This may take a few minutes...
23:01:33 Traceback (most recent call last):
23:01:33 File ".tox/style/bin/pre-commit", line 11, in <module>
23:01:33 sys.exit(main())
23:01:33 File ".../.tox/style/local/lib/python2.7/site-packages/pre_commit/main.py", line 157, in main
23:01:33 'Command {0} failed to exit with a returncode'.format(args.command)
23:01:33 File "/usr/lib64/python2.7/contextlib.py", line 35, in __exit__
23:01:33 self.gen.throw(type, value, traceback)
23:01:33 File ".../.tox/style/local/lib/python2.7/site-packages/pre_commit/error_handler.py", line 41, in error_handler
23:01:33 traceback.format_exc(),
23:01:33 File ".../.tox/style/local/lib/python2.7/site-packages/pre_commit/error_handler.py", line 21, in _log_and_exit
23:01:33 print_fn(error_msg)
23:01:33 UnicodeEncodeError: 'ascii' codec can't encode characters in position 735-737: ordinal not in range(128)
``` | pre-commit/pre-commit | diff --git a/tests/error_handler_test.py b/tests/error_handler_test.py
index 161b88f..d8f966a 100644
--- a/tests/error_handler_test.py
+++ b/tests/error_handler_test.py
@@ -1,15 +1,18 @@
+# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import io
import os.path
import re
+import sys
import mock
import pytest
from pre_commit import error_handler
from pre_commit.errors import FatalError
+from pre_commit.util import cmd_output
@pytest.yield_fixture
@@ -72,17 +75,17 @@ def test_error_handler_uncaught_error(mocked_log_and_exit):
def test_log_and_exit(mock_out_store_directory):
- mocked_print = mock.Mock()
+ mocked_write = mock.Mock()
with pytest.raises(error_handler.PreCommitSystemExit):
error_handler._log_and_exit(
'msg', FatalError('hai'), "I'm a stacktrace",
- print_fn=mocked_print,
+ write_fn=mocked_write,
)
- printed = '\n'.join(call[0][0] for call in mocked_print.call_args_list)
+ printed = ''.join(call[0][0] for call in mocked_write.call_args_list)
assert printed == (
'msg: FatalError: hai\n'
- 'Check the log at ~/.pre-commit/pre-commit.log'
+ 'Check the log at ~/.pre-commit/pre-commit.log\n'
)
log_file = os.path.join(mock_out_store_directory, 'pre-commit.log')
@@ -92,3 +95,25 @@ def test_log_and_exit(mock_out_store_directory):
'msg: FatalError: hai\n'
"I'm a stacktrace\n"
)
+
+
+def test_error_handler_non_ascii_exception(mock_out_store_directory):
+ with pytest.raises(error_handler.PreCommitSystemExit):
+ with error_handler.error_handler():
+ raise ValueError('☃')
+
+
+def test_error_handler_no_tty(tempdir_factory):
+ output = cmd_output(
+ sys.executable, '-c',
+ 'from __future__ import unicode_literals\n'
+ 'from pre_commit.error_handler import error_handler\n'
+ 'with error_handler():\n'
+ ' raise ValueError("\\u2603")\n',
+ env=dict(os.environ, PRE_COMMIT_HOME=tempdir_factory.get()),
+ retcode=1,
+ )
+ assert output[1].replace('\r', '') == (
+ 'An unexpected error has occurred: ValueError: ☃\n'
+ 'Check the log at ~/.pre-commit/pre-commit.log\n'
+ )
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 0.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aspy.yaml==1.3.0
astroid==1.3.2
attrs==22.2.0
cached-property==1.5.2
certifi==2021.5.30
coverage==6.2
distlib==0.3.9
filelock==3.4.1
flake8==5.0.4
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
jsonschema==3.2.0
logilab-common==1.9.7
mccabe==0.7.0
mock==5.2.0
mypy-extensions==1.0.0
nodeenv==1.6.0
ordereddict==1.1
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
-e git+https://github.com/pre-commit/pre-commit.git@6b005cff0d5d4f579be5dbb97102c4fee3b4e39f#egg=pre_commit
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pylint==1.3.1
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
PyYAML==6.0.1
simplejson==3.20.1
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
virtualenv==20.17.1
zipp==3.6.0
| name: pre-commit
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- argparse==1.4.0
- aspy-yaml==1.3.0
- astroid==1.3.2
- attrs==22.2.0
- cached-property==1.5.2
- coverage==6.2
- distlib==0.3.9
- filelock==3.4.1
- flake8==5.0.4
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- jsonschema==3.2.0
- logilab-common==1.9.7
- mccabe==0.7.0
- mock==5.2.0
- mypy-extensions==1.0.0
- nodeenv==1.6.0
- ordereddict==1.1
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pylint==1.3.1
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pyyaml==6.0.1
- simplejson==3.20.1
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- virtualenv==20.17.1
- zipp==3.6.0
prefix: /opt/conda/envs/pre-commit
| [
"tests/error_handler_test.py::test_log_and_exit"
]
| []
| [
"tests/error_handler_test.py::test_error_handler_no_exception",
"tests/error_handler_test.py::test_error_handler_fatal_error",
"tests/error_handler_test.py::test_error_handler_uncaught_error",
"tests/error_handler_test.py::test_error_handler_non_ascii_exception",
"tests/error_handler_test.py::test_error_handler_no_tty"
]
| []
| MIT License | 319 | [
"pre_commit/error_handler.py"
]
| [
"pre_commit/error_handler.py"
]
|
|
getlogbook__logbook-183 | 1d999a784d0d8f5f7423f25c684cc1100843ccc5 | 2015-12-03 01:44:29 | bb0f4fbeec318a140780b1ac8781599474cf2666 | diff --git a/logbook/handlers.py b/logbook/handlers.py
index 82e518f..5f66978 100644
--- a/logbook/handlers.py
+++ b/logbook/handlers.py
@@ -20,6 +20,7 @@ try:
except ImportError:
from sha import new as sha1
import traceback
+import collections
from datetime import datetime, timedelta
from collections import deque
from textwrap import dedent
@@ -1014,14 +1015,42 @@ class MailHandler(Handler, StringFormatterHandlerMixin,
The default timedelta is 60 seconds (one minute).
- The mail handler is sending mails in a blocking manner. If you are not
+ The mail handler sends mails in a blocking manner. If you are not
using some centralized system for logging these messages (with the help
of ZeroMQ or others) and the logging system slows you down you can
wrap the handler in a :class:`logbook.queues.ThreadedWrapperHandler`
that will then send the mails in a background thread.
+ `server_addr` can be a tuple of host and port, or just a string containing
+ the host to use the default port (25, or 465 if connecting securely.)
+
+ `credentials` can be a tuple or dictionary of arguments that will be passed
+ to :py:meth:`smtplib.SMTP.login`.
+
+ `secure` can be a tuple, dictionary, or boolean. As a boolean, this will
+ simply enable or disable a secure connection. The tuple is unpacked as
+ parameters `keyfile`, `certfile`. As a dictionary, `secure` should contain
+ those keys. For backwards compatibility, ``secure=()`` will enable a secure
+ connection. If `starttls` is enabled (default), these parameters will be
+ passed to :py:meth:`smtplib.SMTP.starttls`, otherwise
+ :py:class:`smtplib.SMTP_SSL`.
+
+
.. versionchanged:: 0.3
The handler supports the batching system now.
+
+ .. versionadded:: 1.0
+ `starttls` parameter added to allow disabling STARTTLS for SSL
+ connections.
+
+ .. versionchanged:: 1.0
+ If `server_addr` is a string, the default port will be used.
+
+ .. versionchanged:: 1.0
+ `credentials` parameter can now be a dictionary of keyword arguments.
+
+ .. versionchanged:: 1.0
+       `secure` can now be a dictionary or boolean in addition to a tuple.
"""
default_format_string = MAIL_FORMAT_STRING
default_related_format_string = MAIL_RELATED_FORMAT_STRING
@@ -1039,7 +1068,7 @@ class MailHandler(Handler, StringFormatterHandlerMixin,
server_addr=None, credentials=None, secure=None,
record_limit=None, record_delta=None, level=NOTSET,
format_string=None, related_format_string=None,
- filter=None, bubble=False):
+ filter=None, bubble=False, starttls=True):
Handler.__init__(self, level, filter, bubble)
StringFormatterHandlerMixin.__init__(self, format_string)
LimitingHandlerMixin.__init__(self, record_limit, record_delta)
@@ -1054,6 +1083,7 @@ class MailHandler(Handler, StringFormatterHandlerMixin,
if related_format_string is None:
related_format_string = self.default_related_format_string
self.related_format_string = related_format_string
+ self.starttls = starttls
def _get_related_format_string(self):
if isinstance(self.related_formatter, StringFormatter):
@@ -1148,20 +1178,63 @@ class MailHandler(Handler, StringFormatterHandlerMixin,
"""Returns an SMTP connection. By default it reconnects for
each sent mail.
"""
- from smtplib import SMTP, SMTP_PORT, SMTP_SSL_PORT
+ from smtplib import SMTP, SMTP_SSL, SMTP_PORT, SMTP_SSL_PORT
if self.server_addr is None:
host = '127.0.0.1'
port = self.secure and SMTP_SSL_PORT or SMTP_PORT
else:
- host, port = self.server_addr
- con = SMTP()
- con.connect(host, port)
+ try:
+ host, port = self.server_addr
+ except ValueError:
+ # If server_addr is a string, the tuple unpacking will raise
+ # ValueError, and we can use the default port.
+ host = self.server_addr
+ port = self.secure and SMTP_SSL_PORT or SMTP_PORT
+
+ # Previously, self.secure was passed as con.starttls(*self.secure). This
+ # meant that starttls couldn't be used without a keyfile and certfile
+ # unless an empty tuple was passed. See issue #94.
+ #
+ # The changes below allow passing:
+ # - secure=True for secure connection without checking identity.
+ # - dictionary with keys 'keyfile' and 'certfile'.
+ # - tuple to be unpacked to variables keyfile and certfile.
+ # - secure=() equivalent to secure=True for backwards compatibility.
+ # - secure=False equivalent to secure=None to disable.
+ if isinstance(self.secure, collections.Mapping):
+ keyfile = self.secure.get('keyfile', None)
+ certfile = self.secure.get('certfile', None)
+ elif isinstance(self.secure, collections.Iterable):
+ # Allow empty tuple for backwards compatibility
+ if len(self.secure) == 0:
+ keyfile = certfile = None
+ else:
+ keyfile, certfile = self.secure
+ else:
+ keyfile = certfile = None
+
+ # Allow starttls to be disabled by passing starttls=False.
+ if not self.starttls and self.secure:
+ con = SMTP_SSL(host, port, keyfile=keyfile, certfile=certfile)
+ else:
+ con = SMTP(host, port)
+
if self.credentials is not None:
- if self.secure is not None:
+ secure = self.secure
+ if self.starttls and secure is not None and secure is not False:
con.ehlo()
- con.starttls(*self.secure)
+ con.starttls(keyfile=keyfile, certfile=certfile)
con.ehlo()
- con.login(*self.credentials)
+
+ # Allow credentials to be a tuple or dict.
+ if isinstance(self.credentials, collections.Mapping):
+ credentials_args = ()
+ credentials_kwargs = self.credentials
+ else:
+ credentials_args = self.credentials
+ credentials_kwargs = dict()
+
+ con.login(*credentials_args, **credentials_kwargs)
return con
def close_connection(self, con):
@@ -1175,7 +1248,7 @@ class MailHandler(Handler, StringFormatterHandlerMixin,
pass
def deliver(self, msg, recipients):
- """Delivers the given message to a list of recpients."""
+ """Delivers the given message to a list of recipients."""
con = self.get_connection()
try:
con.sendmail(self.from_addr, recipients, msg.as_string())
@@ -1227,7 +1300,7 @@ class GMailHandler(MailHandler):
def __init__(self, account_id, password, recipients, **kw):
super(GMailHandler, self).__init__(
- account_id, recipients, secure=(),
+ account_id, recipients, secure=True,
server_addr=("smtp.gmail.com", 587),
credentials=(account_id, password), **kw)
diff --git a/setup.py b/setup.py
index 26df542..bdb9b00 100644
--- a/setup.py
+++ b/setup.py
@@ -158,6 +158,10 @@ with open(version_file_path) as version_file:
extras_require = dict()
extras_require['test'] = set(['pytest', 'pytest-cov'])
+
+if sys.version_info[:2] < (3, 3):
+ extras_require['test'] |= set(['mock'])
+
extras_require['dev'] = set(['cython']) | extras_require['test']
extras_require['execnet'] = set(['execnet>=1.0.9'])
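Taken together, the handler changes above can be exercised like this (a hedged usage sketch only: the host and credentials are placeholders, and the keyword meanings follow the docstring added in the patch; constructing the handler does not open a connection):

```python
from logbook import MailHandler

# String server_addr -> default port; dict credentials -> keyword login;
# secure=True -> encrypted connection without checking identity;
# starttls=False -> connect via SMTP_SSL instead of upgrading with STARTTLS.
handler = MailHandler(
    '[email protected]', ['[email protected]'],
    server_addr='smtp.example.com',
    credentials={'user': 'me', 'password': 'secret'},
    secure=True,
    starttls=False)
```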
| SMTP Handler STARTTLS
Due to the lack of documentation on this handler, it took a little digging to work out how to get it to work...
One thing that confused me was the "secure" argument. Python SMTPLib's starttls() accepts two optional values: a keyfile and a certfile - but these are only required for *checking* the identity. If neither is specified, SMTPLib will still try to establish an encrypted connection, just without checking the identity. If you do not specify an argument to Logbook, it will not attempt to establish an encrypted connection at all.
So, if you want a TLS connection to the SMTP server but don't care about checking the identity, you can do `secure = []`, which passes the `if self.secure is not None` check. However, if you do `secure = True` you will get an error, because you cannot unpack a boolean (logbook populates the arguments using `conn.starttls(*self.secure)`).
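A minimal standalone reproduction of that unpacking failure (the function below merely stands in for smtplib's starttls; it is not logbook code):

```python
def starttls(*args):                     # stand-in for smtplib.SMTP.starttls
    return args

starttls(*())                            # ok: encrypt, identity unchecked
starttls(*('key.pem', 'cert.pem'))       # ok: identity checked
try:
    starttls(*True)                      # what secure=True runs into
except TypeError as exc:
    print(exc)                           # a boolean cannot be unpacked
```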
It'd help if the documentation explained the arguments for the mail handlers. | getlogbook/logbook | diff --git a/tests/test_mail_handler.py b/tests/test_mail_handler.py
index babc4e2..fd7730b 100644
--- a/tests/test_mail_handler.py
+++ b/tests/test_mail_handler.py
@@ -7,6 +7,11 @@ from logbook.helpers import u
from .utils import capturing_stderr_context, make_fake_mail_handler
+try:
+ from unittest.mock import Mock, call, patch
+except ImportError:
+ from mock import Mock, call, patch
+
__file_without_pyc__ = __file__
if __file_without_pyc__.endswith('.pyc'):
__file_without_pyc__ = __file_without_pyc__[:-1]
@@ -104,3 +109,126 @@ def test_group_handler_mail_combo(activation_strategy, logger):
assert len(related) == 2
assert re.search('Message type:\s+WARNING', related[0])
assert re.search('Message type:\s+DEBUG', related[1])
+
+
+def test_mail_handler_arguments():
+ with patch('smtplib.SMTP', autospec=True) as mock_smtp:
+
+ # Test the mail handler with supported arguments before changes to
+ # secure, credentials, and starttls
+ mail_handler = logbook.MailHandler(
+ from_addr='[email protected]',
+ recipients='[email protected]',
+ server_addr=('server.example.com', 465),
+ credentials=('username', 'password'),
+ secure=('keyfile', 'certfile'))
+
+ mail_handler.get_connection()
+
+ assert mock_smtp.call_args == call('server.example.com', 465)
+ assert mock_smtp.method_calls[1] == call().starttls(
+ keyfile='keyfile', certfile='certfile')
+ assert mock_smtp.method_calls[3] == call().login('username', 'password')
+
+ # Test secure=()
+ mail_handler = logbook.MailHandler(
+ from_addr='[email protected]',
+ recipients='[email protected]',
+ server_addr=('server.example.com', 465),
+ credentials=('username', 'password'),
+ secure=())
+
+ mail_handler.get_connection()
+
+ assert mock_smtp.call_args == call('server.example.com', 465)
+ assert mock_smtp.method_calls[5] == call().starttls(
+ certfile=None, keyfile=None)
+ assert mock_smtp.method_calls[7] == call().login('username', 'password')
+
+ # Test implicit port with string server_addr, dictionary credentials,
+ # dictionary secure.
+ mail_handler = logbook.MailHandler(
+ from_addr='[email protected]',
+ recipients='[email protected]',
+ server_addr='server.example.com',
+ credentials={'user': 'username', 'password': 'password'},
+ secure={'certfile': 'certfile2', 'keyfile': 'keyfile2'})
+
+ mail_handler.get_connection()
+
+ assert mock_smtp.call_args == call('server.example.com', 465)
+ assert mock_smtp.method_calls[9] == call().starttls(
+ certfile='certfile2', keyfile='keyfile2')
+ assert mock_smtp.method_calls[11] == call().login(
+ user='username', password='password')
+
+ # Test secure=True
+ mail_handler = logbook.MailHandler(
+ from_addr='[email protected]',
+ recipients='[email protected]',
+ server_addr=('server.example.com', 465),
+ credentials=('username', 'password'),
+ secure=True)
+
+ mail_handler.get_connection()
+
+ assert mock_smtp.call_args == call('server.example.com', 465)
+ assert mock_smtp.method_calls[13] == call().starttls(
+ certfile=None, keyfile=None)
+ assert mock_smtp.method_calls[15] == call().login('username', 'password')
+ assert len(mock_smtp.method_calls) == 16
+
+ # Test secure=False
+ mail_handler = logbook.MailHandler(
+ from_addr='[email protected]',
+ recipients='[email protected]',
+ server_addr=('server.example.com', 465),
+ credentials=('username', 'password'),
+ secure=False)
+
+ mail_handler.get_connection()
+
+ # starttls not called because we check len of method_calls before and
+ # after this test.
+ assert mock_smtp.call_args == call('server.example.com', 465)
+ assert mock_smtp.method_calls[16] == call().login('username', 'password')
+ assert len(mock_smtp.method_calls) == 17
+
+ with patch('smtplib.SMTP_SSL', autospec=True) as mock_smtp_ssl:
+ # Test starttls=False
+ mail_handler = logbook.MailHandler(
+ from_addr='[email protected]',
+ recipients='[email protected]',
+ server_addr='server.example.com',
+ credentials={'user': 'username', 'password': 'password'},
+ secure={'certfile': 'certfile', 'keyfile': 'keyfile'},
+ starttls=False)
+
+ mail_handler.get_connection()
+
+ assert mock_smtp_ssl.call_args == call(
+ 'server.example.com', 465, keyfile='keyfile', certfile='certfile')
+ assert mock_smtp_ssl.method_calls[0] == call().login(
+ user='username', password='password')
+
+ # Test starttls=False with secure=True
+ mail_handler = logbook.MailHandler(
+ from_addr='[email protected]',
+ recipients='[email protected]',
+ server_addr='server.example.com',
+ credentials={'user': 'username', 'password': 'password'},
+ secure=True,
+ starttls=False)
+
+ mail_handler.get_connection()
+
+ assert mock_smtp_ssl.call_args == call(
+ 'server.example.com', 465, keyfile=None, certfile=None)
+ assert mock_smtp_ssl.method_calls[1] == call().login(
+ user='username', password='password')
+
+
+
+
+
+
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 2
} | 0.12 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"Cython"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
Cython==3.0.12
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/getlogbook/logbook.git@1d999a784d0d8f5f7423f25c684cc1100843ccc5#egg=Logbook
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
pytest-cov==6.0.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: logbook
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- cython==3.0.12
- pytest-cov==6.0.0
prefix: /opt/conda/envs/logbook
| [
"tests/test_mail_handler.py::test_mail_handler_arguments"
]
| []
| [
"tests/test_mail_handler.py::test_mail_handler[ContextEnteringStrategy]",
"tests/test_mail_handler.py::test_mail_handler[PushingStrategy]",
"tests/test_mail_handler.py::test_mail_handler_batching[ContextEnteringStrategy]",
"tests/test_mail_handler.py::test_mail_handler_batching[PushingStrategy]",
"tests/test_mail_handler.py::test_group_handler_mail_combo[ContextEnteringStrategy]",
"tests/test_mail_handler.py::test_group_handler_mail_combo[PushingStrategy]"
]
| []
| BSD License | 320 | [
"logbook/handlers.py",
"setup.py"
]
| [
"logbook/handlers.py",
"setup.py"
]
|
|
projectmesa__mesa-178 | 57a0beb5947fc16b7b665f297504907e300b043c | 2015-12-04 04:39:32 | 6db9efde7c659b9338fc8cf551f066cdba7031c3 | diff --git a/mesa/space.py b/mesa/space.py
index 5e3a9544..77fe8174 100644
--- a/mesa/space.py
+++ b/mesa/space.py
@@ -24,6 +24,20 @@ X = 0
Y = 1
+def accept_tuple_argument(wrapped_function):
+ '''
+ Decorator to allow grid methods that take a list of (x, y) position tuples
+ to also handle a single position, by automatically wrapping tuple in
+ single-item list rather than forcing user to do it.
+ '''
+ def wrapper(*args):
+ if isinstance(args[1], tuple) and len(args[1]) == 2:
+ return wrapped_function(args[0], [args[1]])
+ else:
+ return wrapped_function(*args)
+ return wrapper
+
+
class Grid(object):
'''
Base class for a square grid.
@@ -238,10 +252,11 @@ class Grid(object):
x, y = pos
return x < 0 or x >= self.width or y < 0 or y >= self.height
+ @accept_tuple_argument
def iter_cell_list_contents(self, cell_list):
'''
Args:
- cell_list: Array-like of (x, y) tuples
+ cell_list: Array-like of (x, y) tuples, or single tuple.
Returns:
A iterator of the contents of the cells identified in cell_list
@@ -249,10 +264,11 @@ class Grid(object):
return (
self[y][x] for x, y in cell_list if not self.is_cell_empty((x, y)))
+ @accept_tuple_argument
def get_cell_list_contents(self, cell_list):
'''
Args:
- cell_list: Array-like of (x, y) tuples
+ cell_list: Array-like of (x, y) tuples, or single tuple.
Returns:
A list of the contents of the cells identified in cell_list
@@ -418,10 +434,11 @@ class MultiGrid(Grid):
x, y = pos
self.grid[y][x].remove(agent)
+ @accept_tuple_argument
def iter_cell_list_contents(self, cell_list):
'''
Args:
- cell_list: Array-like of (x, y) tuples
+ cell_list: Array-like of (x, y) tuples, or single tuple.
Returns:
A iterator of the contents of the cells identified in cell_list
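A self-contained sketch of what the decorator above buys you (the toy class is illustrative and is not mesa's `Grid`):

```python
def accept_tuple_argument(wrapped_function):
    # Wrap a lone (x, y) tuple in a single-item list before delegating.
    def wrapper(*args):
        if isinstance(args[1], tuple) and len(args[1]) == 2:
            return wrapped_function(args[0], [args[1]])
        return wrapped_function(*args)
    return wrapper

class ToyGrid:
    @accept_tuple_argument
    def get_cell_list_contents(self, cell_list):
        return [(x, y) for x, y in cell_list]

grid = ToyGrid()
assert grid.get_cell_list_contents([(3, 4)]) == grid.get_cell_list_contents((3, 4))
```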
| baby patch/tweak: allow cell methods to take single cell
Cf. discussion in #176 -- taking on the small "(TODO: someone should probably fix that...)" from the tutorial re: grid's `get_cell_list_contents` requiring a list of cells even if it's just being passed a single cell. It's a very easy fix; I'm on the case. :-) | projectmesa/mesa | diff --git a/tests/test_grid.py b/tests/test_grid.py
index b558f4d9..c09f0496 100644
--- a/tests/test_grid.py
+++ b/tests/test_grid.py
@@ -53,6 +53,43 @@ class TestBaseGrid(unittest.TestCase):
x, y = agent.pos
assert self.grid[y][x] == agent
+ def test_cell_agent_reporting(self):
+ '''
+ Ensure that if an agent is in a cell, get_cell_list_contents accurately
+ reports that fact.
+ '''
+ for agent in self.agents:
+ x, y = agent.pos
+ assert agent in self.grid.get_cell_list_contents([(x, y)])
+
+ def test_listfree_cell_agent_reporting(self):
+ '''
+ Ensure that if an agent is in a cell, get_cell_list_contents accurately
+ reports that fact, even when single position is not wrapped in a list.
+ '''
+ for agent in self.agents:
+ x, y = agent.pos
+ assert agent in self.grid.get_cell_list_contents((x, y))
+
+ def test_iter_cell_agent_reporting(self):
+ '''
+ Ensure that if an agent is in a cell, iter_cell_list_contents
+ accurately reports that fact.
+ '''
+ for agent in self.agents:
+ x, y = agent.pos
+ assert agent in self.grid.iter_cell_list_contents([(x, y)])
+
+ def test_listfree_iter_cell_agent_reporting(self):
+ '''
+ Ensure that if an agent is in a cell, iter_cell_list_contents
+ accurately reports that fact, even when single position is not
+ wrapped in a list.
+ '''
+ for agent in self.agents:
+ x, y = agent.pos
+ assert agent in self.grid.iter_cell_list_contents((x, y))
+
def test_neighbors(self):
'''
Test the base neighborhood methods on the non-toroid.
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_issue_reference",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 1
} | 0.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"flake8"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup==1.2.2
flake8==7.2.0
iniconfig==2.1.0
mccabe==0.7.0
-e git+https://github.com/projectmesa/mesa.git@57a0beb5947fc16b7b665f297504907e300b043c#egg=Mesa
numpy==2.0.2
packaging==24.2
pandas==2.2.3
pluggy==1.5.0
pycodestyle==2.13.0
pyflakes==3.3.1
pytest==8.3.5
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
six==1.17.0
tomli==2.2.1
tornado==6.4.2
tzdata==2025.2
| name: mesa
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- flake8==7.2.0
- iniconfig==2.1.0
- mccabe==0.7.0
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pytest==8.3.5
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- six==1.17.0
- tomli==2.2.1
- tornado==6.4.2
- tzdata==2025.2
prefix: /opt/conda/envs/mesa
| [
"tests/test_grid.py::TestBaseGrid::test_listfree_cell_agent_reporting",
"tests/test_grid.py::TestBaseGrid::test_listfree_iter_cell_agent_reporting",
"tests/test_grid.py::TestBaseGridTorus::test_listfree_cell_agent_reporting",
"tests/test_grid.py::TestBaseGridTorus::test_listfree_iter_cell_agent_reporting"
]
| []
| [
"tests/test_grid.py::TestBaseGrid::test_agent_positions",
"tests/test_grid.py::TestBaseGrid::test_cell_agent_reporting",
"tests/test_grid.py::TestBaseGrid::test_coord_iter",
"tests/test_grid.py::TestBaseGrid::test_iter_cell_agent_reporting",
"tests/test_grid.py::TestBaseGrid::test_neighbors",
"tests/test_grid.py::TestBaseGridTorus::test_agent_positions",
"tests/test_grid.py::TestBaseGridTorus::test_cell_agent_reporting",
"tests/test_grid.py::TestBaseGridTorus::test_coord_iter",
"tests/test_grid.py::TestBaseGridTorus::test_iter_cell_agent_reporting",
"tests/test_grid.py::TestBaseGridTorus::test_neighbors",
"tests/test_grid.py::TestSingleGrid::test_enforcement",
"tests/test_grid.py::TestMultiGrid::test_agent_positions",
"tests/test_grid.py::TestMultiGrid::test_neighbors"
]
| []
| Apache License 2.0 | 321 | [
"mesa/space.py"
]
| [
"mesa/space.py"
]
|
|
mapbox__mapbox-sdk-py-85 | 06728ffc30fba83003e9c76645ecec3eec1c63de | 2015-12-04 14:13:44 | 06728ffc30fba83003e9c76645ecec3eec1c63de | diff --git a/mapbox/services/base.py b/mapbox/services/base.py
index 4a25402..efd0eeb 100644
--- a/mapbox/services/base.py
+++ b/mapbox/services/base.py
@@ -1,6 +1,8 @@
"""Base Service class"""
import os
+import base64
+import json
import requests
@@ -27,6 +29,20 @@ class Service:
"""A product token for use in User-Agent headers."""
return 'mapbox-sdk-py/{0}'.format(__version__)
+ @property
+ def username(self):
+ """Get username from access token
+ Token contains base64 encoded json object with username"""
+ token = self.session.params['access_token']
+ if not token:
+ raise ValueError("session does not have a valid access_token param")
+ data = token.split('.')[1]
+ data = data.replace('-', '+').replace('_', '/')
+ try:
+ return json.loads(base64.b64decode(data).decode('utf-8'))['u']
+ except (ValueError, KeyError):
+ raise ValueError("access_token does not contain username")
+
def handle_http_error(self, response, custom_messages=None,
raise_for_status=False):
if not custom_messages:
diff --git a/mapbox/services/uploads.py b/mapbox/services/uploads.py
index 47e2451..635a53e 100644
--- a/mapbox/services/uploads.py
+++ b/mapbox/services/uploads.py
@@ -11,7 +11,7 @@ class Uploader(Service):
from mapbox import Uploader
- u = Uploader('username')
+ u = Uploader()
url = u.stage('test.tif')
job = u.create(url, 'test1').json()
@@ -24,8 +24,7 @@ class Uploader(Service):
assert job not in u.list().json()
"""
- def __init__(self, username, access_token=None):
- self.username = username
+ def __init__(self, access_token=None):
self.baseuri = 'https://api.mapbox.com/uploads/v1'
self.session = self.get_session(access_token)
| Get user from token
As in https://github.com/mapbox/mapbox-sdk-js/blob/master/lib/get_user.js. This allows us to eliminate a duplicate parameter when creating dataset and upload service instances. | mapbox/mapbox-sdk-py | diff --git a/tests/test_base.py b/tests/test_base.py
index ff773d7..39a3311 100644
--- a/tests/test_base.py
+++ b/tests/test_base.py
@@ -1,3 +1,6 @@
+import base64
+import os
+
import pytest
import requests
import responses
@@ -60,3 +63,36 @@ def test_custom_messages():
with pytest.raises(requests.exceptions.HTTPError) as exc:
assert service.handle_http_error(response, raise_for_status=True)
assert "401" in exc.value.message
+
+
+class MockService(mapbox.Service):
+ def __init__(self, access_token=None):
+ # In order to get a username, a session must be created on init
+ self.session = self.get_session(access_token)
+
+def test_username(monkeypatch):
+ token = 'pk.{0}.test'.format(base64.b64encode(b'{"u":"testuser"}').decode('utf-8'))
+ service = MockService(access_token=token)
+ assert service.username == 'testuser'
+
+def test_username_failures(monkeypatch):
+ # If your child class doesn't create a session
+ service = mapbox.Service()
+ with pytest.raises(AttributeError) as exc:
+ service.username
+ assert 'session' in exc.value.message
+
+ if 'MAPBOX_ACCESS_TOKEN' in os.environ:
+ monkeypatch.delenv('MAPBOX_ACCESS_TOKEN')
+ service = MockService()
+ with pytest.raises(ValueError) as exc:
+ service.username
+ assert 'access_token' in exc.value.message
+ assert 'param' in exc.value.message
+
+ token = "not.good"
+ service = MockService(access_token=token)
+ with pytest.raises(ValueError) as exc:
+ service.username
+ assert 'access_token' in exc.value.message
+ assert 'username' in exc.value.message
diff --git a/tests/test_upload.py b/tests/test_upload.py
index 252c540..9d92810 100644
--- a/tests/test_upload.py
+++ b/tests/test_upload.py
@@ -1,4 +1,5 @@
import json
+import base64
import responses
@@ -6,6 +7,9 @@ import mapbox
username = 'testuser'
+access_token = 'pk.{0}.test'.format(
+ base64.b64encode(b'{"u":"testuser"}').decode('utf-8'))
+
upload_response_body = """
{{"progress": 0,
"modified": "date.test",
@@ -30,12 +34,12 @@ def test_get_credentials():
responses.add(
responses.GET,
- 'https://api.mapbox.com/uploads/v1/{0}/credentials?access_token=pk.test'.format(username),
+ 'https://api.mapbox.com/uploads/v1/{0}/credentials?access_token={1}'.format(username, access_token),
match_querystring=True,
body=query_body, status=200,
content_type='application/json')
- res = mapbox.Uploader(username, access_token='pk.test')._get_credentials()
+ res = mapbox.Uploader(access_token=access_token)._get_credentials()
assert res.status_code == 200
creds = res.json()
assert username in creds['url']
@@ -48,18 +52,18 @@ def test_get_credentials():
def test_create():
responses.add(
responses.POST,
- 'https://api.mapbox.com/uploads/v1/{0}?access_token=pk.test'.format(username),
+ 'https://api.mapbox.com/uploads/v1/{0}?access_token={1}'.format(username, access_token),
match_querystring=True,
body=upload_response_body, status=201,
content_type='application/json')
- res = mapbox.Uploader(username, access_token='pk.test').create(
+ res = mapbox.Uploader(access_token=access_token).create(
'http://example.com/test.json', 'test1') # without username prefix
assert res.status_code == 201
job = res.json()
assert job['tileset'] == "{0}.test1".format(username)
- res2 = mapbox.Uploader(username, access_token='pk.test').create(
+ res2 = mapbox.Uploader(access_token=access_token).create(
'http://example.com/test.json', 'testuser.test1') # also takes full tileset
assert res2.status_code == 201
job = res2.json()
@@ -86,11 +90,11 @@ def test_create_name():
responses.add_callback(
responses.POST,
- 'https://api.mapbox.com/uploads/v1/{0}?access_token=pk.test'.format(username),
+ 'https://api.mapbox.com/uploads/v1/{0}?access_token={1}'.format(username, access_token),
match_querystring=True,
callback=request_callback)
- res = mapbox.Uploader(username, access_token='pk.test').create(
+ res = mapbox.Uploader(access_token=access_token).create(
'http://example.com/test.json', 'testuser.test1', name="testname")
assert res.status_code == 201
job = res.json()
@@ -101,12 +105,12 @@ def test_create_name():
def test_list():
responses.add(
responses.GET,
- 'https://api.mapbox.com/uploads/v1/{0}?access_token=pk.test'.format(username),
+ 'https://api.mapbox.com/uploads/v1/{0}?access_token={1}'.format(username, access_token),
match_querystring=True,
body="[{0}]".format(upload_response_body), status=200,
content_type='application/json')
- res = mapbox.Uploader(username, access_token='pk.test').list()
+ res = mapbox.Uploader(access_token=access_token).list()
assert res.status_code == 200
uploads = res.json()
assert len(uploads) == 1
@@ -118,12 +122,12 @@ def test_status():
job = json.loads(upload_response_body)
responses.add(
responses.GET,
- 'https://api.mapbox.com/uploads/v1/{0}/{1}?access_token=pk.test'.format(username, job['id']),
+ 'https://api.mapbox.com/uploads/v1/{0}/{1}?access_token={2}'.format(username, job['id'], access_token),
match_querystring=True,
body=upload_response_body, status=200,
content_type='application/json')
- res = mapbox.Uploader(username, access_token='pk.test').status(job)
+ res = mapbox.Uploader(access_token=access_token).status(job)
assert res.status_code == 200
status = res.json()
assert job == status
@@ -134,10 +138,10 @@ def test_delete():
job = json.loads(upload_response_body)
responses.add(
responses.DELETE,
- 'https://api.mapbox.com/uploads/v1/{0}/{1}?access_token=pk.test'.format(username, job['id']),
+ 'https://api.mapbox.com/uploads/v1/{0}/{1}?access_token={2}'.format(username, job['id'], access_token),
match_querystring=True,
body=None, status=204,
content_type='application/json')
- res = mapbox.Uploader(username, access_token='pk.test').delete(job)
+ res = mapbox.Uploader(access_token=access_token).delete(job)
assert res.status_code == 204
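The tests above build their fake token the same way the new `username` property decodes it. A short standalone illustration (the token payload here is fabricated, mirroring the decode steps in the patch):

```python
import base64
import json

token = 'pk.{0}.test'.format(base64.b64encode(b'{"u":"testuser"}').decode('utf-8'))
data = token.split('.')[1].replace('-', '+').replace('_', '/')
print(json.loads(base64.b64decode(data).decode('utf-8'))['u'])  # -> testuser
```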
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 2
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.4",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
boto3==1.23.10
botocore==1.26.10
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
click-plugins==1.1.1
cligj==0.7.2
coverage==6.2
coveralls==3.3.1
distlib==0.3.9
docopt==0.6.2
filelock==3.4.1
idna==3.10
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
jmespath==0.10.0
-e git+https://github.com/mapbox/mapbox-sdk-py.git@06728ffc30fba83003e9c76645ecec3eec1c63de#egg=mapbox
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
requests==2.27.1
responses==0.17.0
s3transfer==0.5.2
six==1.17.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
tox==3.28.0
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
uritemplate==4.1.1
uritemplate.py==3.0.2
urllib3==1.26.20
virtualenv==20.17.1
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: mapbox-sdk-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- boto3==1.23.10
- botocore==1.26.10
- charset-normalizer==2.0.12
- click==8.0.4
- click-plugins==1.1.1
- cligj==0.7.2
- coverage==6.2
- coveralls==3.3.1
- distlib==0.3.9
- docopt==0.6.2
- filelock==3.4.1
- idna==3.10
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- jmespath==0.10.0
- platformdirs==2.4.0
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- requests==2.27.1
- responses==0.17.0
- s3transfer==0.5.2
- six==1.17.0
- tomli==1.2.3
- tox==3.28.0
- uritemplate==4.1.1
- uritemplate-py==3.0.2
- urllib3==1.26.20
- virtualenv==20.17.1
prefix: /opt/conda/envs/mapbox-sdk-py
| [
"tests/test_base.py::test_username",
"tests/test_base.py::test_username_failures",
"tests/test_upload.py::test_get_credentials",
"tests/test_upload.py::test_create",
"tests/test_upload.py::test_create_name",
"tests/test_upload.py::test_list",
"tests/test_upload.py::test_status",
"tests/test_upload.py::test_delete"
]
| []
| [
"tests/test_base.py::test_service_session",
"tests/test_base.py::test_service_session_env",
"tests/test_base.py::test_service_session_os_environ",
"tests/test_base.py::test_service_session_os_environ_caps",
"tests/test_base.py::test_product_token",
"tests/test_base.py::test_user_agent",
"tests/test_base.py::test_custom_messages"
]
| []
| MIT License | 322 | [
"mapbox/services/base.py",
"mapbox/services/uploads.py"
]
| [
"mapbox/services/base.py",
"mapbox/services/uploads.py"
]
|
|
andycasey__ads-36 | 5bd62ef2bf924116374455e222ea9ac8dc416b3a | 2015-12-04 17:01:02 | c039d67c2b2e9dad936758bc89df1fdd1cbd0aa1 | diff --git a/ads/search.py b/ads/search.py
index eb64e70..fed6c0c 100644
--- a/ads/search.py
+++ b/ads/search.py
@@ -275,7 +275,7 @@ class SearchQuery(BaseQuery):
"title", "reference", "citation"]
def __init__(self, query_dict=None, q=None, fq=None, fl=DEFAULT_FIELDS,
- sort=None, start=0, rows=50, max_pages=3, **kwargs):
+ sort=None, start=0, rows=50, max_pages=1, **kwargs):
"""
constructor
:param query_dict: raw query that will be sent unmodified. raw takes
@@ -288,7 +288,7 @@ class SearchQuery(BaseQuery):
:param start: solr "start" param (start)
:param rows: solr "rows" param (rows)
:param max_pages: Maximum number of pages to return. This value may
- be modified after instansiation to increase the number of results
+ be modified after instantiation to increase the number of results
:param kwargs: kwargs to add to `q` as "key:value"
"""
self._articles = []
@@ -385,7 +385,7 @@ class SearchQuery(BaseQuery):
# if we have hit the max_pages limit, then iteration is done.
page = math.ceil(len(self.articles)/self.query['rows'])
- if page > self.max_pages:
+ if page >= self.max_pages:
raise StopIteration("Maximum number of pages queried")
# We aren't on the max_page of results nor do we have all
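The arithmetic behind this one-character fix, assuming the values from the issue below (rows=10), can be sketched as:

```python
import math

rows = 10

def results_returned(max_pages, strict):
    articles = 0
    while True:
        articles += rows                         # fetch one page of results
        page = math.ceil(articles / rows)
        # old check: page > max_pages; new check: page >= max_pages
        if (page > max_pages) if strict else (page >= max_pages):
            return articles

print(results_returned(3, strict=True))    # 40: old default of 3 with old `>` check
print(results_returned(1, strict=False))   # 10: new default of 1 with new `>=` check
```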
| Number of returned results doesn't correspond to 'rows' key value in SearchQuery
Example code:
````
In [5]: papers = ads.SearchQuery(q="supernova", sort="citation_count", rows=10)
In [6]: print(len(list(papers)))
40
````
Not massively important, but a bit surprising anyway. Any explanation? Thanks! | andycasey/ads | diff --git a/ads/tests/test_search.py b/ads/tests/test_search.py
index 346d957..331a9f5 100644
--- a/ads/tests/test_search.py
+++ b/ads/tests/test_search.py
@@ -131,18 +131,24 @@ class TestSearchQuery(unittest.TestCase):
self.assertEqual(len(sq.articles), 1)
self.assertEqual(sq._query['start'], 1)
self.assertEqual(next(sq).bibcode, '2012GCN..13229...1S')
- self.assertEqual(len(list(sq)), 19) # 2 already returned
+ self.assertEqual(len(list(sq)), 18) # 2 already returned
with self.assertRaisesRegexp(
StopIteration,
"Maximum number of pages queried"):
next(sq)
sq.max_pages = 500
- self.assertEqual(len(list(sq)), 28-19-2)
+ self.assertEqual(len(list(sq)), 28-18-2)
with self.assertRaisesRegexp(
StopIteration,
"All records found"):
next(sq)
+ # not setting max_pages should return the exact number of rows requests
+ sq = SearchQuery(q="unittest", rows=3)
+ with MockSolrResponse(sq.HTTP_ENDPOINT):
+ self.assertEqual(len(list(sq)), 3)
+
+
def test_init(self):
"""
init should result in a properly formatted query attribute
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/andycasey/ads.git@5bd62ef2bf924116374455e222ea9ac8dc416b3a#egg=ads
certifi==2025.1.31
charset-normalizer==3.4.1
exceptiongroup==1.2.2
httpretty==1.1.4
idna==3.10
iniconfig==2.1.0
MarkupSafe==3.0.2
mock==5.2.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
requests==2.32.3
six==1.17.0
tomli==2.2.1
urllib3==2.3.0
Werkzeug==3.1.3
| name: ads
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- exceptiongroup==1.2.2
- httpretty==1.1.4
- idna==3.10
- iniconfig==2.1.0
- markupsafe==3.0.2
- mock==5.2.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- requests==2.32.3
- six==1.17.0
- tomli==2.2.1
- urllib3==2.3.0
- werkzeug==3.1.3
prefix: /opt/conda/envs/ads
| [
"ads/tests/test_search.py::TestSearchQuery::test_iter"
]
| []
| [
"ads/tests/test_search.py::TestArticle::test_cached_properties",
"ads/tests/test_search.py::TestArticle::test_equals",
"ads/tests/test_search.py::TestArticle::test_get_field",
"ads/tests/test_search.py::TestArticle::test_init",
"ads/tests/test_search.py::TestArticle::test_print_methods",
"ads/tests/test_search.py::TestSearchQuery::test_init",
"ads/tests/test_search.py::TestSolrResponse::test_articles",
"ads/tests/test_search.py::TestSolrResponse::test_init",
"ads/tests/test_search.py::TestSolrResponse::test_load_http_response",
"ads/tests/test_search.py::Testquery::test_init"
]
| []
| MIT License | 323 | [
"ads/search.py"
]
| [
"ads/search.py"
]
|
|
typesafehub__conductr-cli-90 | 5947a412f5c61a10f5e5f0aa2da85cac2c66faca | 2015-12-08 23:44:42 | 1df5ea26ef321f9aca5cb5cab60c4388baab0ffc | diff --git a/conductr_cli/bundle_installation.py b/conductr_cli/bundle_installation.py
index 885499a..a97e64a 100644
--- a/conductr_cli/bundle_installation.py
+++ b/conductr_cli/bundle_installation.py
@@ -21,29 +21,45 @@ def count_installations(bundle_id, args):
return 0
+def wait_for_uninstallation(bundle_id, args):
+ return wait_for_condition(bundle_id, is_uninstalled, 'uninstalled', args)
+
+
def wait_for_installation(bundle_id, args):
+ return wait_for_condition(bundle_id, is_installed, 'installed', args)
+
+
+def wait_for_condition(bundle_id, condition, condition_name, args):
log = logging.getLogger(__name__)
start_time = datetime.now()
installed_bundles = count_installations(bundle_id, args)
- if installed_bundles > 0:
- log.info('Bundle {} is installed'.format(bundle_id))
+ if condition(installed_bundles):
+ log.info('Bundle {} is {}'.format(bundle_id, condition_name))
return
else:
- log.info('Bundle {} waiting to be installed'.format(bundle_id))
+ log.info('Bundle {} waiting to be {}'.format(bundle_id, condition_name))
bundle_events_url = conduct_url.url('bundles/events', args)
sse_events = sse_client.get_events(bundle_events_url)
for event in sse_events:
elapsed = (datetime.now() - start_time).total_seconds()
if elapsed > args.wait_timeout:
- raise WaitTimeoutError('Bundle {} waiting to be installed'.format(bundle_id))
+ raise WaitTimeoutError('Bundle {} waiting to be {}'.format(bundle_id, condition_name))
if event.event and event.event.startswith('bundleInstallation'):
installed_bundles = count_installations(bundle_id, args)
- if installed_bundles > 0:
- log.info('Bundle {} installed'.format(bundle_id))
+ if condition(installed_bundles):
+ log.info('Bundle {} {}'.format(bundle_id, condition_name))
return
else:
- log.info('Bundle {} still waiting to be installed'.format(bundle_id))
+ log.info('Bundle {} still waiting to be {}'.format(bundle_id, condition_name))
+
+ raise WaitTimeoutError('Bundle {} still waiting to be {}'.format(bundle_id, condition_name))
+
+
+def is_installed(number_of_installations):
+ return number_of_installations > 0
+
- raise WaitTimeoutError('Bundle {} still waiting to be installed'.format(bundle_id))
+def is_uninstalled(number_of_installations):
+ return number_of_installations <= 0
diff --git a/conductr_cli/conduct.py b/conductr_cli/conduct.py
index 7b8c042..1e6f945 100755
--- a/conductr_cli/conduct.py
+++ b/conductr_cli/conduct.py
@@ -205,6 +205,8 @@ def build_parser():
unload_parser.add_argument('bundle',
help='The ID of the bundle')
add_default_arguments(unload_parser)
+ add_wait_timeout(unload_parser)
+ add_no_wait(unload_parser)
unload_parser.set_defaults(func=conduct_unload.unload)
# Sub-parser for `events` sub-command
diff --git a/conductr_cli/conduct_unload.py b/conductr_cli/conduct_unload.py
index 5e1f47a..24f7466 100644
--- a/conductr_cli/conduct_unload.py
+++ b/conductr_cli/conduct_unload.py
@@ -1,4 +1,5 @@
-from conductr_cli import conduct_url, validation
+from conductr_cli import conduct_url, validation, bundle_installation
+import json
import logging
import requests
from conductr_cli.http import DEFAULT_HTTP_TIMEOUT
@@ -19,6 +20,14 @@ def unload(args):
log.verbose(validation.pretty_json(response.text))
log.info('Bundle unload request sent.')
+
+ response_json = json.loads(response.text)
+ if not args.no_wait:
+ bundle_installation.wait_for_uninstallation(response_json['bundleId'], args)
+
log.info('Print ConductR info with: conduct info{}'.format(args.cli_parameters))
+ if not log.is_info_enabled() and log.is_quiet_enabled():
+ log.quiet(response_json['bundleId'])
+
return True
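A condensed sketch of the predicate-based wait the patch above introduces (the event source and count callback below are stand-ins for the ConductR SSE stream and bundle queries, not the real conductr_cli API):

```python
def wait_for_condition(events, count, condition, name):
    # Poll the installation count after each event until the predicate holds,
    # mirroring the is_installed/is_uninstalled split in the patch.
    if condition(count()):
        return 'Bundle is {}'.format(name)
    for _ in events:
        if condition(count()):
            return 'Bundle {}'.format(name)
    raise TimeoutError('still waiting to be {}'.format(name))

counts = iter([1, 1, 0])                 # installation counts over time
print(wait_for_condition(range(2), lambda: next(counts),
                         lambda n: n <= 0, 'uninstalled'))
```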
| Introduce wait behaviour for conduct unload command
The `conduct load`, `conduct run`, and `conduct stop` commands each wait by default and accept a `--no-wait` option.
Implement the same behaviour for the `conduct unload` command to allow scripting of the stop and uninstallation of bundles within ConductR. | typesafehub/conductr-cli | diff --git a/conductr_cli/test/test_bundle_installation.py b/conductr_cli/test/test_bundle_installation.py
index 567b81a..eb32335 100644
--- a/conductr_cli/test/test_bundle_installation.py
+++ b/conductr_cli/test/test_bundle_installation.py
@@ -8,8 +8,14 @@ except ImportError:
from mock import call, patch, MagicMock
+def create_test_event(event_name):
+ sse_mock = MagicMock()
+ sse_mock.event = event_name
+ return sse_mock
+
+
class TestCountInstallation(CliTestCase):
- def test_return_scale(self):
+ def test_return_installation_count(self):
bundles_endpoint_reply = """
[{
"bundleId": "a101449418187d92c789d1adc240b6d6",
@@ -36,7 +42,7 @@ class TestCountInstallation(CliTestCase):
http_method.assert_called_with('http://127.0.0.1:9005/bundles')
- def test_return_scale_v2(self):
+ def test_return_installation_count_v2(self):
bundles_endpoint_reply = """
[{
"bundleId": "a101449418187d92c789d1adc240b6d6",
@@ -63,7 +69,7 @@ class TestCountInstallation(CliTestCase):
http_method.assert_called_with('http://127.0.0.1:9005/v2/bundles')
- def test_return_zero_v1(self):
+ def test_return_zero_installation_count_v1(self):
bundles_endpoint_reply = '[]'
http_method = self.respond_with(text=bundles_endpoint_reply)
@@ -79,7 +85,7 @@ class TestCountInstallation(CliTestCase):
http_method.assert_called_with('http://127.0.0.1:9005/bundles')
- def test_return_zero_v2(self):
+ def test_return_zero_installation_count_v2(self):
bundles_endpoint_reply = '[]'
http_method = self.respond_with(text=bundles_endpoint_reply)
@@ -96,14 +102,14 @@ class TestCountInstallation(CliTestCase):
http_method.assert_called_with('http://127.0.0.1:9005/v2/bundles')
-class TestWaitForScale(CliTestCase):
+class TestWaitForInstallation(CliTestCase):
def test_wait_for_installation(self):
count_installations_mock = MagicMock(side_effect=[0, 1])
url_mock = MagicMock(return_value='/bundle-events/endpoint')
get_events_mock = MagicMock(return_value=[
- self.create_test_event(None),
- self.create_test_event('bundleInstallationAdded'),
- self.create_test_event('bundleInstallationAdded')
+ create_test_event(None),
+ create_test_event('bundleInstallationAdded'),
+ create_test_event('bundleInstallationAdded')
])
stdout = MagicMock()
@@ -124,7 +130,6 @@ class TestWaitForScale(CliTestCase):
])
url_mock.assert_called_with('bundles/events', args)
- self.maxDiff = None
self.assertEqual(strip_margin("""|Bundle a101449418187d92c789d1adc240b6d6 waiting to be installed
|Bundle a101449418187d92c789d1adc240b6d6 installed
@@ -154,9 +159,9 @@ class TestWaitForScale(CliTestCase):
count_installations_mock = MagicMock(side_effect=[0, 1, 1])
url_mock = MagicMock(return_value='/bundle-events/endpoint')
get_events_mock = MagicMock(return_value=[
- self.create_test_event('bundleExecutionAdded'),
- self.create_test_event('bundleExecutionAdded'),
- self.create_test_event('bundleExecutionAdded')
+ create_test_event('bundleInstallationAdded'),
+ create_test_event('bundleInstallationAdded'),
+ create_test_event('bundleInstallationAdded')
])
stdout = MagicMock()
@@ -185,9 +190,9 @@ class TestWaitForScale(CliTestCase):
count_installations_mock = MagicMock(return_value=0)
url_mock = MagicMock(return_value='/bundle-events/endpoint')
get_events_mock = MagicMock(return_value=[
- self.create_test_event('bundleInstallationAdded'),
- self.create_test_event('bundleInstallationAdded'),
- self.create_test_event('bundleInstallationAdded')
+ create_test_event('bundleInstallationAdded'),
+ create_test_event('bundleInstallationAdded'),
+ create_test_event('bundleInstallationAdded')
])
stdout = MagicMock()
@@ -217,7 +222,123 @@ class TestWaitForScale(CliTestCase):
|Bundle a101449418187d92c789d1adc240b6d6 still waiting to be installed
|"""), self.output(stdout))
- def create_test_event(self, event_name):
- sse_mock = MagicMock()
- sse_mock.event = event_name
- return sse_mock
+
+class TestWaitForUninstallation(CliTestCase):
+ def test_wait_for_uninstallation(self):
+ count_installations_mock = MagicMock(side_effect=[1, 0])
+ url_mock = MagicMock(return_value='/bundle-events/endpoint')
+ get_events_mock = MagicMock(return_value=[
+ create_test_event(None),
+ create_test_event('bundleInstallationRemoved'),
+ create_test_event('bundleInstallationRemoved')
+ ])
+
+ stdout = MagicMock()
+
+ bundle_id = 'a101449418187d92c789d1adc240b6d6'
+ args = MagicMock(**{
+ 'wait_timeout': 10
+ })
+ with patch('conductr_cli.conduct_url.url', url_mock), \
+ patch('conductr_cli.bundle_installation.count_installations', count_installations_mock), \
+ patch('conductr_cli.sse_client.get_events', get_events_mock):
+ logging_setup.configure_logging(args, stdout)
+ bundle_installation.wait_for_uninstallation(bundle_id, args)
+
+ self.assertEqual(count_installations_mock.call_args_list, [
+ call(bundle_id, args),
+ call(bundle_id, args)
+ ])
+
+ url_mock.assert_called_with('bundles/events', args)
+
+ self.assertEqual(strip_margin("""|Bundle a101449418187d92c789d1adc240b6d6 waiting to be uninstalled
+ |Bundle a101449418187d92c789d1adc240b6d6 uninstalled
+ |"""), self.output(stdout))
+
+ def test_return_immediately_if_uninstalled(self):
+ count_installations_mock = MagicMock(side_effect=[0])
+
+ stdout = MagicMock()
+
+ bundle_id = 'a101449418187d92c789d1adc240b6d6'
+ args = MagicMock(**{
+ 'wait_timeout': 10
+ })
+ with patch('conductr_cli.bundle_installation.count_installations', count_installations_mock):
+ logging_setup.configure_logging(args, stdout)
+ bundle_installation.wait_for_uninstallation(bundle_id, args)
+
+ self.assertEqual(count_installations_mock.call_args_list, [
+ call(bundle_id, args)
+ ])
+
+ self.assertEqual(strip_margin("""|Bundle a101449418187d92c789d1adc240b6d6 is uninstalled
+ |"""), self.output(stdout))
+
+ def test_wait_timeout(self):
+ count_installations_mock = MagicMock(side_effect=[1, 1, 1])
+ url_mock = MagicMock(return_value='/bundle-events/endpoint')
+ get_events_mock = MagicMock(return_value=[
+ create_test_event('bundleInstallationAdded'),
+ create_test_event('bundleInstallationAdded'),
+ create_test_event('bundleInstallationAdded')
+ ])
+
+ stdout = MagicMock()
+
+ bundle_id = 'a101449418187d92c789d1adc240b6d6'
+ args = MagicMock(**{
+ # Purposely set no timeout to invoke the error
+ 'wait_timeout': 0
+ })
+ with patch('conductr_cli.conduct_url.url', url_mock), \
+ patch('conductr_cli.bundle_installation.count_installations', count_installations_mock), \
+ patch('conductr_cli.sse_client.get_events', get_events_mock):
+ logging_setup.configure_logging(args, stdout)
+ self.assertRaises(WaitTimeoutError, bundle_installation.wait_for_uninstallation, bundle_id, args)
+
+ self.assertEqual(count_installations_mock.call_args_list, [
+ call(bundle_id, args)
+ ])
+
+ url_mock.assert_called_with('bundles/events', args)
+
+ self.assertEqual(strip_margin("""|Bundle a101449418187d92c789d1adc240b6d6 waiting to be uninstalled
+ |"""), self.output(stdout))
+
+ def test_wait_timeout_all_events(self):
+ count_installations_mock = MagicMock(return_value=1)
+ url_mock = MagicMock(return_value='/bundle-events/endpoint')
+ get_events_mock = MagicMock(return_value=[
+ create_test_event('bundleInstallationAdded'),
+ create_test_event('bundleInstallationAdded'),
+ create_test_event('bundleInstallationAdded')
+ ])
+
+ stdout = MagicMock()
+
+ bundle_id = 'a101449418187d92c789d1adc240b6d6'
+ args = MagicMock(**{
+ 'wait_timeout': 10
+ })
+ with patch('conductr_cli.conduct_url.url', url_mock), \
+ patch('conductr_cli.bundle_installation.count_installations', count_installations_mock), \
+ patch('conductr_cli.sse_client.get_events', get_events_mock):
+ logging_setup.configure_logging(args, stdout)
+ self.assertRaises(WaitTimeoutError, bundle_installation.wait_for_uninstallation, bundle_id, args)
+
+ self.assertEqual(count_installations_mock.call_args_list, [
+ call(bundle_id, args),
+ call(bundle_id, args),
+ call(bundle_id, args),
+ call(bundle_id, args)
+ ])
+
+ url_mock.assert_called_with('bundles/events', args)
+
+ self.assertEqual(strip_margin("""|Bundle a101449418187d92c789d1adc240b6d6 waiting to be uninstalled
+ |Bundle a101449418187d92c789d1adc240b6d6 still waiting to be uninstalled
+ |Bundle a101449418187d92c789d1adc240b6d6 still waiting to be uninstalled
+ |Bundle a101449418187d92c789d1adc240b6d6 still waiting to be uninstalled
+ |"""), self.output(stdout))
diff --git a/conductr_cli/test/test_conduct.py b/conductr_cli/test/test_conduct.py
index f0c2141..15df94d 100644
--- a/conductr_cli/test/test_conduct.py
+++ b/conductr_cli/test/test_conduct.py
@@ -60,6 +60,8 @@ class TestConduct(TestCase):
self.assertEqual(args.resolve_cache_dir, '{}/.conductr/cache'.format(os.path.expanduser('~')))
self.assertEqual(args.verbose, False)
self.assertEqual(args.long_ids, False)
+ self.assertEqual(args.no_wait, False)
+ self.assertEqual(args.wait_timeout, 60)
self.assertEqual(args.bundle, 'path-to-bundle')
self.assertEqual(args.configuration, 'path-to-conf')
@@ -74,6 +76,8 @@ class TestConduct(TestCase):
self.assertEqual(args.resolve_cache_dir, '/somewhere')
self.assertEqual(args.verbose, False)
self.assertEqual(args.long_ids, False)
+ self.assertEqual(args.no_wait, False)
+ self.assertEqual(args.wait_timeout, 60)
self.assertEqual(args.bundle, 'path-to-bundle')
self.assertEqual(args.configuration, 'path-to-conf')
self.assertFalse(args.quiet)
@@ -88,6 +92,8 @@ class TestConduct(TestCase):
self.assertEqual(args.cli_settings_dir, '{}/.conductr'.format(os.path.expanduser('~')))
self.assertEqual(args.verbose, False)
self.assertEqual(args.long_ids, False)
+ self.assertEqual(args.no_wait, False)
+ self.assertEqual(args.wait_timeout, 60)
self.assertEqual(args.scale, 5)
self.assertEqual(args.bundle, 'path-to-bundle')
@@ -101,6 +107,8 @@ class TestConduct(TestCase):
self.assertEqual(args.cli_settings_dir, '{}/.conductr'.format(os.path.expanduser('~')))
self.assertEqual(args.verbose, False)
self.assertEqual(args.long_ids, False)
+ self.assertEqual(args.no_wait, False)
+ self.assertEqual(args.wait_timeout, 60)
self.assertEqual(args.bundle, 'path-to-bundle')
def test_parser_unload(self):
@@ -113,6 +121,8 @@ class TestConduct(TestCase):
self.assertEqual(args.cli_settings_dir, '{}/.conductr'.format(os.path.expanduser('~')))
self.assertEqual(args.verbose, False)
self.assertEqual(args.long_ids, False)
+ self.assertEqual(args.no_wait, False)
+ self.assertEqual(args.wait_timeout, 60)
self.assertEqual(args.bundle, 'path-to-bundle')
def test_get_cli_parameters(self):
diff --git a/conductr_cli/test/test_conduct_unload.py b/conductr_cli/test/test_conduct_unload.py
index b2adc98..92f69d4 100644
--- a/conductr_cli/test/test_conduct_unload.py
+++ b/conductr_cli/test/test_conduct_unload.py
@@ -22,6 +22,7 @@ class TestConductUnloadCommand(CliTestCase):
'port': 9005,
'api_version': '1',
'verbose': False,
+ 'no_wait': False,
'quiet': False,
'cli_parameters': '',
'bundle': '45e0c477d3e5ea92aa8d85c0d8f3e25c'
@@ -37,51 +38,101 @@ class TestConductUnloadCommand(CliTestCase):
return strip_margin(self.output_template.format(**{'params': params}))
def test_success(self):
+ wait_for_uninstallation_mock = MagicMock()
http_method = self.respond_with(200, self.default_response)
stdout = MagicMock()
- with patch('requests.delete', http_method):
- logging_setup.configure_logging(MagicMock(**self.default_args), stdout)
- result = conduct_unload.unload(MagicMock(**self.default_args))
+ input_args = MagicMock(**self.default_args)
+ with patch('requests.delete', http_method), \
+ patch('conductr_cli.bundle_installation.wait_for_uninstallation', wait_for_uninstallation_mock):
+ logging_setup.configure_logging(input_args, stdout)
+ result = conduct_unload.unload(input_args)
self.assertTrue(result)
http_method.assert_called_with(self.default_url, timeout=DEFAULT_HTTP_TIMEOUT)
+ wait_for_uninstallation_mock.assert_called_with('45e0c477d3e5ea92aa8d85c0d8f3e25c', input_args)
self.assertEqual(self.default_output(), self.output(stdout))
def test_success_verbose(self):
+ wait_for_uninstallation_mock = MagicMock()
http_method = self.respond_with(200, self.default_response)
stdout = MagicMock()
- with patch('requests.delete', http_method), patch('sys.stdout', stdout):
- args = self.default_args.copy()
- args.update({'verbose': True})
- logging_setup.configure_logging(MagicMock(**args), stdout)
- result = conduct_unload.unload(MagicMock(**args))
+ args = self.default_args.copy()
+ args.update({'verbose': True})
+ input_args = MagicMock(**args)
+
+ with patch('requests.delete', http_method), \
+ patch('conductr_cli.bundle_installation.wait_for_uninstallation', wait_for_uninstallation_mock):
+ logging_setup.configure_logging(input_args, stdout)
+ result = conduct_unload.unload(input_args)
self.assertTrue(result)
http_method.assert_called_with(self.default_url, timeout=DEFAULT_HTTP_TIMEOUT)
+ wait_for_uninstallation_mock.assert_called_with('45e0c477d3e5ea92aa8d85c0d8f3e25c', input_args)
self.assertEqual(self.default_response + self.default_output(), self.output(stdout))
+ def test_success_quiet(self):
+ wait_for_uninstallation_mock = MagicMock()
+ http_method = self.respond_with(200, self.default_response)
+ stdout = MagicMock()
+
+ args = self.default_args.copy()
+ args.update({'quiet': True})
+ input_args = MagicMock(**args)
+
+ with patch('requests.delete', http_method), \
+ patch('conductr_cli.bundle_installation.wait_for_uninstallation', wait_for_uninstallation_mock):
+ logging_setup.configure_logging(input_args, stdout)
+ result = conduct_unload.unload(input_args)
+ self.assertTrue(result)
+
+ http_method.assert_called_with(self.default_url, timeout=DEFAULT_HTTP_TIMEOUT)
+ wait_for_uninstallation_mock.assert_called_with('45e0c477d3e5ea92aa8d85c0d8f3e25c', input_args)
+
+ self.assertEqual('45e0c477d3e5ea92aa8d85c0d8f3e25c\n', self.output(stdout))
+
def test_success_with_configuration(self):
+ wait_for_uninstallation_mock = MagicMock()
http_method = self.respond_with(200, self.default_response)
stdout = MagicMock()
+ args = self.default_args.copy()
cli_parameters = ' --ip 127.0.1.1 --port 9006'
- with patch('requests.delete', http_method):
- args = self.default_args.copy()
- args.update({'cli_parameters': cli_parameters})
- logging_setup.configure_logging(MagicMock(**args), stdout)
- result = conduct_unload.unload(MagicMock(**args))
+ args.update({'cli_parameters': cli_parameters})
+ input_args = MagicMock(**args)
+
+ with patch('requests.delete', http_method), \
+ patch('conductr_cli.bundle_installation.wait_for_uninstallation', wait_for_uninstallation_mock):
+ logging_setup.configure_logging(input_args, stdout)
+ result = conduct_unload.unload(input_args)
self.assertTrue(result)
http_method.assert_called_with(self.default_url, timeout=DEFAULT_HTTP_TIMEOUT)
+ wait_for_uninstallation_mock.assert_called_with('45e0c477d3e5ea92aa8d85c0d8f3e25c', input_args)
self.assertEqual(
self.default_output(params=cli_parameters),
self.output(stdout))
+ def test_success_no_wait(self):
+ http_method = self.respond_with(200, self.default_response)
+ stdout = MagicMock()
+
+ args = self.default_args.copy()
+ args.update({'no_wait': True})
+ input_args = MagicMock(**args)
+ with patch('requests.delete', http_method):
+ logging_setup.configure_logging(input_args, stdout)
+ result = conduct_unload.unload(input_args)
+ self.assertTrue(result)
+
+ http_method.assert_called_with(self.default_url, timeout=DEFAULT_HTTP_TIMEOUT)
+
+ self.assertEqual(self.default_output(), self.output(stdout))
+
def test_failure(self):
http_method = self.respond_with(404)
stderr = MagicMock()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 3
} | 0.21 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"flake8",
"pep8-naming",
"flake8-quotes"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | argcomplete==3.6.1
arrow==1.3.0
certifi==2025.1.31
charset-normalizer==3.4.1
-e git+https://github.com/typesafehub/conductr-cli.git@5947a412f5c61a10f5e5f0aa2da85cac2c66faca#egg=conductr_cli
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
flake8==7.2.0
flake8-quotes==3.4.0
idna==3.10
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
mccabe==0.7.0
packaging @ file:///croot/packaging_1734472117206/work
pep8-naming==0.14.1
pluggy @ file:///croot/pluggy_1733169602837/work
pycodestyle==2.13.0
pyflakes==3.3.1
pyhocon==0.2.1
pyparsing==2.0.3
pytest @ file:///croot/pytest_1738938843180/work
python-dateutil==2.9.0.post0
requests==2.32.3
six==1.17.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
types-python-dateutil==2.9.0.20241206
urllib3==2.3.0
| name: conductr-cli
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- argcomplete==3.6.1
- arrow==1.3.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- flake8==7.2.0
- flake8-quotes==3.4.0
- idna==3.10
- mccabe==0.7.0
- pep8-naming==0.14.1
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pyhocon==0.2.1
- pyparsing==2.0.3
- python-dateutil==2.9.0.post0
- requests==2.32.3
- six==1.17.0
- types-python-dateutil==2.9.0.20241206
- urllib3==2.3.0
prefix: /opt/conda/envs/conductr-cli
| [
"conductr_cli/test/test_bundle_installation.py::TestWaitForUninstallation::test_return_immediately_if_uninstalled",
"conductr_cli/test/test_bundle_installation.py::TestWaitForUninstallation::test_wait_for_uninstallation",
"conductr_cli/test/test_bundle_installation.py::TestWaitForUninstallation::test_wait_timeout",
"conductr_cli/test/test_bundle_installation.py::TestWaitForUninstallation::test_wait_timeout_all_events",
"conductr_cli/test/test_conduct.py::TestConduct::test_parser_unload",
"conductr_cli/test/test_conduct_unload.py::TestConductUnloadCommand::test_success",
"conductr_cli/test/test_conduct_unload.py::TestConductUnloadCommand::test_success_quiet",
"conductr_cli/test/test_conduct_unload.py::TestConductUnloadCommand::test_success_verbose",
"conductr_cli/test/test_conduct_unload.py::TestConductUnloadCommand::test_success_with_configuration"
]
| []
| [
"conductr_cli/test/test_bundle_installation.py::TestCountInstallation::test_return_installation_count",
"conductr_cli/test/test_bundle_installation.py::TestCountInstallation::test_return_installation_count_v2",
"conductr_cli/test/test_bundle_installation.py::TestCountInstallation::test_return_zero_installation_count_v1",
"conductr_cli/test/test_bundle_installation.py::TestCountInstallation::test_return_zero_installation_count_v2",
"conductr_cli/test/test_bundle_installation.py::TestWaitForInstallation::test_return_immediately_if_installed",
"conductr_cli/test/test_bundle_installation.py::TestWaitForInstallation::test_wait_for_installation",
"conductr_cli/test/test_bundle_installation.py::TestWaitForInstallation::test_wait_timeout",
"conductr_cli/test/test_bundle_installation.py::TestWaitForInstallation::test_wait_timeout_all_events",
"conductr_cli/test/test_conduct.py::TestConduct::test_default",
"conductr_cli/test/test_conduct.py::TestConduct::test_get_cli_parameters",
"conductr_cli/test/test_conduct.py::TestConduct::test_parser_info",
"conductr_cli/test/test_conduct.py::TestConduct::test_parser_load",
"conductr_cli/test/test_conduct.py::TestConduct::test_parser_load_with_custom_resolve_cache_dir",
"conductr_cli/test/test_conduct.py::TestConduct::test_parser_run",
"conductr_cli/test/test_conduct.py::TestConduct::test_parser_services",
"conductr_cli/test/test_conduct.py::TestConduct::test_parser_stop",
"conductr_cli/test/test_conduct.py::TestConduct::test_parser_version",
"conductr_cli/test/test_conduct_unload.py::TestConductUnloadCommand::test_failure",
"conductr_cli/test/test_conduct_unload.py::TestConductUnloadCommand::test_failure_invalid_address",
"conductr_cli/test/test_conduct_unload.py::TestConductUnloadCommand::test_success_no_wait"
]
| []
| Apache License 2.0 | 326 | [
"conductr_cli/bundle_installation.py",
"conductr_cli/conduct_unload.py",
"conductr_cli/conduct.py"
]
| [
"conductr_cli/bundle_installation.py",
"conductr_cli/conduct_unload.py",
"conductr_cli/conduct.py"
]
|
|
mapbox__mapbox-sdk-py-91 | c45e856d0640c6d9b1750446fed58cb882912edb | 2015-12-09 15:26:44 | c45e856d0640c6d9b1750446fed58cb882912edb | diff --git a/docs/mapmatching.md b/docs/mapmatching.md
new file mode 100644
index 0000000..f404714
--- /dev/null
+++ b/docs/mapmatching.md
@@ -0,0 +1,89 @@
+# Map Matching
+
+The `MapMatcher` class from the `mapbox.services.mapmatching` module provides
+access to the Mapbox Map Matching API. You can also import it directly from the
+`mapbox` module.
+
+```python
+>>> from mapbox import MapMatcher
+
+```
+
+See https://www.mapbox.com/developers/api/map-matching/ for general documentation
+of the API.
+
+Your Mapbox access token should be set in your environment; see the [access
+tokens](access_tokens.md) documentation for more information.
+
+## MapMatcher methods
+
+The methods of the `MapMatcher` class return an instance of
+[`requests.Response`](http://docs.python-requests.org/en/latest/api/#requests.Response).
+
+In addition to the `json()` method that returns Python data parsed from the
+API, the responses provide a `geojson()` method that converts that
+data to a GeoJSON-like form.
+
+## Usage
+
+The Mapbox Map Matching API lets you take recorded GPS traces and snap them to the OpenStreetMap road and path network. This is helpful for aligning noisy traces and displaying them cleanly on a map.
+
+The Map Matching API is limited to 60 requests per minute and results must be displayed on a Mapbox map using one of our SDKs. For high volume or other use cases, contact us.
+
+
+```python
+>>> service = MapMatcher()
+
+```
+
+The input data to the Map Matcher must be a single GeoJSON-like Feature with a LineString geometry.
+The optional `coordTimes` property should be an array of timestamps, the same
+length as the coordinates, to help make the matching more accurate.
+
+```python
+>>> line = {
+... "type": "Feature",
+... "properties": {
+... "coordTimes": [
+... "2015-04-21T06:00:00Z",
+... "2015-04-21T06:00:05Z",
+... "2015-04-21T06:00:10Z",
+... "2015-04-21T06:00:15Z",
+... "2015-04-21T06:00:20Z"]},
+... "geometry": {
+... "type": "LineString",
+... "coordinates": [
+... [13.418946862220764, 52.50055852688439],
+... [13.419011235237122, 52.50113000479732],
+... [13.419756889343262, 52.50171780290061],
+... [13.419885635375975, 52.50237416816131],
+... [13.420631289482117, 52.50294888790448]]}}
+
+```
+
+Use the `match()` method to snap a recorded trace to the road network.
+
+```python
+>>> response = service.match(line, profile='mapbox.driving')
+>>> response.status_code
+200
+>>> response.headers['Content-Type']
+'application/json; charset=utf-8'
+
+```
+
+The response geojson contains a FeatureCollection with a single feature,
+with the new LineString corrected to match segments from the selected profile.
+
+```python
+>>> corrected = response.geojson()['features'][0]
+>>> corrected['geometry']['type']
+'LineString'
+>>> corrected['geometry'] == line['geometry']
+False
+>>> len(corrected['geometry']) == len(line['geometry'])
+True
+
+```
+
+See ``import mapbox; help(mapbox.MapMatcher)`` for more detailed usage.
diff --git a/mapbox/__init__.py b/mapbox/__init__.py
index e4d15aa..0c25b2b 100644
--- a/mapbox/__init__.py
+++ b/mapbox/__init__.py
@@ -5,6 +5,7 @@ __version__ = "0.5.0"
from .services.directions import Directions
from .services.distance import Distance
from .services.geocoding import Geocoder, InvalidPlaceTypeError
+from .services.mapmatching import MapMatcher
from .services.surface import Surface
-from .services.uploads import Uploader
from .services.static import Static
+from .services.uploads import Uploader
diff --git a/mapbox/services/mapmatching.py b/mapbox/services/mapmatching.py
new file mode 100644
index 0000000..3dd5d92
--- /dev/null
+++ b/mapbox/services/mapmatching.py
@@ -0,0 +1,37 @@
+import json
+
+from uritemplate import URITemplate
+
+from mapbox.services.base import Service
+
+
+class MapMatcher(Service):
+
+ def __init__(self, access_token=None):
+ self.baseuri = 'https://api.mapbox.com/matching/v4'
+ self.session = self.get_session(access_token)
+
+ def _validate_profile(self, profile):
+ valid_profiles = ['mapbox.driving', 'mapbox.cycling', 'mapbox.walking']
+ if profile not in valid_profiles:
+ raise ValueError("{} is not a valid profile".format(profile))
+ return profile
+
+ def match(self, feature, profile='mapbox.driving'):
+ profile = self._validate_profile(profile)
+ # todo validate single feature with linestring geometry up to 100 pts
+ geojson_line_feature = json.dumps(feature)
+
+ uri = URITemplate('%s/{profile}.json' % self.baseuri).expand(
+ profile=profile)
+
+ res = self.session.post(uri, data=geojson_line_feature,
+ headers={'Content-Type': 'application/json'})
+ self.handle_http_error(res)
+
+ def geojson():
+ return res.json()
+
+ res.geojson = geojson
+
+ return res
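For readers wiring this up, a minimal usage sketch of the service added above (a sketch only: the access token is a placeholder and the trace is trimmed to two points from the docs):

```python
from mapbox import MapMatcher

# minimal two-point trace; real traces come from GPS logs
line = {
    "type": "Feature",
    "properties": {},
    "geometry": {
        "type": "LineString",
        "coordinates": [
            [13.418946862220764, 52.50055852688439],
            [13.420631289482117, 52.50294888790448]]}}

service = MapMatcher(access_token='pk.test')  # placeholder token
response = service.match(line, profile='mapbox.driving')
matched = response.geojson()['features'][0]  # geojson() is attached by match()
```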
| Map Matching API | mapbox/mapbox-sdk-py | diff --git a/tests/test_mapmatching.py b/tests/test_mapmatching.py
new file mode 100644
index 0000000..2277866
--- /dev/null
+++ b/tests/test_mapmatching.py
@@ -0,0 +1,47 @@
+import pytest
+import responses
+
+import mapbox
+
+@pytest.fixture
+def line_feature():
+ return {
+ "type": "Feature",
+ "properties": {
+ "coordTimes": [
+ "2015-04-21T06:00:00Z",
+ "2015-04-21T06:00:05Z",
+ "2015-04-21T06:00:10Z",
+ "2015-04-21T06:00:15Z",
+ "2015-04-21T06:00:20Z"]},
+ "geometry": {
+ "type": "LineString",
+ "coordinates": [
+ [13.418946862220764, 52.50055852688439],
+ [13.419011235237122, 52.50113000479732],
+ [13.419756889343262, 52.50171780290061],
+ [13.419885635375975, 52.50237416816131],
+ [13.420631289482117, 52.50294888790448]]}}
+
+
+@responses.activate
+def test_matching(line_feature):
+
+ body = '{"type":"FeatureCollection","features":[{"type":"Feature","properties":{"confidence":0.8165504318718629,"matchedPoints":[[13.418805122375488,52.5005989074707],[13.419145584106445,52.501094818115234],[13.419618606567383,52.50175094604492],[13.420042037963867,52.50233459472656],[13.420494079589844,52.50298309326172]],"indices":[0,1,2,3,4]},"geometry":{"type":"LineString","coordinates":[[13.418805,52.500599],[13.418851,52.500659],[13.419121,52.501057],[13.419146,52.501095],[13.419276,52.501286],[13.419446,52.501518],[13.419619,52.501753],[13.419981,52.502249],[13.420042,52.502335],[13.420494,52.502984]]}}]}'
+
+ responses.add(
+ responses.POST,
+ 'https://api.mapbox.com/matching/v4/mapbox.driving.json?access_token=pk.test',
+ match_querystring=True,
+ body=body, status=200,
+ content_type='application/json')
+
+ service = mapbox.MapMatcher(access_token='pk.test')
+ res = service.match(line_feature)
+ assert res.status_code == 200
+
+
+def test_invalid_profile(line_feature):
+ service = mapbox.MapMatcher(access_token='pk.test')
+ with pytest.raises(ValueError):
+ service.match(line_feature, profile="covered_wagon")
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 0
},
"num_modified_files": 1
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": null,
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
boto3==1.23.10
botocore==1.26.10
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
click-plugins==1.1.1
cligj==0.7.2
coverage==6.2
coveralls==3.3.1
distlib==0.3.9
docopt==0.6.2
filelock==3.4.1
idna==3.10
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
jmespath==0.10.0
-e git+https://github.com/mapbox/mapbox-sdk-py.git@c45e856d0640c6d9b1750446fed58cb882912edb#egg=mapbox
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
requests==2.27.1
responses==0.17.0
s3transfer==0.5.2
six==1.17.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
tox==3.28.0
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
uritemplate==4.1.1
uritemplate.py==3.0.2
urllib3==1.26.20
virtualenv==20.17.1
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: mapbox-sdk-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- boto3==1.23.10
- botocore==1.26.10
- charset-normalizer==2.0.12
- click==8.0.4
- click-plugins==1.1.1
- cligj==0.7.2
- coverage==6.2
- coveralls==3.3.1
- distlib==0.3.9
- docopt==0.6.2
- filelock==3.4.1
- idna==3.10
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- jmespath==0.10.0
- platformdirs==2.4.0
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- requests==2.27.1
- responses==0.17.0
- s3transfer==0.5.2
- six==1.17.0
- tomli==1.2.3
- tox==3.28.0
- uritemplate==4.1.1
- uritemplate-py==3.0.2
- urllib3==1.26.20
- virtualenv==20.17.1
prefix: /opt/conda/envs/mapbox-sdk-py
| [
"tests/test_mapmatching.py::test_matching",
"tests/test_mapmatching.py::test_invalid_profile"
]
| []
| []
| []
| MIT License | 327 | [
"docs/mapmatching.md",
"mapbox/__init__.py",
"mapbox/services/mapmatching.py"
]
| [
"docs/mapmatching.md",
"mapbox/__init__.py",
"mapbox/services/mapmatching.py"
]
|
|
joblib__joblib-283 | c60d263fcc71ba9f4532010b732cde42e437039b | 2015-12-10 15:14:21 | 40341615cc2600675ce7457d9128fb030f6f89fa | lesteve: > inspect.getfullargspec is marked as deprecated since 3.5, why not directly use a compatibility function called signature and use the one from python >= 3.4.
Yeah I am aware of that but I wanted to do the smallest change to make joblib.Memory support functions with signature annotations or keyword-only arguments. scikit-learn went the way you mentioned and ended up having to backport OrderedDict in order to support python 2.6.
aabadie: @lesteve, to me your change is an improvement and I'm ok to merge it. But if you have time and think it's worth reusing the scikit-learn strategy, then go ahead ;)
lesteve: > @lesteve, to me your change is an improvement and I'm ok to merge it. But if you have time and think it's worth reusing the scikit-learn strategy, then go ahead ;)
I am inclined to go with this simpler strategy for now. The only thing I want to look at is tackling this comment from above:
> At the moment I am not doing any checks to see whether the keyword-only arguments were indeed passed as keywords. Maybe I should since there is some checks for the number of arguments in this function already.
lesteve: Right, I fixed the case of a keyword-only argument being passed as a positional parameter in joblib.func_inspect.filter_args. As you would expect, the snippet above now raises
```
ValueError: Keyword-only parameter 'kw1' was passed as positional parameter for func_with_kwonly_args(a, b, *, kw1='kw1', kw2='kw2'):
func_with_kwonly_args(1, 2, 3, kw2=4) was called.
```
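For reference, a minimal end-to-end way to trigger it (a sketch; the cache directory is a throwaway path):

```
from joblib import Memory

mem = Memory(cachedir='/tmp/joblib-kwonly-demo', verbose=0)  # throwaway dir

@mem.cache
def func_with_kwonly_args(a, b, *, kw1='kw1', kw2='kw2'):
    return a, b, kw1, kw2

func_with_kwonly_args(1, 2, kw1=3)     # fine, result is cached
func_with_kwonly_args(1, 2, 3, kw2=4)  # raises the ValueError above
```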
Any more comments ?
aabadie: @lesteve, @ogrisel, could this one be merged ?
ogrisel: Besides minor comments, LGTM. I can do the cosmetic changes when merging if you wish. | diff --git a/joblib/func_inspect.py b/joblib/func_inspect.py
index cc5cbf6..3eee40b 100644
--- a/joblib/func_inspect.py
+++ b/joblib/func_inspect.py
@@ -11,11 +11,15 @@ import inspect
import warnings
import re
import os
-
+import sys
from ._compat import _basestring
from .logger import pformat
from ._memory_helpers import open_py_source
+
+PY3 = sys.version_info[0] >= 3
+
+
def get_func_code(func):
""" Attempts to retrieve a reliable function code hash.
@@ -156,6 +160,53 @@ def get_func_name(func, resolv_alias=True, win_characters=True):
return module, name
+def getfullargspec(func):
+ """Compatibility function to provide inspect.getfullargspec in Python 2
+
+ This should be rewritten using a backport of Python 3 signature
+ once we drop support for Python 2.6. We went for a simpler
+ approach at the time of writing because signature uses OrderedDict
+ which is not available in Python 2.6.
+ """
+ try:
+ return inspect.getfullargspec(func)
+ except AttributeError:
+ arg_spec = inspect.getargspec(func)
+ import collections
+ tuple_fields = ('args varargs varkw defaults kwonlyargs '
+ 'kwonlydefaults annotations')
+ tuple_type = collections.namedtuple('FullArgSpec', tuple_fields)
+
+ return tuple_type(args=arg_spec.args,
+ varargs=arg_spec.varargs,
+ varkw=arg_spec.keywords,
+ defaults=arg_spec.defaults,
+ kwonlyargs=[],
+ kwonlydefaults=None,
+ annotations={})
+
+
+def _signature_str(function_name, arg_spec):
+ """Helper function to output a function signature"""
+ # inspect.formatargspec can not deal with the same
+ # number of arguments in python 2 and 3
+ arg_spec_for_format = arg_spec[:7 if PY3 else 4]
+
+ arg_spec_str = inspect.formatargspec(*arg_spec_for_format)
+ return '{0}{1}'.format(function_name, arg_spec_str)
+
+
+def _function_called_str(function_name, args, kwargs):
+ """Helper function to output a function call"""
+ template_str = '{0}({1}, {2})'
+
+ args_str = repr(args)[1:-1]
+ kwargs_str = ', '.join('%s=%s' % (k, v)
+ for k, v in kwargs.items())
+ return template_str.format(function_name, args_str,
+ kwargs_str)
+
+
def filter_args(func, ignore_lst, args=(), kwargs=dict()):
""" Filters the given args and kwargs using a list of arguments to
ignore, and a function specification.
@@ -180,19 +231,22 @@ def filter_args(func, ignore_lst, args=(), kwargs=dict()):
args = list(args)
if isinstance(ignore_lst, _basestring):
# Catch a common mistake
- raise ValueError('ignore_lst must be a list of parameters to ignore '
+ raise ValueError(
+ 'ignore_lst must be a list of parameters to ignore '
'%s (type %s) was given' % (ignore_lst, type(ignore_lst)))
# Special case for functools.partial objects
if (not inspect.ismethod(func) and not inspect.isfunction(func)):
if ignore_lst:
warnings.warn('Cannot inspect object %s, ignore list will '
- 'not work.' % func, stacklevel=2)
+ 'not work.' % func, stacklevel=2)
return {'*': args, '**': kwargs}
- arg_spec = inspect.getargspec(func)
- arg_names = arg_spec.args
- arg_defaults = arg_spec.defaults or {}
- arg_keywords = arg_spec.keywords
+ arg_spec = getfullargspec(func)
+ arg_names = arg_spec.args + arg_spec.kwonlyargs
+ arg_defaults = arg_spec.defaults or ()
+ arg_defaults = arg_defaults + tuple(arg_spec.kwonlydefaults[k]
+ for k in arg_spec.kwonlyargs)
arg_varargs = arg_spec.varargs
+ arg_varkw = arg_spec.varkw
if inspect.ismethod(func):
# First argument is 'self', it has been removed by Python
@@ -207,7 +261,18 @@ def filter_args(func, ignore_lst, args=(), kwargs=dict()):
for arg_position, arg_name in enumerate(arg_names):
if arg_position < len(args):
# Positional argument or keyword argument given as positional
- arg_dict[arg_name] = args[arg_position]
+ if arg_name not in arg_spec.kwonlyargs:
+ arg_dict[arg_name] = args[arg_position]
+ else:
+ raise ValueError(
+ "Keyword-only parameter '%s' was passed as "
+ 'positional parameter for %s:\n'
+ ' %s was called.'
+ % (arg_name,
+ _signature_str(name, arg_spec),
+ _function_called_str(name, args, kwargs))
+ )
+
else:
position = arg_position - len(arg_names)
if arg_name in kwargs:
@@ -217,28 +282,24 @@ def filter_args(func, ignore_lst, args=(), kwargs=dict()):
arg_dict[arg_name] = arg_defaults[position]
except (IndexError, KeyError):
# Missing argument
- raise ValueError('Wrong number of arguments for %s%s:\n'
- ' %s(%s, %s) was called.'
- % (name,
- inspect.formatargspec(*inspect.getargspec(func)),
- name,
- repr(args)[1:-1],
- ', '.join('%s=%s' % (k, v)
- for k, v in kwargs.items())
- )
- )
+ raise ValueError(
+ 'Wrong number of arguments for %s:\n'
+ ' %s was called.'
+ % (_signature_str(name, arg_spec),
+ _function_called_str(name, args, kwargs))
+ )
varkwargs = dict()
for arg_name, arg_value in sorted(kwargs.items()):
if arg_name in arg_dict:
arg_dict[arg_name] = arg_value
- elif arg_keywords is not None:
+ elif arg_varkw is not None:
varkwargs[arg_name] = arg_value
else:
raise TypeError("Ignore list for %s() contains an unexpected "
"keyword argument '%s'" % (name, arg_name))
- if arg_keywords is not None:
+ if arg_varkw is not None:
arg_dict['**'] = varkwargs
if arg_varargs is not None:
varargs = args[arg_position + 1:]
@@ -250,13 +311,10 @@ def filter_args(func, ignore_lst, args=(), kwargs=dict()):
arg_dict.pop(item)
else:
raise ValueError("Ignore list: argument '%s' is not defined for "
- "function %s%s" %
- (item, name,
- inspect.formatargspec(arg_names,
- arg_varargs,
- arg_keywords,
- arg_defaults,
- )))
+ "function %s"
+ % (item,
+ _signature_str(name, arg_spec))
+ )
# XXX: Return a sorted list of pairs?
return arg_dict
| Replace deprecated usage of inspect.getargspec
inspect.getargspec has been deprecated since 3.0 (in favor of inspect.getfullargspec until 3.2 and in favor of inspect.signature since 3.3) and will be removed in 3.6. It also emits a visible DeprecationWarning in 3.5.
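The usual compatibility pattern, as a minimal sketch (the actual shim also needs to repackage the Python 2 result with empty kwonlyargs/annotations fields so callers see one shape):

```python
import inspect

try:
    # Python 3: also reports keyword-only arguments and annotations
    getfullargspec = inspect.getfullargspec
except AttributeError:
    # Python 2 fallback: plain getargspec, fewer fields
    getfullargspec = inspect.getargspec
```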
| joblib/joblib | diff --git a/joblib/test/test_func_inspect.py b/joblib/test/test_func_inspect.py
index 15c6b43..62920e9 100644
--- a/joblib/test/test_func_inspect.py
+++ b/joblib/test/test_func_inspect.py
@@ -11,12 +11,15 @@ import shutil
import nose
import tempfile
import functools
+import sys
from joblib.func_inspect import filter_args, get_func_name, get_func_code
from joblib.func_inspect import _clean_win_chars, format_signature
from joblib.memory import Memory
from joblib.test.common import with_numpy
+from joblib.testing import assert_raises_regex
+PY3 = sys.version_info[0] >= 3
###############################################################################
# Module-level functions, for tests
@@ -165,6 +168,37 @@ def test_func_inspect_errors():
__file__.replace('.pyc', '.py'))
+if PY3:
+ exec("""
+def func_with_kwonly_args(a, b, *, kw1='kw1', kw2='kw2'): pass
+
+def func_with_signature(a: int, b: int) -> None: pass
+""")
+
+ def test_filter_args_python_3():
+ nose.tools.assert_equal(
+ filter_args(func_with_kwonly_args,
+ [], (1, 2), {'kw1': 3, 'kw2': 4}),
+ {'a': 1, 'b': 2, 'kw1': 3, 'kw2': 4})
+
+ # filter_args doesn't care about keyword-only arguments so you
+ # can pass 'kw1' into *args without any problem
+ assert_raises_regex(
+ ValueError,
+ "Keyword-only parameter 'kw1' was passed as positional parameter",
+ filter_args,
+ func_with_kwonly_args, [], (1, 2, 3), {'kw2': 2})
+
+ nose.tools.assert_equal(
+ filter_args(func_with_kwonly_args, ['b', 'kw2'], (1, 2),
+ {'kw1': 3, 'kw2': 4}),
+ {'a': 1, 'kw1': 3})
+
+ nose.tools.assert_equal(
+ filter_args(func_with_signature, ['b'], (1, 2)),
+ {'a': 1})
+
+
def test_bound_methods():
""" Make sure that calling the same method on two different instances
of the same class does resolv to different signatures.
diff --git a/joblib/test/test_memory.py b/joblib/test/test_memory.py
index 14e319f..5f0bbfd 100644
--- a/joblib/test/test_memory.py
+++ b/joblib/test/test_memory.py
@@ -21,7 +21,9 @@ import nose
from joblib.memory import Memory, MemorizedFunc, NotMemorizedFunc, MemorizedResult
from joblib.memory import NotMemorizedResult, _FUNCTION_HASHES
from joblib.test.common import with_numpy, np
+from joblib.testing import assert_raises_regex
+PY3 = sys.version_info[0] >= 3
###############################################################################
# Module-level variables for the tests
@@ -676,3 +678,48 @@ def test_memory_in_memory_function_code_change():
def test_clear_memory_with_none_cachedir():
mem = Memory(cachedir=None)
mem.clear()
+
+if PY3:
+ exec("""
+def func_with_kwonly_args(a, b, *, kw1='kw1', kw2='kw2'):
+ return a, b, kw1, kw2
+
+def func_with_signature(a: int, b: float) -> float:
+ return a + b
+""")
+
+ def test_memory_func_with_kwonly_args():
+ mem = Memory(cachedir=env['dir'], verbose=0)
+ func_cached = mem.cache(func_with_kwonly_args)
+
+ nose.tools.assert_equal(func_cached(1, 2, kw1=3), (1, 2, 3, 'kw2'))
+
+ # Making sure that providing a keyword-only argument by
+ # position raises an exception
+ assert_raises_regex(
+ ValueError,
+ "Keyword-only parameter 'kw1' was passed as positional parameter",
+ func_cached,
+ 1, 2, 3, {'kw2': 4})
+
+ # Keyword-only parameter passed by position with cached call
+ # should still raise ValueError
+ func_cached(1, 2, kw1=3, kw2=4)
+
+ assert_raises_regex(
+ ValueError,
+ "Keyword-only parameter 'kw1' was passed as positional parameter",
+ func_cached,
+ 1, 2, 3, {'kw2': 4})
+
+ # Test 'ignore' parameter
+ func_cached = mem.cache(func_with_kwonly_args, ignore=['kw2'])
+ nose.tools.assert_equal(func_cached(1, 2, kw1=3, kw2=4), (1, 2, 3, 4))
+ nose.tools.assert_equal(func_cached(1, 2, kw1=3, kw2='ignored'), (1, 2, 3, 4))
+
+
+ def test_memory_func_with_signature():
+ mem = Memory(cachedir=env['dir'], verbose=0)
+ func_cached = mem.cache(func_with_signature)
+
+ nose.tools.assert_equal(func_cached(1, 2.), 3.)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"coverage",
"pytest"
],
"pre_install": null,
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
coverage==6.2
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/joblib/joblib.git@c60d263fcc71ba9f4532010b732cde42e437039b#egg=joblib
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
nose==1.3.7
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: joblib
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==6.2
- nose==1.3.7
prefix: /opt/conda/envs/joblib
| [
"joblib/test/test_func_inspect.py::test_filter_args_python_3",
"joblib/test/test_memory.py::test_memory_func_with_kwonly_args",
"joblib/test/test_memory.py::test_memory_func_with_signature"
]
| []
| [
"joblib/test/test_func_inspect.py::test_filter_args_method",
"joblib/test/test_func_inspect.py::test_filter_kwargs",
"joblib/test/test_func_inspect.py::test_filter_args_2",
"joblib/test/test_func_inspect.py::test_func_inspect_errors",
"joblib/test/test_func_inspect.py::test_bound_methods",
"joblib/test/test_func_inspect.py::test_filter_args_error_msg",
"joblib/test/test_func_inspect.py::test_clean_win_chars",
"joblib/test/test_func_inspect.py::test_format_signature",
"joblib/test/test_func_inspect.py::test_special_source_encoding",
"joblib/test/test_func_inspect.py::test_func_code_consistency",
"joblib/test/test_memory.py::test_memory_warning_lambda_collisions",
"joblib/test/test_memory.py::test_argument_change",
"joblib/test/test_memory.py::test_call_and_shelve",
"joblib/test/test_memory.py::test_memorized_pickling",
"joblib/test/test_memory.py::test_memorized_repr",
"joblib/test/test_memory.py::test_memory_file_modification",
"joblib/test/test_memory.py::test_memory_in_memory_function_code_change",
"joblib/test/test_memory.py::test_clear_memory_with_none_cachedir"
]
| []
| BSD 3-Clause "New" or "Revised" License | 328 | [
"joblib/func_inspect.py"
]
| [
"joblib/func_inspect.py"
]
|
coleifer__peewee-790 | f424cd64352ecc799c3c2d60aafe17dbb30f58ef | 2015-12-10 17:14:23 | ffcdf8786f46bf612c5a3973f29bf87b3b4c74aa | diff --git a/.gitignore b/.gitignore
index 6e0d4e46..b348417c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,3 +9,4 @@ playhouse/_speedups.so
playhouse/tests/peewee_test.db
.idea/
MANIFEST
+peewee_test.db
diff --git a/peewee.py b/peewee.py
index 827d0896..c910682d 100644
--- a/peewee.py
+++ b/peewee.py
@@ -1038,7 +1038,12 @@ class UUIDField(Field):
db_field = 'uuid'
def db_value(self, value):
- return None if value is None else str(value)
+ if isinstance(value, uuid.UUID):
+ return value.hex
+ try:
+ return uuid.UUID(value).hex
+ except:
+ return value
def python_value(self, value):
return None if value is None else uuid.UUID(value)
@@ -3751,6 +3756,7 @@ class MySQLDatabase(Database):
'float': 'FLOAT',
'primary_key': 'INTEGER AUTO_INCREMENT',
'text': 'LONGTEXT',
+ 'uuid': 'LONGTEXT'
}
for_update = True
interpolation = '%s'
| Better support for UUIDField
As an extension from #780, databases which don't support UUID natively may encounter consistency problems.
Postgres will correctly handle multiple input variants [\[1\]][1], such as
`a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11` and `a0eebc999c0b4ef8bb6d6bb9bd380a11`.
However SQLite, which now uses `TEXT` since #780, does not support multiple input types properly, and will end up with different variants being stored in the backend, rather than being normalised. Other databases, such as MySQL, are not supported at all as they don't have the necessary `field_overrides`.
The most viable fix I can see for this would be:
```py
class UUIDField(Field):
...
def db_value(self, value):
if isinstance(value, uuid.UUID):
return value.hex
try:
return uuid.UUID(value).hex
except:
return value
```
The above would ensure that the UUID output is always consistent, whilst also allowing non-conforming values to be used with `filter()`, e.g. `0f0f0f0f-`.
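As a standalone sketch of how that `db_value` normalises things (standard library only; `except Exception` used here instead of a bare `except`):

```py
import uuid

def db_value(value):
    if isinstance(value, uuid.UUID):
        return value.hex
    try:
        return uuid.UUID(value).hex
    except Exception:
        return value  # non-conforming values pass through for filter()

# hyphenated and bare-hex inputs normalise to the same stored form
assert db_value('a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11') == \
       db_value('a0eebc999c0b4ef8bb6d6bb9bd380a11')
assert db_value('0f0f0f0f-') == '0f0f0f0f-'  # left untouched
```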
I'd also like to propose that we add a `field_override` for `MySQLDatabase`, much the same as `SqliteDatabase`.
If you're happy with the above solution, I'll throw up a PR, let me know
[1]: http://www.postgresql.org/docs/9.1/static/datatype-uuid.html | coleifer/peewee | diff --git a/playhouse/tests/test_fields.py b/playhouse/tests/test_fields.py
index a9cfbcaf..1af7ee50 100644
--- a/playhouse/tests/test_fields.py
+++ b/playhouse/tests/test_fields.py
@@ -726,7 +726,6 @@ class TestServerDefaults(ModelTestCase):
self.assertEqual(sd2_db.timestamp, datetime.datetime(2015, 1, 2, 3, 4))
-@skip_if(lambda: isinstance(test_db, MySQLDatabase))
class TestUUIDField(ModelTestCase):
requires = [
TestingID,
@@ -745,6 +744,29 @@ class TestUUIDField(ModelTestCase):
t2 = TestingID.get(TestingID.uniq == uuid_obj)
self.assertEqual(t1, t2)
+ def test_uuid_casting(self):
+ uuid_obj = uuid.UUID('a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11')
+ uuid_str = uuid_obj.hex
+ uuid_str_short = uuid_str.replace("-", "")
+
+ t1 = TestingID.create(uniq=uuid_obj)
+ t1_db = TestingID.get(TestingID.uniq == uuid_str)
+ self.assertEqual(t1_db.uniq, uuid_obj)
+ t1_db = TestingID.get(TestingID.uniq == uuid_str_short)
+ self.assertEqual(t1_db.uniq, uuid_obj)
+
+ t1 = TestingID.create(uniq=uuid_str)
+ t1_db = TestingID.get(TestingID.uniq == uuid_str)
+ self.assertEqual(t1_db.uniq, uuid_obj)
+ t1_db = TestingID.get(TestingID.uniq == uuid_str_short)
+ self.assertEqual(t1_db.uniq, uuid_obj)
+
+ t1 = TestingID.create(uniq=uuid_str_short)
+ t1_db = TestingID.get(TestingID.uniq == uuid_str)
+ self.assertEqual(t1_db.uniq, uuid_obj)
+ t1_db = TestingID.get(TestingID.uniq == uuid_str_short)
+ self.assertEqual(t1_db.uniq, uuid_obj)
+
def test_uuid_foreign_keys(self):
data_a = UUIDData.create(id=uuid.uuid4(), data='a')
data_b = UUIDData.create(id=uuid.uuid4(), data='b')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_issue_reference",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 2
} | 2.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"Cython"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | Cython==3.0.12
exceptiongroup==1.2.2
iniconfig==2.1.0
packaging==24.2
-e git+https://github.com/coleifer/peewee.git@f424cd64352ecc799c3c2d60aafe17dbb30f58ef#egg=peewee
pluggy==1.5.0
pytest==8.3.5
tomli==2.2.1
| name: peewee
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cython==3.0.12
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/peewee
| [
"playhouse/tests/test_fields.py::TestFieldTypes::test_null_query",
"playhouse/tests/test_fields.py::TestFieldTypes::test_regexp",
"playhouse/tests/test_fields.py::TestDateTimeExtract::test_extract_date",
"playhouse/tests/test_fields.py::TestDateTimeExtract::test_extract_date_where",
"playhouse/tests/test_fields.py::TestDateTimeExtract::test_extract_datetime",
"playhouse/tests/test_fields.py::TestDateTimeExtract::test_extract_datetime_where",
"playhouse/tests/test_fields.py::TestDateTimeExtract::test_extract_time",
"playhouse/tests/test_fields.py::TestDateTimeExtract::test_extract_time_where",
"playhouse/tests/test_fields.py::TestUniqueColumnConstraint::test_multi_index",
"playhouse/tests/test_fields.py::TestUniqueColumnConstraint::test_unique",
"playhouse/tests/test_fields.py::TestNonIntegerPrimaryKey::test_non_int_fk",
"playhouse/tests/test_fields.py::TestNonIntegerPrimaryKey::test_non_int_pk",
"playhouse/tests/test_fields.py::TestPrimaryKeyIsForeignKey::test_pk_fk_relations",
"playhouse/tests/test_fields.py::TestPrimaryKeyIsForeignKey::test_primary_foreign_key",
"playhouse/tests/test_fields.py::TestFieldDatabaseColumn::test_db_column",
"playhouse/tests/test_fields.py::TestFieldDatabaseColumn::test_select",
"playhouse/tests/test_fields.py::TestUUIDField::test_uuid_casting"
]
| [
"playhouse/tests/test_fields.py::TestFieldTypes::test_between",
"playhouse/tests/test_fields.py::TestFieldTypes::test_endswith",
"playhouse/tests/test_fields.py::TestFieldTypes::test_in_",
"playhouse/tests/test_fields.py::TestFieldTypes::test_startswith",
"playhouse/tests/test_fields.py::TestFieldTypes::test_blob_field",
"playhouse/tests/test_fields.py::TestFieldTypes::test_contains",
"playhouse/tests/test_fields.py::TestFieldTypes::test_date_and_time_fields",
"playhouse/tests/test_fields.py::TestFieldTypes::test_date_as_string",
"playhouse/tests/test_fields.py::TestFieldTypes::test_field_types",
"playhouse/tests/test_fields.py::TestFieldTypes::test_fixed_charfield",
"playhouse/tests/test_fields.py::TestFieldTypes::test_floatfield",
"playhouse/tests/test_fields.py::TestFieldTypes::test_intfield",
"playhouse/tests/test_fields.py::TestFieldTypes::test_time_field_python_value",
"playhouse/tests/test_fields.py::TestFieldTypes::test_various_formats",
"playhouse/tests/test_fields.py::TestSQLiteDatePart::test_sqlite_date_part"
]
| [
"playhouse/tests/test_fields.py::TestFieldTypes::test_concat",
"playhouse/tests/test_fields.py::TestFieldTypes::test_decimalfield",
"playhouse/tests/test_fields.py::TestSQLiteDateTrunc::test_sqlite_date_trunc",
"playhouse/tests/test_fields.py::TestCheckConstraints::test_check_constraint",
"playhouse/tests/test_fields.py::TestServerDefaults::test_server_default",
"playhouse/tests/test_fields.py::TestUUIDField::test_prefetch_regression",
"playhouse/tests/test_fields.py::TestUUIDField::test_uuid",
"playhouse/tests/test_fields.py::TestUUIDField::test_uuid_foreign_keys"
]
| [
"playhouse/tests/test_fields.py::TestFieldTypes::test_blob_field_mysql",
"playhouse/tests/test_fields.py::TestFieldTypes::test_boolfield",
"playhouse/tests/test_fields.py::TestFieldTypes::test_charfield"
]
| MIT License | 329 | [
".gitignore",
"peewee.py"
]
| [
".gitignore",
"peewee.py"
]
|
|
mozilla__puente-54 | f12f27e2230f071f474b394d1ac908d2ed3476e2 | 2015-12-10 20:46:56 | f78d702e0d1376425d8d613a6573a896fc8d11a1 | diff --git a/puente/commands.py b/puente/commands.py
index c21cdd3..a36d035 100644
--- a/puente/commands.py
+++ b/puente/commands.py
@@ -129,11 +129,10 @@ def extract_command(outputdir, domain_methods, text_domain, keywords,
print('Done')
-def merge_command(create, backup, base_dir, domain_methods, languages):
+def merge_command(create, base_dir, domain_methods, languages):
"""
:arg create: whether or not to create directories if they don't
exist
- :arg backup: whether or not to create backup .po files
:arg base_dir: BASE_DIR setting
:arg domain_methods: DOMAIN_METHODS setting
:arg languages: LANGUAGES setting
@@ -214,7 +213,6 @@ def merge_command(create, backup, base_dir, domain_methods, languages):
'msgmerge',
'--update',
'--width=200',
- '--backup=%s' % ('simple' if backup else 'off'),
domain_po,
'-'
]
diff --git a/puente/management/commands/merge.py b/puente/management/commands/merge.py
index acf9b26..74de7d9 100644
--- a/puente/management/commands/merge.py
+++ b/puente/management/commands/merge.py
@@ -32,17 +32,11 @@ class Command(BaseCommand):
action='store_true', dest='create', default=False,
help='Create locale subdirectories'
),
- make_option(
- '-b', '--backup',
- action='store_true', dest='backup', default=False,
- help='Create backup files of .po files'
- ),
)
def handle(self, *args, **options):
return merge_command(
create=options.get('create'),
- backup=options.get('backup'),
base_dir=get_setting('BASE_DIR'),
domain_methods=get_setting('DOMAIN_METHODS'),
languages=getattr(settings, 'LANGUAGES', [])
diff --git a/puente/utils.py b/puente/utils.py
index e631102..ee1cc28 100644
--- a/puente/utils.py
+++ b/puente/utils.py
@@ -95,8 +95,20 @@ def generate_keywords(additional_keywords=None):
# Shallow copy
keywords = dict(BABEL_KEYWORDS)
- keywords['_lazy'] = None
- # FIXME: Add other keywords from Django here
+ keywords.update({
+ '_lazy': None,
+ 'gettext_lazy': None,
+ 'ugettext_lazy': None,
+ 'gettext_noop': None,
+ 'ugettext_noop': None,
+
+ 'ngettext_lazy': (1, 2),
+ 'ungettext_lazy': (1, 2),
+
+ 'npgettext': ((1, 'c'), 2, 3),
+ 'pgettext_lazy': ((1, 'c'), 2),
+ 'npgettext_lazy': ((1, 'c'), 2, 3),
+ })
# Add specified keywords
if additional_keywords:
| add django gettext keywords
* upgettext
* upgettext_lazy
* pgettext
* pgettext_lazy
* etc
Add all the ones that we're currently missing. | mozilla/puente | diff --git a/tests/test_extract.py b/tests/test_extract.py
index bc91f54..81fb63e 100644
--- a/tests/test_extract.py
+++ b/tests/test_extract.py
@@ -282,3 +282,63 @@ class TestExtractCommand:
""")
)
+
+ def test_django_pgettext_keywords(self, tmpdir):
+ # Test context
+ tmpdir.join('foo.py').write(dedent("""\
+ pgettext("context1", "string1")
+ pgettext_lazy("context2", "string2")
+ npgettext("context3", "string3", "plural3", 5)
+ npgettext_lazy("context4", "string4", "plural4", 5)
+ """))
+
+ # Extract
+ extract_command(
+ outputdir=str(tmpdir),
+ domain_methods={
+ 'django': [
+ ('*.py', 'python'),
+ ]
+ },
+ text_domain=puente_settings.TEXT_DOMAIN,
+ keywords=puente_settings.KEYWORDS,
+ comment_tags=puente_settings.COMMENT_TAGS,
+ base_dir=str(tmpdir),
+ project=puente_settings.PROJECT,
+ version=puente_settings.VERSION,
+ msgid_bugs_address=puente_settings.MSGID_BUGS_ADDRESS,
+ )
+
+ # Verify contents
+ assert os.path.exists(str(tmpdir.join('django.pot')))
+ pot_file = nix_header(tmpdir.join('django.pot').read())
+ assert (
+ pot_file ==
+ dedent("""\
+ #: foo.py:1
+ msgctxt "context1"
+ msgid "string1"
+ msgstr ""
+
+ #: foo.py:2
+ msgctxt "context2"
+ msgid "string2"
+ msgstr ""
+
+ #: foo.py:3
+ msgctxt "context3"
+ msgid "string3"
+ msgid_plural "plural3"
+ msgstr[0] ""
+ msgstr[1] ""
+
+ #: foo.py:4
+ msgctxt "context4"
+ msgid "string4"
+ msgid_plural "plural4"
+ msgstr[0] ""
+ msgstr[1] ""
+
+ """)
+ )
+
diff --git a/tests/test_merge.py b/tests/test_merge.py
index 60b46f1..29e1456 100644
--- a/tests/test_merge.py
+++ b/tests/test_merge.py
@@ -65,7 +65,6 @@ class TestMergecommand:
merge_command(
create=True,
- backup=True,
base_dir=str(tmpdir),
domain_methods={
'django': [
@@ -84,7 +83,6 @@ class TestMergecommand:
with pytest.raises(CommandError):
merge_command(
create=True,
- backup=True,
base_dir=str(tmpdir),
domain_methods={
'django': [
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 3
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-pythonpath",
"pytest-django"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt",
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
asgiref==3.8.1
attrs==25.3.0
babel==2.17.0
backports.tarfile==1.2.0
build==1.2.2.post1
cachetools==5.5.2
certifi==2025.1.31
cffi==1.17.1
chardet==5.2.0
charset-normalizer==3.4.1
check-manifest==0.50
colorama==0.4.6
cryptography==44.0.2
distlib==0.3.9
Django==4.2.20
django-jinja==2.11.0
docutils==0.21.2
filelock==3.18.0
id==1.5.0
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig==2.1.0
jaraco.classes==3.4.0
jaraco.context==6.0.1
jaraco.functools==4.1.0
jeepney==0.9.0
Jinja2==3.1.6
keyring==25.6.0
markdown-it-py==3.0.0
MarkupSafe==3.0.2
mdurl==0.1.2
more-itertools==10.6.0
nh3==0.2.21
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
-e git+https://github.com/mozilla/puente.git@f12f27e2230f071f474b394d1ac908d2ed3476e2#egg=puente
py==1.11.0
pycparser==2.22
Pygments==2.19.1
pyproject-api==1.9.0
pyproject_hooks==1.2.0
pytest==6.2.5
pytest-django==4.5.2
pytest-pythonpath==0.7.4
readme_renderer==44.0
requests==2.32.3
requests-toolbelt==1.0.0
rfc3986==2.0.0
rich==14.0.0
SecretStorage==3.3.3
snowballstemmer==2.2.0
Sphinx==7.4.7
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
sqlparse==0.5.3
swebench_matterhorn @ file:///swebench_matterhorn
toml==0.10.2
tomli==2.2.1
tox==4.25.0
twine==6.1.0
typing_extensions==4.13.0
urllib3==2.3.0
virtualenv==20.29.3
zipp==3.21.0
| name: puente
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- asgiref==3.8.1
- attrs==25.3.0
- babel==2.17.0
- backports-tarfile==1.2.0
- build==1.2.2.post1
- cachetools==5.5.2
- certifi==2025.1.31
- cffi==1.17.1
- chardet==5.2.0
- charset-normalizer==3.4.1
- check-manifest==0.50
- colorama==0.4.6
- cryptography==44.0.2
- distlib==0.3.9
- django==4.2.20
- django-jinja==2.11.0
- docutils==0.21.2
- filelock==3.18.0
- id==1.5.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- jaraco-classes==3.4.0
- jaraco-context==6.0.1
- jaraco-functools==4.1.0
- jeepney==0.9.0
- jinja2==3.1.6
- keyring==25.6.0
- markdown-it-py==3.0.0
- markupsafe==3.0.2
- mdurl==0.1.2
- more-itertools==10.6.0
- nh3==0.2.21
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- py==1.11.0
- pycparser==2.22
- pygments==2.19.1
- pyproject-api==1.9.0
- pyproject-hooks==1.2.0
- pytest==6.2.5
- pytest-django==4.5.2
- pytest-pythonpath==0.7.4
- readme-renderer==44.0
- requests==2.32.3
- requests-toolbelt==1.0.0
- rfc3986==2.0.0
- rich==14.0.0
- secretstorage==3.3.3
- snowballstemmer==2.2.0
- sphinx==7.4.7
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- sqlparse==0.5.3
- swebench-matterhorn==0.0.0
- toml==0.10.2
- tomli==2.2.1
- tox==4.25.0
- twine==6.1.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/puente
| [
"tests/test_merge.py::TestMergecommand::test_missing_pot_file"
]
| [
"tests/test_extract.py::TestManageExtract::test_help",
"tests/test_merge.py::TestManageMerge::test_help",
"tests/test_extract.py::TestExtractCommand::test_basic_extraction",
"tests/test_extract.py::TestExtractCommand::test_header",
"tests/test_extract.py::TestExtractCommand::test_whitespace_collapsing",
"tests/test_extract.py::TestExtractCommand::test_context",
"tests/test_extract.py::TestExtractCommand::test_plurals",
"tests/test_extract.py::TestExtractCommand::test_django_pgettext_keywords",
"tests/test_merge.py::TestMergecommand::test_basic"
]
| []
| []
| BSD 3-Clause "New" or "Revised" License | 330 | [
"puente/management/commands/merge.py",
"puente/utils.py",
"puente/commands.py"
]
| [
"puente/management/commands/merge.py",
"puente/utils.py",
"puente/commands.py"
]
|
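
A minimal sketch of the Babel keyword-spec format used by the puente patch above. The `KEYWORDS` subset and the inline sample source are illustrative assumptions, not puente's actual code: in a spec, `None` means the first argument is the message, a plain tuple lists the singular/plural argument positions, and `(n, 'c')` marks argument `n` as the msgctxt.

```python
from io import BytesIO

from babel.messages.extract import extract

# Illustrative subset of the Django keywords added by the patch above.
KEYWORDS = {
    'ugettext_lazy': None,               # argument 1 is the message
    'ungettext_lazy': (1, 2),            # singular, plural
    'pgettext_lazy': ((1, 'c'), 2),      # context, message
    'npgettext_lazy': ((1, 'c'), 2, 3),  # context, singular, plural
}

# Hypothetical source to extract from (Babel's extractors read bytes).
source = BytesIO(
    b'pgettext_lazy("menu", "Open")\n'
    b'ungettext_lazy("%d file", "%d files", count)\n'
)

for lineno, message, comments, context in extract(
        'python', source, keywords=KEYWORDS):
    print(lineno, message, context)
```

The same specs can also be expressed on the `pybabel extract` command line, e.g. `-k 'pgettext_lazy:1c,2'`.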
|
cmc-python__modelmachine-12 | 11fd90f0eb9b713b40be7246b25ca817702af930 | 2015-12-11 13:53:42 | fa9275d64498c7cbe24f02357bbb1bc971670756 | diff --git a/.gitignore b/.gitignore
index a9ce44b..ee5aa21 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,4 @@
-.*.swp # Vim
+*.swp # Vim
# Byte-compiled / optimized / DLL files
__pycache__/
diff --git a/README.md b/README.md
index d01c4fe..da57d66 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
# modelmachine
Model machine emulator
-[](https://travis-ci.org/cmc-python/modelmachine)
+[](https://travis-ci.org/vslutov/modelmachine)
## TODO
diff --git a/modelmachine/__main__.py b/modelmachine/__main__.py
index 23b5e8e..852943e 100644
--- a/modelmachine/__main__.py
+++ b/modelmachine/__main__.py
@@ -3,41 +3,62 @@
"""Modelmachine - model machine emulator."""
from modelmachine.ide import get_program, get_cpu, debug
-import pytest, os, sys
+import pytest, os, sys, argparse
VERSION = "0.0.6" # Don't forget fix in setup.py
-USAGE = '''Usage: modelmachine command [file]
-Available commands:
- test : run internal tests
- run <filename> : execute filename
- debug <filename> : debug filename
- version : print version and exit
- help : print this help and exit'''
+def run_program(args):
+ cpu = get_program(args.filename, args.protect_memory)
+ cpu.run_file(args.filename)
+def run_debug(args):
+ cpu = get_program(args.filename, args.protect_memory)
+ debug(cpu)
-def main(argv, stdin, stdout):
+def run_tests(args):
+ path = os.path.abspath(os.path.dirname(__file__))
+ sys.argv[1] = path
+ pytest.main()
+
+def main(argv, stdout):
"""Execute, when user call modelmachine."""
- stdin = stdin
- if len(argv) == 2 and argv[1] == "test":
- path = os.path.abspath(os.path.dirname(__file__))
- argv[1] = path
- pytest.main()
- elif len(argv) == 3 and argv[1] == "debug":
- filename = argv[2]
- cpu = get_program(filename)
- debug(cpu)
- elif len(argv) == 3 and argv[1] == "run":
- filename = argv[2]
- cpu = get_program(filename)
- cpu.run_file(filename)
- elif len(argv) == 2 and argv[1] == "version":
+ parser = argparse.ArgumentParser(description='Run modelmachine.', add_help=False)
+
+ group = parser.add_mutually_exclusive_group()
+
+ group.add_argument('-h', '--help', action='store_true', default=False,
+ help='show this help message and exit')
+ group.add_argument('-v', '--version', action='store_true', default=False,
+ help='print version and exit')
+
+ parser.add_argument('-m', '--protect_memory', action='store_true', default=False,
+ help='raise an error if try to read dirty memory')
+ subparsers = parser.add_subparsers(title='commands',
+ help='commands for model machine emulator')
+
+ run = subparsers.add_parser('run', help='run program')
+ run.add_argument('filename', help='file with source code')
+ run.set_defaults(func=run_program)
+
+ debug = subparsers.add_parser('debug', help='run program in debug mode')
+ debug.add_argument('filename', help='file with source code')
+ debug.set_defaults(func=run_debug)
+
+ test = subparsers.add_parser('test', help='run internal tests end exit')
+ test.set_defaults(func=run_tests)
+
+ args = parser.parse_args(argv[1:])
+
+ if args.version:
print("ModelMachine", VERSION, file=stdout)
+ elif args.help:
+ parser.print_help(stdout)
else:
- print(USAGE, file=stdout)
- if not (len(argv) == 2 and argv[1] == "help"):
- exit(1)
+ args.func(args)
def exec_main():
"""Hook for testability."""
- main(sys.argv, sys.stdin, sys.stdout)
+ main(sys.argv, sys.stdout)
+
+if __name__ == '__main__':
+ exec_main()
diff --git a/modelmachine/cpu.py b/modelmachine/cpu.py
index bb4faeb..8519e9c 100644
--- a/modelmachine/cpu.py
+++ b/modelmachine/cpu.py
@@ -70,13 +70,15 @@ class AbstractCPU:
self.io_unit.load_source(code)
- input_addresses = [int(x, 0) for x in self.config['input'].split(',')]
- self.io_unit.load_data(input_addresses, data)
+ if 'input' in self.config:
+ input_addresses = [int(x, 0) for x in self.config['input'].split(',')]
+ self.io_unit.load_data(input_addresses, data)
def print_result(self, output=sys.stdout):
"""Print calculation result."""
- for address in (int(x, 0) for x in self.config['output'].split(',')):
- print(self.io_unit.get_int(address), file=output)
+ if 'output' in self.config:
+ for address in (int(x, 0) for x in self.config['output'].split(',')):
+ print(self.io_unit.get_int(address), file=output)
def run_file(self, filename, output=sys.stdout):
"""Run all execution cycle."""
@@ -90,7 +92,7 @@ class BordachenkovaMM3(AbstractCPU):
"""Bordachenkova model machine 3."""
- def __init__(self):
+ def __init__(self, protect_memory):
"""See help(type(x))."""
word_size = 7 * 8
address_size = 2 * 8
@@ -98,7 +100,7 @@ class BordachenkovaMM3(AbstractCPU):
self.ram = RandomAccessMemory(word_size=word_size,
memory_size=memory_size,
endianess='big', # Unused
- is_protected=True)
+ is_protected=protect_memory)
self.registers = RegisterMemory()
self.register_names = BCU3.register_names
self.alu = ArithmeticLogicUnit(registers=self.registers,
@@ -119,7 +121,7 @@ class BordachenkovaMM2(AbstractCPU):
"""Bordachenkova model machine 2."""
- def __init__(self):
+ def __init__(self, protect_memory):
"""See help(type(x))."""
word_size = 5 * 8
address_size = 2 * 8
@@ -127,7 +129,7 @@ class BordachenkovaMM2(AbstractCPU):
self.ram = RandomAccessMemory(word_size=word_size,
memory_size=memory_size,
endianess='big', # Unused
- is_protected=True)
+ is_protected=protect_memory)
self.registers = RegisterMemory()
self.register_names = BCU2.register_names
self.alu = ArithmeticLogicUnit(registers=self.registers,
@@ -148,7 +150,7 @@ class BordachenkovaMMV(AbstractCPU):
"""Bordachenkova variable model machine."""
- def __init__(self):
+ def __init__(self, protect_memory):
"""See help(type(x))."""
byte_size = 8
word_size = 5 * byte_size
@@ -157,7 +159,7 @@ class BordachenkovaMMV(AbstractCPU):
self.ram = RandomAccessMemory(word_size=byte_size,
memory_size=memory_size,
endianess='big',
- is_protected=True)
+ is_protected=protect_memory)
self.registers = RegisterMemory()
self.register_names = BCUV.register_names
self.alu = ArithmeticLogicUnit(registers=self.registers,
@@ -178,7 +180,7 @@ class BordachenkovaMM1(AbstractCPU):
"""Bordachenkova model machine 1."""
- def __init__(self):
+ def __init__(self, protect_memory):
"""See help(type(x))."""
word_size = 3 * 8
address_size = 2 * 8
@@ -186,7 +188,7 @@ class BordachenkovaMM1(AbstractCPU):
self.ram = RandomAccessMemory(word_size=word_size,
memory_size=memory_size,
endianess='big', # Unused
- is_protected=True)
+ is_protected=protect_memory)
self.registers = RegisterMemory()
self.register_names = BCU1.register_names
self.alu = ArithmeticLogicUnit(registers=self.registers,
@@ -208,7 +210,7 @@ class BordachenkovaMMS(AbstractCPU):
"""Bordachenkova stack model machine."""
- def __init__(self):
+ def __init__(self, protect_memory):
"""See help(type(x))."""
byte_size = 8
word_size = 3 * byte_size
@@ -217,7 +219,7 @@ class BordachenkovaMMS(AbstractCPU):
self.ram = RandomAccessMemory(word_size=byte_size,
memory_size=memory_size,
endianess='big', # Unused
- is_protected=True)
+ is_protected=protect_memory)
self.registers = RegisterMemory()
self.register_names = BCUS.register_names
self.alu = ArithmeticLogicUnit(registers=self.registers,
diff --git a/modelmachine/ide.py b/modelmachine/ide.py
index 954cab3..a5d154c 100644
--- a/modelmachine/ide.py
+++ b/modelmachine/ide.py
@@ -147,20 +147,20 @@ def debug(cpu):
else:
need_help = True
-def get_cpu(source):
+def get_cpu(source, protect_memory):
"""Return empty cpu or raise the ValueError."""
arch = source[0].strip()
if arch in CPU_LIST:
- cpu = CPU_LIST[arch]()
+ cpu = CPU_LIST[arch](protect_memory)
return cpu
else:
raise ValueError('Unexpected arch (found in first line): {arch}'
.format(arch=arch))
-def get_program(filename):
+def get_program(filename, protect_memory):
"""Read model machine program."""
with open(filename, 'r') as source_file:
source = source_file.readlines()
- cpu = get_cpu(source)
+ cpu = get_cpu(source, protect_memory)
cpu.load_program(source)
return cpu
diff --git a/samples/minimal.mmach b/samples/minimal.mmach
new file mode 100644
index 0000000..40b875b
--- /dev/null
+++ b/samples/minimal.mmach
@@ -0,0 +1,9 @@
+bordachenkova_mms
+
+[config]
+
+[code]
+99 ; halt
+
+[input]
+
| Fix RandomAccessMemory.is_protected
- Verify that with is_protected=False the machine returns zeros and does not crash.
- Fix the variable-address-length machine and eliminate the redundant reads.
- Add a command-line parameter to control this setting. (A self-contained argparse sketch follows this record.)
index 08a5e25..5715ea8 100644
--- a/modelmachine/tests/test_cpu.py
+++ b/modelmachine/tests/test_cpu.py
@@ -109,7 +109,7 @@ class TestBordachenkovaMM3:
def setup(self):
"""Init state."""
- self.cpu = BordachenkovaMM3()
+ self.cpu = BordachenkovaMM3(protect_memory=False)
self.source = ("[config]\ninput=0x101,0x102\noutput=0x103\n" +
"[code]\n01 0101 0102 0103\n80 0000 0000 0003\n" +
"02 0103 0103 0103; never be used\n" +
@@ -137,7 +137,7 @@ class TestBordachenkovaMM2:
def setup(self):
"""Init state."""
- self.cpu = BordachenkovaMM2()
+ self.cpu = BordachenkovaMM2(protect_memory=False)
self.source = ("[config]\n" +
"input=0x101,0x102\n" +
"output=0x103\n" +
@@ -172,7 +172,7 @@ class TestBordachenkovaMMV:
def setup(self):
"""Init state."""
- self.cpu = BordachenkovaMMV()
+ self.cpu = BordachenkovaMMV(protect_memory=False)
self.source = ("[config]\n" +
"input=0x100,0x105\n" +
"output=0x10a\n" +
@@ -208,7 +208,7 @@ class TestBordachenkovaMM1:
def setup(self):
"""Init state."""
- self.cpu = BordachenkovaMM1()
+ self.cpu = BordachenkovaMM1(protect_memory=False)
self.source = ("[config]\n" +
"input=0x101,0x102\n" +
"output=0x103\n" +
@@ -246,7 +246,7 @@ class TestBordachenkovaMMS:
def setup(self):
"""Init state."""
- self.cpu = BordachenkovaMMS()
+ self.cpu = BordachenkovaMMS(protect_memory=False)
self.source = ("[config]\n" +
"input=0x100,0x103\n" +
"output=0x106\n" +
diff --git a/modelmachine/tests/test_cu_bord_variable.py b/modelmachine/tests/test_cu_bord_variable.py
index d7e83da..135823a 100644
--- a/modelmachine/tests/test_cu_bord_variable.py
+++ b/modelmachine/tests/test_cu_bord_variable.py
@@ -26,7 +26,7 @@ class TestBordachenkovaControlUnitV(TBCU2):
def setup(self):
"""Init state."""
super().setup()
- self.ram = RandomAccessMemory(BYTE_SIZE, 256, 'big')
+ self.ram = RandomAccessMemory(BYTE_SIZE, 256, 'big', is_protected=True)
self.control_unit = BordachenkovaControlUnitV(WORD_SIZE,
BYTE_SIZE,
self.registers,
@@ -158,6 +158,23 @@ class TestBordachenkovaControlUnitV(TBCU2):
assert self.registers.fetch("IP", BYTE_SIZE) == 0x15
assert self.control_unit.get_status() == HALTED
+ def test_minimal_run(self):
+ """Minimal program."""
+ self.control_unit.registers = self.registers = RegisterMemory()
+ self.registers.add_register('IR', WORD_SIZE)
+ self.alu = ArithmeticLogicUnit(self.registers,
+ self.control_unit.register_names,
+ WORD_SIZE,
+ BYTE_SIZE)
+ self.control_unit.alu = self.alu
+
+ self.ram.put(0x00, 0x99, BYTE_SIZE)
+ self.registers.put("IP", 0, BYTE_SIZE)
+
+ self.control_unit.run()
+ assert self.registers.fetch("IP", BYTE_SIZE) == 0x01
+ assert self.control_unit.get_status() == HALTED
+
class TestBordachenkovaControlUnitS(TBCU2):
@@ -166,7 +183,7 @@ class TestBordachenkovaControlUnitS(TBCU2):
def setup(self):
"""Init state."""
super().setup()
- self.ram = RandomAccessMemory(BYTE_SIZE, 256, 'big')
+ self.ram = RandomAccessMemory(BYTE_SIZE, 256, 'big', is_protected=True)
self.control_unit = BordachenkovaControlUnitS(WORD_SIZE,
BYTE_SIZE,
self.registers,
@@ -457,3 +474,23 @@ class TestBordachenkovaControlUnitS(TBCU2):
assert self.registers.fetch("IP", BYTE_SIZE) == 0x1a
assert self.registers.fetch("SP", BYTE_SIZE) == 0
assert self.control_unit.get_status() == HALTED
+
+ def test_minimal_run(self):
+ """Very simple program."""
+ self.control_unit.registers = self.registers = RegisterMemory()
+ self.registers.add_register("IR", WORD_SIZE)
+ self.registers.add_register("SP", BYTE_SIZE)
+ self.registers.put("SP", 0, BYTE_SIZE)
+ self.alu = ArithmeticLogicUnit(self.registers,
+ self.control_unit.register_names,
+ WORD_SIZE,
+ BYTE_SIZE)
+ self.control_unit.alu = self.alu
+
+ self.ram.put(0x00, 0x99, BYTE_SIZE)
+ self.registers.put("IP", 0, BYTE_SIZE)
+
+ self.control_unit.run()
+ assert self.registers.fetch("IP", BYTE_SIZE) == 0x01
+ assert self.registers.fetch("SP", BYTE_SIZE) == 0
+ assert self.control_unit.get_status() == HALTED
diff --git a/modelmachine/tests/test_ide.py b/modelmachine/tests/test_ide.py
index b58861e..18c9635 100644
--- a/modelmachine/tests/test_ide.py
+++ b/modelmachine/tests/test_ide.py
@@ -10,15 +10,15 @@ from pytest import raises
def test_get_cpu():
"""Test define cpu method."""
- ide.CPU_LIST = {"mm1": create_autospec(AbstractCPU, True)}
+ ide.CPU_LIST = {"abstract_cpu_test": create_autospec(AbstractCPU, True)}
with raises(ValueError):
- ide.get_cpu(["not_found_cpu", "[config]", "[code]", "00 00", "[input]"])
+ ide.get_cpu(["not_found_cpu", "[config]", "[code]", "00 00", "[input]"], False)
with raises(ValueError):
- ide.get_cpu(["[config]", "[code]", "00 00", "[input]"])
+ ide.get_cpu(["[config]", "[code]", "00 00", "[input]"], False)
- cpu = ide.get_cpu(["mm1", "[config]", "key=value",
- "[code]", "00 00", "99 00", "[input]"])
+ cpu = ide.get_cpu(["abstract_cpu_test", "[config]", "key=value",
+ "[code]", "00 00", "99 00", "[input]"], False)
assert isinstance(cpu, AbstractCPU)
diff --git a/modelmachine/tests/test_main.py b/modelmachine/tests/test_main.py
index 582eebe..0b89a0e 100644
--- a/modelmachine/tests/test_main.py
+++ b/modelmachine/tests/test_main.py
@@ -2,7 +2,7 @@
"""Test case for cli part of modelmachine package."""
-from modelmachine.__main__ import main, VERSION, USAGE
+from modelmachine.__main__ import main, VERSION
from pytest import raises
def test_version(tmpdir):
@@ -10,19 +10,25 @@ def test_version(tmpdir):
output_path = tmpdir.join('output.txt')
with open(str(output_path), 'w') as stdout:
- main(['modelmachine', 'version'], None, stdout)
+ main(['modelmachine', '--version'], stdout)
+
+ assert output_path.read() == 'ModelMachine ' + VERSION + '\n'
+
+ with open(str(output_path), 'w') as stdout:
+ main(['modelmachine', '-v'], stdout)
assert output_path.read() == 'ModelMachine ' + VERSION + '\n'
def test_usage(tmpdir):
- """Test that it's print usage (with exit code 1)."""
+ """Test that it's print usage."""
output_path = tmpdir.join('output.txt')
with open(str(output_path), 'w') as stdout:
- main(['modelmachine', 'help'], None, stdout)
- assert output_path.read() == USAGE + '\n'
-
- with open(str(output_path), 'w') as stdout:
- with raises(SystemExit):
- main(['modelmachine', 'wrong_command'], None, stdout)
- assert output_path.read() == USAGE + '\n'
+ main(['modelmachine', '--help'], stdout)
+ assert output_path.read().startswith('usage:')
+
+ # TODO: Add stderr capture
+ # with open(str(output_path), 'w') as stdout:
+ # with raises(SystemExit):
+ # main(['modelmachine', 'wrong_command'], stdout)
+ # assert output_path.read().startswith('usage:')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 5
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pylint",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | astroid==3.3.9
dill==0.3.9
exceptiongroup==1.2.2
iniconfig==2.1.0
isort==6.0.1
mccabe==0.7.0
-e git+https://github.com/cmc-python/modelmachine.git@11fd90f0eb9b713b40be7246b25ca817702af930#egg=modelmachine
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
pylint==3.3.6
pytest==8.3.5
tomli==2.2.1
tomlkit==0.13.2
typing_extensions==4.13.0
| name: modelmachine
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- astroid==3.3.9
- dill==0.3.9
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- isort==6.0.1
- mccabe==0.7.0
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- pylint==3.3.6
- pytest==8.3.5
- tomli==2.2.1
- tomlkit==0.13.2
- typing-extensions==4.13.0
prefix: /opt/conda/envs/modelmachine
| [
"modelmachine/tests/test_ide.py::test_get_cpu",
"modelmachine/tests/test_main.py::test_version",
"modelmachine/tests/test_main.py::test_usage"
]
| [
"modelmachine/tests/test_cpu.py::TestAbstractCPU::test_load_program",
"modelmachine/tests/test_cpu.py::TestAbstractCPU::test_print_result",
"modelmachine/tests/test_cpu.py::TestAbstractCPU::test_run_file",
"modelmachine/tests/test_cpu.py::TestBordachenkovaMM3::test_smoke",
"modelmachine/tests/test_cpu.py::TestBordachenkovaMM2::test_smoke",
"modelmachine/tests/test_cpu.py::TestBordachenkovaMMV::test_smoke",
"modelmachine/tests/test_cpu.py::TestBordachenkovaMM1::test_smoke",
"modelmachine/tests/test_cpu.py::TestBordachenkovaMMS::test_smoke",
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitV::test_const",
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitV::test_fetch_instruction",
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitV::test_basic_execute",
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitV::test_execute_cond_jumps",
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitV::test_jump_halt",
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitV::test_execute_jump_halt",
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitV::test_execute_comp",
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitV::test_write_back",
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitV::test_fetch_and_decode",
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitV::test_load",
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitV::test_step",
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitV::test_run",
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitV::test_minimal_run",
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_const",
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_fetch_instruction",
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_execute_cond_jumps",
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_jump_halt",
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_execute_jump_halt",
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_execute_comp",
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_fetch_and_decode",
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_push",
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_pop",
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_load",
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_basic_execute",
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_execute_stack",
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_write_back",
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_step",
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_run",
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_minimal_run"
]
| []
| []
| Do What The F*ck You Want To Public License | 331 | [
"modelmachine/__main__.py",
".gitignore",
"modelmachine/ide.py",
"README.md",
"samples/minimal.mmach",
"modelmachine/cpu.py"
]
| [
"modelmachine/__main__.py",
".gitignore",
"modelmachine/ide.py",
"README.md",
"samples/minimal.mmach",
"modelmachine/cpu.py"
]
|
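
A self-contained sketch of the argparse wiring that the modelmachine patch above introduces for memory protection; `run_program` here is a stand-in for the real entry point, and the sample filename is only a placeholder.

```python
import argparse

def run_program(args):
    # Stand-in for modelmachine's real runner.
    print('run', args.filename, 'protect_memory =', args.protect_memory)

parser = argparse.ArgumentParser(description='Run modelmachine.')
parser.add_argument('-m', '--protect_memory', action='store_true',
                    default=False,
                    help='raise an error on reads from dirty memory')
subparsers = parser.add_subparsers(title='commands')

run = subparsers.add_parser('run', help='run program')
run.add_argument('filename', help='file with source code')
run.set_defaults(func=run_program)

# Equivalent to: modelmachine -m run samples/minimal.mmach
args = parser.parse_args(['-m', 'run', 'samples/minimal.mmach'])
args.func(args)
```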
|
Shopify__shopify_python_api-129 | 28c00a110c23edc5287d6e8f90f0e36f0eb5d1b3 | 2015-12-11 18:31:45 | c29e0ecbed9de67dd923f980a3ac053922dab75e | diff --git a/shopify/resources/__init__.py b/shopify/resources/__init__.py
index 2de7499..adacc08 100644
--- a/shopify/resources/__init__.py
+++ b/shopify/resources/__init__.py
@@ -46,5 +46,6 @@ from .policy import Policy
from .smart_collection import SmartCollection
from .gift_card import GiftCard
from .discount import Discount
+from .shipping_zone import ShippingZone
from ..base import ShopifyResource
diff --git a/shopify/resources/shipping_zone.py b/shopify/resources/shipping_zone.py
new file mode 100644
index 0000000..49cd647
--- /dev/null
+++ b/shopify/resources/shipping_zone.py
@@ -0,0 +1,5 @@
+from ..base import ShopifyResource
+
+
+class ShippingZone(ShopifyResource):
+ pass
| Add support for the new Shipping Zone resource.
As per https://ecommerce.shopify.com/c/api-announcements/t/shipping-zones-api-and-changes-to-the-countries-api-307687. (A hedged usage sketch follows this record.) | Shopify/shopify_python_api | diff --git a/tests/fixtures/shipping_zones.json b/tests/fixtures/shipping_zones.json
new file mode 100644
index 0000000..f07b8ff
--- /dev/null
+++ b/test/fixtures/shipping_zones.json
@@ -0,0 +1,114 @@
+{
+ "shipping_zones": [
+ {
+ "id": 1,
+ "name": "Some zone",
+ "countries": [
+ {
+ "id": 817138619,
+ "name": "United States",
+ "tax": 0.0,
+ "code": "US",
+ "tax_name": "Federal Tax",
+ "provinces": [
+ {
+ "id": 1013111685,
+ "country_id": 817138619,
+ "name": "New York",
+ "code": "NY",
+ "tax": 0.04,
+ "tax_name": "Tax",
+ "tax_type": null,
+ "shipping_zone_id": 1,
+ "tax_percentage": 4.0
+ },
+ {
+ "id": 1069646654,
+ "country_id": 817138619,
+ "name": "Ohio",
+ "code": "OH",
+ "tax": 0.0,
+ "tax_name": "State Tax",
+ "tax_type": null,
+ "shipping_zone_id": 1,
+ "tax_percentage": 0.0
+ }
+ ]
+ },
+ {
+ "id": 879921427,
+ "name": "Canada",
+ "tax": 0.05,
+ "code": "CA",
+ "tax_name": "GST",
+ "provinces": [
+ {
+ "id": 702530425,
+ "country_id": 879921427,
+ "name": "Ontario",
+ "code": "ON",
+ "tax": 0.08,
+ "tax_name": "Tax",
+ "tax_type": null,
+ "shipping_zone_id": 1,
+ "tax_percentage": 8.0
+ },
+ {
+ "id": 224293623,
+ "country_id": 879921427,
+ "name": "Quebec",
+ "code": "QC",
+ "tax": 0.09,
+ "tax_name": "HST",
+ "tax_type": "compounded",
+ "shipping_zone_id": 1,
+ "tax_percentage": 9.0
+ }
+ ]
+ },
+ {
+ "id": 988409122,
+ "name": "Yemen",
+ "tax": 0.0,
+ "code": "YE",
+ "tax_name": "GST",
+ "provinces": [
+ ]
+ }
+ ],
+ "weight_based_shipping_rates": [
+ {
+ "id": 760465697,
+ "weight_low": 1.2,
+ "weight_high": 10.0,
+ "name": "Austria Express Heavy Shipping",
+ "price": "40.00",
+ "shipping_zone_id": 1
+ }
+ ],
+ "price_based_shipping_rates": [
+ {
+ "id": 583276424,
+ "name": "Standard Shipping",
+ "min_order_subtotal": "0.00",
+ "price": "10.99",
+ "max_order_subtotal": "2000.00",
+ "shipping_zone_id": 1
+ }
+ ],
+ "carrier_shipping_rate_providers": [
+ {
+ "id": 972083812,
+ "country_id": null,
+ "carrier_service_id": 61629186,
+ "flat_modifier": "0.00",
+ "percent_modifier": 0,
+ "service_filter": {
+ "*": "+"
+ },
+ "shipping_zone_id": 1
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/test/shipping_zone_test.py b/test/shipping_zone_test.py
new file mode 100644
index 0000000..e81cfe6
--- /dev/null
+++ b/test/shipping_zone_test.py
@@ -0,0 +1,11 @@
+import shopify
+from test.test_helper import TestCase
+
+class ShippingZoneTest(TestCase):
+ def test_get_shipping_zones(self):
+ self.fake("shipping_zones", method='GET', body=self.load_fixture('shipping_zones'))
+ shipping_zones = shopify.ShippingZone.find()
+ self.assertEqual(1,len(shipping_zones))
+ self.assertEqual(shipping_zones[0].name,"Some zone")
+ self.assertEqual(3,len(shipping_zones[0].countries))
+
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_added_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 1
} | 2.1 | {
"env_vars": null,
"env_yml_path": [],
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pyactiveresource==2.2.2
pytest==8.3.5
PyYAML==6.0.2
-e git+https://github.com/Shopify/shopify_python_api.git@28c00a110c23edc5287d6e8f90f0e36f0eb5d1b3#egg=ShopifyAPI
six==1.17.0
tomli==2.2.1
| name: shopify_python_api
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pyactiveresource==2.2.2
- pytest==8.3.5
- pyyaml==6.0.2
- six==1.17.0
- tomli==2.2.1
prefix: /opt/conda/envs/shopify_python_api
| [
"test/shipping_zone_test.py::ShippingZoneTest::test_get_shipping_zones"
]
| []
| []
| []
| MIT License | 332 | [
"shopify/resources/__init__.py",
"shopify/resources/shipping_zone.py"
]
| [
"shopify/resources/__init__.py",
"shopify/resources/shipping_zone.py"
]
|
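
Because the new `ShippingZone` class is an empty `ShopifyResource` subclass, it inherits the standard finders. A hedged usage sketch follows; the shop URL and token are placeholders, and a live shop is required for the request to succeed.

```python
import shopify

# Placeholder credentials; substitute a real private-app key and password.
shopify.ShopifyResource.set_site(
    'https://API_KEY:[email protected]/admin')

zones = shopify.ShippingZone.find()  # GET /admin/shipping_zones.json
for zone in zones:
    print(zone.name, [country.name for country in zone.countries])
```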
|
ifosch__accloudtant-26 | 66343620af6679769c3f5a7c727ae5d3492dc5ac | 2015-12-11 18:48:30 | 66343620af6679769c3f5a7c727ae5d3492dc5ac | diff --git a/accloudtant/aws/prices/__init__.py b/accloudtant/aws/prices/__init__.py
index c038349..3b0dec6 100644
--- a/accloudtant/aws/prices/__init__.py
+++ b/accloudtant/aws/prices/__init__.py
@@ -12,8 +12,11 @@ from accloudtant.utils import fix_lazy_json
class Prices(object):
def __init__(self):
- self.prices = process_ec2()
- self.output = print_prices(self.prices)
+ with warnings.catch_warnings(record=True) as w:
+ self.prices = process_ec2()
+ self.output = print_prices(self.prices)
+ for warning in w:
+ self.output += "\n{}".format(warning.message)
def __repr__(self):
return self.output
diff --git a/bin/accloudtant b/bin/accloudtant
index 71ef806..ab13966 100755
--- a/bin/accloudtant
+++ b/bin/accloudtant
@@ -19,7 +19,4 @@ def report():
raise NotImplementedError
if __name__ == '__main__':
- with warnings.catch_warnings(record=True) as w:
- cli()
- for warning in w:
- print(warning.message)
+ cli()
| Warnings on unsupported pricelists vanished
When printing prices, there was a nice list of warnings about problems getting the data.
Since the addition of the two commands, this list vanished. (A standalone warning-capture sketch follows this record.) | ifosch/accloudtant | diff --git a/tests/aws/print_expected_with_warnings.txt b/tests/aws/print_expected_with_warnings.txt
new file mode 100644
index 0000000..4af3e90
--- /dev/null
+++ b/tests/aws/print_expected_with_warnings.txt
@@ -0,0 +1,6 @@
+EC2 (Hourly prices, no upfronts, no instance type features):
+Type On Demand 1y No Upfront 1y Partial Upfront 1y All Upfront 3y Partial Upfront 3y All Upfront
+---------- ----------- --------------- -------------------- ---------------- -------------------- ----------------
+c3.8xlarge 0.768 0.611 0.5121 0.5225 0.4143 0.3894
+g2.2xlarge 0.767 0.611 0.5121 0.5225 0.4143 0.3894
+WARN: Parser not implemented for Unknown
diff --git a/tests/aws/test_prices.py b/tests/aws/test_prices.py
index 1edd66d..f7f8b0f 100644
--- a/tests/aws/test_prices.py
+++ b/tests/aws/test_prices.py
@@ -1,3 +1,4 @@
+import warnings
import pytest
import accloudtant.aws.prices
@@ -29,12 +30,18 @@ def mock_requests_get():
@pytest.fixture
def mock_process_ec2():
class MockProcessEC2(object):
- def set_responses(self, responses=None):
+ def set_responses(self, responses=None, unknown=None):
if responses is None:
responses = {}
+ if unknown is None:
+ unknown = []
self.responses = responses
+ self.unknown = unknown
def __call__(self):
+ for name in self.unknown:
+ warnings.warn("WARN: Parser not implemented for {}"
+ .format(name))
return self.responses
def __init__(self, responses=None):
@@ -1383,3 +1390,296 @@ def test_prices(capsys, monkeypatch, mock_process_ec2):
assert("{}\n".format(prices.output) == expected)
assert(out == expected)
assert(out2 == expected)
+
+
+def test_prices_with_warning(capsys, monkeypatch, mock_process_ec2):
+ result = {
+ 'eip': {
+ 'eu-ireland': {
+ 'perRemapOver100': '0.10',
+ 'perRemapFirst100': '0.00',
+ 'oneEIP': '0.00',
+ 'perNonAttachedPerHour': '0.005',
+ 'perAdditionalEIPPerHour': '0.005',
+ },
+ 'us-east': {
+ 'perRemapOver100': '0.10',
+ 'perRemapFirst100': '0.00',
+ 'oneEIP': '0.00',
+ 'perNonAttachedPerHour': '0.005',
+ 'perAdditionalEIPPerHour': '0.005',
+ },
+ },
+ 'cw': {
+ 'us-east-1': {
+ 'ec2Monitoring': '3.50',
+ 'cwRequests': '0.01',
+ 'cloudWatchLogs': '0.67',
+ 'cwMetrics': '0.50',
+ 'cwAlarms': '0.10',
+ },
+ 'eu-west-1': {
+ 'ec2Monitoring': '4.55',
+ 'cwRequests': '0.013',
+ 'cwMetrics': '0.65',
+ 'cwAlarms': '0.0515',
+ },
+ },
+ 'ebs': {
+ 'eu-ireland': {
+ 'ebsSnapsToS3': '0.138',
+ 'Amazon EBS General Purpose (SSD) volumes': '0.095',
+ 'Amazon EBS Provisioned IOPS (SSD) volumes': '0.055',
+ 'Amazon EBS Magnetic volumes': '0.11',
+ },
+ 'us-east': {
+ 'ebsSnapsToS3': '0.125',
+ 'Amazon EBS General Purpose (SSD) volumes': '0.095',
+ 'Amazon EBS Provisioned IOPS (SSD) volumes': '0.05',
+ 'Amazon EBS Magnetic volumes': '0.10',
+ },
+ },
+ 'data_transfer': {
+ 'eu-west-1': {
+ 'regional': {'prices': {'USD': '0.01', }, },
+ 'ELB': {'prices': {'USD': '0.01', }, },
+ 'AZ': {'prices': {'USD': '0.00', }, },
+ 'dataXferInEC2': {
+ 'anotherRegion': '0.00',
+ 'sameAZprivateIP': '0.00',
+ 'anotherService': '0.00',
+ 'Internet': '0.00',
+ 'crossAZ': '0.01',
+ 'sameAZpublicIP': '0.01',
+ },
+ 'dataXferOutEC2': {
+ 'Amazon CloudFront': '0.00',
+ 'crossRegion': '0.02',
+ 'crossAZOut': '0.01',
+ 'anotherServiceOut': '0.00',
+ 'sameAZpublicIPOut': '0.01',
+ 'sameAZprivateIPOut': '0.00',
+ },
+ 'dataXferOutInternet': {
+ 'next4PBout': 'contactus',
+ 'next40TBout': '0.085',
+ 'next100TBout': '0.070',
+ 'next350TBout': '0.050',
+ 'next05PBout': 'contactus',
+ 'greater5PBout': 'contactus',
+ 'firstGBout': '0.000',
+ 'upTo10TBout': '0.090',
+ },
+ },
+ 'us-east-1': {
+ 'regional': {'prices': {'USD': '0.01', }, },
+ 'ELB': {'prices': {'USD': '0.01', }, },
+ 'AZ': {'prices': {'USD': '0.00', }, },
+ 'dataXferInEC2': {
+ 'anotherRegion': '0.00',
+ 'sameAZprivateIP': '0.00',
+ 'anotherService': '0.00',
+ 'Internet': '0.00',
+ 'crossAZ': '0.01',
+ 'sameAZpublicIP': '0.01',
+ },
+ 'dataXferOutEC2': {
+ 'Amazon CloudFront': '0.00',
+ 'crossRegion': '0.02',
+ 'crossAZOut': '0.01',
+ 'anotherServiceOut': '0.00',
+ 'sameAZpublicIPOut': '0.01',
+ 'sameAZprivateIPOut': '0.00',
+ },
+ 'dataXferOutInternet': {
+ 'next4PBout': 'contactus',
+ 'next40TBout': '0.085',
+ 'next100TBout': '0.070',
+ 'next350TBout': '0.050',
+ 'next05PBout': 'contactus',
+ 'greater5PBout': 'contactus',
+ 'firstGBout': '0.000',
+ 'upTo10TBout': '0.090',
+ },
+ },
+ },
+ 'elb': {
+ 'eu-ireland': {
+ 'perELBHour': '0.0008',
+ 'perGBProcessed': '0.028',
+ },
+ 'us-east': {
+ 'perELBHour': '0.0008',
+ 'perGBProcessed': '0.025',
+ },
+ },
+ 'linux': {
+ 'us-east-1': {
+ 'g2.2xlarge': {
+ 'storageGB': '60 SSD',
+ 'ri': {
+ 'yrTerm1': {
+ 'noUpfront': {
+ 'upfront': '0',
+ 'monthlyStar': '446.03',
+ 'effectiveHourly': '0.611',
+ },
+ 'allUpfront': {
+ 'upfront': '2974',
+ 'monthlyStar': '133.59',
+ 'effectiveHourly': '0.5225',
+ },
+ 'partialUpfront': {
+ 'upfront': '4486',
+ 'monthlyStar': '0',
+ 'effectiveHourly': '0.5121',
+ },
+ },
+ 'yrTerm3': {
+ 'allUpfront': {
+ 'upfront': '10234',
+ 'monthlyStar': '0',
+ 'effectiveHourly': '0.3894',
+ },
+ 'partialUpfront': {
+ 'upfront': '7077',
+ 'monthlyStar': '105.85',
+ 'effectiveHourly': '0.4143',
+ },
+ },
+ },
+ 'od': '0.767',
+ 'memoryGiB': '15',
+ 'vCPU': '8',
+ },
+ 'c3.8xlarge': {
+ 'storageGB': '60 SSD',
+ 'ri': {
+ 'yrTerm1': {
+ 'noUpfront': {
+ 'upfront': '0',
+ 'monthlyStar': '446.03',
+ 'effectiveHourly': '0.611',
+ },
+ 'allUpfront': {
+ 'upfront': '2974',
+ 'monthlyStar': '133.59',
+ 'effectiveHourly': '0.5225',
+ },
+ 'partialUpfront': {
+ 'upfront': '4486',
+ 'monthlyStar': '0',
+ 'effectiveHourly': '0.5121',
+ },
+ },
+ 'yrTerm3': {
+ 'allUpfront': {
+ 'upfront': '10234',
+ 'monthlyStar': '0',
+ 'effectiveHourly': '0.3894',
+ },
+ 'partialUpfront': {
+ 'upfront': '7077',
+ 'monthlyStar': '105.85',
+ 'effectiveHourly': '0.4143',
+ },
+ },
+ },
+ 'od': '0.768',
+ 'memoryGiB': '15',
+ 'vCPU': '8',
+ },
+ },
+ 'eu-west-1': {
+ 'g2.2xlarge': {
+ 'storageGB': '60 SSD',
+ 'ri': {
+ 'yrTerm1': {
+ 'noUpfront': {
+ 'upfront': '0',
+ 'monthlyStar': '446.03',
+ 'effectiveHourly': '0.611',
+ },
+ 'allUpfront': {
+ 'upfront': '2974',
+ 'monthlyStar': '133.59',
+ 'effectiveHourly': '0.5225',
+ },
+ 'partialUpfront': {
+ 'upfront': '4486',
+ 'monthlyStar': '0',
+ 'effectiveHourly': '0.5121',
+ },
+ },
+ 'yrTerm3': {
+ 'allUpfront': {
+ 'upfront': '10234',
+ 'monthlyStar': '0',
+ 'effectiveHourly': '0.3894',
+ },
+ 'partialUpfront': {
+ 'upfront': '7077',
+ 'monthlyStar': '105.85',
+ 'effectiveHourly': '0.4143',
+ },
+ },
+ },
+ 'od': '0.787',
+ 'memoryGiB': '15',
+ 'vCPU': '8',
+ },
+ 'c3.8xlarge': {
+ 'storageGB': '60 SSD',
+ 'ri': {
+ 'yrTerm1': {
+ 'noUpfront': {
+ 'upfront': '0',
+ 'monthlyStar': '446.03',
+ 'effectiveHourly': '0.611',
+ },
+ 'allUpfront': {
+ 'upfront': '2974',
+ 'monthlyStar': '133.59',
+ 'effectiveHourly': '0.5225',
+ },
+ 'partialUpfront': {
+ 'upfront': '4486',
+ 'monthlyStar': '0',
+ 'effectiveHourly': '0.5121',
+ },
+ },
+ 'yrTerm3': {
+ 'allUpfront': {
+ 'upfront': '10234',
+ 'monthlyStar': '0',
+ 'effectiveHourly': '0.3894',
+ },
+ 'partialUpfront': {
+ 'upfront': '7077',
+ 'monthlyStar': '105.85',
+ 'effectiveHourly': '0.4143',
+ },
+ },
+ },
+ 'od': '0.767',
+ 'memoryGiB': '15',
+ 'vCPU': '8',
+ },
+ },
+ },
+ }
+ expected = open('tests/aws/print_expected_with_warnings.txt', 'r').read()
+
+ monkeypatch.setattr(
+ 'accloudtant.aws.prices.process_ec2',
+ mock_process_ec2
+ )
+ mock_process_ec2.set_responses(result, ['Unknown'])
+
+ prices = accloudtant.aws.prices.Prices()
+ print(prices)
+ out, err = capsys.readouterr()
+
+ assert(prices.prices == result)
+ assert("{}\n".format(prices.output) == expected)
+ assert(out == expected)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/ifosch/accloudtant.git@66343620af6679769c3f5a7c727ae5d3492dc5ac#egg=accloudtant
certifi==2025.1.31
charset-normalizer==3.4.1
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
requests==2.32.3
tabulate==0.9.0
tomli==2.2.1
urllib3==2.3.0
| name: accloudtant
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- requests==2.32.3
- tabulate==0.9.0
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/accloudtant
| [
"tests/aws/test_prices.py::test_prices_with_warning"
]
| []
| [
"tests/aws/test_prices.py::test_model_ec2",
"tests/aws/test_prices.py::test_process_model",
"tests/aws/test_prices.py::test_process_generic",
"tests/aws/test_prices.py::test_process_on_demand",
"tests/aws/test_prices.py::test_process_reserved",
"tests/aws/test_prices.py::test_process_data_transfer",
"tests/aws/test_prices.py::test_process_ebs",
"tests/aws/test_prices.py::test_process_eip",
"tests/aws/test_prices.py::test_process_cw",
"tests/aws/test_prices.py::test_process_elb",
"tests/aws/test_prices.py::test_print_prices",
"tests/aws/test_prices.py::test_prices"
]
| []
| null | 333 | [
"accloudtant/aws/prices/__init__.py",
"bin/accloudtant"
]
| [
"accloudtant/aws/prices/__init__.py",
"bin/accloudtant"
]
|
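
The accloudtant fix above records warnings raised while the price table is built and appends their messages to the stored output, so they survive past construction and show up when the object is printed. A standalone sketch of that pattern; `process_ec2` here is a stand-in, and `simplefilter('always')` is added only to make the sketch deterministic.

```python
import warnings

def process_ec2():
    # Stand-in emitting the kind of warning the real parser produces.
    warnings.warn('WARN: Parser not implemented for Unknown')
    return {'linux': {}}

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    prices = process_ec2()
output = 'EC2 (Hourly prices, no upfronts, no instance type features):'
for w in caught:
    output += '\n{}'.format(w.message)

print(output)  # the table header followed by the recorded warning
```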
|
garnaat__placebo-21 | 81cb9c0e48f7e6e58e2cdd6414b777b39c846c4b | 2015-12-14 17:29:40 | 81cb9c0e48f7e6e58e2cdd6414b777b39c846c4b | diff --git a/placebo/__init__.py b/placebo/__init__.py
index 43e34d6..8dc9470 100644
--- a/placebo/__init__.py
+++ b/placebo/__init__.py
@@ -15,7 +15,7 @@
from placebo.pill import Pill
-def attach(session, data_path, debug=False):
- pill = Pill(debug=debug)
+def attach(session, data_path, prefix=None, debug=False):
+ pill = Pill(prefix=prefix, debug=debug)
pill.attach(session, data_path)
return pill
diff --git a/placebo/pill.py b/placebo/pill.py
index 472402e..2c0cdd2 100644
--- a/placebo/pill.py
+++ b/placebo/pill.py
@@ -35,10 +35,11 @@ class Pill(object):
clients = []
- def __init__(self, debug=False):
+ def __init__(self, prefix=None, debug=False):
if debug:
self._set_logger(__name__, logging.DEBUG)
self.filename_re = re.compile(r'.*\..*_(?P<index>\d+).json')
+ self.prefix = prefix
self._uuid = str(uuid.uuid4())
self._data_path = None
self._mode = None
@@ -52,6 +53,10 @@ class Pill(object):
def mode(self):
return self._mode
+ @property
+ def data_path(self):
+ return self._data_path
+
def _set_logger(self, logger_name, level=logging.INFO):
"""
Convenience function to quickly configure full debug output
@@ -178,6 +183,8 @@ class Pill(object):
def get_new_file_path(self, service, operation):
base_name = '{}.{}'.format(service, operation)
+ if self.prefix:
+ base_name = '{}.{}'.format(self.prefix, base_name)
LOG.debug('get_new_file_path: %s', base_name)
index = 0
glob_pattern = os.path.join(self._data_path, base_name + '*')
@@ -194,6 +201,8 @@ class Pill(object):
def get_next_file_path(self, service, operation):
base_name = '{}.{}'.format(service, operation)
+ if self.prefix:
+ base_name = '{}.{}'.format(self.prefix, base_name)
LOG.debug('get_next_file_path: %s', base_name)
next_file = None
while next_file is None:
| Optional prefix for saved response filenames
I noticed that each time Placebo writes a new response JSON file, it adds a `_<number>` suffix before the `.json`, and simply increments the suffix until it finds one that doesn't exist yet. This is better than overwriting previous runs, but doesn't give any way to distinguish multiple runs.
What would be really nice, is if you could optionally set a `prefix` argument on a `Pill` object. This prefix would be applied to all filenames written by the pill, like so: `prefix.iam.ListUserPolicies_1.json`.
I can think of two good use cases for this:
1. Define the prefix uniquely on each execution, using a command line argument, timestamp, or some other generated value. Now individual executions can be reloaded and replayed, or even diffed for comparison.
2. For an application that uses multiple Boto sessions, define a different prefix for each session. For example, I can use `base` for my initial credentials session, `mfa` for my new session after MFA authentication, and `<rolename>` for a session created using an IAM role. I can even combine that with a timestamp (as in case 1) to separate multiple runs. (A hedged playback sketch follows this record.) | garnaat/placebo | diff --git a/tests/unit/responses/saved/foo.ec2.DescribeAddresses_1.json b/tests/unit/responses/saved/foo.ec2.DescribeAddresses_1.json
new file mode 100644
index 0000000..a907bf1
--- /dev/null
+++ b/tests/unit/responses/saved/foo.ec2.DescribeAddresses_1.json
@@ -0,0 +1,16 @@
+{
+ "status_code": 200,
+ "data": {
+ "Addresses": [
+ {
+ "PublicIp": "42.43.44.45",
+ "Domain": "vpc",
+ "AllocationId": "eipalloc-87654321"
+ }
+ ],
+ "ResponseMetadata": {
+ "HTTPStatusCode": 200,
+ "RequestId": "b0fe7bb2-6552-4ea8-8a36-9685044048ab"
+ }
+ }
+}
diff --git a/tests/unit/test_canned.py b/tests/unit/test_canned.py
index 913459d..c25905a 100644
--- a/tests/unit/test_canned.py
+++ b/tests/unit/test_canned.py
@@ -58,3 +58,23 @@ class TestPlacebo(unittest.TestCase):
self.assertEqual(len(result['KeyPairs']), 2)
self.assertEqual(result['KeyPairs'][0]['KeyName'], 'FooBar')
self.assertEqual(result['KeyPairs'][1]['KeyName'], 'FieBaz')
+
+ def test_prefix_new_file_path(self):
+ self.pill.prefix = 'foo'
+ service = 'ec2'
+ operation = 'DescribeAddresses'
+ filename = '{}.{}.{}_2.json'.format(self.pill.prefix, service,
+ operation)
+ target = os.path.join(self.data_path, filename)
+ self.assertEqual(self.pill.get_new_file_path(service, operation),
+ target)
+
+ def test_prefix_next_file_path(self):
+ self.pill.prefix = 'foo'
+ service = 'ec2'
+ operation = 'DescribeAddresses'
+ filename = '{}.{}.{}_1.json'.format(self.pill.prefix, service,
+ operation)
+ target = os.path.join(self.data_path, filename)
+ self.assertEqual(self.pill.get_next_file_path(service, operation),
+ target)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"coverage",
"pytest"
],
"pre_install": null,
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
boto3==1.2.2
botocore==1.3.30
certifi==2021.5.30
coverage==6.2
docutils==0.18.1
importlib-metadata==4.8.3
iniconfig==1.1.1
jmespath==0.10.0
mock==1.3.0
nose==1.3.7
packaging==21.3
pbr==6.1.1
-e git+https://github.com/garnaat/placebo.git@81cb9c0e48f7e6e58e2cdd6414b777b39c846c4b#egg=placebo
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
python-dateutil==2.9.0.post0
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: placebo
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- boto3==1.2.2
- botocore==1.3.30
- coverage==6.2
- docutils==0.18.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jmespath==0.10.0
- mock==1.3.0
- nose==1.3.7
- packaging==21.3
- pbr==6.1.1
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/placebo
| [
"tests/unit/test_canned.py::TestPlacebo::test_prefix_new_file_path",
"tests/unit/test_canned.py::TestPlacebo::test_prefix_next_file_path"
]
| []
| [
"tests/unit/test_canned.py::TestPlacebo::test_describe_addresses",
"tests/unit/test_canned.py::TestPlacebo::test_describe_key_pairs"
]
| []
| Apache License 2.0 | 335 | [
"placebo/__init__.py",
"placebo/pill.py"
]
| [
"placebo/__init__.py",
"placebo/pill.py"
]
|
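
A hedged playback sketch for the new `prefix` argument, wired to the `foo.ec2.DescribeAddresses_1.json` fixture added in the test patch above. The dummy credentials never reach AWS, but the relative data path assumes the snippet runs from the placebo repo root.

```python
import boto3
import placebo

session = boto3.Session(aws_access_key_id='dummy',
                        aws_secret_access_key='dummy',
                        region_name='us-east-1')
pill = placebo.attach(session, data_path='tests/unit/responses/saved',
                      prefix='foo')
pill.playback()

ec2 = session.client('ec2')
# Served from foo.ec2.DescribeAddresses_1.json, never from AWS.
print(ec2.describe_addresses()['Addresses'][0]['PublicIp'])  # 42.43.44.45
```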
|
pypa__twine-156 | 5db1018ada0ba4d98201c299c84858e98adb87e3 | 2015-12-16 23:36:55 | f487b7da9c42e4932bc33bf10d70cdc59fd16fd5 | diff --git a/README.rst b/README.rst
index ab4baa4..9b8f0bb 100644
--- a/README.rst
+++ b/README.rst
@@ -54,15 +54,15 @@ Usage
1. Create some distributions in the normal way:
- .. code-block:: bash
+.. code-block:: bash
- $ python setup.py sdist bdist_wheel
+ $ python setup.py sdist bdist_wheel
2. Upload with twine:
- .. code-block:: bash
+.. code-block:: bash
- $ twine upload dist/*
+ $ twine upload dist/*
3. Done!
@@ -73,10 +73,8 @@ Options
.. code-block:: bash
$ twine upload -h
-
- usage: twine upload [-h] [-r REPOSITORY] [-s] [--sign-with SIGN_WITH]
- [-i IDENTITY] [-u USERNAME] [-p PASSWORD] [-c COMMENT]
- [--config-file CONFIG_FILE] [--skip-existing]
+ usage: twine upload [-h] [-r REPOSITORY] [-s] [-i IDENTITY] [-u USERNAME]
+ [-p PASSWORD] [-c COMMENT]
dist [dist ...]
positional arguments:
@@ -87,10 +85,8 @@ Options
optional arguments:
-h, --help show this help message and exit
-r REPOSITORY, --repository REPOSITORY
- The repository to upload the files to (default: pypi)
+ The repository to upload the files to
-s, --sign Sign files to upload using gpg
- --sign-with SIGN_WITH
- GPG program used to sign uploads (default: gpg)
-i IDENTITY, --identity IDENTITY
GPG identity used to sign files
-u USERNAME, --username USERNAME
@@ -99,9 +95,8 @@ Options
The password to authenticate to the repository with
-c COMMENT, --comment COMMENT
The comment to include with the distribution file
- --config-file CONFIG_FILE
+ --config-file FILE
The .pypirc config file to use
- --skip-existing Continue uploading files if one already exists
Resources
diff --git a/docs/changelog.rst b/docs/changelog.rst
index 9ad96d0..d16cada 100644
--- a/docs/changelog.rst
+++ b/docs/changelog.rst
@@ -4,12 +4,10 @@
Changelog
=========
-* :release:`1.7.0 <TBD>`
+* :release:`1.6.5 <2015-12-16>`
- * :feature:`142` Support ``--cert`` and ``--client-cert`` command-line flags
- and config file options for feature parity with pip. This allows users to
- verify connections to servers other than PyPI (e.g., local package
- repositories) with different certificates.
+ * :bug:`155` Bump requests-toolbelt version to ensure we avoid
+ ConnectionErrors
* :release:`1.6.4 <2015-10-27>`
diff --git a/setup.py b/setup.py
index 6053a66..2af1dcc 100644
--- a/setup.py
+++ b/setup.py
@@ -21,7 +21,7 @@ import twine
install_requires = [
"pkginfo >= 1.0",
"requests >= 2.3.0",
- "requests-toolbelt >= 0.4.0",
+ "requests-toolbelt >= 0.5.1",
"setuptools >= 0.7.0",
]
diff --git a/twine/__init__.py b/twine/__init__.py
index e8f83e4..2549002 100644
--- a/twine/__init__.py
+++ b/twine/__init__.py
@@ -23,7 +23,7 @@ __title__ = "twine"
__summary__ = "Collection of utilities for interacting with PyPI"
__uri__ = "https://github.com/pypa/twine"
-__version__ = "1.6.4"
+__version__ = "1.6.5"
__author__ = "Donald Stufft and individual contributors"
__email__ = "[email protected]"
diff --git a/twine/commands/register.py b/twine/commands/register.py
index 16eece0..6be8cc8 100644
--- a/twine/commands/register.py
+++ b/twine/commands/register.py
@@ -23,8 +23,7 @@ from twine.repository import Repository
from twine import utils
-def register(package, repository, username, password, comment, config_file,
- cert, client_cert):
+def register(package, repository, username, password, comment, config_file):
config = utils.get_repository_from_config(config_file, repository)
config["repository"] = utils.normalize_repository_url(
config["repository"]
@@ -34,12 +33,8 @@ def register(package, repository, username, password, comment, config_file,
username = utils.get_username(username, config)
password = utils.get_password(password, config)
- ca_cert = utils.get_cacert(cert, config)
- client_cert = utils.get_clientcert(client_cert, config)
repository = Repository(config["repository"], username, password)
- repository.set_certificate_authority(ca_cert)
- repository.set_client_certificate(client_cert)
if not os.path.exists(package):
raise exc.PackageNotFound(
@@ -83,17 +78,6 @@ def main(args):
default="~/.pypirc",
help="The .pypirc config file to use",
)
- parser.add_argument(
- "--cert",
- metavar="path",
- help="Path to alternate CA bundle",
- )
- parser.add_argument(
- "--client-cert",
- metavar="path",
- help="Path to SSL client certificate, a single file containing the "
- "private key and the certificate in PEM forma",
- )
parser.add_argument(
"package",
metavar="package",
diff --git a/twine/commands/upload.py b/twine/commands/upload.py
index f194d8a..2bd4a52 100644
--- a/twine/commands/upload.py
+++ b/twine/commands/upload.py
@@ -62,7 +62,7 @@ def skip_upload(response, skip_existing, package):
def upload(dists, repository, sign, identity, username, password, comment,
- sign_with, config_file, skip_existing, cert, client_cert):
+ sign_with, config_file, skip_existing):
# Check that a nonsensical option wasn't given
if not sign and identity:
raise ValueError("sign must be given along with identity")
@@ -85,12 +85,8 @@ def upload(dists, repository, sign, identity, username, password, comment,
username = utils.get_username(username, config)
password = utils.get_password(password, config)
- ca_cert = utils.get_cacert(cert, config)
- client_cert = utils.get_clientcert(client_cert, config)
repository = Repository(config["repository"], username, password)
- repository.set_certificate_authority(ca_cert)
- repository.set_client_certificate(client_cert)
for filename in uploads:
package = PackageFile.from_filename(filename, comment)
@@ -171,17 +167,6 @@ def main(args):
action="store_true",
help="Continue uploading files if one already exists",
)
- parser.add_argument(
- "--cert",
- metavar="path",
- help="Path to alternate CA bundle",
- )
- parser.add_argument(
- "--client-cert",
- metavar="path",
- help="Path to SSL client certificate, a single file containing the "
- "private key and the certificate in PEM forma",
- )
parser.add_argument(
"dists",
nargs="+",
diff --git a/twine/repository.py b/twine/repository.py
index ac441f5..ae57821 100644
--- a/twine/repository.py
+++ b/twine/repository.py
@@ -41,14 +41,6 @@ class Repository(object):
data_to_send.append((key, item))
return data_to_send
- def set_certificate_authority(self, cacert):
- if cacert:
- self.session.verify = cacert
-
- def set_client_certificate(self, clientcert):
- if clientcert:
- self.session.cert = clientcert
-
def register(self, package):
data = package.metadata_dictionary()
data.update({
diff --git a/twine/utils.py b/twine/utils.py
index d8771ce..db49d14 100644
--- a/twine/utils.py
+++ b/twine/utils.py
@@ -116,15 +116,14 @@ def normalize_repository_url(url):
return urlunparse(parsed)
-def get_userpass_value(cli_value, config, key, prompt_strategy=None):
+def get_userpass_value(cli_value, config, key, prompt_strategy):
"""Gets the username / password from config.
Uses the following rules:
1. If it is specified on the cli (`cli_value`), use that.
2. If `config[key]` is specified, use that.
- 3. If `prompt_strategy`, prompt using `prompt_strategy`.
- 4. Otherwise return None
+ 3. Otherwise prompt using `prompt_strategy`.
:param cli_value: The value supplied from the command line or `None`.
:type cli_value: unicode or `None`
@@ -141,10 +140,8 @@ def get_userpass_value(cli_value, config, key, prompt_strategy=None):
return cli_value
elif config.get(key):
return config[key]
- elif prompt_strategy:
- return prompt_strategy()
else:
- return None
+ return prompt_strategy()
def password_prompt(prompt_text): # Always expects unicode for our own sanity
@@ -164,11 +161,3 @@ get_password = functools.partial(
key='password',
prompt_strategy=password_prompt('Enter your password: '),
)
-get_cacert = functools.partial(
- get_userpass_value,
- key='ca_cert',
-)
-get_clientcert = functools.partial(
- get_userpass_value,
- key='client_cert',
-)
| Requests 2.9.0 and requests-toolbelt < 0.5.1 break uploading
We need to raise the lower bound of our requests-toolbelt dependency (a sketch of the kind of change follows this record).
See also: https://github.com/sigmavirus24/requests-toolbelt/issues/117 | pypa/twine | diff --git a/tests/test_repository.py b/tests/test_repository.py
index 684f403..3b8d84b 100644
--- a/tests/test_repository.py
+++ b/tests/test_repository.py
@@ -47,29 +47,3 @@ def test_iterables_are_flattened():
tuples = repository.Repository._convert_data_to_list_of_tuples(data)
assert tuples == [('platform', 'UNKNOWN'),
('platform', 'ANOTHERPLATFORM')]
-
-
-def test_set_client_certificate():
- repo = repository.Repository(
- repository_url='https://pypi.python.org/pypi',
- username='username',
- password='password',
- )
-
- assert repo.session.cert is None
-
- repo.set_client_certificate(('/path/to/cert', '/path/to/key'))
- assert repo.session.cert == ('/path/to/cert', '/path/to/key')
-
-
-def test_set_certificate_authority():
- repo = repository.Repository(
- repository_url='https://pypi.python.org/pypi',
- username='username',
- password='password',
- )
-
- assert repo.session.verify is True
-
- repo.set_certificate_authority('/path/to/cert')
- assert repo.session.verify == '/path/to/cert'
diff --git a/tests/test_upload.py b/tests/test_upload.py
index 596de80..7f99510 100644
--- a/tests/test_upload.py
+++ b/tests/test_upload.py
@@ -78,7 +78,6 @@ def test_get_config_old_format(tmpdir):
try:
upload.upload(dists=dists, repository="pypi", sign=None, identity=None,
username=None, password=None, comment=None,
- cert=None, client_cert=None,
sign_with=None, config_file=pypirc, skip_existing=False)
except KeyError as err:
assert err.args[0] == (
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 8
} | 1.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"coverage",
"pretend",
"flake8"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
flake8==7.2.0
idna==3.10
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
mccabe==0.7.0
packaging @ file:///croot/packaging_1734472117206/work
pkginfo==1.12.1.2
pluggy @ file:///croot/pluggy_1733169602837/work
pretend==1.0.9
pycodestyle==2.13.0
pyflakes==3.3.1
pytest @ file:///croot/pytest_1738938843180/work
requests==2.32.3
requests-toolbelt==1.0.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
-e git+https://github.com/pypa/twine.git@5db1018ada0ba4d98201c299c84858e98adb87e3#egg=twine
urllib3==2.3.0
| name: twine
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- flake8==7.2.0
- idna==3.10
- mccabe==0.7.0
- pkginfo==1.12.1.2
- pretend==1.0.9
- pycodestyle==2.13.0
- pyflakes==3.3.1
- requests==2.32.3
- requests-toolbelt==1.0.0
- urllib3==2.3.0
prefix: /opt/conda/envs/twine
| [
"tests/test_upload.py::test_get_config_old_format"
]
| []
| [
"tests/test_repository.py::test_gpg_signature_structure_is_preserved",
"tests/test_repository.py::test_content_structure_is_preserved",
"tests/test_repository.py::test_iterables_are_flattened",
"tests/test_upload.py::test_ensure_wheel_files_uploaded_first",
"tests/test_upload.py::test_ensure_if_no_wheel_files",
"tests/test_upload.py::test_find_dists_expands_globs",
"tests/test_upload.py::test_find_dists_errors_on_invalid_globs",
"tests/test_upload.py::test_find_dists_handles_real_files",
"tests/test_upload.py::test_skip_existing_skips_files_already_on_PyPI",
"tests/test_upload.py::test_skip_upload_respects_skip_existing"
]
| []
| Apache License 2.0 | 338 | [
"README.rst",
"twine/__init__.py",
"twine/commands/upload.py",
"setup.py",
"twine/commands/register.py",
"docs/changelog.rst",
"twine/utils.py",
"twine/repository.py"
]
| [
"README.rst",
"twine/__init__.py",
"twine/commands/upload.py",
"setup.py",
"twine/commands/register.py",
"docs/changelog.rst",
"twine/utils.py",
"twine/repository.py"
]
|
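The twine record above asks for a dependency lower-bound bump rather than a code change. Below is a minimal sketch of that bump in a setuptools project; the setup.py layout and the bare "requests" pin are assumptions, and only the ">= 0.5.1" bound comes from the record's title.

from setuptools import setup

setup(
    name="twine",
    install_requires=[
        "requests",
        # Raise the lower bound past the releases that break uploading
        # under requests 2.9.0 (see requests-toolbelt issue #117):
        "requests-toolbelt >= 0.5.1",
    ],
)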
|
Shopify__shopify_python_api-130 | ecd532cc904729fd366f05ae8d7d754df79b55f7 | 2015-12-17 02:25:44 | c29e0ecbed9de67dd923f980a3ac053922dab75e | diff --git a/shopify/resources/__init__.py b/shopify/resources/__init__.py
index adacc08..da17196 100644
--- a/shopify/resources/__init__.py
+++ b/shopify/resources/__init__.py
@@ -47,5 +47,6 @@ from .smart_collection import SmartCollection
from .gift_card import GiftCard
from .discount import Discount
from .shipping_zone import ShippingZone
+from .location import Location
from ..base import ShopifyResource
diff --git a/shopify/resources/location.py b/shopify/resources/location.py
new file mode 100644
index 0000000..671b5b0
--- /dev/null
+++ b/shopify/resources/location.py
@@ -0,0 +1,5 @@
+from ..base import ShopifyResource
+
+
+class Location(ShopifyResource):
+ pass
| How do I retrieve Location information?
I see that dir(shopify) doesn't show me Location as an attribute, but I'm wondering if there's another way to use the API to get it (even though perhaps it's not wrapped as nicely as these other resources):
https://docs.shopify.com/api/location
dir(shopify)
['Address', 'ApplicationCharge', 'Article', 'Asset', 'BillingAddress', 'Blog', 'CarrierService', 'Cart', 'Checkout', 'Collect', 'Comment', 'Country', 'CustomCollection', 'Customer', 'CustomerGroup', 'CustomerSavedSearch', 'Event', 'Fulfillment', 'FulfillmentService', 'GiftCard', 'Image', 'LineItem', 'Metafield', 'NoteAttribute', 'Option', 'Order', 'OrderRisk', 'Page', 'PaymentDetails', 'Policy', 'Product', 'ProductSearchEngine', 'Province', 'Receipt', 'RecurringApplicationCharge', 'Redirect', 'Rule', 'ScriptTag', 'Session', 'ShippingAddress', 'ShippingLine', 'Shop', 'ShopifyResource', 'SmartCollection', 'TaxLine', 'Theme', 'Transaction', 'VERSION', 'ValidationException', 'Variant', 'Webhook', '__builtins__', '__doc__', '__file__', '__name__', '__package__', '__path__', 'address', 'application_charge', 'article', 'asset', 'base', 'billing_address', 'blog', 'carrier_service', 'cart', 'checkout', 'collect', 'comment', 'country', 'custom_collection', 'customer', 'customer_group', 'customer_saved_search', 'event', 'fulfillment', 'fulfillment_service', 'gift_card', 'image', 'line_item', 'metafield', 'mixins', 'note_attribute', 'option', 'order', 'order_risk', 'page', 'payment_details', 'policy', 'product', 'product_search_engine', 'province', 'receipt', 'recurring_application_charge', 'redirect', 'resources', 'rule', 'script_tag', 'session', 'shipping_address', 'shipping_line', 'shop', 'smart_collection', 'tax_line', 'theme', 'transaction', 'variant', 'version', 'webhook', 'yamlobjects']
Can I use this module to get location information?
Thanks! | Shopify/shopify_python_api | diff --git a/test/fixtures/location.json b/test/fixtures/location.json
new file mode 100644
index 0000000..ae07fac
--- /dev/null
+++ b/test/fixtures/location.json
@@ -0,0 +1,19 @@
+{
+ "location": {
+ "id": 487838322,
+ "name": "Fifth Avenue AppleStore",
+ "deleted_at": null,
+ "address1": null,
+ "address2": null,
+ "city": null,
+ "zip": null,
+ "province": null,
+ "country": "US",
+ "phone": null,
+ "created_at": "2015-12-08T11:44:58-05:00",
+ "updated_at": "2015-12-08T11:44:58-05:00",
+ "country_code": "US",
+ "country_name": "United States",
+ "province_code": null
+ }
+}
diff --git a/test/fixtures/locations.json b/test/fixtures/locations.json
new file mode 100644
index 0000000..906f7b7
--- /dev/null
+++ b/test/fixtures/locations.json
@@ -0,0 +1,38 @@
+{
+ "locations": [
+ {
+ "id": 487838322,
+ "name": "Fifth Avenue AppleStore",
+ "deleted_at": null,
+ "address1": null,
+ "address2": null,
+ "city": null,
+ "zip": null,
+ "province": null,
+ "country": "US",
+ "phone": null,
+ "created_at": "2015-12-08T11:44:58-05:00",
+ "updated_at": "2015-12-08T11:44:58-05:00",
+ "country_code": "US",
+ "country_name": "United States",
+ "province_code": null
+ },
+ {
+ "id": 1034478814,
+ "name": "Berlin Store",
+ "deleted_at": null,
+ "address1": null,
+ "address2": null,
+ "city": null,
+ "zip": null,
+ "province": null,
+ "country": "DE",
+ "phone": null,
+ "created_at": "2015-12-08T11:44:58-05:00",
+ "updated_at": "2015-12-08T11:44:58-05:00",
+ "country_code": "DE",
+ "country_name": "Germany",
+ "province_code": null
+ }
+ ]
+}
diff --git a/test/locations_test.py b/test/locations_test.py
new file mode 100644
index 0000000..44a6768
--- /dev/null
+++ b/test/locations_test.py
@@ -0,0 +1,14 @@
+import shopify
+from test.test_helper import TestCase
+
+class LocationsTest(TestCase):
+ def test_fetch_locations(self):
+ self.fake("locations", method='GET', body=self.load_fixture('locations'))
+ locations = shopify.Location.find()
+ self.assertEqual(2,len(locations))
+
+ def test_fetch_location(self):
+ self.fake("locations/487838322", method='GET', body=self.load_fixture('location'))
+ location = shopify.Location.find(487838322)
+ self.assertEqual(location.id,487838322)
+ self.assertEqual(location.name,"Fifth Avenue AppleStore")
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 1
} | 2.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pyactiveresource==2.2.2
pytest==8.3.5
PyYAML==6.0.2
-e git+https://github.com/Shopify/shopify_python_api.git@ecd532cc904729fd366f05ae8d7d754df79b55f7#egg=ShopifyAPI
six==1.17.0
tomli==2.2.1
| name: shopify_python_api
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pyactiveresource==2.2.2
- pytest==8.3.5
- pyyaml==6.0.2
- six==1.17.0
- tomli==2.2.1
prefix: /opt/conda/envs/shopify_python_api
| [
"test/locations_test.py::LocationsTest::test_fetch_location",
"test/locations_test.py::LocationsTest::test_fetch_locations"
]
| []
| []
| []
| MIT License | 339 | [
"shopify/resources/__init__.py",
"shopify/resources/location.py"
]
| [
"shopify/resources/__init__.py",
"shopify/resources/location.py"
]
|
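For the Shopify record above, a short usage sketch of the newly added Location resource; the shop URL and credentials are placeholders, while the resource name and the id/name values come from the record's patch and fixtures.

import shopify

# Assumed private-app style authentication -- replace the placeholders.
shopify.ShopifyResource.set_site(
    "https://API_KEY:PASSWORD@myshop.myshopify.com/admin")

locations = shopify.Location.find()       # list all locations
store = shopify.Location.find(487838322)  # fetch one location by id
print(store.name)                         # "Fifth Avenue AppleStore" in the fixture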
|
jupyter-incubator__sparkmagic-73 | d9662c5c5b089976810dfe863437d86ae72ccf82 | 2015-12-18 01:53:03 | d9662c5c5b089976810dfe863437d86ae72ccf82 | diff --git a/remotespark/RemoteSparkMagics.py b/remotespark/RemoteSparkMagics.py
index 361b91b..5271de0 100644
--- a/remotespark/RemoteSparkMagics.py
+++ b/remotespark/RemoteSparkMagics.py
@@ -55,30 +55,40 @@ class RemoteSparkMagics(Magics):
Constants.context_name_sql,
Constants.context_name_hive,
Constants.context_name_spark))
- @argument("-e", "--endpoint", help="The name of the Livy endpoint to use. "
- "If only one endpoint has been created, there's no need to specify one.")
+ @argument("-s", "--session", help="The name of the Livy session to use. "
+ "If only one session has been created, there's no need to specify one.")
@argument("-t", "--chart", type=str, default="area", help='Chart type to use: table, area, line, bar.')
@argument("command", type=str, default=[""], nargs="*", help="Commands to execute.")
@line_cell_magic
def spark(self, line, cell=""):
- """Magic to execute spark remotely.
- If invoked with no subcommand, the code will be executed against the specified endpoint.
+ """Magic to execute spark remotely.
+
+ This magic allows you to create a Livy Scala or Python session against a Livy endpoint. Every session can
+ be used to execute either Spark code or SparkSQL code by executing against the SQL context in the session.
+ When the SQL context is used, the result will be a Pandas dataframe of a sample of the results.
+
+ If invoked with no subcommand, the cell will be executed against the specified session.
Subcommands
-----------
info
- Display the mode and available Livy endpoints.
+ Display the mode and available Livy sessions.
add
- Add a Livy endpoint. First argument is the friendly name of the endpoint, second argument
- is the language, and third argument is the connection string. A fourth argument specifying if
- endpoint can be skipped if already present is optional: "skip" or empty.
+ Add a Livy session. First argument is the name of the session, second argument
+ is the language, and third argument is the connection string of the Livy endpoint.
+ A fourth argument specifying if session creation can be skipped if it already exists is optional:
+ "skip" or empty.
e.g. `%%spark add test python url=https://sparkcluster.example.net/livy;username=admin;password=MyPassword skip`
or
e.g. `%%spark add test python url=https://sparkcluster.example.net/livy;username=admin;password=MyPassword`
+ run
+ Run Spark code against a session.
+ e.g. `%%spark -e testsession` will execute the cell code against the testsession previously created
+ e.g. `%%spark -e testsession -c sql` will execute the SQL code against the testsession previously created
delete
- Delete a Livy endpoint. Argument is the friendly name of the endpoint to be deleted.
+ Delete a Livy session. Argument is the name of the session to be deleted.
e.g. `%%spark delete defaultlivy`
cleanup
- Delete all Livy endpoints. No arguments required.
+ Delete all Livy sessions created by the notebook. No arguments required.
e.g. `%%spark cleanup`
"""
usage = "Please look at usage of %spark by executing `%spark?`."
@@ -102,13 +112,13 @@ class RemoteSparkMagics(Magics):
skip = args.command[4].lower() == "skip"
else:
skip = False
- self.spark_controller.add_endpoint(name, language, connection_string, skip)
+ self.spark_controller.add_session(name, language, connection_string, skip)
# delete
elif subcommand == "delete":
if len(args.command) != 2:
raise ValueError("Subcommand 'delete' requires an argument. {}".format(usage))
name = args.command[1].lower()
- self.spark_controller.delete_endpoint(name)
+ self.spark_controller.delete_session(name)
# cleanup
elif subcommand == "cleanup":
self.spark_controller.cleanup()
@@ -116,7 +126,7 @@ class RemoteSparkMagics(Magics):
elif len(subcommand) == 0:
if args.context == Constants.context_name_spark:
(success, out) = self.spark_controller.run_cell(cell,
- args.endpoint)
+ args.session)
if success:
self.ipython.write(out)
else:
@@ -124,13 +134,13 @@ class RemoteSparkMagics(Magics):
elif args.context == Constants.context_name_sql:
try:
return self.spark_controller.run_cell_sql(cell,
- args.endpoint)
+ args.session)
except DataFrameParseException as e:
self.ipython.write_err(e.out)
elif args.context == Constants.context_name_hive:
try:
return self.spark_controller.run_cell_hive(cell,
- args.endpoint)
+ args.session)
except DataFrameParseException as e:
self.ipython.write_err(e.out)
else:
diff --git a/remotespark/livyclientlib/clientmanager.py b/remotespark/livyclientlib/clientmanager.py
index d6466ae..e253735 100644
--- a/remotespark/livyclientlib/clientmanager.py
+++ b/remotespark/livyclientlib/clientmanager.py
@@ -41,12 +41,12 @@ class ClientManager(object):
def _serialize_state(self):
self._serializer.serialize_state(self._livy_clients)
- def get_endpoints_list(self):
+ def get_sessions_list(self):
return list(self._livy_clients.keys())
def add_client(self, name, livy_client):
- if name in self.get_endpoints_list():
- raise ValueError("Endpoint with name '{}' already exists. Please delete the endpoint"
+ if name in self.get_sessions_list():
+ raise ValueError("Session with name '{}' already exists. Please delete the session"
" first if you intend to replace it.".format(name))
self._livy_clients[name] = livy_client
@@ -54,34 +54,34 @@ class ClientManager(object):
def get_any_client(self):
number_of_sessions = len(self._livy_clients)
if number_of_sessions == 1:
- key = self.get_endpoints_list()[0]
+ key = self.get_sessions_list()[0]
return self._livy_clients[key]
elif number_of_sessions == 0:
raise AssertionError("You need to have at least 1 client created to execute commands.")
else:
- raise AssertionError("Please specify the client to use. Possible endpoints are {}".format(
- self.get_endpoints_list()))
+ raise AssertionError("Please specify the client to use. Possible sessions are {}".format(
+ self.get_sessions_list()))
def get_client(self, name):
- if name in self.get_endpoints_list():
+ if name in self.get_sessions_list():
return self._livy_clients[name]
- raise ValueError("Could not find '{}' endpoint in list of saved endpoints. Possible endpoints are {}".format(
- name, self.get_endpoints_list()))
+ raise ValueError("Could not find '{}' session in list of saved sessions. Possible sessions are {}".format(
+ name, self.get_sessions_list()))
def delete_client(self, name):
- self._remove_endpoint(name)
+ self._remove_session(name)
def clean_up_all(self):
- for name in self.get_endpoints_list():
- self._remove_endpoint(name)
+ for name in self.get_sessions_list():
+ self._remove_session(name)
if self._serializer is not None:
self._serialize_state()
- def _remove_endpoint(self, name):
- if name in self.get_endpoints_list():
+ def _remove_session(self, name):
+ if name in self.get_sessions_list():
self._livy_clients[name].close_session()
del self._livy_clients[name]
else:
- raise ValueError("Could not find '{}' endpoint in list of saved endpoints. Possible endpoints are {}"
- .format(name, self.get_endpoints_list()))
+ raise ValueError("Could not find '{}' session in list of saved sessions. Possible sessions are {}"
+ .format(name, self.get_sessions_list()))
diff --git a/remotespark/livyclientlib/livyclient.py b/remotespark/livyclientlib/livyclient.py
index 87fb395..dedefbb 100644
--- a/remotespark/livyclientlib/livyclient.py
+++ b/remotespark/livyclientlib/livyclient.py
@@ -7,7 +7,7 @@ from .constants import Constants
class LivyClient(object):
- """Spark client for Livy endpoint"""
+ """Spark client for Livy session"""
def __init__(self, session):
self.logger = Log("LivyClient")
diff --git a/remotespark/livyclientlib/livyclientfactory.py b/remotespark/livyclientlib/livyclientfactory.py
index 85443b3..2b89045 100644
--- a/remotespark/livyclientlib/livyclientfactory.py
+++ b/remotespark/livyclientlib/livyclientfactory.py
@@ -11,7 +11,7 @@ from .linearretrypolicy import LinearRetryPolicy
class LivyClientFactory(object):
- """Spark client for Livy endpoint"""
+ """Spark client factory"""
def __init__(self):
self.logger = Log("LivyClientFactory")
diff --git a/remotespark/livyclientlib/pandaslivyclientbase.py b/remotespark/livyclientlib/pandaslivyclientbase.py
index 692c5d0..72852f1 100644
--- a/remotespark/livyclientlib/pandaslivyclientbase.py
+++ b/remotespark/livyclientlib/pandaslivyclientbase.py
@@ -5,7 +5,7 @@ from .livyclient import LivyClient
from .dataframeparseexception import DataFrameParseException
class PandasLivyClientBase(LivyClient):
- """Spark client for Livy endpoint that produces pandas df for sql and hive commands."""
+ """Spark client for Livy session that produces pandas df for sql and hive commands."""
def __init__(self, session, max_take_rows):
super(PandasLivyClientBase, self).__init__(session)
self.max_take_rows = max_take_rows
diff --git a/remotespark/livyclientlib/pandaspysparklivyclient.py b/remotespark/livyclientlib/pandaspysparklivyclient.py
index edf6abb..81c5123 100644
--- a/remotespark/livyclientlib/pandaspysparklivyclient.py
+++ b/remotespark/livyclientlib/pandaspysparklivyclient.py
@@ -7,7 +7,7 @@ import json
from .pandaslivyclientbase import PandasLivyClientBase
class PandasPysparkLivyClient(PandasLivyClientBase):
- """Spark client for Livy endpoint in PySpark"""
+ """Spark client for Livy session in PySpark"""
def __init__(self, session, max_take_rows):
super(PandasPysparkLivyClient, self).__init__(session, max_take_rows)
diff --git a/remotespark/livyclientlib/pandasscalalivyclient.py b/remotespark/livyclientlib/pandasscalalivyclient.py
index 5b9e031..8bb1ea8 100644
--- a/remotespark/livyclientlib/pandasscalalivyclient.py
+++ b/remotespark/livyclientlib/pandasscalalivyclient.py
@@ -8,7 +8,7 @@ import re
from .pandaslivyclientbase import PandasLivyClientBase
class PandasScalaLivyClient(PandasLivyClientBase):
- """Spark client for Livy endpoint in Scala"""
+ """Spark client for Livy session in Scala"""
def __init__(self, session, max_take_rows):
super(PandasScalaLivyClient, self).__init__(session, max_take_rows)
diff --git a/remotespark/livyclientlib/sparkcontroller.py b/remotespark/livyclientlib/sparkcontroller.py
index 4ab4dd2..d736011 100644
--- a/remotespark/livyclientlib/sparkcontroller.py
+++ b/remotespark/livyclientlib/sparkcontroller.py
@@ -1,6 +1,3 @@
-"""Runs Scala, PySpark and SQL statement through Spark using a REST endpoint in remote cluster.
-Provides the %spark magic."""
-
# Copyright (c) 2015 [email protected]
# Distributed under the terms of the Modified BSD License.
@@ -38,12 +35,12 @@ class SparkController(object):
def cleanup(self):
self.client_manager.clean_up_all()
- def delete_endpoint(self, name):
+ def delete_session(self, name):
self.client_manager.delete_client(name)
- def add_endpoint(self, name, language, connection_string, skip_if_exists):
- if skip_if_exists and (name in self.client_manager.get_endpoints_list()):
- self.logger.debug("Skipping {} because it already exists in list of endpoints.".format(name))
+ def add_session(self, name, language, connection_string, skip_if_exists):
+ if skip_if_exists and (name in self.client_manager.get_sessions_list()):
+ self.logger.debug("Skipping {} because it already exists in list of sessions.".format(name))
return
session = self.client_factory.create_session(language, connection_string, "-1", False)
@@ -52,7 +49,7 @@ class SparkController(object):
self.client_manager.add_client(name, livy_client)
def get_client_keys(self):
- return self.client_manager.get_endpoints_list()
+ return self.client_manager.get_sessions_list()
def get_client_by_name_or_default(self, client_name):
if client_name is None:
diff --git a/remotespark/sparkkernelbase.py b/remotespark/sparkkernelbase.py
index b842b76..d8074f7 100644
--- a/remotespark/sparkkernelbase.py
+++ b/remotespark/sparkkernelbase.py
@@ -81,11 +81,11 @@ class SparkKernelBase(IPythonKernel):
self.already_ran_once = True
- add_endpoint_code = "%spark add {} {} {} skip".format(
+ add_session_code = "%spark add {} {} {} skip".format(
self.client_name, self.session_language, connection_string)
- self._execute_cell(add_endpoint_code, True, False, shutdown_if_error=True,
+ self._execute_cell(add_session_code, True, False, shutdown_if_error=True,
log_if_error="Failed to create a Livy session.")
- self.logger.debug("Added endpoint.")
+ self.logger.debug("Added session.")
def _get_configuration(self):
try:
| Rename the --endpoint param of the magics to --session
Make -e be -s. (A usage sketch of the renamed flag follows this record.) | jupyter-incubator/sparkmagic | diff --git a/tests/test_clientmanager.py b/tests/test_clientmanager.py
index 3376082..b1ef425 100644
--- a/tests/test_clientmanager.py
+++ b/tests/test_clientmanager.py
@@ -18,13 +18,13 @@ def test_deserialize_on_creation():
serializer.deserialize_state.return_value = [("py", None), ("sc", None)]
manager = ClientManager(serializer)
- assert "py" in manager.get_endpoints_list()
- assert "sc" in manager.get_endpoints_list()
+ assert "py" in manager.get_sessions_list()
+ assert "sc" in manager.get_sessions_list()
serializer = MagicMock()
manager = ClientManager(serializer)
- assert len(manager.get_endpoints_list()) == 0
+ assert len(manager.get_sessions_list()) == 0
def test_serialize_periodically():
@@ -82,7 +82,7 @@ def test_client_names_returned():
manager.add_client("name0", client)
manager.add_client("name1", client)
- assert_equals({"name0", "name1"}, set(manager.get_endpoints_list()))
+ assert_equals({"name0", "name1"}, set(manager.get_sessions_list()))
def test_get_any_client():
diff --git a/tests/test_remotesparkmagics.py b/tests/test_remotesparkmagics.py
index 363849f..22a70b4 100644
--- a/tests/test_remotesparkmagics.py
+++ b/tests/test_remotesparkmagics.py
@@ -32,10 +32,10 @@ def test_info_command_parses():
@with_setup(_setup, _teardown)
-def test_add_endpoint_command_parses():
+def test_add_sessions_command_parses():
# Do not skip
- add_endpoint_mock = MagicMock()
- spark_controller.add_endpoint = add_endpoint_mock
+ add_sessions_mock = MagicMock()
+ spark_controller.add_session = add_sessions_mock
command = "add"
name = "name"
language = "python"
@@ -44,11 +44,11 @@ def test_add_endpoint_command_parses():
magic.spark(line)
- add_endpoint_mock.assert_called_once_with(name, language, connection_string, False)
+ add_sessions_mock.assert_called_once_with(name, language, connection_string, False)
# Skip
- add_endpoint_mock = MagicMock()
- spark_controller.add_endpoint = add_endpoint_mock
+ add_sessions_mock = MagicMock()
+ spark_controller.add_session = add_sessions_mock
command = "add"
name = "name"
language = "python"
@@ -57,13 +57,13 @@ def test_add_endpoint_command_parses():
magic.spark(line)
- add_endpoint_mock.assert_called_once_with(name, language, connection_string, True)
+ add_sessions_mock.assert_called_once_with(name, language, connection_string, True)
@with_setup(_setup, _teardown)
-def test_delete_endpoint_command_parses():
+def test_delete_sessions_command_parses():
mock_method = MagicMock()
- spark_controller.delete_endpoint = mock_method
+ spark_controller.delete_session = mock_method
command = "delete"
name = "name"
line = " ".join([command, name])
@@ -98,8 +98,8 @@ def test_run_cell_command_parses():
run_cell_method.return_value = (True, "")
spark_controller.run_cell = run_cell_method
- command = "-e"
- name = "endpoint_name"
+ command = "-s"
+ name = "sessions_name"
line = " ".join([command, name])
cell = "cell code"
diff --git a/tests/test_sparkcontroller.py b/tests/test_sparkcontroller.py
index 9dea184..4a20537 100644
--- a/tests/test_sparkcontroller.py
+++ b/tests/test_sparkcontroller.py
@@ -25,7 +25,7 @@ def _teardown():
@with_setup(_setup, _teardown)
-def test_add_endpoint():
+def test_add_session():
name = "name"
language = "python"
connection_string = "url=http://location:port;username=name;password=word"
@@ -34,7 +34,7 @@ def test_add_endpoint():
client_factory.create_session = MagicMock(return_value=session)
client_factory.build_client = MagicMock(return_value=client)
- controller.add_endpoint(name, language, connection_string, False)
+ controller.add_session(name, language, connection_string, False)
client_factory.create_session.assert_called_once_with(language, connection_string, "-1", False)
client_factory.build_client.assert_called_once_with(language, session)
@@ -43,7 +43,7 @@ def test_add_endpoint():
@with_setup(_setup, _teardown)
-def test_add_endpoint_skip():
+def test_add_session_skip():
name = "name"
language = "python"
connection_string = "url=http://location:port;username=name;password=word"
@@ -52,8 +52,8 @@ def test_add_endpoint_skip():
client_factory.create_session = MagicMock(return_value=session)
client_factory.build_client = MagicMock(return_value=client)
- client_manager.get_endpoints_list.return_value = [name]
- controller.add_endpoint(name, language, connection_string, True)
+ client_manager.get_sessions_list.return_value = [name]
+ controller.add_session(name, language, connection_string, True)
assert client_factory.create_session.call_count == 0
assert client_factory.build_client.call_count == 0
@@ -62,10 +62,10 @@ def test_add_endpoint_skip():
@with_setup(_setup, _teardown)
-def test_delete_endpoint():
+def test_delete_session():
name = "name"
- controller.delete_endpoint(name)
+ controller.delete_session(name)
client_manager.delete_client.assert_called_once_with(name)
@@ -83,7 +83,7 @@ def test_run_cell():
default_client.execute = chosen_client.execute = MagicMock(return_value=(True,""))
client_manager.get_any_client = MagicMock(return_value=default_client)
client_manager.get_client = MagicMock(return_value=chosen_client)
- name = "endpoint_name"
+ name = "session_name"
cell = "cell code"
controller.run_cell(cell, name)
@@ -107,4 +107,4 @@ def test_run_cell():
@with_setup(_setup, _teardown)
def test_get_client_keys():
controller.get_client_keys()
- client_manager.get_endpoints_list.assert_called_once_with()
+ client_manager.get_sessions_list.assert_called_once_with()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 9
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"mkdir ~/.sparkmagic",
"cp remotespark/default_config.json ~/.sparkmagic/config.json"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | anyio==4.9.0
argon2-cffi==23.1.0
argon2-cffi-bindings==21.2.0
arrow==1.3.0
async-lru==2.0.5
attrs==25.3.0
babel==2.17.0
beautifulsoup4==4.13.3
bleach==6.2.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
comm==0.2.2
decorator==5.2.1
defusedxml==0.7.1
exceptiongroup==1.2.2
fastjsonschema==2.21.1
fqdn==1.5.1
h11==0.14.0
httpcore==1.0.7
httpx==0.28.1
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
ipykernel==4.1.1
ipython==4.0.0
ipython-genutils==0.2.0
ipywidgets==7.8.5
isoduration==20.11.0
Jinja2==3.1.6
json5==0.10.0
jsonpointer==3.0.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
jupyter-events==0.12.0
jupyter-lsp==2.2.5
jupyter_client==8.6.3
jupyter_core==5.7.2
jupyter_server==2.15.0
jupyter_server_terminals==0.5.3
jupyterlab==4.1.5
jupyterlab_pygments==0.3.0
jupyterlab_server==2.27.3
jupyterlab_widgets==1.1.11
MarkupSafe==3.0.2
mistune==3.1.3
mock==5.2.0
narwhals==1.32.0
nbclient==0.10.2
nbconvert==7.16.6
nbformat==5.10.4
nose==1.3.7
notebook==7.1.3
notebook_shim==0.2.4
numpy==2.0.2
overrides==7.7.0
packaging==24.2
pandas==2.2.3
pandocfilters==1.5.1
pexpect==4.9.0
pickleshare==0.7.5
platformdirs==4.3.7
plotly==6.0.1
pluggy==1.5.0
prometheus_client==0.21.1
ptyprocess==0.7.0
pycparser==2.22
Pygments==2.19.1
pytest==8.3.5
python-dateutil==2.9.0.post0
python-json-logger==3.3.0
pytz==2025.2
PyYAML==6.0.2
pyzmq==26.3.0
referencing==0.36.2
-e git+https://github.com/jupyter-incubator/sparkmagic.git@d9662c5c5b089976810dfe863437d86ae72ccf82#egg=remotespark
requests==2.32.3
rfc3339-validator==0.1.4
rfc3986-validator==0.1.1
rpds-py==0.24.0
Send2Trash==1.8.3
simplegeneric==0.8.1
six==1.17.0
sniffio==1.3.1
soupsieve==2.6
terminado==0.18.1
tinycss2==1.4.0
tomli==2.2.1
tornado==6.4.2
traitlets==5.14.3
types-python-dateutil==2.9.0.20241206
typing_extensions==4.13.0
tzdata==2025.2
uri-template==1.3.0
urllib3==2.3.0
webcolors==24.11.1
webencodings==0.5.1
websocket-client==1.8.0
widgetsnbextension==3.6.10
zipp==3.21.0
| name: sparkmagic
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- anyio==4.9.0
- argon2-cffi==23.1.0
- argon2-cffi-bindings==21.2.0
- arrow==1.3.0
- async-lru==2.0.5
- attrs==25.3.0
- babel==2.17.0
- beautifulsoup4==4.13.3
- bleach==6.2.0
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- comm==0.2.2
- decorator==5.2.1
- defusedxml==0.7.1
- exceptiongroup==1.2.2
- fastjsonschema==2.21.1
- fqdn==1.5.1
- h11==0.14.0
- httpcore==1.0.7
- httpx==0.28.1
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- ipykernel==4.1.1
- ipython==4.0.0
- ipython-genutils==0.2.0
- ipywidgets==7.8.5
- isoduration==20.11.0
- jinja2==3.1.6
- json5==0.10.0
- jsonpointer==3.0.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- jupyter-client==8.6.3
- jupyter-core==5.7.2
- jupyter-events==0.12.0
- jupyter-lsp==2.2.5
- jupyter-server==2.15.0
- jupyter-server-terminals==0.5.3
- jupyterlab==4.1.5
- jupyterlab-pygments==0.3.0
- jupyterlab-server==2.27.3
- jupyterlab-widgets==1.1.11
- markupsafe==3.0.2
- mistune==3.1.3
- mock==5.2.0
- narwhals==1.32.0
- nbclient==0.10.2
- nbconvert==7.16.6
- nbformat==5.10.4
- nose==1.3.7
- notebook==7.1.3
- notebook-shim==0.2.4
- numpy==2.0.2
- overrides==7.7.0
- packaging==24.2
- pandas==2.2.3
- pandocfilters==1.5.1
- pexpect==4.9.0
- pickleshare==0.7.5
- platformdirs==4.3.7
- plotly==6.0.1
- pluggy==1.5.0
- prometheus-client==0.21.1
- ptyprocess==0.7.0
- pycparser==2.22
- pygments==2.19.1
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- python-json-logger==3.3.0
- pytz==2025.2
- pyyaml==6.0.2
- pyzmq==26.3.0
- referencing==0.36.2
- requests==2.32.3
- rfc3339-validator==0.1.4
- rfc3986-validator==0.1.1
- rpds-py==0.24.0
- send2trash==1.8.3
- simplegeneric==0.8.1
- six==1.17.0
- sniffio==1.3.1
- soupsieve==2.6
- terminado==0.18.1
- tinycss2==1.4.0
- tomli==2.2.1
- tornado==6.4.2
- traitlets==5.14.3
- types-python-dateutil==2.9.0.20241206
- typing-extensions==4.13.0
- tzdata==2025.2
- uri-template==1.3.0
- urllib3==2.3.0
- webcolors==24.11.1
- webencodings==0.5.1
- websocket-client==1.8.0
- widgetsnbextension==3.6.10
- zipp==3.21.0
prefix: /opt/conda/envs/sparkmagic
| [
"tests/test_clientmanager.py::test_deserialize_on_creation",
"tests/test_clientmanager.py::test_client_names_returned"
]
| [
"tests/test_remotesparkmagics.py::test_info_command_parses",
"tests/test_remotesparkmagics.py::test_add_sessions_command_parses",
"tests/test_remotesparkmagics.py::test_delete_sessions_command_parses",
"tests/test_remotesparkmagics.py::test_cleanup_command_parses",
"tests/test_remotesparkmagics.py::test_bad_command_throws_exception",
"tests/test_remotesparkmagics.py::test_run_cell_command_parses",
"tests/test_sparkcontroller.py::test_add_session",
"tests/test_sparkcontroller.py::test_add_session_skip",
"tests/test_sparkcontroller.py::test_delete_session",
"tests/test_sparkcontroller.py::test_cleanup",
"tests/test_sparkcontroller.py::test_run_cell",
"tests/test_sparkcontroller.py::test_get_client_keys"
]
| [
"tests/test_clientmanager.py::test_get_client_throws_when_client_not_exists",
"tests/test_clientmanager.py::test_serialize_periodically",
"tests/test_clientmanager.py::test_get_client",
"tests/test_clientmanager.py::test_delete_client",
"tests/test_clientmanager.py::test_delete_client_throws_when_client_not_exists",
"tests/test_clientmanager.py::test_add_client_throws_when_client_exists",
"tests/test_clientmanager.py::test_get_any_client",
"tests/test_clientmanager.py::test_get_any_client_raises_exception_with_no_client",
"tests/test_clientmanager.py::test_get_any_client_raises_exception_with_two_clients",
"tests/test_clientmanager.py::test_clean_up",
"tests/test_clientmanager.py::test_clean_up_serializer"
]
| []
| Modified BSD License | 340 | [
"remotespark/livyclientlib/pandasscalalivyclient.py",
"remotespark/livyclientlib/clientmanager.py",
"remotespark/livyclientlib/livyclient.py",
"remotespark/livyclientlib/pandaspysparklivyclient.py",
"remotespark/sparkkernelbase.py",
"remotespark/livyclientlib/pandaslivyclientbase.py",
"remotespark/livyclientlib/livyclientfactory.py",
"remotespark/livyclientlib/sparkcontroller.py",
"remotespark/RemoteSparkMagics.py"
]
| [
"remotespark/livyclientlib/pandasscalalivyclient.py",
"remotespark/livyclientlib/clientmanager.py",
"remotespark/livyclientlib/livyclient.py",
"remotespark/livyclientlib/pandaspysparklivyclient.py",
"remotespark/sparkkernelbase.py",
"remotespark/livyclientlib/pandaslivyclientbase.py",
"remotespark/livyclientlib/livyclientfactory.py",
"remotespark/livyclientlib/sparkcontroller.py",
"remotespark/RemoteSparkMagics.py"
]
|
|
joke2k__faker-314 | 9f338881f582807fd9d1339b6148b039f8141bb3 | 2015-12-18 19:49:49 | 883576c2d718ad7f604415e02a898f1f917d5b86 | diff --git a/faker/providers/misc/__init__.py b/faker/providers/misc/__init__.py
index 32531a60..9b318691 100644
--- a/faker/providers/misc/__init__.py
+++ b/faker/providers/misc/__init__.py
@@ -88,13 +88,33 @@ class Provider(BaseProvider):
@param lower_case: Boolean. Whether to use lower letters
@return: String. Random password
"""
- chars = ""
+ choices = ""
+ required_tokens = []
if special_chars:
- chars += "!@#$%^&*()_+"
+ required_tokens.append(random.choice("!@#$%^&*()_+"))
+ choices += "!@#$%^&*()_+"
if digits:
- chars += string.digits
+ required_tokens.append(random.choice(string.digits))
+ choices += string.digits
if upper_case:
- chars += string.ascii_uppercase
+ required_tokens.append(random.choice(string.ascii_uppercase))
+ choices += string.ascii_uppercase
if lower_case:
- chars += string.ascii_lowercase
- return ''.join(random.choice(chars) for x in range(length))
+ required_tokens.append(random.choice(string.ascii_lowercase))
+ choices += string.ascii_lowercase
+
+ assert len(required_tokens) <= length, "Required length is shorter than required characters"
+
+ # Generate a first version of the password
+ chars = [random.choice(choices) for x in range(length)]
+
+ # Pick some unique locations
+ random_indexes = set()
+ while len(random_indexes) < len(required_tokens):
+ random_indexes.add(random.randint(0, len(chars) - 1))
+
+ # Replace them with the required characters
+ for i, index in enumerate(random_indexes):
+ chars[index] = required_tokens[i]
+
+ return ''.join(chars)
| Param switches on faker.password() don't guarantee valid password
The format switches on `faker.password()` (`special_chars, digits, upper_case, lower_case`) don't always return passwords matching those rules.
This is problematic when generated passwords are used in unit tests where passwords must conform to validity rules (e.g. "must contain numbers"): such tests can randomly fail. (A sketch of the guaranteed behaviour follows this record.)
I expected that these switches would guarantee the function returns a conforming password. e.g. `faker.password(digits=True)` always returns a password containing digits, but this is not the case.
| joke2k/faker | diff --git a/faker/tests/__init__.py b/faker/tests/__init__.py
index 4eeaa3c7..6502a448 100644
--- a/faker/tests/__init__.py
+++ b/faker/tests/__init__.py
@@ -9,6 +9,7 @@ import json
import os
import time
import unittest
+import string
import sys
try:
@@ -458,6 +459,22 @@ class FactoryTestCase(unittest.TestCase):
datetime.datetime.now(utc).replace(second=0, microsecond=0)
)
+ def test_password(self):
+ from faker.providers.misc import Provider
+
+ def in_string(char, _str):
+ return char in _str
+
+ for _ in range(999):
+ password = Provider.password()
+
+ self.assertTrue(any([in_string(char, password) for char in "!@#$%^&*()_+"]))
+ self.assertTrue(any([in_string(char, password) for char in string.digits]))
+ self.assertTrue(any([in_string(char, password) for char in string.ascii_uppercase]))
+ self.assertTrue(any([in_string(char, password) for char in string.ascii_lowercase]))
+
+ self.assertRaises(AssertionError, Provider.password, length=2)
+
def test_prefix_suffix_always_string(self):
# Locales known to contain `*_male` and `*_female`.
for locale in ("bg_BG", "dk_DK", "en", "ru_RU", "tr_TR"):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"faker/tests/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | dnspython==2.7.0
email_validator==2.2.0
exceptiongroup==1.2.2
-e git+https://github.com/joke2k/faker.git@9f338881f582807fd9d1339b6148b039f8141bb3#egg=fake_factory
idna==3.10
iniconfig==2.1.0
mock==1.0.1
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
python-dateutil==2.9.0.post0
six==1.17.0
tomli==2.2.1
UkPostcodeParser==1.0.3
| name: faker
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- dnspython==2.7.0
- email-validator==2.2.0
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- mock==1.0.1
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- six==1.17.0
- tomli==2.2.1
- ukpostcodeparser==1.0.3
prefix: /opt/conda/envs/faker
| [
"faker/tests/__init__.py::FactoryTestCase::test_password"
]
| []
| [
"faker/tests/__init__.py::ShimsTestCase::test_counter",
"faker/tests/__init__.py::UtilsTestCase::test_add_dicts",
"faker/tests/__init__.py::UtilsTestCase::test_choice_distribution",
"faker/tests/__init__.py::UtilsTestCase::test_find_available_locales",
"faker/tests/__init__.py::UtilsTestCase::test_find_available_providers",
"faker/tests/__init__.py::FactoryTestCase::test_add_provider_gives_priority_to_newly_added_provider",
"faker/tests/__init__.py::FactoryTestCase::test_command",
"faker/tests/__init__.py::FactoryTestCase::test_command_custom_provider",
"faker/tests/__init__.py::FactoryTestCase::test_date_time_between_dates",
"faker/tests/__init__.py::FactoryTestCase::test_date_time_between_dates_with_tzinfo",
"faker/tests/__init__.py::FactoryTestCase::test_date_time_this_period",
"faker/tests/__init__.py::FactoryTestCase::test_date_time_this_period_with_tzinfo",
"faker/tests/__init__.py::FactoryTestCase::test_datetime_safe",
"faker/tests/__init__.py::FactoryTestCase::test_datetimes_with_and_without_tzinfo",
"faker/tests/__init__.py::FactoryTestCase::test_documentor",
"faker/tests/__init__.py::FactoryTestCase::test_format_calls_formatter_on_provider",
"faker/tests/__init__.py::FactoryTestCase::test_format_transfers_arguments_to_formatter",
"faker/tests/__init__.py::FactoryTestCase::test_get_formatter_returns_callable",
"faker/tests/__init__.py::FactoryTestCase::test_get_formatter_returns_correct_formatter",
"faker/tests/__init__.py::FactoryTestCase::test_get_formatter_throws_exception_on_incorrect_formatter",
"faker/tests/__init__.py::FactoryTestCase::test_magic_call_calls_format",
"faker/tests/__init__.py::FactoryTestCase::test_magic_call_calls_format_with_arguments",
"faker/tests/__init__.py::FactoryTestCase::test_no_words_paragraph",
"faker/tests/__init__.py::FactoryTestCase::test_no_words_sentence",
"faker/tests/__init__.py::FactoryTestCase::test_parse_returns_same_string_when_it_contains_no_curly_braces",
"faker/tests/__init__.py::FactoryTestCase::test_parse_returns_string_with_tokens_replaced_by_formatters",
"faker/tests/__init__.py::FactoryTestCase::test_prefix_suffix_always_string",
"faker/tests/__init__.py::FactoryTestCase::test_random_element",
"faker/tests/__init__.py::FactoryTestCase::test_slugify",
"faker/tests/__init__.py::FactoryTestCase::test_timezone_conversion",
"faker/tests/__init__.py::FactoryTestCase::test_us_ssn_valid",
"faker/tests/__init__.py::GeneratorTestCase::test_random_seed_doesnt_seed_system_random"
]
| []
| MIT License | 341 | [
"faker/providers/misc/__init__.py"
]
| [
"faker/providers/misc/__init__.py"
]
|
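For the faker record above, a sketch of the guaranteed behaviour after the fix, mirroring the calls made in the record's own test patch:

import string
from faker.providers.misc import Provider

pw = Provider.password()  # all four character classes are enabled by default
assert any(c in "!@#$%^&*()_+" for c in pw)
assert any(c in string.digits for c in pw)
assert any(c in string.ascii_uppercase for c in pw)
assert any(c in string.ascii_lowercase for c in pw)

# Requesting fewer characters than enabled classes now fails fast:
# Provider.password(length=2)  # raises AssertionError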
|
pika__pika-675 | d8a782d97579cd96ed67ccfb55f63ca8fdafa199 | 2015-12-19 02:54:17 | f73f9bbaddd90b03583a6693f6158e56fbede948 | vitaly-krugl: CC @gmr: Hi Gavin, please review this fix.
vitaly-krugl: @gmr, the failed test `BlockingChannel.basic_nack single message` is unrelated to the changes in this PR; it's a known issue described in #677.
Please re-start the build. Thanks! | diff --git a/pika/adapters/libev_connection.py b/pika/adapters/libev_connection.py
index 2a27055..26a6547 100644
--- a/pika/adapters/libev_connection.py
+++ b/pika/adapters/libev_connection.py
@@ -127,7 +127,7 @@ class LibevConnection(BaseConnection):
if self._on_signal_callback and not global_sigterm_watcher:
global_sigterm_watcher = \
self.ioloop.signal(signal.SIGTERM,
- self._handle_sigterm)
+ self._handle_sigterm)
if self._on_signal_callback and not global_sigint_watcher:
global_sigint_watcher = self.ioloop.signal(signal.SIGINT,
@@ -136,8 +136,8 @@ class LibevConnection(BaseConnection):
if not self._io_watcher:
self._io_watcher = \
self.ioloop.io(self.socket.fileno(),
- self._PIKA_TO_LIBEV_ARRAY[self.event_state],
- self._handle_events)
+ self._PIKA_TO_LIBEV_ARRAY[self.event_state],
+ self._handle_events)
self.async = pyev.Async(self.ioloop, self._noop_callable)
self.async.start()
@@ -209,8 +209,9 @@ class LibevConnection(BaseConnection):
self._PIKA_TO_LIBEV_ARRAY[self.event_state])
break
- except: # sometimes the stop() doesn't complete in time
- if retries > 5: raise
+ except Exception: # sometimes the stop() doesn't complete in time
+ if retries > 5:
+ raise
self._io_watcher.stop() # so try it again
retries += 1
@@ -268,7 +269,7 @@ class LibevConnection(BaseConnection):
:rtype: timer instance handle.
"""
- LOGGER.debug('deadline: {0}'.format(deadline))
+ LOGGER.debug('deadline: %s', deadline)
timer = self._get_timer(deadline)
self._active_timers[timer] = (callback_method, callback_timeout,
callback_kwargs)
diff --git a/pika/channel.py b/pika/channel.py
index 641e469..5c67c49 100644
--- a/pika/channel.py
+++ b/pika/channel.py
@@ -436,7 +436,10 @@ class Channel(object):
For more information see:
http://www.rabbitmq.com/extensions.html#confirms
- :param method callback: The callback for delivery confirmations
+ :param method callback: The callback for delivery confirmations that has
+ the following signature: callback(pika.frame.Method), where
+ method_frame contains either method `spec.Basic.Ack` or
+ `spec.Basic.Nack`
:param bool nowait: Do not send a reply frame (Confirm.SelectOk)
"""
@@ -674,7 +677,8 @@ class Channel(object):
Leave the queue name empty for a auto-named queue in RabbitMQ
- :param method callback: The method to call on Queue.DeclareOk
+ :param method callback: callback(pika.frame.Method) for method
+ Queue.DeclareOk
:param queue: The queue name
:type queue: str or unicode
:param bool passive: Only check to see if the queue exists
@@ -694,7 +698,8 @@ class Channel(object):
self._validate_channel_and_callback(callback)
return self._rpc(spec.Queue.Declare(0, queue, passive, durable,
exclusive, auto_delete, nowait,
- arguments or dict()), callback,
+ arguments or dict()),
+ callback,
replies)
def queue_delete(self,
@@ -1087,52 +1092,74 @@ class Channel(object):
"""
LOGGER.debug('%i blocked frames', len(self._blocked))
self._blocking = None
- while len(self._blocked) > 0 and self._blocking is None:
+ while self._blocked and self._blocking is None:
self._rpc(*self._blocked.popleft())
def _rpc(self, method_frame, callback=None, acceptable_replies=None):
- """Shortcut wrapper to the Connection's rpc command using its callback
- stack, passing in our channel number.
+ """Make a syncronous channel RPC call for a synchronous method frame. If
+ the channel is already in the blocking state, then enqueue the request,
+ but don't send it at this time; it will be eventually sent by
+ `_on_synchronous_complete` after the prior blocking request receives a
+ resposne. If the channel is not in the blocking state and
+ `acceptable_replies` is not empty, transition the channel to the
+ blocking state and register for `_on_synchronous_complete` before
+ sending the request.
+
+ NOTE: A populated callback must be accompanied by populated
+ acceptable_replies.
:param pika.amqp_object.Method method_frame: The method frame to call
:param method callback: The callback for the RPC response
:param list acceptable_replies: The replies this RPC call expects
"""
+ assert method_frame.synchronous, (
+ 'Only synchronous-capable frames may be used with _rpc: %r'
+ % (method_frame,))
+
+ # Validate we got None or a list of acceptable_replies
+ if not isinstance(acceptable_replies, (type(None), list)):
+ raise TypeError('acceptable_replies should be list or None')
+
+ # Validate the callback is callable
+ if callback is not None and not is_callable(callback):
+ raise TypeError('callback should be None, a function or method.')
+
+ if callback is not None and not acceptable_replies:
+ raise ValueError('A populated callback must be accompanied by '
+ 'populated acceptable_replies')
+
# Make sure the channel is open
if self.is_closed:
raise exceptions.ChannelClosed
# If the channel is blocking, add subsequent commands to our stack
if self._blocking:
+ LOGGER.debug('Already in blocking state, so enqueueing frame %s; '
+ 'acceptable_replies=%r',
+ method_frame, acceptable_replies)
return self._blocked.append([method_frame, callback,
acceptable_replies])
- # Validate we got None or a list of acceptable_replies
- if acceptable_replies and not isinstance(acceptable_replies, list):
- raise TypeError("acceptable_replies should be list or None")
-
- # Validate the callback is callable
- if callback and not is_callable(callback):
- raise TypeError("callback should be None, a function or method.")
-
- # Block until a response frame is received for synchronous frames
- if method_frame.synchronous:
- self._blocking = method_frame.NAME
-
# If acceptable replies are set, add callbacks
if acceptable_replies:
- for reply in acceptable_replies or list():
+ # Block until a response frame is received for synchronous frames
+ self._blocking = method_frame.NAME
+ LOGGER.debug(
+ 'Entering blocking state on frame %s; acceptable_replies=%r',
+ method_frame, acceptable_replies)
+
+ for reply in acceptable_replies:
if isinstance(reply, tuple):
reply, arguments = reply
else:
arguments = None
- LOGGER.debug('Adding in on_synchronous_complete callback')
+ LOGGER.debug('Adding on_synchronous_complete callback')
self.callbacks.add(self.channel_number, reply,
self._on_synchronous_complete,
arguments=arguments)
- if callback:
- LOGGER.debug('Adding passed in callback')
+ if callback is not None:
+ LOGGER.debug('Adding passed-in callback')
self.callbacks.add(self.channel_number, reply, callback,
arguments=arguments)
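A minimal sketch of the stall the patch above fixes, modelled on the TestBlockingNonBlockingBlockingRPCWontStall test added below; the channel comes from an already-open pika connection, and the callback and queue names are assumed.

def on_q1_ok(frame): pass
def on_q3_ok(frame): pass

def on_channel_open(channel):
    # 1) Blocking RPC: the channel enters the blocking state until DeclareOk.
    channel.queue_declare(callback=on_q1_ok, queue='q1')
    # 2) Non-blocking RPC: nowait=True means no reply frame is expected.
    channel.queue_declare(callback=None, queue='q2', nowait=True)
    # 3) Blocking RPC again.
    channel.queue_declare(callback=on_q3_ok, queue='q3')
    # Before the fix, draining (2) once (1) completed re-entered the blocking
    # state even though the nowait frame would never get a reply, so (3)
    # stayed queued forever and on_q3_ok never fired.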
| Regression: Cannot receive long-running messages when used in a Tornado app
Hi there,
we just noticed a regression from pika 0.9.14 to 0.10.0. We haven't been able to track the problem down further than this (sorry) and will stop here for now by pinning the old version.
The problem seems to be that pika (used via tornado-celery) is not able to receive reply messages from RabbitMQ for long-running (>4-5 seconds) requests to a background worker.
The reply message instead seems to time out after the message timeout period in RabbitMQ, after which it is lost.
Do you need more information to fix this? | pika/pika | diff --git a/tests/acceptance/async_adapter_tests.py b/tests/acceptance/async_adapter_tests.py
index 9d7448e..9a44208 100644
--- a/tests/acceptance/async_adapter_tests.py
+++ b/tests/acceptance/async_adapter_tests.py
@@ -1,13 +1,25 @@
+# Suppress pylint messages concerning missing class and method docstrings
+# pylint: disable=C0111
+
+# Suppress pylint warning about attribute defined outside __init__
+# pylint: disable=W0201
+
+# Suppress pylint warning about access to protected member
+# pylint: disable=W0212
+
+# Suppress pylint warning about unused argument
+# pylint: disable=W0613
+
import time
import uuid
-from pika import spec, URLParameters
+from pika import spec
from pika.compat import as_bytes
from async_test_base import (AsyncTestCase, BoundQueueTestCase, AsyncAdapters)
-class TestA_Connect(AsyncTestCase, AsyncAdapters):
+class TestA_Connect(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103
DESCRIPTION = "Connect, open channel and disconnect"
def begin(self, channel):
@@ -26,11 +38,49 @@ class TestConfirmSelect(AsyncTestCase, AsyncAdapters):
self.stop()
+class TestBlockingNonBlockingBlockingRPCWontStall(AsyncTestCase, AsyncAdapters):
+ DESCRIPTION = ("Verify that a sequence of blocking, non-blocking, blocking "
+ "RPC requests won't stall")
+
+ def begin(self, channel):
+ # Queue declaration params table: queue name, nowait value
+ self._expected_queue_params = (
+ ("blocking-non-blocking-stall-check-" + uuid.uuid1().hex, False),
+ ("blocking-non-blocking-stall-check-" + uuid.uuid1().hex, True),
+ ("blocking-non-blocking-stall-check-" + uuid.uuid1().hex, False)
+ )
+
+ self._declared_queue_names = []
+
+ for queue, nowait in self._expected_queue_params:
+ channel.queue_declare(callback=self._queue_declare_ok_cb
+ if not nowait else None,
+ queue=queue,
+ auto_delete=True,
+ nowait=nowait,
+ arguments={'x-expires': self.TIMEOUT * 1000})
+
+ def _queue_declare_ok_cb(self, declare_ok_frame):
+ self._declared_queue_names.append(declare_ok_frame.method.queue)
+
+ if len(self._declared_queue_names) == 2:
+ # Initiate check for creation of queue declared with nowait=True
+ self.channel.queue_declare(callback=self._queue_declare_ok_cb,
+ queue=self._expected_queue_params[1][0],
+ passive=True,
+ nowait=False)
+ elif len(self._declared_queue_names) == 3:
+ self.assertSequenceEqual(
+ sorted(self._declared_queue_names),
+ sorted(item[0] for item in self._expected_queue_params))
+ self.stop()
+
+
class TestConsumeCancel(AsyncTestCase, AsyncAdapters):
DESCRIPTION = "Consume and cancel"
def begin(self, channel):
- self.queue_name = str(uuid.uuid4())
+ self.queue_name = self.__class__.__name__ + ':' + uuid.uuid1().hex
channel.queue_declare(self.on_queue_declared, queue=self.queue_name)
def on_queue_declared(self, frame):
@@ -58,7 +108,7 @@ class TestExchangeDeclareAndDelete(AsyncTestCase, AsyncAdapters):
X_TYPE = 'direct'
def begin(self, channel):
- self.name = self.__class__.__name__ + ':' + str(id(self))
+ self.name = self.__class__.__name__ + ':' + uuid.uuid1().hex
channel.exchange_declare(self.on_exchange_declared, self.name,
exchange_type=self.X_TYPE,
passive=False,
@@ -81,7 +131,7 @@ class TestExchangeRedeclareWithDifferentValues(AsyncTestCase, AsyncAdapters):
X_TYPE2 = 'topic'
def begin(self, channel):
- self.name = self.__class__.__name__ + ':' + str(id(self))
+ self.name = self.__class__.__name__ + ':' + uuid.uuid1().hex
self.channel.add_on_close_callback(self.on_channel_closed)
channel.exchange_declare(self.on_exchange_declared, self.name,
exchange_type=self.X_TYPE1,
@@ -97,7 +147,7 @@ class TestExchangeRedeclareWithDifferentValues(AsyncTestCase, AsyncAdapters):
self.connection.channel(self.on_cleanup_channel)
def on_exchange_declared(self, frame):
- self.channel.exchange_declare(self.on_exchange_declared, self.name,
+ self.channel.exchange_declare(self.on_bad_result, self.name,
exchange_type=self.X_TYPE2,
passive=False,
durable=False,
@@ -134,7 +184,8 @@ class TestQueueNameDeclareAndDelete(AsyncTestCase, AsyncAdapters):
DESCRIPTION = "Create and delete a named queue"
def begin(self, channel):
- channel.queue_declare(self.on_queue_declared, str(id(self)),
+ self._q_name = self.__class__.__name__ + ':' + uuid.uuid1().hex
+ channel.queue_declare(self.on_queue_declared, self._q_name,
passive=False,
durable=False,
exclusive=True,
@@ -143,10 +194,9 @@ class TestQueueNameDeclareAndDelete(AsyncTestCase, AsyncAdapters):
arguments={'x-expires': self.TIMEOUT * 1000})
def on_queue_declared(self, frame):
- queue = str(id(self))
self.assertIsInstance(frame.method, spec.Queue.DeclareOk)
# Frame's method's queue is encoded (impl detail)
- self.assertEqual(frame.method.queue, queue)
+ self.assertEqual(frame.method.queue, self._q_name)
self.channel.queue_delete(self.on_queue_delete, frame.method.queue)
def on_queue_delete(self, frame):
@@ -159,8 +209,9 @@ class TestQueueRedeclareWithDifferentValues(AsyncTestCase, AsyncAdapters):
DESCRIPTION = "Should close chan: re-declared queue w/ diff params"
def begin(self, channel):
+ self._q_name = self.__class__.__name__ + ':' + uuid.uuid1().hex
self.channel.add_on_close_callback(self.on_channel_closed)
- channel.queue_declare(self.on_queue_declared, str(id(self)),
+ channel.queue_declare(self.on_queue_declared, self._q_name,
passive=False,
durable=False,
exclusive=True,
@@ -172,7 +223,7 @@ class TestQueueRedeclareWithDifferentValues(AsyncTestCase, AsyncAdapters):
self.stop()
def on_queue_declared(self, frame):
- self.channel.queue_declare(self.on_bad_result, str(id(self)),
+ self.channel.queue_declare(self.on_bad_result, self._q_name,
passive=False,
durable=True,
exclusive=False,
@@ -181,13 +232,13 @@ class TestQueueRedeclareWithDifferentValues(AsyncTestCase, AsyncAdapters):
arguments={'x-expires': self.TIMEOUT * 1000})
def on_bad_result(self, frame):
- self.channel.queue_delete(None, str(id(self)), nowait=True)
+ self.channel.queue_delete(None, self._q_name, nowait=True)
raise AssertionError("Should not have received a Queue.DeclareOk")
-class TestTX1_Select(AsyncTestCase, AsyncAdapters):
- DESCRIPTION="Receive confirmation of Tx.Select"
+class TestTX1_Select(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103
+ DESCRIPTION = "Receive confirmation of Tx.Select"
def begin(self, channel):
channel.tx_select(self.on_complete)
@@ -198,8 +249,8 @@ class TestTX1_Select(AsyncTestCase, AsyncAdapters):
-class TestTX2_Commit(AsyncTestCase, AsyncAdapters):
- DESCRIPTION="Start a transaction, and commit it"
+class TestTX2_Commit(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103
+ DESCRIPTION = "Start a transaction, and commit it"
def begin(self, channel):
channel.tx_select(self.on_selectok)
@@ -213,7 +264,7 @@ class TestTX2_Commit(AsyncTestCase, AsyncAdapters):
self.stop()
-class TestTX2_CommitFailure(AsyncTestCase, AsyncAdapters):
+class TestTX2_CommitFailure(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103
DESCRIPTION = "Close the channel: commit without a TX"
def begin(self, channel):
@@ -226,11 +277,12 @@ class TestTX2_CommitFailure(AsyncTestCase, AsyncAdapters):
def on_selectok(self, frame):
self.assertIsInstance(frame.method, spec.Tx.SelectOk)
- def on_commitok(self, frame):
+ @staticmethod
+ def on_commitok(frame):
raise AssertionError("Should not have received a Tx.CommitOk")
-class TestTX3_Rollback(AsyncTestCase, AsyncAdapters):
+class TestTX3_Rollback(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103
DESCRIPTION = "Start a transaction, then rollback"
def begin(self, channel):
@@ -246,7 +298,7 @@ class TestTX3_Rollback(AsyncTestCase, AsyncAdapters):
-class TestTX3_RollbackFailure(AsyncTestCase, AsyncAdapters):
+class TestTX3_RollbackFailure(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103
DESCRIPTION = "Close the channel: rollback without a TX"
def begin(self, channel):
@@ -256,12 +308,12 @@ class TestTX3_RollbackFailure(AsyncTestCase, AsyncAdapters):
def on_channel_closed(self, channel, reply_code, reply_text):
self.stop()
- def on_commitok(self, frame):
+ @staticmethod
+ def on_commitok(frame):
raise AssertionError("Should not have received a Tx.RollbackOk")
-
-class TestZ_PublishAndConsume(BoundQueueTestCase, AsyncAdapters):
+class TestZ_PublishAndConsume(BoundQueueTestCase, AsyncAdapters): # pylint: disable=C0103
DESCRIPTION = "Publish a message and consume it"
def on_ready(self, frame):
@@ -282,10 +334,11 @@ class TestZ_PublishAndConsume(BoundQueueTestCase, AsyncAdapters):
-class TestZ_PublishAndConsumeBig(BoundQueueTestCase, AsyncAdapters):
+class TestZ_PublishAndConsumeBig(BoundQueueTestCase, AsyncAdapters): # pylint: disable=C0103
DESCRIPTION = "Publish a big message and consume it"
- def _get_msg_body(self):
+ @staticmethod
+ def _get_msg_body():
return '\n'.join(["%s" % i for i in range(0, 2097152)])
def on_ready(self, frame):
@@ -305,7 +358,7 @@ class TestZ_PublishAndConsumeBig(BoundQueueTestCase, AsyncAdapters):
self.channel.basic_cancel(self.on_cancelled, self.ctag)
-class TestZ_PublishAndGet(BoundQueueTestCase, AsyncAdapters):
+class TestZ_PublishAndGet(BoundQueueTestCase, AsyncAdapters): # pylint: disable=C0103
DESCRIPTION = "Publish a message and get it"
def on_ready(self, frame):
@@ -321,13 +374,14 @@ class TestZ_PublishAndGet(BoundQueueTestCase, AsyncAdapters):
self.stop()
-class TestZ_AccessDenied(AsyncTestCase, AsyncAdapters):
+class TestZ_AccessDenied(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103
DESCRIPTION = "Verify that access denied invokes on open error callback"
def start(self, *args, **kwargs):
self.parameters.virtual_host = str(uuid.uuid4())
self.error_captured = False
super(TestZ_AccessDenied, self).start(*args, **kwargs)
+ self.assertTrue(self.error_captured)
def on_open_error(self, connection, error):
self.error_captured = True
@@ -336,7 +390,3 @@ class TestZ_AccessDenied(AsyncTestCase, AsyncAdapters):
def on_open(self, connection):
super(TestZ_AccessDenied, self).on_open(connection)
self.stop()
-
- def tearDown(self):
- self.assertTrue(self.error_captured)
- super(TestZ_AccessDenied, self).tearDown()
diff --git a/tests/acceptance/async_test_base.py b/tests/acceptance/async_test_base.py
index 26c3e1a..eafe72f 100644
--- a/tests/acceptance/async_test_base.py
+++ b/tests/acceptance/async_test_base.py
@@ -1,3 +1,10 @@
+# Suppress pylint warnings concerning attribute defined outside __init__
+# pylint: disable=W0201
+
+# Suppress pylint messages concerning missing docstrings
+# pylint: disable=C0111
+
+from datetime import datetime
import select
import logging
try:
@@ -6,7 +13,9 @@ except ImportError:
import unittest
import platform
-target = platform.python_implementation()
+_TARGET = platform.python_implementation()
+
+import uuid
import pika
from pika import adapters
@@ -24,6 +33,9 @@ class AsyncTestCase(unittest.TestCase):
'amqp://guest:guest@localhost:5672/%2F')
super(AsyncTestCase, self).setUp()
+ def tearDown(self):
+ self._stop()
+
def shortDescription(self):
method_desc = super(AsyncTestCase, self).shortDescription()
if self.DESCRIPTION:
@@ -31,11 +43,12 @@ class AsyncTestCase(unittest.TestCase):
else:
return method_desc
- def begin(self, channel):
+ def begin(self, channel): # pylint: disable=R0201,W0613
"""Extend to start the actual tests on the channel"""
- raise AssertionError("AsyncTestCase.begin_test not extended")
+ self.fail("AsyncTestCase.begin_test not extended")
def start(self, adapter=None):
+ self.logger.info('start at %s', datetime.utcnow())
self.adapter = adapter or self.ADAPTER
self.connection = self.adapter(self.parameters, self.on_open,
@@ -53,19 +66,18 @@ class AsyncTestCase(unittest.TestCase):
def _stop(self):
if hasattr(self, 'timeout') and self.timeout:
+ self.logger.info("Removing timeout")
self.connection.remove_timeout(self.timeout)
self.timeout = None
if hasattr(self, 'connection') and self.connection:
+ self.logger.info("Stopping ioloop")
self.connection.ioloop.stop()
self.connection = None
- def tearDown(self):
- self._stop()
-
def on_closed(self, connection, reply_code, reply_text):
"""called when the connection has finished closing"""
- self.logger.debug('on_closed: %r %r %r', connection,
- reply_code, reply_text)
+ self.logger.info('on_closed: %r %r %r', connection,
+ reply_code, reply_text)
self._stop()
def on_open(self, connection):
@@ -73,29 +85,25 @@ class AsyncTestCase(unittest.TestCase):
self.channel = connection.channel(self.begin)
def on_open_error(self, connection, error):
- self.logger.debug('on_open_error: %r %r', connection, error)
+ self.logger.error('on_open_error: %r %r', connection, error)
connection.ioloop.stop()
raise AssertionError('Error connecting to RabbitMQ')
def on_timeout(self):
"""called when stuck waiting for connection to close"""
+ self.logger.info('on_timeout at %s', datetime.utcnow())
# force the ioloop to stop
- self.logger.debug('on_timeout')
+ self.logger.debug('on_timeout called')
self.connection.ioloop.stop()
raise AssertionError('Test timed out')
class BoundQueueTestCase(AsyncTestCase):
- def tearDown(self):
- """Cleanup auto-declared queue and exchange"""
- self._cconn = self.adapter(self.parameters, self._on_cconn_open,
- self._on_cconn_error, self._on_cconn_closed)
-
def start(self, adapter=None):
# PY3 compat encoding
- self.exchange = 'e' + str(id(self))
- self.queue = 'q' + str(id(self))
+ self.exchange = 'e-' + self.__class__.__name__ + ':' + uuid.uuid1().hex
+ self.queue = 'q-' + self.__class__.__name__ + ':' + uuid.uuid1().hex
self.routing_key = self.__class__.__name__
super(BoundQueueTestCase, self).start(adapter)
@@ -106,82 +114,70 @@ class BoundQueueTestCase(AsyncTestCase):
durable=False,
auto_delete=True)
- def on_exchange_declared(self, frame):
+ def on_exchange_declared(self, frame): # pylint: disable=W0613
self.channel.queue_declare(self.on_queue_declared, self.queue,
passive=False,
durable=False,
exclusive=True,
auto_delete=True,
nowait=False,
- arguments={'x-expires': self.TIMEOUT * 1000}
- )
+ arguments={'x-expires': self.TIMEOUT * 1000})
- def on_queue_declared(self, frame):
+ def on_queue_declared(self, frame): # pylint: disable=W0613
self.channel.queue_bind(self.on_ready, self.queue, self.exchange,
self.routing_key)
def on_ready(self, frame):
raise NotImplementedError
- def _on_cconn_closed(self, cconn, *args, **kwargs):
- cconn.ioloop.stop()
- self._cconn = None
-
- def _on_cconn_error(self, connection):
- connection.ioloop.stop()
- raise AssertionError('Error connecting to RabbitMQ')
-
- def _on_cconn_open(self, connection):
- connection.channel(self._on_cconn_channel)
-
- def _on_cconn_channel(self, channel):
- channel.exchange_delete(None, self.exchange, nowait=True)
- channel.queue_delete(None, self.queue, nowait=True)
- self._cconn.close()
#
# In order to write test cases that will tested using all the Async Adapters
-# write a class that inherits both from one of TestCase classes above and
+# write a class that inherits both from one of TestCase classes above and
# from the AsyncAdapters class below. This allows you to avoid duplicating the
# test methods for each adapter in each test class.
#
class AsyncAdapters(object):
+ def start(self, adapter_class):
+ raise NotImplementedError
+
def select_default_test(self):
"SelectConnection:DefaultPoller"
- select_connection.POLLER_TYPE=None
+ select_connection.POLLER_TYPE = None
self.start(adapters.SelectConnection)
def select_select_test(self):
"SelectConnection:select"
- select_connection.POLLER_TYPE='select'
+ select_connection.POLLER_TYPE = 'select'
self.start(adapters.SelectConnection)
- @unittest.skipIf(not hasattr(select, 'poll')
- or not hasattr(select.poll(), 'modify'), "poll not supported")
+ @unittest.skipIf(
+ not hasattr(select, 'poll') or
+ not hasattr(select.poll(), 'modify'), "poll not supported") # pylint: disable=E1101
def select_poll_test(self):
"SelectConnection:poll"
- select_connection.POLLER_TYPE='poll'
+ select_connection.POLLER_TYPE = 'poll'
self.start(adapters.SelectConnection)
@unittest.skipIf(not hasattr(select, 'epoll'), "epoll not supported")
def select_epoll_test(self):
"SelectConnection:epoll"
- select_connection.POLLER_TYPE='epoll'
+ select_connection.POLLER_TYPE = 'epoll'
self.start(adapters.SelectConnection)
@unittest.skipIf(not hasattr(select, 'kqueue'), "kqueue not supported")
def select_kqueue_test(self):
"SelectConnection:kqueue"
- select_connection.POLLER_TYPE='kqueue'
+ select_connection.POLLER_TYPE = 'kqueue'
self.start(adapters.SelectConnection)
def tornado_test(self):
"TornadoConnection"
self.start(adapters.TornadoConnection)
- @unittest.skipIf(target == 'PyPy', 'PyPy is not supported')
+ @unittest.skipIf(_TARGET == 'PyPy', 'PyPy is not supported')
@unittest.skipIf(adapters.LibevConnection is None, 'pyev is not installed')
def libev_test(self):
"LibevConnection"
diff --git a/tests/unit/channel_tests.py b/tests/unit/channel_tests.py
index a80996a..57c5178 100644
--- a/tests/unit/channel_tests.py
+++ b/tests/unit/channel_tests.py
@@ -2,6 +2,10 @@
Tests for pika.channel.ContentFrameDispatcher
"""
+
+# Disable pylint warning about Access to a protected member
+# pylint: disable=W0212
+
import collections
import logging
@@ -16,17 +20,35 @@ except ImportError:
import unittest
import warnings
+from pika import callback
from pika import channel
+from pika import connection
from pika import exceptions
from pika import frame
from pika import spec
+class ConnectionTemplate(connection.Connection):
+ """Template for using as mock spec_set for the pika Connection class. It
+ defines members accessed by the code under test that would be defined in the
+ base class's constructor.
+ """
+ callbacks = None
+
+ # Suppress pylint warnings about specific abstract methods not being
+ # overridden
+ _adapter_connect = connection.Connection._adapter_connect
+ _adapter_disconnect = connection.Connection._adapter_disconnect
+ _flush_outbound = connection.Connection._flush_outbound
+ add_timeout = connection.Connection.add_timeout
+ remove_timeout = connection.Connection.remove_timeout
+
+
class ChannelTests(unittest.TestCase):
- @mock.patch('pika.connection.Connection')
- def _create_connection(self, connection=None):
- return connection
+ @mock.patch('pika.connection.Connection', autospec=ConnectionTemplate)
+ def _create_connection(self, connectionClassMock=None):
+ return connectionClassMock()
def setUp(self):
self.connection = self._create_connection()
@@ -440,14 +462,12 @@ class ChannelTests(unittest.TestCase):
self.obj.confirm_delivery(logging.debug)
self.obj.callbacks.add.assert_called_with(*expectation, arguments=None)
- def test_confirm_delivery_callback_with_nowait(self):
+ def test_confirm_delivery_callback_with_nowait_raises_value_error(self):
self.obj._set_state(self.obj.OPEN)
expectation = [self.obj.channel_number, spec.Confirm.SelectOk,
self.obj._on_selectok]
- self.obj.confirm_delivery(logging.debug, True)
- self.assertNotIn(mock.call(*expectation,
- arguments=None),
- self.obj.callbacks.add.call_args_list)
+ with self.assertRaises(ValueError):
+ self.obj.confirm_delivery(logging.debug, True)
def test_confirm_delivery_callback_basic_ack(self):
self.obj._set_state(self.obj.OPEN)
@@ -847,7 +867,6 @@ class ChannelTests(unittest.TestCase):
def test_add_callbacks_basic_get_empty_added(self):
self.obj._add_callbacks()
- print(self.obj.callbacks.add.__dict__)
self.obj.callbacks.add.assert_any_call(self.obj.channel_number,
spec.Basic.GetEmpty,
self.obj._on_getempty, False)
@@ -1153,20 +1172,22 @@ class ChannelTests(unittest.TestCase):
def test_rpc_raises_channel_closed(self):
self.assertRaises(exceptions.ChannelClosed, self.obj._rpc,
- frame.Method(self.obj.channel_number,
- spec.Basic.Ack(1)))
+ spec.Basic.Cancel('tag_abc'))
def test_rpc_while_blocking_appends_blocked_collection(self):
self.obj._set_state(self.obj.OPEN)
self.obj._blocking = spec.Confirm.Select()
- expectation = [frame.Method(self.obj.channel_number, spec.Basic.Ack(1)),
- 'Foo', None]
+ acceptable_replies = [
+ (spec.Basic.CancelOk, {'consumer_tag': 'tag_abc'})]
+ expectation = [spec.Basic.Cancel('tag_abc'), lambda *args: None,
+ acceptable_replies]
self.obj._rpc(*expectation)
self.assertIn(expectation, self.obj._blocked)
def test_rpc_throws_value_error_with_unacceptable_replies(self):
self.obj._set_state(self.obj.OPEN)
- self.assertRaises(TypeError, self.obj._rpc, spec.Basic.Ack(1),
+ self.assertRaises(TypeError, self.obj._rpc,
+ spec.Basic.Cancel('tag_abc'),
logging.debug, 'Foo')
def test_rpc_throws_type_error_with_invalid_callback(self):
@@ -1174,15 +1195,27 @@ class ChannelTests(unittest.TestCase):
self.assertRaises(TypeError, self.obj._rpc, spec.Channel.Open(1),
['foo'], [spec.Channel.OpenOk])
- def test_rpc_adds_on_synchronous_complete(self):
+ def test_rpc_enters_blocking_and_adds_on_synchronous_complete(self):
self.obj._set_state(self.obj.OPEN)
method_frame = spec.Channel.Open()
self.obj._rpc(method_frame, None, [spec.Channel.OpenOk])
+ self.assertEqual(self.obj._blocking, method_frame.NAME)
self.obj.callbacks.add.assert_called_with(
self.obj.channel_number, spec.Channel.OpenOk,
self.obj._on_synchronous_complete,
arguments=None)
+ def test_rpc_not_blocking_and_no_on_synchronous_complete_when_no_replies(self):
+ self.obj._set_state(self.obj.OPEN)
+ method_frame = spec.Channel.Open()
+ self.obj._rpc(method_frame, None, acceptable_replies=[])
+ self.assertIsNone(self.obj._blocking)
+ with self.assertRaises(AssertionError):
+ self.obj.callbacks.add.assert_called_with(
+ mock.ANY, mock.ANY,
+ self.obj._on_synchronous_complete,
+ arguments=mock.ANY)
+
def test_rpc_adds_callback(self):
self.obj._set_state(self.obj.OPEN)
method_frame = spec.Channel.Open()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 2
} | 0.10 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"yapf",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc libev-dev"
],
"python": "3.5",
"reqs_path": [
"test-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
codecov==2.1.13
coverage==6.2
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
mock==5.2.0
nose==1.3.7
packaging==21.3
-e git+https://github.com/pika/pika.git@d8a782d97579cd96ed67ccfb55f63ca8fdafa199#egg=pika
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
requests==2.27.1
tomli==1.2.3
tornado==6.1
Twisted==15.3.0
typing_extensions==4.1.1
urllib3==1.26.20
yapf==0.32.0
zipp==3.6.0
zope.interface==5.5.2
| name: pika
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- codecov==2.1.13
- coverage==6.2
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- mock==5.2.0
- nose==1.3.7
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- requests==2.27.1
- tomli==1.2.3
- tornado==6.1
- twisted==15.3.0
- typing-extensions==4.1.1
- urllib3==1.26.20
- yapf==0.32.0
- zipp==3.6.0
- zope-interface==5.5.2
prefix: /opt/conda/envs/pika
| [
"tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_callback_with_nowait_raises_value_error",
"tests/unit/channel_tests.py::ChannelTests::test_rpc_not_blocking_and_no_on_synchronous_complete_when_no_replies"
]
| []
| [
"tests/unit/channel_tests.py::ChannelTests::test_add_callback",
"tests/unit/channel_tests.py::ChannelTests::test_add_callback_multiple_replies",
"tests/unit/channel_tests.py::ChannelTests::test_add_callbacks_basic_cancel_empty_added",
"tests/unit/channel_tests.py::ChannelTests::test_add_callbacks_basic_get_empty_added",
"tests/unit/channel_tests.py::ChannelTests::test_add_callbacks_channel_close_added",
"tests/unit/channel_tests.py::ChannelTests::test_add_callbacks_channel_flow_added",
"tests/unit/channel_tests.py::ChannelTests::test_add_on_cancel_callback",
"tests/unit/channel_tests.py::ChannelTests::test_add_on_close_callback",
"tests/unit/channel_tests.py::ChannelTests::test_add_on_flow_callback",
"tests/unit/channel_tests.py::ChannelTests::test_add_on_return_callback",
"tests/unit/channel_tests.py::ChannelTests::test_basic_ack_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_basic_cancel_callback_appended",
"tests/unit/channel_tests.py::ChannelTests::test_basic_cancel_calls_validate",
"tests/unit/channel_tests.py::ChannelTests::test_basic_cancel_channel_cancelled_appended",
"tests/unit/channel_tests.py::ChannelTests::test_basic_cancel_no_consumer_tag",
"tests/unit/channel_tests.py::ChannelTests::test_basic_cancel_on_cancel_appended",
"tests/unit/channel_tests.py::ChannelTests::test_basic_cancel_raises_value_error",
"tests/unit/channel_tests.py::ChannelTests::test_basic_cancel_then_close",
"tests/unit/channel_tests.py::ChannelTests::test_basic_consume_calls_validate",
"tests/unit/channel_tests.py::ChannelTests::test_basic_consume_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_basic_consume_consumer_tag",
"tests/unit/channel_tests.py::ChannelTests::test_basic_consume_consumer_tag_cancelled_full",
"tests/unit/channel_tests.py::ChannelTests::test_basic_consume_consumer_tag_in_consumers",
"tests/unit/channel_tests.py::ChannelTests::test_basic_consume_consumers_callback_value",
"tests/unit/channel_tests.py::ChannelTests::test_basic_consume_consumers_pending_list_is_empty",
"tests/unit/channel_tests.py::ChannelTests::test_basic_consume_consumers_rpc_called",
"tests/unit/channel_tests.py::ChannelTests::test_basic_consume_duplicate_consumer_tag_raises",
"tests/unit/channel_tests.py::ChannelTests::test_basic_consume_has_pending_list",
"tests/unit/channel_tests.py::ChannelTests::test_basic_get_callback",
"tests/unit/channel_tests.py::ChannelTests::test_basic_get_calls_validate",
"tests/unit/channel_tests.py::ChannelTests::test_basic_get_send_method_called",
"tests/unit/channel_tests.py::ChannelTests::test_basic_nack_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_basic_nack_send_method_request",
"tests/unit/channel_tests.py::ChannelTests::test_basic_publish_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_basic_publish_send_method_request",
"tests/unit/channel_tests.py::ChannelTests::test_basic_qos_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_basic_qos_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_basic_recover_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_basic_recover_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_basic_reject_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_basic_reject_send_method_request",
"tests/unit/channel_tests.py::ChannelTests::test_basic_send_method_calls_rpc",
"tests/unit/channel_tests.py::ChannelTests::test_channel_open_add_callbacks_called",
"tests/unit/channel_tests.py::ChannelTests::test_cleanup",
"tests/unit/channel_tests.py::ChannelTests::test_close_basic_cancel_called",
"tests/unit/channel_tests.py::ChannelTests::test_close_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_close_state",
"tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_callback_basic_ack",
"tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_callback_basic_nack",
"tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_callback_without_nowait_selectok",
"tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_no_callback_callback_call_count",
"tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_no_callback_no_basic_ack_callback",
"tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_no_callback_no_basic_nack_callback",
"tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_raises_method_not_implemented_for_confirms",
"tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_raises_method_not_implemented_for_nack",
"tests/unit/channel_tests.py::ChannelTests::test_consumer_tags",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_bind_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_bind_raises_value_error_on_invalid_callback",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_bind_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_bind_rpc_request_nowait",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_declare_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_declare_raises_value_error_on_invalid_callback",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_declare_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_declare_rpc_request_nowait",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_declare_with_type_arg_assigns_to_exchange_type",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_declare_with_type_arg_raises_deprecation_warning",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_delete_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_delete_raises_value_error_on_invalid_callback",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_delete_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_delete_rpc_request_nowait",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_unbind_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_unbind_raises_value_error_on_invalid_callback",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_unbind_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_unbind_rpc_request_nowait",
"tests/unit/channel_tests.py::ChannelTests::test_flow_off_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_flow_on_flowok_callback",
"tests/unit/channel_tests.py::ChannelTests::test_flow_on_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_flow_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_flow_raises_invalid_callback",
"tests/unit/channel_tests.py::ChannelTests::test_get_pending_message",
"tests/unit/channel_tests.py::ChannelTests::test_get_pending_message_item_popped",
"tests/unit/channel_tests.py::ChannelTests::test_handle_content_frame_basic_deliver_called",
"tests/unit/channel_tests.py::ChannelTests::test_handle_content_frame_basic_get_called",
"tests/unit/channel_tests.py::ChannelTests::test_handle_content_frame_basic_return_called",
"tests/unit/channel_tests.py::ChannelTests::test_handle_content_frame_method_returns_none",
"tests/unit/channel_tests.py::ChannelTests::test_handle_content_frame_sets_header_frame",
"tests/unit/channel_tests.py::ChannelTests::test_handle_content_frame_sets_method_frame",
"tests/unit/channel_tests.py::ChannelTests::test_has_content_false",
"tests/unit/channel_tests.py::ChannelTests::test_has_content_true",
"tests/unit/channel_tests.py::ChannelTests::test_immediate_called_logger_warning",
"tests/unit/channel_tests.py::ChannelTests::test_init_blocked",
"tests/unit/channel_tests.py::ChannelTests::test_init_blocking",
"tests/unit/channel_tests.py::ChannelTests::test_init_callbacks",
"tests/unit/channel_tests.py::ChannelTests::test_init_cancelled",
"tests/unit/channel_tests.py::ChannelTests::test_init_channel_number",
"tests/unit/channel_tests.py::ChannelTests::test_init_connection",
"tests/unit/channel_tests.py::ChannelTests::test_init_consumers",
"tests/unit/channel_tests.py::ChannelTests::test_init_frame_dispatcher",
"tests/unit/channel_tests.py::ChannelTests::test_init_has_on_flow_callback",
"tests/unit/channel_tests.py::ChannelTests::test_init_invalid_channel_number",
"tests/unit/channel_tests.py::ChannelTests::test_init_on_flowok_callback",
"tests/unit/channel_tests.py::ChannelTests::test_init_on_getok_callback",
"tests/unit/channel_tests.py::ChannelTests::test_init_on_openok_callback",
"tests/unit/channel_tests.py::ChannelTests::test_init_pending",
"tests/unit/channel_tests.py::ChannelTests::test_init_state",
"tests/unit/channel_tests.py::ChannelTests::test_is_closed_false",
"tests/unit/channel_tests.py::ChannelTests::test_is_closed_true",
"tests/unit/channel_tests.py::ChannelTests::test_is_closing_false",
"tests/unit/channel_tests.py::ChannelTests::test_is_closing_true",
"tests/unit/channel_tests.py::ChannelTests::test_on_cancel_not_appended_cancelled",
"tests/unit/channel_tests.py::ChannelTests::test_on_cancel_removed_consumer",
"tests/unit/channel_tests.py::ChannelTests::test_on_cancelok_removed_consumer",
"tests/unit/channel_tests.py::ChannelTests::test_on_cancelok_removed_pending",
"tests/unit/channel_tests.py::ChannelTests::test_on_close_warning",
"tests/unit/channel_tests.py::ChannelTests::test_on_confirm_selectok",
"tests/unit/channel_tests.py::ChannelTests::test_on_deliver_callback_called",
"tests/unit/channel_tests.py::ChannelTests::test_on_deliver_pending_callbacks_called",
"tests/unit/channel_tests.py::ChannelTests::test_on_deliver_pending_called",
"tests/unit/channel_tests.py::ChannelTests::test_on_eventok",
"tests/unit/channel_tests.py::ChannelTests::test_on_flow",
"tests/unit/channel_tests.py::ChannelTests::test_on_flow_with_callback",
"tests/unit/channel_tests.py::ChannelTests::test_on_flowok",
"tests/unit/channel_tests.py::ChannelTests::test_on_flowok_callback_reset",
"tests/unit/channel_tests.py::ChannelTests::test_on_flowok_calls_callback",
"tests/unit/channel_tests.py::ChannelTests::test_on_getempty",
"tests/unit/channel_tests.py::ChannelTests::test_on_getok_callback_called",
"tests/unit/channel_tests.py::ChannelTests::test_on_getok_callback_reset",
"tests/unit/channel_tests.py::ChannelTests::test_on_getok_no_callback",
"tests/unit/channel_tests.py::ChannelTests::test_on_openok_callback_called",
"tests/unit/channel_tests.py::ChannelTests::test_on_openok_no_callback",
"tests/unit/channel_tests.py::ChannelTests::test_on_synchronous_complete",
"tests/unit/channel_tests.py::ChannelTests::test_onreturn",
"tests/unit/channel_tests.py::ChannelTests::test_onreturn_warning",
"tests/unit/channel_tests.py::ChannelTests::test_queue_bind_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_queue_bind_raises_value_error_on_invalid_callback",
"tests/unit/channel_tests.py::ChannelTests::test_queue_bind_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_queue_bind_rpc_request_nowait",
"tests/unit/channel_tests.py::ChannelTests::test_queue_declare_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_queue_declare_raises_value_error_on_invalid_callback",
"tests/unit/channel_tests.py::ChannelTests::test_queue_declare_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_queue_declare_rpc_request_nowait",
"tests/unit/channel_tests.py::ChannelTests::test_queue_delete_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_queue_delete_raises_value_error_on_invalid_callback",
"tests/unit/channel_tests.py::ChannelTests::test_queue_delete_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_queue_delete_rpc_request_nowait",
"tests/unit/channel_tests.py::ChannelTests::test_queue_purge_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_queue_purge_raises_value_error_on_invalid_callback",
"tests/unit/channel_tests.py::ChannelTests::test_queue_purge_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_queue_purge_rpc_request_nowait",
"tests/unit/channel_tests.py::ChannelTests::test_queue_unbind_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_queue_unbind_raises_value_error_on_invalid_callback",
"tests/unit/channel_tests.py::ChannelTests::test_queue_unbind_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_rpc_adds_callback",
"tests/unit/channel_tests.py::ChannelTests::test_rpc_enters_blocking_and_adds_on_synchronous_complete",
"tests/unit/channel_tests.py::ChannelTests::test_rpc_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_rpc_throws_type_error_with_invalid_callback",
"tests/unit/channel_tests.py::ChannelTests::test_rpc_throws_value_error_with_unacceptable_replies",
"tests/unit/channel_tests.py::ChannelTests::test_rpc_while_blocking_appends_blocked_collection",
"tests/unit/channel_tests.py::ChannelTests::test_send_method",
"tests/unit/channel_tests.py::ChannelTests::test_set_state",
"tests/unit/channel_tests.py::ChannelTests::test_tx_commit_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_tx_commit_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_tx_rollback_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_tx_select_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_validate_channel_and_callback_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_validate_channel_and_callback_raises_value_error_not_callable"
]
| []
| BSD 3-Clause "New" or "Revised" License | 342 | [
"pika/channel.py",
"pika/adapters/libev_connection.py"
]
| [
"pika/channel.py",
"pika/adapters/libev_connection.py"
]
|
jupyter-incubator__sparkmagic-84 | 3c0230b30b1f63780fbf2d4ba9d83e0a83f51eea | 2015-12-19 08:44:24 | 3c0230b30b1f63780fbf2d4ba9d83e0a83f51eea | diff --git a/remotespark/default_config.json b/remotespark/default_config.json
index 3be83bf..ea4adaa 100644
--- a/remotespark/default_config.json
+++ b/remotespark/default_config.json
@@ -47,5 +47,10 @@
"fatal_error_suggestion": "The code failed because of a fatal error:\n\t{}.\n\nSome things to try:\na) Make sure Spark has enough available resources for Jupyter to create a Spark context.\nb) Contact your Jupyter administrator to make sure the Spark magics library is configured correctly.\nc) Restart the kernel.",
- "ignore_ssl_errors": false
+ "ignore_ssl_errors": false,
+
+ "session_configs": {
+ "driverMemory": "1000M",
+ "executorCores": 2
+ }
}
diff --git a/remotespark/livyclientlib/clientmanagerstateserializer.py b/remotespark/livyclientlib/clientmanagerstateserializer.py
index 8a04bce..f4615ea 100644
--- a/remotespark/livyclientlib/clientmanagerstateserializer.py
+++ b/remotespark/livyclientlib/clientmanagerstateserializer.py
@@ -38,11 +38,11 @@ class ClientManagerStateSerializer(object):
name = client["name"]
session_id = client["id"]
sql_context_created = client["sqlcontext"]
- language = client["language"]
+ kind = client["kind"].lower()
connection_string = client["connectionstring"]
session = self._client_factory.create_session(
- language, connection_string, session_id, sql_context_created)
+ connection_string, session_id, sql_context_created, {"kind": kind})
# Do not start session automatically. Just create it but skip is not existent.
try:
@@ -50,7 +50,7 @@ class ClientManagerStateSerializer(object):
status = session.status
if not session.is_final_status(status):
self.logger.debug("Adding session {}".format(session_id))
- client_obj = self._client_factory.build_client(language, session)
+ client_obj = self._client_factory.build_client(session)
clients_to_return.append((name, client_obj))
else:
self.logger.error("Skipping serialized session '{}' because session was in status {}."
diff --git a/remotespark/livyclientlib/livyclient.py b/remotespark/livyclientlib/livyclient.py
index 6632b2a..f216c44 100644
--- a/remotespark/livyclientlib/livyclient.py
+++ b/remotespark/livyclientlib/livyclient.py
@@ -34,8 +34,8 @@ class LivyClient(object):
self._session.delete()
@property
- def language(self):
- return self._session.language
+ def kind(self):
+ return self._session.kind
@property
def session_id(self):
diff --git a/remotespark/livyclientlib/livyclientfactory.py b/remotespark/livyclientlib/livyclientfactory.py
index 7599ddd..f7b2ae3 100644
--- a/remotespark/livyclientlib/livyclientfactory.py
+++ b/remotespark/livyclientlib/livyclientfactory.py
@@ -17,23 +17,24 @@ class LivyClientFactory(object):
self.logger = Log("LivyClientFactory")
self.max_results = 2500
- def build_client(self, language, session):
+ def build_client(self, session):
assert session is not None
+ kind = session.kind
- if language == Constants.lang_python:
+ if kind == Constants.session_kind_pyspark:
return PandasPysparkLivyClient(session, self.max_results)
- elif language == Constants.lang_scala:
+ elif kind == Constants.session_kind_spark:
return PandasScalaLivyClient(session, self.max_results)
else:
- raise ValueError("Language '{}' is not supported.".format(language))
+ raise ValueError("Kind '{}' is not supported.".format(kind))
@staticmethod
- def create_session(language, connection_string, session_id="-1", sql_created=False):
+ def create_session(connection_string, properties, session_id="-1", sql_created=False):
cso = get_connection_string_elements(connection_string)
retry_policy = LinearRetryPolicy(seconds_to_sleep=5, max_retries=5)
http_client = LivyReliableHttpClient(cso.url, cso.username, cso.password, retry_policy)
- session = LivySession(http_client, language, session_id, sql_created)
+ session = LivySession(http_client, session_id, sql_created, properties)
return session
diff --git a/remotespark/livyclientlib/livysession.py b/remotespark/livyclientlib/livysession.py
index 247ab84..7c8efaf 100644
--- a/remotespark/livyclientlib/livysession.py
+++ b/remotespark/livyclientlib/livysession.py
@@ -7,7 +7,6 @@ from time import sleep, time
import remotespark.utils.configuration as conf
from remotespark.utils.constants import Constants
from remotespark.utils.log import Log
-from remotespark.utils.utils import get_instance_id
from .livyclienttimeouterror import LivyClientTimeoutError
from .livyunexpectedstatuserror import LivyUnexpectedStatusError
from .livysessionstate import LivySessionState
@@ -15,9 +14,12 @@ from .livysessionstate import LivySessionState
class LivySession(object):
"""Session that is livy specific."""
- # TODO(aggftw): make threadsafe
- def __init__(self, http_client, language, session_id, sql_created):
+ def __init__(self, http_client, session_id, sql_created, properties):
+ assert "kind" in properties.keys()
+ kind = properties["kind"]
+ self.properties = properties
+
status_sleep_seconds = conf.status_sleep_seconds()
statement_sleep_seconds = conf.statement_sleep_seconds()
create_sql_context_timeout_seconds = conf.create_sql_context_timeout_seconds()
@@ -30,10 +32,10 @@ class LivySession(object):
self.logger = Log("LivySession")
- language = language.lower()
- if language not in Constants.lang_supported:
- raise ValueError("Session of language '{}' not supported. Session must be of languages {}."
- .format(language, ", ".join(Constants.lang_supported)))
+ kind = kind.lower()
+ if kind not in Constants.session_kinds_supported:
+ raise ValueError("Session of kind '{}' not supported. Session must be of kinds {}."
+ .format(kind, ", ".join(Constants.session_kinds_supported)))
if session_id == "-1":
self._status = Constants.not_started_session_status
@@ -41,45 +43,44 @@ class LivySession(object):
else:
self._status = Constants.busy_session_status
+ self._logs = ""
self._http_client = http_client
self._status_sleep_seconds = status_sleep_seconds
self._statement_sleep_seconds = statement_sleep_seconds
self._create_sql_context_timeout_seconds = create_sql_context_timeout_seconds
self._state = LivySessionState(session_id, http_client.connection_string,
- language, sql_created)
+ kind, sql_created)
def get_state(self):
return self._state
def start(self):
"""Start the session against actual livy server."""
- # TODO(aggftw): do a pass to make all contracts variables; i.e. not peppered in code
- self.logger.debug("Starting '{}' session.".format(self.language))
+ self.logger.debug("Starting '{}' session.".format(self.kind))
- app_name = "remotesparkmagics_{}".format(get_instance_id())
- r = self._http_client.post("/sessions", [201], {"kind": self._get_livy_kind(), "name": app_name})
+ r = self._http_client.post("/sessions", [201], self.properties)
self._state.session_id = str(r.json()["id"])
self._status = str(r.json()["state"])
- self.logger.debug("Session '{}' started.".format(self.language))
+ self.logger.debug("Session '{}' started.".format(self.kind))
def create_sql_context(self):
"""Create a sqlContext object on the session. Object will be accessible via variable 'sqlContext'."""
if self.started_sql_context:
return
- self.logger.debug("Starting '{}' sql and hive session.".format(self.language))
+ self.logger.debug("Starting '{}' sql and hive session.".format(self.kind))
- self._create_context("sql")
- self._create_context("hive")
+ self._create_context(Constants.context_name_sql)
+ self._create_context(Constants.context_name_hive)
self._state.sql_context_created = True
def _create_context(self, context_type):
- if context_type == "sql":
+ if context_type == Constants.context_name_sql:
command = self._get_sql_context_creation_command()
- elif context_type == "hive":
+ elif context_type == Constants.context_name_hive:
command = self._get_hive_context_creation_command()
else:
raise ValueError("Cannot create context of type {}.".format(context_type))
@@ -87,7 +88,7 @@ class LivySession(object):
try:
self.wait_for_idle(self._create_sql_context_timeout_seconds)
self.execute(command)
- self.logger.debug("Started '{}' {} session.".format(self.language, context_type))
+ self.logger.debug("Started '{}' {} session.".format(self.kind, context_type))
except LivyClientTimeoutError:
raise LivyClientTimeoutError("Failed to create the {} context in time. Timed out after {} seconds."
.format(context_type, self._create_sql_context_timeout_seconds))
@@ -101,8 +102,8 @@ class LivySession(object):
return self._state.sql_context_created
@property
- def language(self):
- return self._state.language
+ def kind(self):
+ return self._state.kind
def refresh_status(self):
(status, logs) = self._get_latest_status_and_logs()
@@ -124,7 +125,6 @@ class LivySession(object):
return status in Constants.final_status
def execute(self, commands):
- """Executes commands in session."""
code = textwrap.dedent(commands)
data = {"code": code}
@@ -134,7 +134,6 @@ class LivySession(object):
return self._get_statement_output(statement_id)
def delete(self):
- """Deletes the session and releases any resources."""
self.logger.debug("Deleting session '{}'".format(self.id))
if self._status != Constants.not_started_session_status and self._status != Constants.dead_session_status:
@@ -147,7 +146,11 @@ class LivySession(object):
def wait_for_idle(self, seconds_to_wait):
"""Wait for session to go to idle status. Sleep meanwhile. Calls done every status_sleep_seconds as
- indicated by the constructor."""
+ indicated by the constructor.
+
+ Parameters:
+ seconds_to_wait : number of seconds to wait before giving up.
+ """
self.refresh_status()
current_status = self._status
@@ -187,11 +190,11 @@ class LivySession(object):
.format(self.id, len(filtered_sessions)))
session = filtered_sessions[0]
- return (session['state'], session['log'])
+ return session['state'], session['log']
def _get_statement_output(self, statement_id):
statement_running = True
- output = ""
+ out = ""
while statement_running:
r = self._http_client.get(self._statements_url(), [200])
statement = [i for i in r.json()["statements"] if i["id"] == statement_id][0]
@@ -208,36 +211,31 @@ class LivySession(object):
if statement_output["status"] == "ok":
out = (True, statement_output["data"]["text/plain"])
elif statement_output["status"] == "error":
- out = (False, statement_output["evalue"] + "\n" + \
- "".join(statement_output["traceback"]))
- return out
+ out = (False, statement_output["evalue"] + "\n" +
+ "".join(statement_output["traceback"]))
+ else:
+ raise ValueError("Unknown output status: '{}'".format(statement_output["status"]))
- def _get_livy_kind(self):
- if self.language == Constants.lang_scala:
- return Constants.session_kind_spark
- elif self.language == Constants.lang_python:
- return Constants.session_kind_pyspark
- else:
- raise ValueError("Cannot get session kind for {}.".format(self.language))
+ return out
def _get_sql_context_creation_command(self):
- if self.language == Constants.lang_scala:
+ if self.kind == Constants.session_kind_spark:
sql_context_command = "val sqlContext = new org.apache.spark.sql.SQLContext(sc)\n" \
"import sqlContext.implicits._"
- elif self.language == Constants.lang_python:
+ elif self.kind == Constants.session_kind_pyspark:
sql_context_command = "from pyspark.sql import SQLContext\nfrom pyspark.sql.types import *\n" \
"sqlContext = SQLContext(sc)"
else:
- raise ValueError("Do not know how to create sqlContext in session of language {}.".format(self.language))
+ raise ValueError("Do not know how to create sqlContext in session of kind {}.".format(self.kind))
return sql_context_command
def _get_hive_context_creation_command(self):
- if self.language == Constants.lang_scala:
+ if self.kind == Constants.session_kind_spark:
hive_context_command = "val hiveContext = new org.apache.spark.sql.hive.HiveContext(sc)"
- elif self.language == Constants.lang_python:
+ elif self.kind == Constants.session_kind_pyspark:
hive_context_command = "from pyspark.sql import HiveContext\nhiveContext = HiveContext(sc)"
else:
- raise ValueError("Do not know how to create hiveContext in session of language {}.".format(self.language))
+ raise ValueError("Do not know how to create hiveContext in session of kind {}.".format(self.kind))
return hive_context_command
diff --git a/remotespark/livyclientlib/livysessionstate.py b/remotespark/livyclientlib/livysessionstate.py
index 66654f1..c86dc97 100644
--- a/remotespark/livyclientlib/livysessionstate.py
+++ b/remotespark/livyclientlib/livysessionstate.py
@@ -3,9 +3,9 @@
class LivySessionState(object):
- def __init__(self, session_id, connection_string, language, sql_context_created, version="0.0.0"):
+ def __init__(self, session_id, connection_string, kind, sql_context_created, version="0.0.0"):
self._session_id = session_id
- self._language = language
+ self._kind = kind
self._sql_context_created = sql_context_created
self._version = version
self._connection_string = connection_string
@@ -19,8 +19,8 @@ class LivySessionState(object):
self._session_id = value
@property
- def language(self):
- return self._language
+ def kind(self):
+ return self._kind
@property
def sql_context_created(self):
@@ -39,5 +39,5 @@ class LivySessionState(object):
return self._connection_string
def to_dict(self):
- return {"id": self.session_id, "language": self.language, "sqlcontext": self.sql_context_created,
+ return {"id": self.session_id, "kind": self.kind, "sqlcontext": self.sql_context_created,
"version": self.version, "connectionstring": self.connection_string}
diff --git a/remotespark/livyclientlib/sparkcontroller.py b/remotespark/livyclientlib/sparkcontroller.py
index b3aa9d0..b5ef94a 100644
--- a/remotespark/livyclientlib/sparkcontroller.py
+++ b/remotespark/livyclientlib/sparkcontroller.py
@@ -38,14 +38,14 @@ class SparkController(object):
def delete_session(self, name):
self.client_manager.delete_client(name)
- def add_session(self, name, language, connection_string, skip_if_exists):
+ def add_session(self, name, connection_string, skip_if_exists, properties):
if skip_if_exists and (name in self.client_manager.get_sessions_list()):
self.logger.debug("Skipping {} because it already exists in list of sessions.".format(name))
return
- session = self.client_factory.create_session(language, connection_string, "-1", False)
+ session = self.client_factory.create_session(connection_string, properties, "-1", False)
session.start()
- livy_client = self.client_factory.build_client(language, session)
+ livy_client = self.client_factory.build_client(session)
self.client_manager.add_client(name, livy_client)
def get_client_keys(self):
diff --git a/remotespark/remotesparkmagics.py b/remotespark/remotesparkmagics.py
index ee17f21..821eef8 100644
--- a/remotespark/remotesparkmagics.py
+++ b/remotespark/remotesparkmagics.py
@@ -7,6 +7,8 @@ Provides the %spark magic."""
from __future__ import print_function
from IPython.core.magic import Magics, magics_class, line_cell_magic, needs_local_scope
from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring
+import json
+import copy
import remotespark.utils.configuration as conf
from remotespark.utils.constants import Constants
@@ -41,6 +43,8 @@ class RemoteSparkMagics(Magics):
except KeyError:
self.logger.error("Could not read env vars for serialization.")
+ self.properties = conf.session_configs()
+
self.logger.debug("Initialized spark magics.")
@magic_arguments()
@@ -57,7 +61,7 @@ class RemoteSparkMagics(Magics):
@argument("command", type=str, default=[""], nargs="*", help="Commands to execute.")
@needs_local_scope
@line_cell_magic
- def spark(self, line, cell="", local_ns={}):
+ def spark(self, line, cell="", local_ns=None):
"""Magic to execute spark remotely.
This magic allows you to create a Livy Scala or Python session against a Livy endpoint. Every session can
@@ -68,21 +72,28 @@ class RemoteSparkMagics(Magics):
Subcommands
-----------
info
- Display the mode and available Livy sessions.
+ Display the available Livy sessions and other configurations for sessions.
add
Add a Livy session. First argument is the name of the session, second argument
is the language, and third argument is the connection string of the Livy endpoint.
A fourth argument specifying if session creation can be skipped if it already exists is optional:
"skip" or empty.
- e.g. `%%spark add test python url=https://sparkcluster.example.net/livy;username=admin;password=MyPassword skip`
+ e.g. `%%spark add test python url=https://sparkcluster.net/livy;username=u;password=p skip`
or
- e.g. `%%spark add test python url=https://sparkcluster.example.net/livy;username=admin;password=MyPassword`
+ e.g. `%%spark add test python url=https://sparkcluster.net/livy;username=u;password=p`
+ config
+ Override the livy session properties sent to Livy on session creation. All session creations will
+ contain these config settings from then on.
+ Expected value is a JSON key-value string to be sent as part of the Request Body for the POST /sessions
+ endpoint in Livy.
+ e.g. `%%spark config {"driverMemory":"1000M", "executorCores":4}`
run
Run Spark code against a session.
e.g. `%%spark -e testsession` will execute the cell code against the testsession previously created
e.g. `%%spark -e testsession -c sql` will execute the SQL code against the testsession previously created
- e.g. `%%spark -e testsession -c sql -o my_var` will execute the SQL code against the testsession previously
- created and store the pandas dataframe created in the my_var variable in the Python environment
+ e.g. `%%spark -e testsession -c sql -o my_var` will execute the SQL code against the testsession
+ previously created and store the pandas dataframe created in the my_var variable in the
+ Python environment.
delete
Delete a Livy session. Argument is the name of the session to be deleted.
e.g. `%%spark delete defaultlivy`
@@ -99,18 +110,29 @@ class RemoteSparkMagics(Magics):
# info
if subcommand == "info":
self._print_info()
+ # config
+ elif subcommand == "config":
+ # Would normally do " ".join(args.command[1:]) but parse_argstring removes quotes...
+ rest_of_line = user_input[7:]
+ self.properties = json.loads(rest_of_line)
# add
elif subcommand == "add":
if len(args.command) != 4 and len(args.command) != 5:
raise ValueError("Subcommand 'add' requires three or four arguments. {}".format(usage))
+
name = args.command[1].lower()
- language = args.command[2]
+ language = args.command[2].lower()
connection_string = args.command[3]
+
if len(args.command) == 5:
skip = args.command[4].lower() == "skip"
else:
skip = False
- self.spark_controller.add_session(name, language, connection_string, skip)
+
+ properties = copy.deepcopy(self.properties)
+ properties["kind"] = self._get_livy_kind(language)
+
+ self.spark_controller.add_session(name, connection_string, skip, properties)
# delete
elif subcommand == "delete":
if len(args.command) != 2:
@@ -151,7 +173,21 @@ class RemoteSparkMagics(Magics):
return None
def _print_info(self):
- print("Info for running Spark:\n\t{}\n".format(self.spark_controller.get_client_keys()))
+ print("""Info for running Spark:
+ Sessions:
+ {}
+ Session configs:
+ {}
+""".format(self.spark_controller.get_client_keys(), self.properties))
+
+ @staticmethod
+ def _get_livy_kind(language):
+ if language == Constants.lang_scala:
+ return Constants.session_kind_spark
+ elif language == Constants.lang_python:
+ return Constants.session_kind_pyspark
+ else:
+ raise ValueError("Cannot get session kind for {}.".format(language))
def load_ipython_extension(ip):
diff --git a/remotespark/utils/configuration.py b/remotespark/utils/configuration.py
index cdef740..df09e52 100644
--- a/remotespark/utils/configuration.py
+++ b/remotespark/utils/configuration.py
@@ -68,6 +68,10 @@ def _override(f):
# value of that configuration if there is any such configuration. Otherwise,
# these functions return the default values described in their bodies.
+@_override
+def session_configs():
+ return {}
+
@_override
def serialize():
return False
diff --git a/remotespark/utils/constants.py b/remotespark/utils/constants.py
index 197bd65..6d6a1cc 100644
--- a/remotespark/utils/constants.py
+++ b/remotespark/utils/constants.py
@@ -7,6 +7,7 @@ class Constants:
session_kind_spark = "spark"
session_kind_pyspark = "pyspark"
+ session_kinds_supported = [session_kind_spark, session_kind_pyspark]
context_name_spark = "spark"
context_name_sql = "sql"
| Allow user to specify memory/cores/etc for every session | jupyter-incubator/sparkmagic | diff --git a/tests/test_clientmanagerstateserializer.py b/tests/test_clientmanagerstateserializer.py
index 4310058..4ed8ed7 100644
--- a/tests/test_clientmanagerstateserializer.py
+++ b/tests/test_clientmanagerstateserializer.py
@@ -27,7 +27,7 @@ def test_deserialize_not_emtpy():
"name": "py",
"id": "1",
"sqlcontext": true,
- "language": "python",
+ "kind": "pyspark",
"connectionstring": "url=https://mysite.com/livy;username=user;password=pass",
"version": "0.0.0"
},
@@ -35,7 +35,7 @@ def test_deserialize_not_emtpy():
"name": "sc",
"id": "2",
"sqlcontext": false,
- "language": "scala",
+ "kind": "spark",
"connectionstring": "url=https://mysite.com/livy;username=user;password=pass",
"version": "0.0.0"
}
@@ -50,17 +50,15 @@ def test_deserialize_not_emtpy():
(name, client) = deserialized[0]
assert name == "py"
- client_factory.create_session.assert_any_call("python",
- "url=https://mysite.com/livy;username=user;password=pass",
- "1", True)
- client_factory.build_client.assert_any_call("python", session)
+ client_factory.create_session.assert_any_call("url=https://mysite.com/livy;username=user;password=pass",
+ "1", True, {"kind":"pyspark"})
+ client_factory.build_client.assert_any_call(session)
(name, client) = deserialized[1]
assert name == "sc"
- client_factory.create_session.assert_any_call("scala",
- "url=https://mysite.com/livy;username=user;password=pass",
- "2", False)
- client_factory.build_client.assert_any_call("scala", session)
+ client_factory.create_session.assert_any_call("url=https://mysite.com/livy;username=user;password=pass",
+ "2", False, {"kind":"spark"})
+ client_factory.build_client.assert_any_call(session)
def test_deserialize_not_emtpy_but_dead():
@@ -75,7 +73,7 @@ def test_deserialize_not_emtpy_but_dead():
"name": "py",
"id": "1",
"sqlcontext": true,
- "language": "python",
+ "kind": "pyspark",
"connectionstring": "url=https://mysite.com/livy;username=user;password=pass",
"version": "0.0.0"
},
@@ -83,7 +81,7 @@ def test_deserialize_not_emtpy_but_dead():
"name": "sc",
"id": "2",
"sqlcontext": false,
- "language": "scala",
+ "kind": "spark",
"connectionstring": "url=https://mysite.com/livy;username=user;password=pass",
"version": "0.0.0"
}
@@ -113,7 +111,7 @@ def test_deserialize_not_emtpy_but_error():
"name": "py",
"id": "1",
"sqlcontext": true,
- "language": "python",
+ "kind": "pyspark",
"connectionstring": "url=https://mysite.com/livy;username=user;password=pass",
"version": "0.0.0"
},
@@ -121,7 +119,7 @@ def test_deserialize_not_emtpy_but_error():
"name": "sc",
"id": "2",
"sqlcontext": false,
- "language": "scala",
+ "kind": "spark",
"connectionstring": "url=https://mysite.com/livy;username=user;password=pass",
"version": "0.0.0"
}
@@ -155,11 +153,11 @@ def test_serialize_not_empty():
client_factory = MagicMock()
reader_writer = MagicMock()
client1 = MagicMock()
- client1.serialize.return_value = {"id": "1", "sqlcontext": True, "language": "python",
+ client1.serialize.return_value = {"id": "1", "sqlcontext": True, "kind": "pyspark",
"connectionstring": "url=https://mysite.com/livy;username=user;password=pass",
"version": "0.0.0"}
client2 = MagicMock()
- client2.serialize.return_value = {"id": "2", "sqlcontext": False, "language": "scala",
+ client2.serialize.return_value = {"id": "2", "sqlcontext": False, "kind": "spark",
"connectionstring": "url=https://mysite.com/livy;username=user;password=pass",
"version": "0.0.0"}
serializer = ClientManagerStateSerializer(client_factory, reader_writer)
@@ -169,9 +167,9 @@ def test_serialize_not_empty():
# Verify write was called with following string
expected_str = '{"clients": [{"name": "py", "connectionstring": "url=https://mysite.com/livy;username=user;p' \
- 'assword=pass", "version": "0.0.0", "language": "python", "sqlcontext": true, "id": "1"}, {"n' \
+ 'assword=pass", "version": "0.0.0", "kind": "pyspark", "sqlcontext": true, "id": "1"}, {"n' \
'ame": "sc", "connectionstring": "url=https://mysite.com/livy;username=user;password=pass", "ve' \
- 'rsion": "0.0.0", "language": "scala", "sqlcontext": false, "id": "2"}]}'
+ 'rsion": "0.0.0", "kind": "spark", "sqlcontext": false, "id": "2"}]}'
expected_dict = json.loads(expected_str)
call_list = reader_writer.overwrite_with_line.call_args_list
assert len(call_list) == 1
diff --git a/tests/test_livyclient.py b/tests/test_livyclient.py
index a47e023..57870fa 100644
--- a/tests/test_livyclient.py
+++ b/tests/test_livyclient.py
@@ -3,6 +3,7 @@
from remotespark.livyclientlib.livyclient import LivyClient
from remotespark.livyclientlib.livysessionstate import LivySessionState
from remotespark.utils.utils import get_connection_string
+from remotespark.utils.constants import Constants
def test_create_sql_context_automatically():
@@ -55,7 +56,7 @@ def test_serialize():
connection_string = get_connection_string(url, username, password)
http_client = MagicMock()
http_client.connection_string = connection_string
- kind = "scala"
+ kind = Constants.session_kind_spark
session_id = "-1"
sql_created = False
session = MagicMock()
@@ -67,7 +68,7 @@ def test_serialize():
assert serialized["connectionstring"] == connection_string
assert serialized["id"] == "-1"
- assert serialized["language"] == kind
+ assert serialized["kind"] == kind
assert serialized["sqlcontext"] == sql_created
assert serialized["version"] == "0.0.0"
assert len(serialized.keys()) == 5
@@ -82,16 +83,16 @@ def test_close_session():
mock_spark_session.delete.assert_called_once_with()
-def test_language():
- lang = "python"
+def test_kind():
+ kind = "pyspark"
mock_spark_session = MagicMock()
- language_mock = PropertyMock(return_value=lang)
- type(mock_spark_session).language = language_mock
+ language_mock = PropertyMock(return_value=kind)
+ type(mock_spark_session).kind = language_mock
client = LivyClient(mock_spark_session)
- l = client.language
+ l = client.kind
- assert l == lang
+ assert l == kind
def test_session_id():
diff --git a/tests/test_livyclientfactory.py b/tests/test_livyclientfactory.py
index 1e2e4f2..643b103 100644
--- a/tests/test_livyclientfactory.py
+++ b/tests/test_livyclientfactory.py
@@ -2,6 +2,8 @@ from mock import MagicMock
from nose.tools import raises
from remotespark.livyclientlib.livyclientfactory import LivyClientFactory
+from remotespark.livyclientlib.pandaspysparklivyclient import PandasPysparkLivyClient
+from remotespark.livyclientlib.pandasscalalivyclient import PandasScalaLivyClient
from remotespark.utils.constants import Constants
from remotespark.utils.utils import get_connection_string
@@ -9,36 +11,58 @@ from remotespark.utils.utils import get_connection_string
def test_build_session_with_defaults():
factory = LivyClientFactory()
connection_string = get_connection_string("url", "user", "pass")
- language = "python"
+ kind = Constants.session_kind_pyspark
+ properties = {"kind": kind}
- session = factory.create_session(language, connection_string)
+ session = factory.create_session(connection_string, properties)
- assert session.language == language
+ assert session.kind == kind
assert session.id == "-1"
assert session.started_sql_context is False
+ assert session.properties == properties
def test_build_session():
factory = LivyClientFactory()
connection_string = get_connection_string("url", "user", "pass")
- language = "python"
+ kind = Constants.session_kind_pyspark
+ properties = {"kind": kind}
- session = factory.create_session(language, connection_string, "1", True)
+ session = factory.create_session(connection_string, properties, "1", True)
- assert session.language == language
+ assert session.kind == kind
assert session.id == "1"
assert session.started_sql_context
+ assert session.properties == properties
def test_can_build_all_clients():
- session = MagicMock()
factory = LivyClientFactory()
- for language in Constants.lang_supported:
- factory.build_client(language, session)
+ for kind in Constants.session_kinds_supported:
+ session = MagicMock()
+ session.kind = kind
+ factory.build_client(session)
@raises(ValueError)
def test_build_unknown_language():
session = MagicMock()
+ session.kind = "unknown"
+ factory = LivyClientFactory()
+ factory.build_client(session)
+
+
+def test_build_pyspark():
+ session = MagicMock()
+ session.kind = Constants.session_kind_pyspark
+ factory = LivyClientFactory()
+ client = factory.build_client(session)
+ assert isinstance(client, PandasPysparkLivyClient)
+
+
+def test_build_spark():
+ session = MagicMock()
+ session.kind = Constants.session_kind_spark
factory = LivyClientFactory()
- factory.build_client("unknown", session)
+ client = factory.build_client(session)
+ assert isinstance(client, PandasScalaLivyClient)
diff --git a/tests/test_livysession.py b/tests/test_livysession.py
index 0090e1b..6ee3d1d 100644
--- a/tests/test_livysession.py
+++ b/tests/test_livysession.py
@@ -7,7 +7,8 @@ from remotespark.livyclientlib.livyclienttimeouterror import LivyClientTimeoutEr
from remotespark.livyclientlib.livyunexpectedstatuserror import LivyUnexpectedStatusError
from remotespark.livyclientlib.livysession import LivySession
import remotespark.utils.configuration as conf
-from remotespark.utils.utils import get_connection_string, get_instance_id
+from remotespark.utils.utils import get_connection_string
+from remotespark.utils.constants import Constants
class DummyResponse:
@@ -23,41 +24,22 @@ class DummyResponse:
class TestLivySession:
- pi_result = "Pi is roughly 3.14336"
-
- session_create_json = '{"id":0,"state":"starting","kind":"spark","log":[]}'
- ready_sessions_json = '{"from":0,"total":1,"sessions":[{"id":0,"state":"idle","kind":"spark","log":["16:23:01,15' \
- '1 |-INFO in ch.qos.logback.core.joran.action.AppenderAction - Naming appender as [STDOUT]' \
- '","16:23:01,213 |-INFO in ch.qos.logback.core.joran.action.NestedComplexPropertyIA - As' \
- 'suming default type [ch.qos.logback.access.PatternLayoutEncoder] for [encoder] propert' \
- 'y","16:23:01,368 |-INFO in ch.qos.logback.core.joran.action.AppenderRefAction - Attachin' \
- 'g appender named [STDOUT] to null","16:23:01,368 |-INFO in ch.qos.logback.access.joran.act' \
- 'ion.ConfigurationAction - End of configuration.","16:23:01,371 |-INFO in ch.qos.logback.ac' \
- 'cess.joran.JoranConfigurator@53799e55 - Registering current configuration as safe fallback' \
- ' point","","15/09/04 16:23:01 INFO server.ServerConnector: Started ServerConnector@388859' \
- 'e4{HTTP/1.1}{0.0.0.0:37394}","15/09/04 16:23:01 INFO server.Server: Started @27514ms","' \
- '15/09/04 16:23:01 INFO livy.WebServer: Starting server on 37394","Starting livy-repl on' \
- ' http://10.0.0.11:37394"]}]}'
- error_sessions_json = '{"from":0,"total":1,"sessions":[{"id":0,"state":"error","kind":"spark","log":[]}]}'
- busy_sessions_json = '{"from":0,"total":1,"sessions":[{"id":0,"state":"busy","kind":"spark","log":["16:23:01,151' \
- ' |-INFO in ch.qos.logback.core.joran.action.AppenderAction - Naming appender as [STDOUT]",' \
- '"16:23:01,213 |-INFO in ch.qos.logback.core.joran.action.NestedComplexPropertyIA - Assumin' \
- 'g default type [ch.qos.logback.access.PatternLayoutEncoder] for [encoder] property","16:23' \
- ':01,368 |-INFO in ch.qos.logback.core.joran.action.AppenderRefAction - Attaching appender ' \
- 'named [STDOUT] to null","16:23:01,368 |-INFO in ch.qos.logback.access.joran.action.Configu' \
- 'rationAction - End of configuration.","16:23:01,371 |-INFO in ch.qos.logback.access.joran.' \
- 'JoranConfigurator@53799e55 - Registering current configuration as safe fallback point","",' \
- '"15/09/04 16:23:01 INFO server.ServerConnector: Started ServerConnector@388859e4{HTTP/1.1}' \
- '{0.0.0.0:37394}","15/09/04 16:23:01 INFO server.Server: Started @27514ms","15/09/04 16:23:' \
- '01 INFO livy.WebServer: Starting server on 37394","Starting livy-repl on http://10.0.0.11:' \
- '37394"]}]}'
- post_statement_json = '{"id":0,"state":"running","output":null}'
- running_statement_json = '{"total_statements":1,"statements":[{"id":0,"state":"running","output":null}]}'
- ready_statement_json = '{"total_statements":1,"statements":[{"id":0,"state":"available","output":{"status":"ok",' \
- '"execution_count":0,"data":{"text/plain":"Pi is roughly 3.14336"}}}]}'
-
- get_responses = []
- post_responses = []
+
+ def __init__(self):
+ self.pi_result = "Pi is roughly 3.14336"
+
+ self.session_create_json = '{"id":0,"state":"starting","kind":"spark","log":[]}'
+ self.ready_sessions_json = '{"from":0,"total":1,"sessions":[{"id":0,"state":"idle","kind":"spark","log":[""]}]}'
+ self.error_sessions_json = '{"from":0,"total":1,"sessions":[{"id":0,"state":"error","kind":"spark","log":' \
+ '[""]}]}'
+ self.busy_sessions_json = '{"from":0,"total":1,"sessions":[{"id":0,"state":"busy","kind":"spark","log":[""]}]}'
+ self.post_statement_json = '{"id":0,"state":"running","output":null}'
+ self.running_statement_json = '{"total_statements":1,"statements":[{"id":0,"state":"running","output":null}]}'
+ self.ready_statement_json = '{"total_statements":1,"statements":[{"id":0,"state":"available","output":{"statu' \
+ 's":"ok","execution_count":0,"data":{"text/plain":"Pi is roughly 3.14336"}}}]}'
+
+ self.get_responses = []
+ self.post_responses = []
def _next_response_get(self, *args):
val = self.get_responses[0]
@@ -69,103 +51,83 @@ class TestLivySession:
self.post_responses = self.post_responses[1:]
return val
+ def _create_session(self, kind=Constants.session_kind_spark, session_id="-1", sql_created=False, http_client=None):
+ if http_client is None:
+ http_client = MagicMock()
+
+ return LivySession(http_client, session_id, sql_created, {"kind": kind})
+
@raises(AssertionError)
def test_constructor_throws_status_sleep_seconds(self):
- kind = "scala"
- http_client = MagicMock()
- session_id = "-1"
- sql_created = False
conf.override({
"status_sleep_seconds": 0,
"statement_sleep_seconds": 2,
"create_sql_context_timeout_seconds": 60
})
- LivySession(http_client, kind, session_id, sql_created)
+ self._create_session()
conf.load()
@raises(AssertionError)
def test_constructor_throws_statement_sleep_seconds(self):
- kind = "scala"
- http_client = MagicMock()
- session_id = "-1"
- sql_created = False
conf.override({
"status_sleep_seconds": 3,
"statement_sleep_seconds": 0,
"create_sql_context_timeout_seconds": 60
})
- LivySession(http_client, kind, session_id, sql_created)
+ self._create_session()
conf.load({})
@raises(AssertionError)
def test_constructor_throws_sql_create_timeout_seconds(self):
- kind = "scala"
- http_client = MagicMock()
- session_id = "-1"
- sql_created = False
conf.override({
"status_sleep_seconds": 4,
"statement_sleep_seconds": 2,
"create_sql_context_timeout_seconds": 0
})
- LivySession(http_client, kind, session_id, sql_created)
+ self._create_session()
conf.load()
@raises(ValueError)
def test_constructor_throws_invalid_session_sql_combo(self):
- kind = "scala"
- http_client = MagicMock()
- session_id = "-1"
- sql_created = True
conf.override({
"status_sleep_seconds": 2,
"statement_sleep_seconds": 2,
"create_sql_context_timeout_seconds": 60
})
- LivySession(http_client, kind, session_id, sql_created)
+ self._create_session(sql_created=True)
conf.load()
def test_constructor_starts_with_existing_session(self):
- kind = "scala"
- http_client = MagicMock()
- session_id = "1"
- sql_created = True
conf.override({
"status_sleep_seconds": 4,
"statement_sleep_seconds": 2,
"create_sql_context_timeout_seconds": 60
})
- session = LivySession(http_client, kind, session_id, sql_created)
+ session_id = "1"
+ session = self._create_session(session_id=session_id, sql_created=True)
conf.load()
- assert session.id == "1"
+ assert session.id == session_id
assert session.started_sql_context
def test_constructor_starts_with_no_session(self):
- kind = "scala"
- http_client = MagicMock()
- session_id = "-1"
- sql_created = False
conf.override({
"status_sleep_seconds": 4,
"statement_sleep_seconds": 2,
"create_sql_context_timeout_seconds": 60
})
- session = LivySession(http_client, kind, session_id, sql_created)
+ session = self._create_session()
conf.load()
assert session.id == "-1"
assert not session.started_sql_context
def test_is_final_status(self):
- kind = "scala"
- http_client = MagicMock()
-
conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
- session = LivySession(http_client, kind, "-1", False)
+ session = self._create_session()
conf.load()
assert not session.is_final_status("idle")
@@ -176,7 +138,6 @@ class TestLivySession:
assert session.is_final_status("error")
def test_start_scala_starts_session(self):
- kind = "scala"
http_client = MagicMock()
http_client.post.return_value = DummyResponse(201, self.session_create_json)
@@ -184,18 +145,18 @@ class TestLivySession:
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
- session = LivySession(http_client, kind, "-1", False)
+ kind = Constants.session_kind_spark
+ session = self._create_session(kind=kind, http_client=http_client)
session.start()
conf.load()
- assert_equals(kind, session.language)
+ assert_equals(kind, session.kind)
assert_equals("starting", session._status)
assert_equals("0", session.id)
http_client.post.assert_called_with(
- "/sessions", [201], {"kind": "spark", "name": "remotesparkmagics_{}".format(get_instance_id())})
+ "/sessions", [201], {"kind": "spark"})
def test_start_python_starts_session(self):
- kind = "python"
http_client = MagicMock()
http_client.post.return_value = DummyResponse(201, self.session_create_json)
@@ -203,15 +164,33 @@ class TestLivySession:
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
- session = LivySession(http_client, kind, "-1", False)
+ kind = Constants.session_kind_pyspark
+ session = self._create_session(kind=kind, http_client=http_client)
session.start()
conf.load()
- assert_equals(kind, session.language)
+ assert_equals(kind, session.kind)
assert_equals("starting", session._status)
assert_equals("0", session.id)
http_client.post.assert_called_with(
- "/sessions", [201],{"kind": "pyspark", "name": "remotesparkmagics_{}".format(get_instance_id())})
+ "/sessions", [201], {"kind": "pyspark"})
+
+ def test_start_passes_in_all_properties(self):
+ http_client = MagicMock()
+ http_client.post.return_value = DummyResponse(201, self.session_create_json)
+
+ conf.override({
+ "status_sleep_seconds": 0.01,
+ "statement_sleep_seconds": 0.01
+ })
+ kind = Constants.session_kind_spark
+ properties = {"kind": kind, "extra": 1}
+ session = LivySession(http_client, "-1", False, properties)
+ session.start()
+ conf.load()
+
+ http_client.post.assert_called_with(
+ "/sessions", [201], properties)
def test_status_gets_latest(self):
http_client = MagicMock()
@@ -221,7 +200,7 @@ class TestLivySession:
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
- session = LivySession(http_client, "scala", "-1", False)
+ session = self._create_session(http_client=http_client)
conf.load()
session.start()
@@ -242,7 +221,7 @@ class TestLivySession:
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
- session = LivySession(http_client, "scala", "-1", False)
+ session = self._create_session(http_client=http_client)
conf.override({})
session.start()
@@ -265,7 +244,7 @@ class TestLivySession:
"status_sleep_seconds": 0.011,
"statement_sleep_seconds": 6000
})
- session = LivySession(http_client, "scala", "-1", False)
+ session = self._create_session(http_client=http_client)
conf.load()
session.start()
@@ -285,7 +264,7 @@ class TestLivySession:
"status_sleep_seconds": 0.011,
"statement_sleep_seconds": 6000
})
- session = LivySession(http_client, "scala", "-1", False)
+ session = self._create_session(http_client=http_client)
conf.load()
session.start()
@@ -299,7 +278,7 @@ class TestLivySession:
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
- session = LivySession(http_client, "scala", "-1", False)
+ session = self._create_session(http_client=http_client)
conf.load()
session.start()
@@ -315,7 +294,7 @@ class TestLivySession:
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
- session = LivySession(http_client, "scala", "-1", False)
+ session = self._create_session(http_client=http_client)
conf.load()
session.delete()
@@ -331,14 +310,14 @@ class TestLivySession:
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
- session = LivySession(http_client, "scala", "-1", False)
+ session = self._create_session(http_client=http_client)
conf.load()
session._status = "dead"
session.delete()
def test_execute(self):
- kind = "scala"
+ kind = Constants.session_kind_spark
http_client = MagicMock()
self.post_responses = [DummyResponse(201, self.session_create_json),
DummyResponse(201, self.post_statement_json)]
@@ -350,7 +329,7 @@ class TestLivySession:
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
- session = LivySession(http_client, kind, "-1", False)
+ session = self._create_session(kind=kind, http_client=http_client)
conf.load()
session.start()
command = "command"
@@ -364,7 +343,7 @@ class TestLivySession:
assert_equals(self.pi_result, result[1])
def test_create_sql_hive_context_happens_once(self):
- kind = "scala"
+ kind = Constants.session_kind_spark
http_client = MagicMock()
self.post_responses = [DummyResponse(201, self.session_create_json),
DummyResponse(201, self.post_statement_json),
@@ -380,7 +359,7 @@ class TestLivySession:
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
- session = LivySession(http_client, kind, "-1", False)
+ session = self._create_session(kind=kind, http_client=http_client)
conf.load()
session.start()
@@ -396,12 +375,11 @@ class TestLivySession:
"(sc)\nimport sqlContext.implicits._"}) \
in http_client.post.call_args_list
assert call("/sessions/0/statements", [201], {"code": "val hiveContext = new org.apache.spark.sql.hive.Hive"
- "Context(sc)"}) \
- in http_client.post.call_args_list
+ "Context(sc)"}) in http_client.post.call_args_list
assert len(http_client.post.call_args_list) == 2
def test_create_sql_context_spark(self):
- kind = "scala"
+ kind = Constants.session_kind_spark
http_client = MagicMock()
self.post_responses = [DummyResponse(201, self.session_create_json),
DummyResponse(201, self.post_statement_json),
@@ -417,7 +395,7 @@ class TestLivySession:
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
- session = LivySession(http_client, kind, "-1", False)
+ session = self._create_session(kind=kind, http_client=http_client)
conf.load()
session.start()
@@ -427,12 +405,10 @@ class TestLivySession:
"(sc)\nimport sqlContext.implicits._"}) \
in http_client.post.call_args_list
assert call("/sessions/0/statements", [201], {"code": "val hiveContext = new org.apache.spark.sql.hive.Hive"
- "Context(sc)"}) \
- in http_client.post.call_args_list
-
+ "Context(sc)"}) in http_client.post.call_args_list
def test_create_sql_hive_context_pyspark(self):
- kind = "python"
+ kind = Constants.session_kind_pyspark
http_client = MagicMock()
self.post_responses = [DummyResponse(201, self.session_create_json),
DummyResponse(201, self.post_statement_json),
@@ -448,16 +424,15 @@ class TestLivySession:
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
- session = LivySession(http_client, kind, "-1", False)
+ session = self._create_session(kind=kind, http_client=http_client)
conf.load()
session.start()
session.create_sql_context()
- assert call("/sessions/0/statements", [201], {"code": "from pyspark.sql import SQLContext\n"
- "from pyspark.sql.types import *\n"
- "sqlContext = SQLContext(sc)"}) \
- in http_client.post.call_args_list
+ assert call("/sessions/0/statements", [201], {"code": "from pyspark.sql import SQLContext\nfrom pyspark."
+ "sql.types import *\nsqlContext = SQLContext("
+ "sc)"}) in http_client.post.call_args_list
assert call("/sessions/0/statements", [201], {"code": "from pyspark.sql import HiveContext\n"
"hiveContext = HiveContext(sc)"}) \
in http_client.post.call_args_list
@@ -477,7 +452,7 @@ class TestLivySession:
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
- session = LivySession(http_client, kind, "-1", False)
+ session = self._create_session(kind=kind, http_client=http_client)
conf.load()
session.start()
@@ -490,19 +465,19 @@ class TestLivySession:
connection_string = get_connection_string(url, username, password)
http_client = MagicMock()
http_client.connection_string = connection_string
- kind = "scala"
+ kind = Constants.session_kind_spark
conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
- session = LivySession(http_client, kind, "-1", False)
+ session = self._create_session(kind=kind, http_client=http_client)
conf.load()
serialized = session.get_state().to_dict()
assert serialized["connectionstring"] == connection_string
assert serialized["id"] == "-1"
- assert serialized["language"] == kind
+ assert serialized["kind"] == kind
assert serialized["sqlcontext"] == False
assert serialized["version"] == "0.0.0"
assert len(serialized.keys()) == 5
diff --git a/tests/test_remotesparkmagics.py b/tests/test_remotesparkmagics.py
index e5f6058..8afdc90 100644
--- a/tests/test_remotesparkmagics.py
+++ b/tests/test_remotesparkmagics.py
@@ -3,6 +3,7 @@ from nose.tools import raises, with_setup
from remotespark.remotesparkmagics import RemoteSparkMagics
from remotespark.livyclientlib.dataframeparseexception import DataFrameParseException
+import remotespark.utils.configuration as conf
magic = None
@@ -13,6 +14,8 @@ shell = None
def _setup():
global magic, spark_controller, shell
+ conf.override({})
+
shell = MagicMock()
magic = RemoteSparkMagics(shell=None)
magic.shell = shell
@@ -38,7 +41,7 @@ def test_info_command_parses():
@with_setup(_setup, _teardown)
def test_add_sessions_command_parses():
- # Do not skip
+ # Do not skip and python
add_sessions_mock = MagicMock()
spark_controller.add_session = add_sessions_mock
command = "add"
@@ -49,20 +52,38 @@ def test_add_sessions_command_parses():
magic.spark(line)
- add_sessions_mock.assert_called_once_with(name, language, connection_string, False)
+ add_sessions_mock.assert_called_once_with(name, connection_string, False, {"kind": "pyspark"})
- # Skip
+ # Skip and scala - upper case
add_sessions_mock = MagicMock()
spark_controller.add_session = add_sessions_mock
command = "add"
name = "name"
- language = "python"
+ language = "Scala"
connection_string = "url=http://location:port;username=name;password=word"
line = " ".join([command, name, language, connection_string, "skip"])
magic.spark(line)
- add_sessions_mock.assert_called_once_with(name, language, connection_string, True)
+ add_sessions_mock.assert_called_once_with(name, connection_string, True, {"kind": "spark"})
+
+
+@with_setup(_setup, _teardown)
+def test_add_sessions_command_extra_properties():
+ magic.spark("config {\"extra\": \"yes\"}")
+ assert magic.properties == {"extra": "yes"}
+
+ add_sessions_mock = MagicMock()
+ spark_controller.add_session = add_sessions_mock
+ command = "add"
+ name = "name"
+ language = "scala"
+ connection_string = "url=http://location:port;username=name;password=word"
+ line = " ".join([command, name, language, connection_string])
+
+ magic.spark(line)
+
+ add_sessions_mock.assert_called_once_with(name, connection_string, False, {"kind": "spark", "extra": "yes"})
@with_setup(_setup, _teardown)
diff --git a/tests/test_sparkcontroller.py b/tests/test_sparkcontroller.py
index fcd1ff4..8d886fc 100644
--- a/tests/test_sparkcontroller.py
+++ b/tests/test_sparkcontroller.py
@@ -25,17 +25,17 @@ def _teardown():
@with_setup(_setup, _teardown)
def test_add_session():
name = "name"
- language = "python"
+ properties = {"kind": "spark"}
connection_string = "url=http://location:port;username=name;password=word"
client = "client"
session = MagicMock()
client_factory.create_session = MagicMock(return_value=session)
client_factory.build_client = MagicMock(return_value=client)
- controller.add_session(name, language, connection_string, False)
+ controller.add_session(name, connection_string, False, properties)
- client_factory.create_session.assert_called_once_with(language, connection_string, "-1", False)
- client_factory.build_client.assert_called_once_with(language, session)
+ client_factory.create_session.assert_called_once_with(connection_string, properties, "-1", False)
+ client_factory.build_client.assert_called_once_with(session)
client_manager.add_client.assert_called_once_with(name, client)
session.start.assert_called_once_with()
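
The factory and controller tests above pin down the other half of the change: clients are now built from `session.kind` alone instead of a separate language argument. One way to express that dispatch, sketched here with hypothetical stub classes so the snippet runs standalone (the real clients live in `remotespark.livyclientlib`, and the real factory may well use if/elif rather than a table):

class PandasPysparkLivyClient(object):
    """Stub standing in for the real pyspark client."""
    def __init__(self, session):
        self.session = session


class PandasScalaLivyClient(object):
    """Stub standing in for the real Scala client."""
    def __init__(self, session):
        self.session = session


_CLIENTS_BY_KIND = {
    "pyspark": PandasPysparkLivyClient,
    "spark": PandasScalaLivyClient,
}


def build_client(session):
    """Choose a client implementation from session.kind alone."""
    try:
        return _CLIENTS_BY_KIND[session.kind](session)
    except KeyError:
        raise ValueError("Cannot build a client for kind {}.".format(session.kind))


class _FakeSession(object):
    kind = "pyspark"

print(type(build_client(_FakeSession())).__name__)  # -> PandasPysparkLivyClient
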
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 10
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"mkdir ~/.sparkmagic",
"cp remotespark/default_config.json ~/.sparkmagic/config.json"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | anyio==4.9.0
argon2-cffi==23.1.0
argon2-cffi-bindings==21.2.0
arrow==1.3.0
async-lru==2.0.5
attrs==25.3.0
babel==2.17.0
beautifulsoup4==4.13.3
bleach==6.2.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
comm==0.2.2
decorator==5.2.1
defusedxml==0.7.1
exceptiongroup==1.2.2
fastjsonschema==2.21.1
fqdn==1.5.1
h11==0.14.0
httpcore==1.0.7
httpx==0.28.1
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
ipykernel==4.1.1
ipython==4.0.0
ipython-genutils==0.2.0
ipywidgets==7.8.5
isoduration==20.11.0
Jinja2==3.1.6
json5==0.10.0
jsonpointer==3.0.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
jupyter-events==0.12.0
jupyter-lsp==2.2.5
jupyter_client==8.6.3
jupyter_core==5.7.2
jupyter_server==2.15.0
jupyter_server_terminals==0.5.3
jupyterlab==4.1.5
jupyterlab_pygments==0.3.0
jupyterlab_server==2.27.3
jupyterlab_widgets==1.1.11
MarkupSafe==3.0.2
mistune==3.1.3
mock==5.2.0
narwhals==1.32.0
nbclient==0.10.2
nbconvert==7.16.6
nbformat==5.10.4
nose==1.3.7
notebook==7.1.3
notebook_shim==0.2.4
numpy==2.0.2
overrides==7.7.0
packaging==24.2
pandas==2.2.3
pandocfilters==1.5.1
pexpect==4.9.0
pickleshare==0.7.5
platformdirs==4.3.7
plotly==6.0.1
pluggy==1.5.0
prometheus_client==0.21.1
ptyprocess==0.7.0
pycparser==2.22
Pygments==2.19.1
pytest==8.3.5
python-dateutil==2.9.0.post0
python-json-logger==3.3.0
pytz==2025.2
PyYAML==6.0.2
pyzmq==26.3.0
referencing==0.36.2
-e git+https://github.com/jupyter-incubator/sparkmagic.git@3c0230b30b1f63780fbf2d4ba9d83e0a83f51eea#egg=remotespark
requests==2.32.3
rfc3339-validator==0.1.4
rfc3986-validator==0.1.1
rpds-py==0.24.0
Send2Trash==1.8.3
simplegeneric==0.8.1
six==1.17.0
sniffio==1.3.1
soupsieve==2.6
terminado==0.18.1
tinycss2==1.4.0
tomli==2.2.1
tornado==6.4.2
traitlets==5.14.3
types-python-dateutil==2.9.0.20241206
typing_extensions==4.13.0
tzdata==2025.2
uri-template==1.3.0
urllib3==2.3.0
webcolors==24.11.1
webencodings==0.5.1
websocket-client==1.8.0
widgetsnbextension==3.6.10
zipp==3.21.0
| name: sparkmagic
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- anyio==4.9.0
- argon2-cffi==23.1.0
- argon2-cffi-bindings==21.2.0
- arrow==1.3.0
- async-lru==2.0.5
- attrs==25.3.0
- babel==2.17.0
- beautifulsoup4==4.13.3
- bleach==6.2.0
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- comm==0.2.2
- decorator==5.2.1
- defusedxml==0.7.1
- exceptiongroup==1.2.2
- fastjsonschema==2.21.1
- fqdn==1.5.1
- h11==0.14.0
- httpcore==1.0.7
- httpx==0.28.1
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- ipykernel==4.1.1
- ipython==4.0.0
- ipython-genutils==0.2.0
- ipywidgets==7.8.5
- isoduration==20.11.0
- jinja2==3.1.6
- json5==0.10.0
- jsonpointer==3.0.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- jupyter-client==8.6.3
- jupyter-core==5.7.2
- jupyter-events==0.12.0
- jupyter-lsp==2.2.5
- jupyter-server==2.15.0
- jupyter-server-terminals==0.5.3
- jupyterlab==4.1.5
- jupyterlab-pygments==0.3.0
- jupyterlab-server==2.27.3
- jupyterlab-widgets==1.1.11
- markupsafe==3.0.2
- mistune==3.1.3
- mock==5.2.0
- narwhals==1.32.0
- nbclient==0.10.2
- nbconvert==7.16.6
- nbformat==5.10.4
- nose==1.3.7
- notebook==7.1.3
- notebook-shim==0.2.4
- numpy==2.0.2
- overrides==7.7.0
- packaging==24.2
- pandas==2.2.3
- pandocfilters==1.5.1
- pexpect==4.9.0
- pickleshare==0.7.5
- platformdirs==4.3.7
- plotly==6.0.1
- pluggy==1.5.0
- prometheus-client==0.21.1
- ptyprocess==0.7.0
- pycparser==2.22
- pygments==2.19.1
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- python-json-logger==3.3.0
- pytz==2025.2
- pyyaml==6.0.2
- pyzmq==26.3.0
- referencing==0.36.2
- requests==2.32.3
- rfc3339-validator==0.1.4
- rfc3986-validator==0.1.1
- rpds-py==0.24.0
- send2trash==1.8.3
- simplegeneric==0.8.1
- six==1.17.0
- sniffio==1.3.1
- soupsieve==2.6
- terminado==0.18.1
- tinycss2==1.4.0
- tomli==2.2.1
- tornado==6.4.2
- traitlets==5.14.3
- types-python-dateutil==2.9.0.20241206
- typing-extensions==4.13.0
- tzdata==2025.2
- uri-template==1.3.0
- urllib3==2.3.0
- webcolors==24.11.1
- webencodings==0.5.1
- websocket-client==1.8.0
- widgetsnbextension==3.6.10
- zipp==3.21.0
prefix: /opt/conda/envs/sparkmagic
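
The install_config JSON and environment listing above are consumed by some test harness. A minimal sketch of how such a harness might apply the config — the runner itself is hypothetical; only the field names and values come from the JSON above:

import json
import subprocess

def apply_install_config(cfg):
    """Hypothetical runner: execute pre_install steps, install the package,
    then run the configured test command, returning its exit code."""
    for step in cfg.get("pre_install") or []:
        subprocess.check_call(step, shell=True)
    subprocess.check_call(cfg["install"], shell=True)
    return subprocess.call(cfg["test_cmd"], shell=True)

cfg = json.loads("""{
  "install": "pip install -e .",
  "pre_install": ["mkdir ~/.sparkmagic",
                  "cp remotespark/default_config.json ~/.sparkmagic/config.json"],
  "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider"
}""")
# apply_install_config(cfg)  # would actually run the setup; shown for shape only
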
| [
"tests/test_clientmanagerstateserializer.py::test_deserialize_not_emtpy",
"tests/test_livyclient.py::test_serialize",
"tests/test_livyclient.py::test_kind",
"tests/test_livyclientfactory.py::test_build_session_with_defaults",
"tests/test_livyclientfactory.py::test_build_session",
"tests/test_livyclientfactory.py::test_can_build_all_clients",
"tests/test_livyclientfactory.py::test_build_unknown_language",
"tests/test_livyclientfactory.py::test_build_pyspark",
"tests/test_livyclientfactory.py::test_build_spark"
]
| [
"tests/test_clientmanagerstateserializer.py::test_deserialize_not_emtpy_but_dead",
"tests/test_clientmanagerstateserializer.py::test_deserialize_not_emtpy_but_error",
"tests/test_clientmanagerstateserializer.py::test_deserialize_empty",
"tests/test_remotesparkmagics.py::test_info_command_parses",
"tests/test_remotesparkmagics.py::test_add_sessions_command_parses",
"tests/test_remotesparkmagics.py::test_add_sessions_command_extra_properties",
"tests/test_remotesparkmagics.py::test_delete_sessions_command_parses",
"tests/test_remotesparkmagics.py::test_cleanup_command_parses",
"tests/test_remotesparkmagics.py::test_bad_command_throws_exception",
"tests/test_remotesparkmagics.py::test_run_cell_command_parses",
"tests/test_remotesparkmagics.py::test_run_cell_command_writes_to_err",
"tests/test_remotesparkmagics.py::test_run_sql_command_parses",
"tests/test_remotesparkmagics.py::test_run_hive_command_parses",
"tests/test_remotesparkmagics.py::test_run_sql_command_returns_none_when_exception",
"tests/test_remotesparkmagics.py::test_run_hive_command_returns_none_when_exception",
"tests/test_remotesparkmagics.py::test_run_sql_command_stores_variable_in_user_ns",
"tests/test_sparkcontroller.py::test_add_session",
"tests/test_sparkcontroller.py::test_add_session_skip",
"tests/test_sparkcontroller.py::test_delete_session",
"tests/test_sparkcontroller.py::test_cleanup",
"tests/test_sparkcontroller.py::test_run_cell",
"tests/test_sparkcontroller.py::test_get_client_keys"
]
| [
"tests/test_clientmanagerstateserializer.py::test_serializer_throws_none_path",
"tests/test_clientmanagerstateserializer.py::test_serializer_throws_none_factory",
"tests/test_clientmanagerstateserializer.py::test_serialize_not_empty",
"tests/test_livyclient.py::test_create_sql_context_automatically",
"tests/test_livyclient.py::test_execute_code",
"tests/test_livyclient.py::test_execute_sql",
"tests/test_livyclient.py::test_execute_hive",
"tests/test_livyclient.py::test_close_session",
"tests/test_livyclient.py::test_session_id"
]
| []
| Modified BSD License | 343 | [
"remotespark/livyclientlib/livysessionstate.py",
"remotespark/remotesparkmagics.py",
"remotespark/utils/configuration.py",
"remotespark/livyclientlib/livyclient.py",
"remotespark/livyclientlib/clientmanagerstateserializer.py",
"remotespark/default_config.json",
"remotespark/livyclientlib/livysession.py",
"remotespark/livyclientlib/livyclientfactory.py",
"remotespark/livyclientlib/sparkcontroller.py",
"remotespark/utils/constants.py"
]
| [
"remotespark/livyclientlib/livysessionstate.py",
"remotespark/remotesparkmagics.py",
"remotespark/utils/configuration.py",
"remotespark/livyclientlib/livyclient.py",
"remotespark/livyclientlib/clientmanagerstateserializer.py",
"remotespark/default_config.json",
"remotespark/livyclientlib/livysession.py",
"remotespark/livyclientlib/livyclientfactory.py",
"remotespark/livyclientlib/sparkcontroller.py",
"remotespark/utils/constants.py"
]
|
|
mogproject__color-ssh-8 | 3c6ef87beb0faf48b0af7f4498b1be5ff34e6fe1 | 2015-12-20 18:03:15 | cdcbb8980f7a4e49797192dc089915d702322460 | diff --git a/src/color_ssh/__init__.py b/src/color_ssh/__init__.py
index d18f409..b794fd4 100644
--- a/src/color_ssh/__init__.py
+++ b/src/color_ssh/__init__.py
@@ -1,1 +1,1 @@
-__version__ = '0.0.2'
+__version__ = '0.1.0'
diff --git a/src/color_ssh/color_ssh.py b/src/color_ssh/color_ssh.py
index 4c278c2..20f7651 100644
--- a/src/color_ssh/color_ssh.py
+++ b/src/color_ssh/color_ssh.py
@@ -1,71 +1,135 @@
from __future__ import division, print_function, absolute_import, unicode_literals
import sys
+import io
import shlex
import subprocess
from optparse import OptionParser
from color_ssh.util.util import *
+from multiprocessing.pool import Pool
__all__ = []
class Setting(object):
VERSION = 'color-ssh %s' % __import__('color_ssh').__version__
- USAGE = """%prog [options...] [user@]hostname command"""
+ USAGE = '\n'.join([
+ '%prog [options...] [user@]hostname command',
+ ' %prog [options...] -h host_file command',
+ ' %prog [options...] -H "[user@]hostname [[user@]hostname]...]" command'
+ ])
+ DEFAULT_PARALLELISM = 32
- def __init__(self, label=None, command=None):
- self.label = label
- self.command = command
+ def __init__(self, parallelism=None, tasks=None):
+ self.parallelism = parallelism
+ self.tasks = tasks
def parse_args(self, argv, stdout=io2bytes(sys.stdout)):
"""
:param argv: list of str
:param stdout: binary-data stdout output
"""
- parser = OptionParser(version=self.VERSION, usage=self.USAGE)
+ parser = OptionParser(version=self.VERSION, usage=self.USAGE, conflict_handler='resolve')
parser.allow_interspersed_args = False
parser.add_option(
'-l', '--label', dest='label', default=None, type='string', metavar='LABEL',
- help='set label name to LABEL'
+ help='label name'
)
parser.add_option(
'--ssh', dest='ssh', default=str('ssh'), type='string', metavar='SSH',
- help='override ssh command line string to SSH'
+ help='override ssh command line string'
+ )
+ parser.add_option(
+ '-h', '--hosts', dest='host_file', default=None, type='string', metavar='HOST_FILE',
+ help='hosts file (each line "[user@]host")'
+ )
+ parser.add_option(
+ '-H', '--host', dest='host_string', default=None, type='string', metavar='HOST_STRING',
+ help='additional host entries ("[user@]host")'
+ )
+ parser.add_option(
+ '-p', '--par', dest='parallelism', default=self.DEFAULT_PARALLELISM, type='int', metavar='PAR',
+ help='max number of parallel threads (default: %d)' % self.DEFAULT_PARALLELISM
)
option, args = parser.parse_args(argv[1:])
+ hosts = self._load_hosts(option.host_file) + (option.host_string.split() if option.host_string else [])
- if len(args) < 2:
+ if len(args) < (1 if hosts else 2):
stdout.write(arg2bytes(parser.format_help().encode('utf-8')))
parser.exit(2)
- self.label = option.label or args[0].rsplit('@', 1)[-1]
- self.command = shlex.split(option.ssh) + args
+ prefix = shlex.split(option.ssh)
+
+ if not hosts:
+ hosts = args[:1]
+ command = args[1:]
+ else:
+ command = args
+
+ tasks = [(option.label or self._extract_label(host), prefix + [host] + command) for host in hosts]
+
+ self.parallelism = option.parallelism
+ self.tasks = tasks
return self
+ @staticmethod
+ def _load_hosts(path):
+ if not path:
+ return []
+
+ with io.open(path) as f:
+ lines = f.readlines()
+ return list(filter(lambda x: x, (line.strip() for line in lines)))
+
+ @staticmethod
+ def _extract_label(host):
+ return host.rsplit('@', 1)[-1]
-def main(argv=sys.argv, stdin=io2bytes(sys.stdin), stdout=io2bytes(sys.stdout), stderr=io2bytes(sys.stderr)):
- """
- Main function
- """
- setting = Setting().parse_args(argv)
- prefix = ['color-cat', '-l', setting.label]
+
+def run_task(args):
+ label, command = args
+
+ # We don't pass stdout/stderr file descriptors since this function runs in the forked processes.
+ stdout = io2bytes(sys.stdout)
+ stderr = io2bytes(sys.stderr)
+
+ prefix = ['color-cat', '-l', label]
try:
proc_stdout = subprocess.Popen(prefix, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr)
proc_stderr = subprocess.Popen(prefix + ['-s', '+'], stdin=subprocess.PIPE, stdout=stderr, stderr=stderr)
- ret = subprocess.call(setting.command, stdin=stdin, stdout=proc_stdout.stdin, stderr=proc_stderr.stdin)
+ ret = subprocess.call(command, stdin=None, stdout=proc_stdout.stdin, stderr=proc_stderr.stdin)
proc_stdout.stdin.close()
proc_stderr.stdin.close()
proc_stdout.wait()
proc_stderr.wait()
+ except Exception as e:
+ msg = '%s: %s\nlabel=%s, command=%s\n' % (e.__class__.__name__, e, label, command)
+ stderr.write(msg.encode('utf-8', 'ignore'))
+ return 1
+ return ret
+
+def main(argv=sys.argv, stdout=io2bytes(sys.stdout), stderr=io2bytes(sys.stderr)):
+ """
+ Main function
+ """
+
+ try:
+ setting = Setting().parse_args(argv, stdout)
+ n = min(len(setting.tasks), setting.parallelism)
+ if n <= 1:
+ ret = map(run_task, setting.tasks)
+ else:
+ pool = Pool(n)
+ ret = pool.map(run_task, setting.tasks)
except Exception as e:
- msg = '%s: %s\nCommand: %s\n' % (e.__class__.__name__, e, setting.command)
+ msg = '%s: %s\n' % (e.__class__.__name__, e)
stderr.write(msg.encode('utf-8', 'ignore'))
return 1
- return ret
+ return max(ret)
diff --git a/src/color_ssh/util/util.py b/src/color_ssh/util/util.py
index 55d27c2..9273954 100644
--- a/src/color_ssh/util/util.py
+++ b/src/color_ssh/util/util.py
@@ -13,4 +13,4 @@ def arg2bytes(arg):
def io2bytes(fd):
- return fd.buffer if PY3 else fd
+ return fd.buffer if hasattr(fd, 'buffer') else fd
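
The rewritten `main` above is a standard capped fan-out: size the worker pool by `min(len(tasks), parallelism)`, skip the pool entirely when one worker suffices, and report the worst per-task exit code. A minimal self-contained sketch of that control flow (the `run_task` body here is a placeholder; the real one shells out to `color-cat` and `ssh`):

from multiprocessing.pool import Pool


def run_task(task):
    """Placeholder worker: unpack a (label, command) pair and report success."""
    label, command = task
    return 0 if command else 1


def run_all(tasks, parallelism):
    n = min(len(tasks), parallelism)
    if n <= 1:
        results = map(run_task, tasks)   # sequential fallback, no pool needed
    else:
        pool = Pool(n)                   # one process per task, capped at PAR
        results = pool.map(run_task, tasks)
    return max(results)                  # worst per-host exit code wins


if __name__ == '__main__':
    tasks = [('server-1', ['pwd']), ('server-2', ['pwd'])]
    print(run_all(tasks, parallelism=32))  # -> 0
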
| Load the host list from a file
- a `--host` option
- with a parallelism option
- cf. pssh (parallel-ssh) | mogproject/color-ssh | diff --git a/tests/color_ssh/test_color_ssh.py b/tests/color_ssh/test_color_ssh.py
index ea82202..b5c4ba7 100644
--- a/tests/color_ssh/test_color_ssh.py
+++ b/tests/color_ssh/test_color_ssh.py
@@ -1,11 +1,11 @@
# encoding: utf-8
from __future__ import division, print_function, absolute_import, unicode_literals
-import os
-import io
import sys
+import os
import tempfile
import six
+from contextlib import contextmanager
from mog_commons.unittest import TestCase
from color_ssh import color_ssh
from color_ssh.color_ssh import Setting
@@ -13,9 +13,8 @@ from color_ssh.util.util import PY3
class TestSetting(TestCase):
- def _check(self, setting, expected):
- self.assertEqual(setting.label, expected.label)
- self.assertEqual(setting.command, expected.command)
+ def _check(self, setting, tasks):
+ self.assertEqual(setting.tasks, tasks)
def _parse(self, args):
xs = []
@@ -30,35 +29,76 @@ class TestSetting(TestCase):
def test_parse_args(self):
self._check(self._parse(['server-1', 'pwd']),
- Setting('server-1', ['ssh', 'server-1', 'pwd']))
+ [('server-1', ['ssh', 'server-1', 'pwd'])])
self._check(self._parse(['user@server-1', 'ls', '-l']),
- Setting('server-1', ['ssh', 'user@server-1', 'ls', '-l']))
+ [('server-1', ['ssh', 'user@server-1', 'ls', '-l'])])
+
+ # label
self._check(self._parse(['-l', 'label', 'user@server-1', 'ls', '-l']),
- Setting('label', ['ssh', 'user@server-1', 'ls', '-l']))
+ [('label', ['ssh', 'user@server-1', 'ls', '-l'])])
self._check(self._parse(['--label', 'label', 'user@server-1', 'ls', '-l']),
- Setting('label', ['ssh', 'user@server-1', 'ls', '-l']))
+ [('label', ['ssh', 'user@server-1', 'ls', '-l'])])
self._check(self._parse(['-llabel', 'user@server-1', 'ls', '-l']),
- Setting('label', ['ssh', 'user@server-1', 'ls', '-l']))
+ [('label', ['ssh', 'user@server-1', 'ls', '-l'])])
self._check(self._parse(['--label', 'label', '--ssh', '/usr/bin/ssh -v', 'user@server-1', 'ls', '-l']),
- Setting('label', ['/usr/bin/ssh', '-v', 'user@server-1', 'ls', '-l']))
+ [('label', ['/usr/bin/ssh', '-v', 'user@server-1', 'ls', '-l'])])
+
+ # ssh
self._check(self._parse(['--ssh', '/usr/bin/ssh -v --option "a b c"', 'user@server-1', 'ls', '-l']),
- Setting('server-1', ['/usr/bin/ssh', '-v', '--option', 'a b c', 'user@server-1', 'ls', '-l']))
+ [('server-1', ['/usr/bin/ssh', '-v', '--option', 'a b c', 'user@server-1', 'ls', '-l'])])
self._check(self._parse(['--label', 'あいう'.encode('utf-8'), 'user@server-1', 'ls', '-l']),
- Setting('あいう' if PY3 else 'あいう'.encode('utf-8'), ['ssh', 'user@server-1', 'ls', '-l']))
+ [('あいう' if PY3 else 'あいう'.encode('utf-8'), ['ssh', 'user@server-1', 'ls', '-l'])])
self._check(self._parse(['--label', b'\xff\xfe', 'user@server-1', 'ls', '-l']),
- Setting('\udcff\udcfe' if PY3 else b'\xff\xfe', ['ssh', 'user@server-1', 'ls', '-l']))
+ [('\udcff\udcfe' if PY3 else b'\xff\xfe', ['ssh', 'user@server-1', 'ls', '-l'])])
self._check(self._parse(['server-1', 'echo', b'\xff\xfe']),
- Setting('server-1', ['ssh', 'server-1', 'echo', '\udcff\udcfe' if PY3 else b'\xff\xfe']))
+ [('server-1', ['ssh', 'server-1', 'echo', '\udcff\udcfe' if PY3 else b'\xff\xfe'])])
+
+ # hosts
+ hosts_path = os.path.join('tests', 'resources', 'test_color_ssh_hosts.txt')
+ self._check(self._parse(['-h', hosts_path, 'pwd']), [
+ ('server-1', ['ssh', 'server-1', 'pwd']),
+ ('server-2', ['ssh', 'server-2', 'pwd']),
+ ('server-3', ['ssh', 'server-3', 'pwd']),
+ ('server-4', ['ssh', 'server-4', 'pwd']),
+ ('server-5', ['ssh', 'server-5', 'pwd']),
+ ('server-6', ['ssh', 'server-6', 'pwd']),
+ ('server-7', ['ssh', 'server-7', 'pwd']),
+ ('server-8', ['ssh', 'server-8', 'pwd']),
+ ('server-9', ['ssh', 'root@server-9', 'pwd']),
+ ('server-10', ['ssh', 'root@server-10', 'pwd']),
+ ])
+ self._check(self._parse(['-H', 'server-11 root@server-12', 'pwd']), [
+ ('server-11', ['ssh', 'server-11', 'pwd']),
+ ('server-12', ['ssh', 'root@server-12', 'pwd']),
+ ])
+ self._check(self._parse(['--hosts', hosts_path, '--host', 'server-11 root@server-12', 'pwd']), [
+ ('server-1', ['ssh', 'server-1', 'pwd']),
+ ('server-2', ['ssh', 'server-2', 'pwd']),
+ ('server-3', ['ssh', 'server-3', 'pwd']),
+ ('server-4', ['ssh', 'server-4', 'pwd']),
+ ('server-5', ['ssh', 'server-5', 'pwd']),
+ ('server-6', ['ssh', 'server-6', 'pwd']),
+ ('server-7', ['ssh', 'server-7', 'pwd']),
+ ('server-8', ['ssh', 'server-8', 'pwd']),
+ ('server-9', ['ssh', 'root@server-9', 'pwd']),
+ ('server-10', ['ssh', 'root@server-10', 'pwd']),
+ ('server-11', ['ssh', 'server-11', 'pwd']),
+ ('server-12', ['ssh', 'root@server-12', 'pwd']),
+ ])
+
+ # parallelism
+ self.assertEqual(self._parse(['-H', 'server-11 root@server-12', '-p3', 'pwd']).parallelism, 3)
def test_parse_args_error(self):
with self.withBytesOutput() as (out, err):
self.assertSystemExit(2, Setting().parse_args, ['color-ssh'], out)
self.assertSystemExit(2, Setting().parse_args, ['color-ssh', 'server-1'], out)
self.assertSystemExit(2, Setting().parse_args, ['color-ssh', '--label', 'x'], out)
+ self.assertSystemExit(2, Setting().parse_args, ['color-ssh', '--host', ' ', 'pwd'], out)
class TestMain(TestCase):
- def test_main(self):
+ def test_main_single_proc(self):
# requires: POSIX environment, color-cat command
def f(bs):
return b'\x1b[7m\x1b[35mtests/resources/test_color_ssh_01.sh\x1b[0m|\x1b[0m\x1b[35m' + bs + b'\n\x1b[0m'
@@ -66,28 +106,83 @@ class TestMain(TestCase):
def g(bs):
return b'\x1b[7m\x1b[35mtests/resources/test_color_ssh_01.sh\x1b[0m+\x1b[0m\x1b[35m' + bs + b'\n\x1b[0m'
- with tempfile.TemporaryFile() as out:
- with tempfile.TemporaryFile() as err:
- args = ['color-ssh', '--ssh', str('bash'),
- os.path.join('tests', 'resources', 'test_color_ssh_01.sh'), 'abc', 'def']
- ret = color_ssh.main(args, stdout=out, stderr=err)
- self.assertEqual(ret, 0)
+ with self.__with_temp_output() as (out, err):
+ args = ['color-ssh', '--ssh', str('bash'),
+ os.path.join('tests', 'resources', 'test_color_ssh_01.sh'), 'abc', 'def']
+ ret = color_ssh.main(args, stdout=out, stderr=err)
+ self.assertEqual(ret, 0)
+
+ out.seek(0)
+ err.seek(0)
+
+ self.assertEqual(out.read(), f(b'abc') + f(b'foo') + f('あいうえお'.encode('utf-8')) + f(b'\xff\xfe'))
+ self.assertEqual(err.read(), g(b'def') + g(b'bar') + g('かきくけこ'.encode('utf-8')) + g(b'\xfd\xfc'))
+
+ def test_main_multi_proc(self):
+ # requires: POSIX environment, color-cat command
+ def f(bs):
+ return b'\x1b[7m\x1b[35mtests/resources/test_color_ssh_01.sh\x1b[0m|\x1b[0m\x1b[35m' + bs + b'\n\x1b[0m'
+
+ def g(bs):
+ return b'\x1b[7m\x1b[35mtests/resources/test_color_ssh_01.sh\x1b[0m+\x1b[0m\x1b[35m' + bs + b'\n\x1b[0m'
+
+ with self.__with_temp_output() as (out, err):
+ path = os.path.join('tests', 'resources', 'test_color_ssh_01.sh')
+ args = ['color-ssh', '--ssh', str('bash'), '-H', '%s %s' % (path, path), 'abc', 'def']
+
+ self.assertFalse(out.closed)
+ self.assertFalse(err.closed)
+
+ ret = color_ssh.main(args, stdout=out, stderr=err)
+ self.assertEqual(ret, 0)
+
+ out.seek(0)
+ err.seek(0)
+
+ self.assertEqual(out.read(), (f(b'abc') + f(b'foo') + f('あいうえお'.encode('utf-8')) + f(b'\xff\xfe')) * 2)
+ self.assertEqual(err.read(), (g(b'def') + g(b'bar') + g('かきくけこ'.encode('utf-8')) + g(b'\xfd\xfc')) * 2)
+
+ def test_main_load_error(self):
+ with self.__with_temp_output() as (out, err):
+ args = ['color-ssh', '-h', 'not_exist_file', '--ssh', str('./tests/resources/not_exist_command'), 'x', 'y']
+ ret = color_ssh.main(args, stdout=out, stderr=err)
+ self.assertEqual(ret, 1)
+
+ out.seek(0)
+ err.seek(0)
+
+ self.assertEqual(out.read(), b'')
+ self.assertTrue(b'No such file or directory' in err.read())
+
+ def test_main_task_error(self):
+ with self.__with_temp_output() as (out, err):
+ args = ['color-ssh', '--ssh', str('./tests/resources/not_exist_command'), 'x', 'y']
+ ret = color_ssh.main(args, stdout=out, stderr=err)
+ self.assertEqual(ret, 1)
- out.seek(0)
- err.seek(0)
+ out.seek(0)
+ err.seek(0)
- self.assertEqual(out.read(), f(b'abc') + f(b'foo') + f('あいうえお'.encode('utf-8')) + f(b'\xff\xfe'))
- self.assertEqual(err.read(), g(b'def') + g(b'bar') + g('かきくけこ'.encode('utf-8')) + g(b'\xfd\xfc'))
+ self.assertEqual(out.read(), b'')
+ self.assertTrue(b'No such file or directory' in err.read())
- def test_main_error(self):
+ @staticmethod
+ @contextmanager
+ def __with_temp_output():
with tempfile.TemporaryFile() as out:
with tempfile.TemporaryFile() as err:
- args = ['color-ssh', '--ssh', str('./tests/resources/not_exist_command'), 'x', 'y']
- ret = color_ssh.main(args, stdout=out, stderr=err)
- self.assertEqual(ret, 1)
+ old_stdout = sys.stdout
+ old_stderr = sys.stderr
- out.seek(0)
- err.seek(0)
+ try:
+ try:
+ sys.stdout.buffer = out
+ sys.stderr.buffer = err
+ except AttributeError:
+ sys.stdout = out
+ sys.stderr = err
- self.assertEqual(out.read(), b'')
- self.assertTrue(b'No such file or directory' in err.read())
+ yield out, err
+ finally:
+ sys.stdout = old_stdout
+ sys.stderr = old_stderr
diff --git a/tests/resources/test_color_ssh_hosts.txt b/tests/resources/test_color_ssh_hosts.txt
new file mode 100644
index 0000000..3b9d2dd
--- /dev/null
+++ b/tests/resources/test_color_ssh_hosts.txt
@@ -0,0 +1,10 @@
+server-1
+server-2
+server-3
+server-4
+server-5
+server-6
+server-7
+server-8
+root@server-9
+root@server-10
\ No newline at end of file
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 3
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pip",
"pip_packages": [
"six",
"mog-commons",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/mogproject/color-ssh.git@3c6ef87beb0faf48b0af7f4498b1be5ff34e6fe1#egg=color_ssh
exceptiongroup==1.2.2
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==3.0.2
mog-commons==0.2.3
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
six==1.17.0
tomli==2.2.1
| name: color-ssh
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==3.0.2
- mog-commons==0.2.3
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- six==1.17.0
- tomli==2.2.1
prefix: /opt/conda/envs/color-ssh
| [
"tests/color_ssh/test_color_ssh.py::TestSetting::test_parse_args",
"tests/color_ssh/test_color_ssh.py::TestMain::test_main_load_error",
"tests/color_ssh/test_color_ssh.py::TestMain::test_main_multi_proc",
"tests/color_ssh/test_color_ssh.py::TestMain::test_main_single_proc",
"tests/color_ssh/test_color_ssh.py::TestMain::test_main_task_error"
]
| []
| [
"tests/color_ssh/test_color_ssh.py::TestSetting::test_parse_args_error"
]
| []
| null | 344 | [
"src/color_ssh/util/util.py",
"src/color_ssh/color_ssh.py",
"src/color_ssh/__init__.py"
]
| [
"src/color_ssh/util/util.py",
"src/color_ssh/color_ssh.py",
"src/color_ssh/__init__.py"
]
|
|
mogproject__color-ssh-9 | cdcbb8980f7a4e49797192dc089915d702322460 | 2015-12-20 18:59:01 | cdcbb8980f7a4e49797192dc089915d702322460 | diff --git a/src/color_ssh/color_ssh.py b/src/color_ssh/color_ssh.py
index 20f7651..f8d82d8 100644
--- a/src/color_ssh/color_ssh.py
+++ b/src/color_ssh/color_ssh.py
@@ -5,8 +5,8 @@ import io
import shlex
import subprocess
from optparse import OptionParser
-from color_ssh.util.util import *
from multiprocessing.pool import Pool
+from color_ssh.util.util import *
__all__ = []
@@ -52,6 +52,10 @@ class Setting(object):
'-p', '--par', dest='parallelism', default=self.DEFAULT_PARALLELISM, type='int', metavar='PAR',
help='max number of parallel threads (default: %d)' % self.DEFAULT_PARALLELISM
)
+ parser.add_option(
+ '--distribute', dest='distribute', default=None, type='string', metavar='PREFIX',
+ help='split and distribute command-line arguments to each host'
+ )
option, args = parser.parse_args(argv[1:])
hosts = self._load_hosts(option.host_file) + (option.host_string.split() if option.host_string else [])
@@ -64,11 +68,16 @@ class Setting(object):
if not hosts:
hosts = args[:1]
- command = args[1:]
+ del args[0]
+
+ # distribute args
+ if option.distribute:
+ dist_prefix = shlex.split(option.distribute)
+ d = distribute(len(hosts), args)
+ tasks = [(option.label or self._extract_label(host),
+ prefix + [host] + dist_prefix + d[i]) for i, host in enumerate(hosts) if d[i]]
else:
- command = args
-
- tasks = [(option.label or self._extract_label(host), prefix + [host] + command) for host in hosts]
+ tasks = [(option.label or self._extract_label(host), prefix + [host] + args) for host in hosts]
self.parallelism = option.parallelism
self.tasks = tasks
diff --git a/src/color_ssh/util/util.py b/src/color_ssh/util/util.py
index 9273954..e1cdb8a 100644
--- a/src/color_ssh/util/util.py
+++ b/src/color_ssh/util/util.py
@@ -3,7 +3,7 @@ from __future__ import division, print_function, absolute_import, unicode_litera
import sys
import os
-__all__ = ['PY3', 'arg2bytes', 'io2bytes']
+__all__ = ['PY3', 'arg2bytes', 'io2bytes', 'distribute']
PY3 = sys.version_info >= (3,)
@@ -14,3 +14,26 @@ def arg2bytes(arg):
def io2bytes(fd):
return fd.buffer if hasattr(fd, 'buffer') else fd
+
+
+def distribute(num_workers, tasks):
+ """
+ Split tasks and distribute to each worker.
+
+ :param num_workers: int
+ :param tasks: list
+ :return: [[task]] (list of the list of tasks)
+ """
+ assert 0 <= num_workers, 'num_workers must be non-negative integer.'
+
+ ret = []
+ if num_workers == 0:
+ return ret
+
+ quotient, extra = divmod(len(tasks), num_workers)
+ j = 0
+ for i in range(num_workers):
+ k = quotient + (1 if i < extra else 0)
+ ret.append(tasks[j:j + k])
+ j += k
+ return ret
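As a rough sketch of what the `distribute` helper above produces (the argument list is hypothetical, not taken from the patch):

# Split five arguments across three workers: distribute() hands out
# contiguous chunks, and the first `extra` workers get one item more
# when the division is uneven.
from color_ssh.util.util import distribute

args = ['a.log', 'b.log', 'c.log', 'd.log', 'e.log']
chunks = distribute(3, args)
print(chunks)  # -> [['a.log', 'b.log'], ['c.log', 'd.log'], ['e.log']]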
| distribute option | mogproject/color-ssh | diff --git a/tests/color_ssh/test_color_ssh.py b/tests/color_ssh/test_color_ssh.py
index b5c4ba7..67684f8 100644
--- a/tests/color_ssh/test_color_ssh.py
+++ b/tests/color_ssh/test_color_ssh.py
@@ -88,6 +88,13 @@ class TestSetting(TestCase):
# parallelism
self.assertEqual(self._parse(['-H', 'server-11 root@server-12', '-p3', 'pwd']).parallelism, 3)
+ self.assertEqual(self._parse(['-H', 'server-11 root@server-12', '--par', '15', 'pwd']).parallelism, 15)
+
+ # distribute
+ self._check(self._parse(['-H', 'server-11 root@server-12', '--distribute', 'echo "foo bar"', 'x', 'y', 'z']), [
+ ('server-11', ['ssh', 'server-11', 'echo', 'foo bar', 'x', 'y']),
+ ('server-12', ['ssh', 'root@server-12', 'echo', 'foo bar', 'z']),
+ ])
def test_parse_args_error(self):
with self.withBytesOutput() as (out, err):
@@ -139,8 +146,10 @@ class TestMain(TestCase):
out.seek(0)
err.seek(0)
- self.assertEqual(out.read(), (f(b'abc') + f(b'foo') + f('あいうえお'.encode('utf-8')) + f(b'\xff\xfe')) * 2)
- self.assertEqual(err.read(), (g(b'def') + g(b'bar') + g('かきくけこ'.encode('utf-8')) + g(b'\xfd\xfc')) * 2)
+ self.assertEqual(sorted(out.read()),
+ sorted((f(b'abc') + f(b'foo') + f('あいうえお'.encode('utf-8')) + f(b'\xff\xfe')) * 2))
+ self.assertEqual(sorted(err.read()),
+ sorted((g(b'def') + g(b'bar') + g('かきくけこ'.encode('utf-8')) + g(b'\xfd\xfc')) * 2))
def test_main_load_error(self):
with self.__with_temp_output() as (out, err):
diff --git a/tests/util/__init__.py b/tests/util/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/util/test_util.py b/tests/util/test_util.py
new file mode 100644
index 0000000..291a4a7
--- /dev/null
+++ b/tests/util/test_util.py
@@ -0,0 +1,28 @@
+from __future__ import division, print_function, absolute_import, unicode_literals
+
+from mog_commons.unittest import TestCase
+from color_ssh.util.util import distribute
+
+
+class TestUtil(TestCase):
+ def test_distribute(self):
+ self.assertEqual(distribute(0, []), [])
+ self.assertEqual(distribute(0, ['a']), [])
+ self.assertEqual(distribute(1, []), [[]])
+ self.assertEqual(distribute(1, ['a']), [['a']])
+ self.assertEqual(distribute(1, ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']),
+ [['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']])
+ self.assertEqual(distribute(2, ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']),
+ [['a', 'b', 'c', 'd'], ['e', 'f', 'g', 'h']])
+ self.assertEqual(distribute(3, ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']),
+ [['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h']])
+ self.assertEqual(distribute(5, ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']),
+ [['a', 'b'], ['c', 'd'], ['e', 'f'], ['g'], ['h']])
+ self.assertEqual(distribute(5, ['a', 'b', 'c', 'd']),
+ [['a'], ['b'], ['c'], ['d'], []])
+
+ xs = distribute(12345, range(200000))
+ self.assertEqual(sum(map(sum, xs)), 200000 * (200000 - 1) / 2)
+
+ def test_distribute_error(self):
+ self.assertRaises(AssertionError, distribute, -1, [])
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 2
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"six",
"mog-commons",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
-e git+https://github.com/mogproject/color-ssh.git@cdcbb8980f7a4e49797192dc089915d702322460#egg=color_ssh
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.0.3
MarkupSafe==2.0.1
mog-commons==0.2.3
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
six==1.17.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: color-ssh
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- jinja2==3.0.3
- markupsafe==2.0.1
- mog-commons==0.2.3
- six==1.17.0
prefix: /opt/conda/envs/color-ssh
| [
"tests/color_ssh/test_color_ssh.py::TestSetting::test_parse_args",
"tests/color_ssh/test_color_ssh.py::TestSetting::test_parse_args_error",
"tests/color_ssh/test_color_ssh.py::TestMain::test_main_load_error",
"tests/color_ssh/test_color_ssh.py::TestMain::test_main_multi_proc",
"tests/color_ssh/test_color_ssh.py::TestMain::test_main_single_proc",
"tests/color_ssh/test_color_ssh.py::TestMain::test_main_task_error",
"tests/util/test_util.py::TestUtil::test_distribute",
"tests/util/test_util.py::TestUtil::test_distribute_error"
]
| []
| []
| []
| null | 345 | [
"src/color_ssh/color_ssh.py",
"src/color_ssh/util/util.py"
]
| [
"src/color_ssh/color_ssh.py",
"src/color_ssh/util/util.py"
]
|
|
falconry__falcon-676 | 37f175f120aaea587c521715ed4815122446a953 | 2015-12-21 19:02:02 | b78ffaac7c412d3b3d6cd3c70dd05024d79d2cce | diff --git a/falcon/request.py b/falcon/request.py
index 54be8c0..bd83227 100644
--- a/falcon/request.py
+++ b/falcon/request.py
@@ -353,7 +353,8 @@ class Request(object):
@property
def client_accepts_msgpack(self):
- return self.client_accepts('application/x-msgpack')
+ return (self.client_accepts('application/x-msgpack')
+ or self.client_accepts('application/msgpack'))
@property
def client_accepts_xml(self):
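For a quick sketch of the corrected behavior, using falcon's own testing helpers (mirroring the test patch below):

from falcon import Request, testing

# Both the legacy 'x-' media type and the plain registered one
# now satisfy client_accepts_msgpack.
for accept in ('application/x-msgpack', 'application/msgpack'):
    env = testing.create_environ(headers={'Accept': accept})
    assert Request(env).client_accepts_msgpack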
| Request.client_accepts_msgpack only supports 'application/x-msgpack'
The use of the 'x-' prefix is now discouraged for media types. We should update this Request property to also return True for 'application/msgpack', and verify the change with additional tests. | falconry/falcon | diff --git a/tests/test_req_vars.py b/tests/test_req_vars.py
index 9e88754..c71f02e 100644
--- a/tests/test_req_vars.py
+++ b/tests/test_req_vars.py
@@ -348,6 +348,12 @@ class TestReqVars(testing.TestBase):
self.assertFalse(req.client_accepts_json)
self.assertTrue(req.client_accepts_msgpack)
+ headers = {'Accept': 'application/msgpack'}
+ req = Request(testing.create_environ(headers=headers))
+ self.assertFalse(req.client_accepts_xml)
+ self.assertFalse(req.client_accepts_json)
+ self.assertTrue(req.client_accepts_msgpack)
+
headers = {
'Accept': 'application/json,application/xml,application/x-msgpack'
}
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"coverage",
"ddt",
"pyyaml",
"requests",
"testtools",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"tools/test-requires"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
ddt==1.7.2
exceptiongroup==1.2.2
-e git+https://github.com/falconry/falcon.git@37f175f120aaea587c521715ed4815122446a953#egg=falcon
idna==3.10
iniconfig==2.1.0
nose==1.3.7
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
python-mimeparse==2.0.0
PyYAML==6.0.2
requests==2.32.3
six==1.17.0
testtools==2.7.2
tomli==2.2.1
urllib3==2.3.0
| name: falcon
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- ddt==1.7.2
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- nose==1.3.7
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- python-mimeparse==2.0.0
- pyyaml==6.0.2
- requests==2.32.3
- six==1.17.0
- testtools==2.7.2
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/falcon
| [
"tests/test_req_vars.py::TestReqVars::test_client_accepts_props"
]
| [
"tests/test_req_vars.py::TestReqVars::test_client_accepts"
]
| [
"tests/test_req_vars.py::TestReqVars::test_attribute_headers",
"tests/test_req_vars.py::TestReqVars::test_bogus_content_length_nan",
"tests/test_req_vars.py::TestReqVars::test_bogus_content_length_neg",
"tests/test_req_vars.py::TestReqVars::test_client_accepts_bogus",
"tests/test_req_vars.py::TestReqVars::test_client_prefers",
"tests/test_req_vars.py::TestReqVars::test_content_length",
"tests/test_req_vars.py::TestReqVars::test_content_length_method",
"tests/test_req_vars.py::TestReqVars::test_content_type_method",
"tests/test_req_vars.py::TestReqVars::test_date_1___Date____date__",
"tests/test_req_vars.py::TestReqVars::test_date_2___If_Modified_since____if_modified_since__",
"tests/test_req_vars.py::TestReqVars::test_date_3___If_Unmodified_since____if_unmodified_since__",
"tests/test_req_vars.py::TestReqVars::test_date_invalid_1___Date____date__",
"tests/test_req_vars.py::TestReqVars::test_date_invalid_2___If_Modified_Since____if_modified_since__",
"tests/test_req_vars.py::TestReqVars::test_date_invalid_3___If_Unmodified_Since____if_unmodified_since__",
"tests/test_req_vars.py::TestReqVars::test_date_missing_1_date",
"tests/test_req_vars.py::TestReqVars::test_date_missing_2_if_modified_since",
"tests/test_req_vars.py::TestReqVars::test_date_missing_3_if_unmodified_since",
"tests/test_req_vars.py::TestReqVars::test_empty",
"tests/test_req_vars.py::TestReqVars::test_empty_path",
"tests/test_req_vars.py::TestReqVars::test_host",
"tests/test_req_vars.py::TestReqVars::test_method",
"tests/test_req_vars.py::TestReqVars::test_missing_attribute_header",
"tests/test_req_vars.py::TestReqVars::test_missing_qs",
"tests/test_req_vars.py::TestReqVars::test_nonlatin_path",
"tests/test_req_vars.py::TestReqVars::test_range",
"tests/test_req_vars.py::TestReqVars::test_range_invalid",
"tests/test_req_vars.py::TestReqVars::test_range_unit",
"tests/test_req_vars.py::TestReqVars::test_reconstruct_url",
"tests/test_req_vars.py::TestReqVars::test_relative_uri",
"tests/test_req_vars.py::TestReqVars::test_subdomain",
"tests/test_req_vars.py::TestReqVars::test_uri",
"tests/test_req_vars.py::TestReqVars::test_uri_http_1_0",
"tests/test_req_vars.py::TestReqVars::test_uri_https"
]
| []
| Apache License 2.0 | 346 | [
"falcon/request.py"
]
| [
"falcon/request.py"
]
|
|
jupyter-incubator__sparkmagic-89 | 5d7c9a29da1f4a3a12fc9cd821807b474625afc1 | 2015-12-22 00:54:05 | 5d7c9a29da1f4a3a12fc9cd821807b474625afc1 | diff --git a/remotespark/sparkkernelbase.py b/remotespark/sparkkernelbase.py
index 3abecfd..bd4c70f 100644
--- a/remotespark/sparkkernelbase.py
+++ b/remotespark/sparkkernelbase.py
@@ -9,6 +9,11 @@ from remotespark.utils.utils import get_connection_string
class SparkKernelBase(IPythonKernel):
+ run_command = "run"
+ config_command = "config"
+ sql_command = "sql"
+ hive_command = "hive"
+
def __init__(self, implementation, implementation_version, language, language_version, language_info,
kernel_conf_name, session_language, client_name, **kwargs):
# Required by Jupyter - Override
@@ -32,7 +37,7 @@ class SparkKernelBase(IPythonKernel):
# Disable warnings for test env in HDI
requests.packages.urllib3.disable_warnings()
- if "testing" not in kwargs.keys():
+ if not kwargs.get("testing", False):
(username, password, url) = self._get_configuration()
self.connection_string = get_connection_string(url, username, password)
self._load_magics_extension()
@@ -41,29 +46,40 @@ class SparkKernelBase(IPythonKernel):
if self._fatal_error is not None:
self._abort_with_fatal_error(self._fatal_error)
- if not self.session_started:
- self._start_session()
-
- # Modify code by prepending spark magic text
- if code.lower().startswith("%sql\n") or code.lower().startswith("%sql "):
- code = "%%spark -c sql\n{}".format(code[5:])
- elif code.lower().startswith("%%sql\n") or code.lower().startswith("%%sql "):
- code = "%%spark -c sql\n{}".format(code[6:])
- elif code.lower().startswith("%hive\n") or code.lower().startswith("%hive "):
- code = "%%spark -c hive\n{}".format(code[6:])
- elif code.lower().startswith("%%hive\n") or code.lower().startswith("%%hive "):
- code = "%%spark -c hive\n{}".format(code[7:])
+ subcommand, flags, code_to_run = self._parse_user_command(code)
+
+ if subcommand == self.run_command:
+ code_to_run = "%%spark\n{}".format(code_to_run)
+ return self._run_starting_session(code_to_run, silent, store_history, user_expressions, allow_stdin)
+ elif subcommand == self.sql_command:
+ code_to_run = "%%spark -c sql\n{}".format(code_to_run)
+ return self._run_starting_session(code_to_run, silent, store_history, user_expressions, allow_stdin)
+ elif subcommand == self.hive_command:
+ code_to_run = "%%spark -c hive\n{}".format(code_to_run)
+ return self._run_starting_session(code_to_run, silent, store_history, user_expressions, allow_stdin)
+ elif subcommand == self.config_command:
+ restart_session = False
+
+ if self.session_started:
+ if "f" not in flags:
+ raise KeyError("A session has already been started. In order to modify the Spark configuration, "
+ "please provide the '-f' flag at the beginning of the config magic:\n\te.g. `%config"
+ " -f {}`\n\nNote that this will kill the current session and will create a new one "
+ "with the configuration provided. All previously run commands in the session will be"
+ " lost.")
+ else:
+ restart_session = True
+
+ code_to_run = "%%spark config {}".format(code_to_run)
+
+ return self._run_restarting_session(code_to_run, silent, store_history, user_expressions, allow_stdin,
+ restart_session)
else:
- code = "%%spark\n{}".format(code)
-
- return self._execute_cell(code, silent, store_history, user_expressions, allow_stdin)
+ raise KeyError("Magic '{}' not supported.".format(subcommand))
def do_shutdown(self, restart):
# Cleanup
- if self.session_started:
- code = "%spark cleanup"
- self._execute_cell_for_user(code, True, False)
- self.session_started = False
+ self._delete_session()
return self._do_shutdown_ipykernel(restart)
@@ -83,6 +99,27 @@ class SparkKernelBase(IPythonKernel):
log_if_error="Failed to create a Livy session.")
self.logger.debug("Added session.")
+ def _delete_session(self):
+ if self.session_started:
+ code = "%spark cleanup"
+ self._execute_cell_for_user(code, True, False)
+ self.session_started = False
+
+ def _run_starting_session(self, code, silent, store_history, user_expressions, allow_stdin):
+ self._start_session()
+ return self._execute_cell(code, silent, store_history, user_expressions, allow_stdin)
+
+ def _run_restarting_session(self, code, silent, store_history, user_expressions, allow_stdin, restart):
+ if restart:
+ self._delete_session()
+
+ res = self._execute_cell(code, silent, store_history, user_expressions, allow_stdin)
+
+ if restart:
+ self._start_session()
+
+ return res
+
def _get_configuration(self):
try:
credentials = getattr(conf, 'kernel_' + self.kernel_conf_name + '_credentials')()
@@ -95,6 +132,35 @@ class SparkKernelBase(IPythonKernel):
self.kernel_conf_name)
self._abort_with_fatal_error(message)
+ def _parse_user_command(self, code):
+ # Normalize 2 signs to 1
+ if code.startswith("%%"):
+ code = code[1:]
+
+ # When no magic, return run command
+ if not code.startswith("%"):
+ code = "%{} {}".format(self.run_command, code)
+
+ # Remove percentage sign
+ code = code[1:]
+
+ split_code = code.split(None, 1)
+ subcommand = split_code[0].lower()
+ flags = []
+ rest = split_code[1]
+
+ # Get all flags
+ flag_split = rest.split(None, 1)
+ while len(flag_split) >= 2 and flag_split[0].startswith("-"):
+ flags.append(flag_split[0][1:].lower())
+ rest = flag_split[1]
+ flag_split = rest.split(None, 1)
+
+ # flags to lower
+ flags = [i.lower() for i in flags]
+
+ return subcommand, flags, rest
+
def _execute_cell(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False,
shutdown_if_error=False, log_if_error=None):
reply_content = self._execute_cell_for_user(code, silent, store_history, user_expressions, allow_stdin)
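To make the dispatch above concrete, here is a sketch of what `_parse_user_command` returns for a config magic; `kernel` stands in for any instance of the class, and the payload is illustrative:

code = '%%config -f {"extra": 2}'
# '%%' is normalized to '%', the sign is stripped, the first token
# becomes the subcommand, leading '-x' tokens are collected as flags,
# and whatever remains is passed through untouched.
subcommand, flags, rest = kernel._parse_user_command(code)
assert (subcommand, flags, rest) == ('config', ['f'], '{"extra": 2}')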
| Expose session configs through wrapper kernel | jupyter-incubator/sparkmagic | diff --git a/tests/test_sparkkernelbase.py b/tests/test_sparkkernelbase.py
index a3c3ce1..d5f105c 100644
--- a/tests/test_sparkkernelbase.py
+++ b/tests/test_sparkkernelbase.py
@@ -1,5 +1,5 @@
from mock import MagicMock, call
-from nose.tools import with_setup
+from nose.tools import with_setup, raises
from remotespark.sparkkernelbase import SparkKernelBase
import remotespark.utils.configuration as conf
@@ -53,7 +53,7 @@ def test_get_config():
pwd = "p"
url = "url"
- config = { "kernel_python_credentials": {user_ev: usr, pass_ev: pwd, url_ev: url} }
+ config = {"kernel_python_credentials": {user_ev: usr, pass_ev: pwd, url_ev: url}}
conf.override(config)
u, p, r = kernel._get_configuration()
@@ -93,7 +93,55 @@ def test_start_session():
assert kernel.session_started
assert call("%spark add TestKernel python {} skip".format(conn_str), True, False, None, False) \
- in execute_cell_mock.mock_calls
+ in execute_cell_mock.mock_calls
+
+
+@with_setup(_setup(), _teardown())
+def test_delete_session():
+ kernel.session_started = True
+
+ kernel._delete_session()
+
+ assert not kernel.session_started
+ assert call("%spark cleanup", True, False) in execute_cell_mock.mock_calls
+
+
+@with_setup(_setup, _teardown)
+def test_set_config():
+ def _check(prepend, session_started=False, key_error_expected=False):
+ # Set up
+ properties = """{"extra": 2}"""
+ code = prepend + properties
+ kernel.session_started = session_started
+ execute_cell_mock.reset_mock()
+
+ # Call method
+ try:
+ kernel.do_execute(code, False)
+ except KeyError:
+ if not key_error_expected:
+ assert False
+
+ # When exception is expected, nothing to check
+ return
+
+ assert session_started == kernel.session_started
+ assert call("%%spark config {}".format(properties), False, True, None, False) \
+ in execute_cell_mock.mock_calls
+
+ if session_started and not key_error_expected:
+ # This means -f must be present, so check that a restart happened
+ assert call("%spark cleanup", True, False) in execute_cell_mock.mock_calls
+ assert call("%spark add TestKernel python {} skip".format(conn_str), True, False, None, False) \
+ in execute_cell_mock.mock_calls
+
+ _check("%config ")
+ _check("%config\n")
+ _check("%%config ")
+ _check("%%config\n")
+ _check("%config -f ")
+ _check("%config ", True, True)
+ _check("%config -f ", True, False)
@with_setup(_setup, _teardown)
@@ -111,6 +159,17 @@ def test_do_execute_initializes_magics_if_not_run():
assert call("%%spark\n{}".format(code), False, True, None, False) in execute_cell_mock.mock_calls
+@with_setup(_setup, _teardown)
+@raises(KeyError)
+def test_magic_not_supported():
+ # Set up
+ code = "%alex some spark code"
+ kernel.session_started = True
+
+ # Call method
+ kernel.do_execute(code, False)
+
+
@with_setup(_setup, _teardown)
def test_call_spark():
# Set up
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | anyio==3.7.1
argon2-cffi==23.1.0
argon2-cffi-bindings==21.2.0
attrs==24.2.0
beautifulsoup4==4.13.3
bleach==6.0.0
certifi @ file:///croot/certifi_1671487769961/work/certifi
cffi==1.15.1
charset-normalizer==3.4.1
comm==0.1.4
decorator==5.1.1
defusedxml==0.7.1
entrypoints==0.4
exceptiongroup==1.2.2
fastjsonschema==2.21.1
idna==3.10
importlib-metadata==6.7.0
importlib-resources==5.12.0
iniconfig==2.0.0
ipykernel==4.1.1
ipython==4.0.0
ipython-genutils==0.2.0
ipywidgets==7.8.5
Jinja2==3.1.6
jsonschema==4.17.3
jupyter-server==1.24.0
jupyter_client==7.4.9
jupyter_core==4.12.0
jupyterlab-pygments==0.2.2
jupyterlab_widgets==1.1.11
MarkupSafe==2.1.5
mistune==3.0.2
mock==5.2.0
nbclassic==1.2.0
nbclient==0.7.4
nbconvert==7.6.0
nbformat==5.8.0
nest-asyncio==1.6.0
nose==1.3.7
notebook==6.5.7
notebook_shim==0.2.4
numpy==1.21.6
packaging==24.0
pandas==1.3.5
pandocfilters==1.5.1
pexpect==4.9.0
pickleshare==0.7.5
pkgutil_resolve_name==1.3.10
plotly==5.18.0
pluggy==1.2.0
prometheus-client==0.17.1
ptyprocess==0.7.0
pycparser==2.21
Pygments==2.17.2
pyrsistent==0.19.3
pytest==7.4.4
python-dateutil==2.9.0.post0
pytz==2025.2
pyzmq==26.2.1
-e git+https://github.com/jupyter-incubator/sparkmagic.git@5d7c9a29da1f4a3a12fc9cd821807b474625afc1#egg=remotespark
requests==2.31.0
Send2Trash==1.8.3
simplegeneric==0.8.1
six==1.17.0
sniffio==1.3.1
soupsieve==2.4.1
tenacity==8.2.3
terminado==0.17.1
tinycss2==1.2.1
tomli==2.0.1
tornado==6.2
traitlets==5.9.0
typing_extensions==4.7.1
urllib3==2.0.7
webencodings==0.5.1
websocket-client==1.6.1
widgetsnbextension==3.6.10
zipp==3.15.0
| name: sparkmagic
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- anyio==3.7.1
- argon2-cffi==23.1.0
- argon2-cffi-bindings==21.2.0
- attrs==24.2.0
- beautifulsoup4==4.13.3
- bleach==6.0.0
- cffi==1.15.1
- charset-normalizer==3.4.1
- comm==0.1.4
- decorator==5.1.1
- defusedxml==0.7.1
- entrypoints==0.4
- exceptiongroup==1.2.2
- fastjsonschema==2.21.1
- idna==3.10
- importlib-metadata==6.7.0
- importlib-resources==5.12.0
- iniconfig==2.0.0
- ipykernel==4.1.1
- ipython==4.0.0
- ipython-genutils==0.2.0
- ipywidgets==7.8.5
- jinja2==3.1.6
- jsonschema==4.17.3
- jupyter-client==7.4.9
- jupyter-core==4.12.0
- jupyter-server==1.24.0
- jupyterlab-pygments==0.2.2
- jupyterlab-widgets==1.1.11
- markupsafe==2.1.5
- mistune==3.0.2
- mock==5.2.0
- nbclassic==1.2.0
- nbclient==0.7.4
- nbconvert==7.6.0
- nbformat==5.8.0
- nest-asyncio==1.6.0
- nose==1.3.7
- notebook==6.5.7
- notebook-shim==0.2.4
- numpy==1.21.6
- packaging==24.0
- pandas==1.3.5
- pandocfilters==1.5.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pkgutil-resolve-name==1.3.10
- plotly==5.18.0
- pluggy==1.2.0
- prometheus-client==0.17.1
- ptyprocess==0.7.0
- pycparser==2.21
- pygments==2.17.2
- pyrsistent==0.19.3
- pytest==7.4.4
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyzmq==26.2.1
- requests==2.31.0
- send2trash==1.8.3
- simplegeneric==0.8.1
- six==1.17.0
- sniffio==1.3.1
- soupsieve==2.4.1
- tenacity==8.2.3
- terminado==0.17.1
- tinycss2==1.2.1
- tomli==2.0.1
- tornado==6.2
- traitlets==5.9.0
- typing-extensions==4.7.1
- urllib3==2.0.7
- webencodings==0.5.1
- websocket-client==1.6.1
- widgetsnbextension==3.6.10
- zipp==3.15.0
prefix: /opt/conda/envs/sparkmagic
| [
"tests/test_sparkkernelbase.py::test_delete_session",
"tests/test_sparkkernelbase.py::test_set_config",
"tests/test_sparkkernelbase.py::test_magic_not_supported"
]
| []
| [
"tests/test_sparkkernelbase.py::test_get_config",
"tests/test_sparkkernelbase.py::test_get_config_not_set",
"tests/test_sparkkernelbase.py::test_initialize_magics",
"tests/test_sparkkernelbase.py::test_start_session",
"tests/test_sparkkernelbase.py::test_do_execute_initializes_magics_if_not_run",
"tests/test_sparkkernelbase.py::test_call_spark",
"tests/test_sparkkernelbase.py::test_execute_throws_if_fatal_error_happened",
"tests/test_sparkkernelbase.py::test_execute_throws_if_fatal_error_happens_for_execution",
"tests/test_sparkkernelbase.py::test_call_spark_sql_new_line",
"tests/test_sparkkernelbase.py::test_call_spark_hive_new_line",
"tests/test_sparkkernelbase.py::test_shutdown_cleans_up"
]
| []
| Modified BSD License | 347 | [
"remotespark/sparkkernelbase.py"
]
| [
"remotespark/sparkkernelbase.py"
]
|
|
box__box-python-sdk-99 | f365c177f70ce6cfc2d53528ed649cdac20bb43d | 2015-12-22 19:59:14 | f365c177f70ce6cfc2d53528ed649cdac20bb43d | diff --git a/HISTORY.rst b/HISTORY.rst
index b567bfa..9af5bb4 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -6,7 +6,17 @@ Release History
Upcoming
++++++++
-1.3.2
+1.3.3 (2015-12-22)
+++++++++++++++++++
+
+- Added a new class, ``DeveloperTokenClient`` that makes it easy to get started using the SDK with a Box developer
+ token. It uses another new class, ``DeveloperTokenAuth`` for auth.
+
+**Bugfixes**
+
+- Added limit, offset, and filter_term parameters to ``client.users()`` to match up with the Box API.
+
+1.3.2 (2015-11-16)
++++++++++++++++++
- Fix ``boxsdk.util.log.setup_logging()`` on Python 3.
diff --git a/boxsdk/client.py b/boxsdk/client.py
index 8a04d76..f84d9a3 100644
--- a/boxsdk/client.py
+++ b/boxsdk/client.py
@@ -100,21 +100,48 @@ def group(self, group_id):
"""
return Group(session=self._session, object_id=group_id)
- def users(self):
+ def users(self, limit=None, offset=0, filter_term=None):
"""
Get a list of all users for the Enterprise along with their user_id, public_name, and login.
+ :param limit:
+ The maximum number of users to return. If not specified, the Box API will determine an appropriate limit.
+ :type limit:
+ `int` or None
+ :param offset:
+ The user index at which to start the response.
+ :type offset:
+ `int`
+ :param filter_term:
+ Filters the results to only users starting with the filter_term in either the name or the login.
+ :type filter_term:
+ `unicode` or None
:return:
The list of all users in the enterprise.
:rtype:
`list` of :class:`User`
"""
url = '{0}/users'.format(API.BASE_API_URL)
- box_response = self._session.get(url)
+ params = dict(offset=offset)
+ if limit is not None:
+ params['limit'] = limit
+ if filter_term is not None:
+ params['filter_term'] = filter_term
+ box_response = self._session.get(url, params=params)
response = box_response.json()
return [User(self._session, item['id'], item) for item in response['entries']]
- def search(self, query, limit, offset, ancestor_folders=None, file_extensions=None, metadata_filters=None, result_type=None, content_types=None):
+ def search(
+ self,
+ query,
+ limit,
+ offset,
+ ancestor_folders=None,
+ file_extensions=None,
+ metadata_filters=None,
+ result_type=None,
+ content_types=None
+ ):
"""
Search Box for items matching the given query.
@@ -155,14 +182,16 @@ def search(self, query, limit, offset, ancestor_folders=None, file_extensions=No
:rtype:
`list` of :class:`Item`
"""
- return Search(self._session).search(query=query,
- limit=limit,
- offset=offset,
- ancestor_folders=ancestor_folders,
- file_extensions=file_extensions,
- metadata_filters=metadata_filters,
- result_type=result_type,
- content_types=content_types)
+ return Search(self._session).search(
+ query=query,
+ limit=limit,
+ offset=offset,
+ ancestor_folders=ancestor_folders,
+ file_extensions=file_extensions,
+ metadata_filters=metadata_filters,
+ result_type=result_type,
+ content_types=content_types,
+ )
def events(self):
"""
@@ -333,5 +362,8 @@ def with_shared_link(self, shared_link, shared_link_password):
class DeveloperTokenClient(Client):
+ """
+ Box client subclass which authorizes with a developer token.
+ """
def __init__(self, oauth=None, network_layer=None, session=None):
super(DeveloperTokenClient, self).__init__(oauth or DeveloperTokenAuth(), network_layer, session)
diff --git a/setup.py b/setup.py
index 2d16b34..59885f9 100644
--- a/setup.py
+++ b/setup.py
@@ -61,7 +61,7 @@ def main():
install_requires.append('ordereddict>=1.1')
setup(
name='boxsdk',
- version='1.3.2',
+ version='1.3.3',
description='Official Box Python SDK',
long_description=open(join(base_dir, 'README.rst')).read(),
author='Box',
@@ -69,7 +69,7 @@ def main():
url='http://opensource.box.com',
packages=find_packages(exclude=['demo', 'docs', 'test']),
install_requires=install_requires,
- extras_require={'jwt': jwt_requires, 'redis': redis_requires},
+ extras_require={'jwt': jwt_requires, 'redis': redis_requires, 'all': jwt_requires + redis_requires},
tests_require=['pytest', 'pytest-xdist', 'mock', 'sqlalchemy', 'bottle', 'jsonpatch'],
cmdclass={'test': PyTest},
classifiers=CLASSIFIERS,
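A minimal sketch of paginating through enterprise users with the updated signature (the filter value is a placeholder):

from boxsdk.client import DeveloperTokenClient

client = DeveloperTokenClient()  # authorizes via DeveloperTokenAuth
# Fetch users 20-29 whose name or login starts with 'jo'.
for user in client.users(limit=10, offset=20, filter_term='jo'):
    print(user.object_id)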
| The get-all-users-in-enterprise API needs to be pageable
Right now it just returns a list of :class:`User` | box/box-python-sdk | diff --git a/test/unit/test_client.py b/test/unit/test_client.py
index ef6ba70..81f2341 100644
--- a/test/unit/test_client.py
+++ b/test/unit/test_client.py
@@ -159,10 +159,40 @@ def test_factory_returns_the_correct_object(mock_client, test_class, factory_met
assert obj.object_id == fake_id
-def test_users_return_the_correct_user_objects(mock_client, mock_box_session, users_response, user_id_1, user_id_2):
[email protected](scope='module', params=(None, 'user1'))
+def users_filter_term(request):
+ return request.param
+
+
[email protected](scope='module', params=(0, 10))
+def users_offset(request):
+ return request.param
+
+
[email protected](scope='module', params=(0, 10))
+def users_limit(request):
+ return request.param
+
+
+def test_users_return_the_correct_user_objects(
+ mock_client,
+ mock_box_session,
+ users_response,
+ user_id_1,
+ user_id_2,
+ users_filter_term,
+ users_offset,
+ users_limit,
+):
# pylint:disable=redefined-outer-name
mock_box_session.get.return_value = users_response
- users = mock_client.users()
+ users = mock_client.users(users_limit, users_offset, users_filter_term)
+ expected_params = {'offset': users_offset}
+ if users_limit is not None:
+ expected_params['limit'] = users_limit
+ if users_filter_term is not None:
+ expected_params['filter_term'] = users_filter_term
+ mock_box_session.get.assert_called_once_with('{0}/users'.format(API.BASE_API_URL), params=expected_params)
assert users[0].object_id == user_id_1
assert users[1].object_id == user_id_2
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 3
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": [
"requirements.txt",
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
astroid==2.11.7
async-timeout==4.0.2
attrs==22.2.0
Babel==2.11.0
bottle==0.13.2
-e git+https://github.com/box/box-python-sdk.git@f365c177f70ce6cfc2d53528ed649cdac20bb43d#egg=boxsdk
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
coverage==6.2
cryptography==40.0.2
dill==0.3.4
distlib==0.3.9
docutils==0.18.1
execnet==1.9.0
filelock==3.4.1
greenlet==2.0.2
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
isort==5.10.1
Jinja2==3.0.3
jsonpatch==1.32
jsonpointer==2.3
lazy-object-proxy==1.7.1
MarkupSafe==2.0.1
mccabe==0.7.0
mock==1.0.1
packaging==21.3
pep8==1.7.1
platformdirs==2.4.0
pluggy==1.0.0
py==1.11.0
pycparser==2.21
Pygments==2.14.0
PyJWT==2.4.0
pylint==2.13.9
pyparsing==3.1.4
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==2.5.1
pytest-mock==3.6.1
pytest-xdist==1.17.1
pytz==2025.2
redis==4.3.6
requests==2.27.1
requests-toolbelt==1.0.0
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
SQLAlchemy==1.4.54
swebench-matterhorn @ file:///swebench_matterhorn
toml==0.10.2
tomli==1.2.3
tox==3.28.0
typed-ast==1.5.5
typing_extensions==4.1.1
urllib3==1.26.20
virtualenv==20.17.1
wrapt==1.16.0
zipp==3.6.0
| name: box-python-sdk
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- astroid==2.11.7
- async-timeout==4.0.2
- attrs==22.2.0
- babel==2.11.0
- bottle==0.13.2
- cffi==1.15.1
- charset-normalizer==2.0.12
- coverage==6.2
- cryptography==40.0.2
- dill==0.3.4
- distlib==0.3.9
- docutils==0.18.1
- execnet==1.9.0
- filelock==3.4.1
- greenlet==2.0.2
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- isort==5.10.1
- jinja2==3.0.3
- jsonpatch==1.32
- jsonpointer==2.3
- lazy-object-proxy==1.7.1
- markupsafe==2.0.1
- mccabe==0.7.0
- mock==1.0.1
- packaging==21.3
- pep8==1.7.1
- platformdirs==2.4.0
- pluggy==1.0.0
- py==1.11.0
- pycparser==2.21
- pygments==2.14.0
- pyjwt==2.4.0
- pylint==2.13.9
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==2.5.1
- pytest-mock==3.6.1
- pytest-xdist==1.17.1
- pytz==2025.2
- redis==4.3.6
- requests==2.27.1
- requests-toolbelt==1.0.0
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- sqlalchemy==1.4.54
- swebench-matterhorn==0.0.0
- toml==0.10.2
- tomli==1.2.3
- tox==3.28.0
- typed-ast==1.5.5
- typing-extensions==4.1.1
- urllib3==1.26.20
- virtualenv==20.17.1
- wrapt==1.16.0
- zipp==3.6.0
prefix: /opt/conda/envs/box-python-sdk
| [
"test/unit/test_client.py::test_users_return_the_correct_user_objects[None-0-0]",
"test/unit/test_client.py::test_users_return_the_correct_user_objects[None-10-0]",
"test/unit/test_client.py::test_users_return_the_correct_user_objects[None-10-10]",
"test/unit/test_client.py::test_users_return_the_correct_user_objects[user1-10-10]",
"test/unit/test_client.py::test_users_return_the_correct_user_objects[user1-0-10]",
"test/unit/test_client.py::test_users_return_the_correct_user_objects[user1-10-0]",
"test/unit/test_client.py::test_users_return_the_correct_user_objects[user1-0-0]",
"test/unit/test_client.py::test_users_return_the_correct_user_objects[None-0-10]"
]
| []
| [
"test/unit/test_client.py::test_factory_returns_the_correct_object[Folder-folder]",
"test/unit/test_client.py::test_factory_returns_the_correct_object[File-file]",
"test/unit/test_client.py::test_factory_returns_the_correct_object[User-user]",
"test/unit/test_client.py::test_factory_returns_the_correct_object[Group-group]",
"test/unit/test_client.py::test_factory_returns_the_correct_object[GroupMembership-group_membership]",
"test/unit/test_client.py::test_search_instantiates_search_and_calls_search",
"test/unit/test_client.py::test_events_returns_event_object",
"test/unit/test_client.py::test_groups_return_the_correct_group_objects",
"test/unit/test_client.py::test_create_group_returns_the_correct_group_object",
"test/unit/test_client.py::test_get_shared_item_returns_the_correct_item[file-None]",
"test/unit/test_client.py::test_get_shared_item_returns_the_correct_item[file-p4ssw0rd]",
"test/unit/test_client.py::test_get_shared_item_returns_the_correct_item[folder-None]",
"test/unit/test_client.py::test_get_shared_item_returns_the_correct_item[folder-p4ssw0rd]",
"test/unit/test_client.py::test_make_request_passes_request_on_to_session[get]",
"test/unit/test_client.py::test_make_request_passes_request_on_to_session[post]",
"test/unit/test_client.py::test_make_request_passes_request_on_to_session[put]",
"test/unit/test_client.py::test_make_request_passes_request_on_to_session[delete]",
"test/unit/test_client.py::test_make_request_passes_request_on_to_session[options]",
"test/unit/test_client.py::test_create_app_user_returns_the_correct_user_object",
"test/unit/test_client.py::test_create_enterprise_user_returns_the_correct_user_object"
]
| []
| Apache License 2.0 | 348 | [
"HISTORY.rst",
"boxsdk/client.py",
"setup.py"
]
| [
"HISTORY.rst",
"boxsdk/client.py",
"setup.py"
]
|
|
jupyter-incubator__sparkmagic-93 | 7025cb5b607abde80d5dd8a701c40c3598801e9d | 2015-12-23 01:03:52 | 7025cb5b607abde80d5dd8a701c40c3598801e9d | diff --git a/remotespark/livyclientlib/livyclientfactory.py b/remotespark/livyclientlib/livyclientfactory.py
index f7b2ae3..3bdda7f 100644
--- a/remotespark/livyclientlib/livyclientfactory.py
+++ b/remotespark/livyclientlib/livyclientfactory.py
@@ -30,11 +30,15 @@ class LivyClientFactory(object):
@staticmethod
def create_session(connection_string, properties, session_id="-1", sql_created=False):
- cso = get_connection_string_elements(connection_string)
-
- retry_policy = LinearRetryPolicy(seconds_to_sleep=5, max_retries=5)
- http_client = LivyReliableHttpClient(cso.url, cso.username, cso.password, retry_policy)
+ http_client = LivyClientFactory.create_http_client(connection_string)
session = LivySession(http_client, session_id, sql_created, properties)
return session
+
+ @staticmethod
+ def create_http_client(connection_string):
+ cso = get_connection_string_elements(connection_string)
+
+ retry_policy = LinearRetryPolicy(seconds_to_sleep=5, max_retries=5)
+ return LivyReliableHttpClient(cso.url, cso.username, cso.password, retry_policy)
diff --git a/remotespark/livyclientlib/reliablehttpclient.py b/remotespark/livyclientlib/reliablehttpclient.py
index bcc14d5..c146dae 100644
--- a/remotespark/livyclientlib/reliablehttpclient.py
+++ b/remotespark/livyclientlib/reliablehttpclient.py
@@ -7,7 +7,6 @@ from time import sleep
import requests
import remotespark.utils.configuration as conf
-from remotespark.utils.constants import Constants
from remotespark.utils.log import Log
from remotespark.utils.utils import get_connection_string
diff --git a/remotespark/livyclientlib/sparkcontroller.py b/remotespark/livyclientlib/sparkcontroller.py
index da60fb7..1254f48 100644
--- a/remotespark/livyclientlib/sparkcontroller.py
+++ b/remotespark/livyclientlib/sparkcontroller.py
@@ -20,24 +20,49 @@ class SparkController(object):
else:
self.client_manager = ClientManager()
- def run_cell(self, cell, client_name = None):
+ def run_cell(self, cell, client_name=None):
client_to_use = self.get_client_by_name_or_default(client_name)
return client_to_use.execute(cell)
- def run_cell_sql(self, cell, client_name = None):
+ def run_cell_sql(self, cell, client_name=None):
client_to_use = self.get_client_by_name_or_default(client_name)
return client_to_use.execute_sql(cell)
- def run_cell_hive(self, cell, client_name = None):
+ def run_cell_hive(self, cell, client_name=None):
client_to_use = self.get_client_by_name_or_default(client_name)
return client_to_use.execute_hive(cell)
+ def get_all_sessions_endpoint(self, connection_string):
+ http_client = self.client_factory.create_http_client(connection_string)
+ r = http_client.get("/sessions", [200])
+ sessions = r.json()["sessions"]
+ session_list = [self.client_factory.create_session(connection_string, {"kind": s["kind"]}, s["id"])
+ for s in sessions]
+ for s in session_list:
+ s.refresh_status()
+ return session_list
+
+ def get_all_sessions_endpoint_info(self, connection_string):
+ sessions = self.get_all_sessions_endpoint(connection_string)
+ return [str(s) for s in sessions]
+
def cleanup(self):
self.client_manager.clean_up_all()
+ def cleanup_endpoint(self, connection_string):
+ for session in self.get_all_sessions_endpoint(connection_string):
+ session.delete()
+
def delete_session_by_name(self, name):
self.client_manager.delete_client(name)
+ def delete_session_by_id(self, connection_string, session_id):
+ http_client = self.client_factory.create_http_client(connection_string)
+ r = http_client.get("/sessions/{}".format(session_id), [200, 404])
+ if r.status_code != 404:
+ session = self.client_factory.create_session(connection_string, {"kind": r.json()["kind"]}, session_id, False)
+ session.delete()
+
def add_session(self, name, connection_string, skip_if_exists, properties):
if skip_if_exists and (name in self.client_manager.get_sessions_list()):
self.logger.debug("Skipping {} because it already exists in list of sessions.".format(name))
@@ -60,4 +85,3 @@ class SparkController(object):
else:
client_name = client_name.lower()
return self.client_manager.get_client(client_name)
-
diff --git a/remotespark/remotesparkmagics.py b/remotespark/remotesparkmagics.py
index d1f40b3..705954f 100644
--- a/remotespark/remotesparkmagics.py
+++ b/remotespark/remotesparkmagics.py
@@ -43,6 +43,8 @@ class RemoteSparkMagics(Magics):
except KeyError:
self.logger.error("Could not read env vars for serialization.")
+ self.properties = conf.session_configs()
+
self.logger.debug("Initialized spark magics.")
@magic_arguments()
@@ -107,12 +109,20 @@ class RemoteSparkMagics(Magics):
# info
if subcommand == "info":
- self._print_info()
+ if len(args.command) == 2:
+ connection_string = args.command[1]
+ info_sessions = self.spark_controller.get_all_sessions_endpoint_info(connection_string)
+ self._print_endpoint_info(info_sessions)
+ elif len(args.command) == 1:
+ self._print_local_info()
+ else:
+ raise ValueError("Subcommand 'info' requires no value or a connection string to show all sessions. "
+ "{}".format(usage))
# config
elif subcommand == "config":
# Would normally do " ".join(args.command[1:]) but parse_argstring removes quotes...
rest_of_line = user_input[7:]
- conf.override(conf.session_configs.__name__, json.loads(rest_of_line))
+ self.properties = json.loads(rest_of_line)
# add
elif subcommand == "add":
if len(args.command) != 4 and len(args.command) != 5:
@@ -127,19 +137,32 @@ class RemoteSparkMagics(Magics):
else:
skip = False
- properties = copy.deepcopy(conf.session_configs())
+ properties = copy.deepcopy(self.properties)
properties["kind"] = self._get_livy_kind(language)
self.spark_controller.add_session(name, connection_string, skip, properties)
# delete
elif subcommand == "delete":
- if len(args.command) != 2:
- raise ValueError("Subcommand 'delete' requires an argument. {}".format(usage))
- name = args.command[1].lower()
- self.spark_controller.delete_session_by_name(name)
+ if len(args.command) == 2:
+ name = args.command[1].lower()
+ self.spark_controller.delete_session_by_name(name)
+ elif len(args.command) == 3:
+ connection_string = args.command[1]
+ session_id = args.command[2]
+ self.spark_controller.delete_session_by_id(connection_string, session_id)
+ else:
+ raise ValueError("Subcommand 'delete' requires a session name, or a connection string and id. {}"
+ .format(usage))
# cleanup
elif subcommand == "cleanup":
- self.spark_controller.cleanup()
+ if len(args.command) == 2:
+ connection_string = args.command[1]
+ self.spark_controller.cleanup_endpoint(connection_string)
+ elif len(args.command) == 1:
+ self.spark_controller.cleanup()
+ else:
+ raise ValueError("Subcommand 'cleanup' requires no value or a connection string to clean up sessions. "
+ "{}".format(usage))
# run
elif len(subcommand) == 0:
if args.context == Constants.context_name_spark:
@@ -170,15 +193,21 @@ class RemoteSparkMagics(Magics):
self.shell.write_err(e.out)
return None
- def _print_info(self):
- sessions_info = ["\t\t{}".format(i) for i in self.spark_controller.get_manager_sessions_str()]
+ def _print_local_info(self):
+ sessions_info = [" {}".format(i) for i in self.spark_controller.get_manager_sessions_str()]
print("""Info for running Spark:
Sessions:
{}
Session configs:
{}
-""".format("\n".join(sessions_info), conf.session_configs()))
+""".format("\n".join(sessions_info), self.properties))
+ def _print_endpoint_info(self, info_sessions):
+ sessions_info = [" {}".format(i) for i in info_sessions]
+ print("""Info for endpoint:
+ Sessions:
+{}
+""".format("\n".join(sessions_info)))
@staticmethod
def _get_livy_kind(language):
diff --git a/remotespark/utils/configuration.py b/remotespark/utils/configuration.py
index 5668901..df09e52 100644
--- a/remotespark/utils/configuration.py
+++ b/remotespark/utils/configuration.py
@@ -36,23 +36,16 @@ def load(fsrw_class = None):
overrides = {}
else:
overrides = json.loads(line)
- override_all(overrides)
+ override(overrides)
-def override_all(obj):
+def override(obj):
"""Given a dictionary representing the overrided defaults for this
configuration, initialize the global configuration."""
global _overrides
_overrides = obj
-def override(config, value):
- """Given a string representing a configuration and a value for that configuration,
- override the configuration. Initialize the overrided configuration beforehand."""
- initialize()
- _overrides[config] = value
-
-
def _override(f):
"""A decorator which first initializes the overrided configurations,
then checks the global overrided defaults for the given configuration,
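A rough sketch of the new endpoint-scoped controller methods in use; the connection string format and session id are assumptions for illustration:

from remotespark.livyclientlib.sparkcontroller import SparkController

controller = SparkController()  # constructor arguments elided here
conn_str = 'url=https://livy.example.com;username=u;password=p'

# Enumerate every session on the endpoint, with refreshed status.
for line in controller.get_all_sessions_endpoint_info(conn_str):
    print(line)

# Remove one session by id, then tear down whatever remains.
controller.delete_session_by_id(conn_str, '4')
controller.cleanup_endpoint(conn_str)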
| Manage livy endpoint from magics
This will be the API:
* `%spark add session_name language conn_string`
will create a session against the endpoint specified
* `%spark info`
will display the info for the sessions created in that notebook
* `%spark config <configuration_overrides>`
will add session configs for subsequent sessions
* `%spark info conn_string`
will list the sessions for a given livy endpoint by providing `session_id, language, state`
* `%spark delete session_name`
will delete a session by its name from the notebook that created it
* `%spark delete conn_string session_id`
will delete a session for a given endpoint by its id
* `%spark cleanup`
will delete all sessions created by the notebook
* `%spark cleanup conn_string`
will delete all sessions for the given livy endpoint
This covers #56, #75, and #76 for magics in Python kernel.
We are not designing the API for the wrapper kernels here and we'll tackle that as a separate improvement.
ping @msftristew @ellisonbg to take a look when they can | jupyter-incubator/sparkmagic | diff --git a/tests/test_clientmanager.py b/tests/test_clientmanager.py
index 400fadd..e088b35 100644
--- a/tests/test_clientmanager.py
+++ b/tests/test_clientmanager.py
@@ -30,8 +30,8 @@ def test_deserialize_on_creation():
def test_serialize_periodically():
- conf.override_all({conf.serialize_period_seconds.__name__: 0.1,
- conf.serialize_periodically.__name__: True})
+ conf.override({conf.serialize_period_seconds.__name__: 0.1,
+ conf.serialize_periodically.__name__: True})
serializer = MagicMock()
ClientManager(serializer)
diff --git a/tests/test_configuration.py b/tests/test_configuration.py
index 8cdac08..92fd020 100644
--- a/tests/test_configuration.py
+++ b/tests/test_configuration.py
@@ -27,7 +27,7 @@ def test_configuration_initialize():
@with_setup(_setup)
def test_configuration_initialize_lazy():
"""Tests that the initialize function has no behavior if the override dict is already initialized"""
- conf.override_all({})
+ conf.override({})
fsrw_class = MagicMock(side_effect=ValueError)
conf.initialize(fsrw_class)
@@ -56,7 +56,7 @@ def test_configuration_load_not_lazy():
read_lines = MagicMock(return_value=[json.dumps(config)])
fsrw.read_lines = read_lines
fsrw_class = MagicMock(return_value=fsrw)
- conf.override_all({conf.default_chart_type.__name__: "bar"})
+ conf.override({ conf.default_chart_type.__name__: "bar" })
conf.load(fsrw_class)
assert conf._overrides is not None
assert_equals(conf._overrides, config)
@@ -65,21 +65,9 @@ def test_configuration_load_not_lazy():
@with_setup(_setup)
def test_configuration_override():
- kpc = { 'username': 'U', 'password': 'P', 'url': 'L' }
- overrides = { conf.kernel_python_credentials.__name__: kpc }
- conf.override_all(overrides)
- conf.override(conf.execute_timeout_seconds.__name__, 1)
- assert_equals(conf._overrides, { conf.kernel_python_credentials.__name__: kpc,
- conf.execute_timeout_seconds.__name__: 1 })
- assert_equals(conf.execute_timeout_seconds(), 1)
- assert_equals(conf.kernel_python_credentials(), kpc)
-
-
-@with_setup(_setup)
-def test_configuration_override_all():
z = 1500
config = { conf.status_sleep_seconds.__name__: z }
- conf.override_all(config)
+ conf.override(config)
assert_equals(conf._overrides, config)
assert_equals(conf.status_sleep_seconds(), z)
@@ -88,7 +76,7 @@ def test_configuration_override_all():
def test_configuration_decorator():
def test_f():
return 0
- conf.override_all({test_f.__name__: -1})
+ conf.override({test_f.__name__: -1})
test_f_decorated = conf._override(test_f)
assert_not_equals(test_f_decorated(), test_f())
assert_equals(test_f_decorated(), -1)
\ No newline at end of file
diff --git a/tests/test_livysession.py b/tests/test_livysession.py
index 1706786..d406532 100644
--- a/tests/test_livysession.py
+++ b/tests/test_livysession.py
@@ -19,6 +19,7 @@ class DummyResponse:
def json(self):
return json.loads(self._json_text)
+ @property
def status_code(self):
return self._status_code
@@ -59,7 +60,7 @@ class TestLivySession:
@raises(AssertionError)
def test_constructor_throws_status_sleep_seconds(self):
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0,
"statement_sleep_seconds": 2,
"create_sql_context_timeout_seconds": 60
@@ -69,7 +70,7 @@ class TestLivySession:
@raises(AssertionError)
def test_constructor_throws_statement_sleep_seconds(self):
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 3,
"statement_sleep_seconds": 0,
"create_sql_context_timeout_seconds": 60
@@ -79,7 +80,7 @@ class TestLivySession:
@raises(AssertionError)
def test_constructor_throws_sql_create_timeout_seconds(self):
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 4,
"statement_sleep_seconds": 2,
"create_sql_context_timeout_seconds": 0
@@ -89,7 +90,7 @@ class TestLivySession:
@raises(ValueError)
def test_constructor_throws_invalid_session_sql_combo(self):
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 2,
"statement_sleep_seconds": 2,
"create_sql_context_timeout_seconds": 60
@@ -98,7 +99,7 @@ class TestLivySession:
conf.load()
def test_constructor_starts_with_existing_session(self):
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 4,
"statement_sleep_seconds": 2,
"create_sql_context_timeout_seconds": 60
@@ -111,7 +112,7 @@ class TestLivySession:
assert session.started_sql_context
def test_constructor_starts_with_no_session(self):
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 4,
"statement_sleep_seconds": 2,
"create_sql_context_timeout_seconds": 60
@@ -123,7 +124,7 @@ class TestLivySession:
assert not session.started_sql_context
def test_is_final_status(self):
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
@@ -141,7 +142,7 @@ class TestLivySession:
http_client = MagicMock()
http_client.post.return_value = DummyResponse(201, self.session_create_json)
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
@@ -160,7 +161,7 @@ class TestLivySession:
http_client = MagicMock()
http_client.post.return_value = DummyResponse(201, self.session_create_json)
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
@@ -179,7 +180,7 @@ class TestLivySession:
http_client = MagicMock()
http_client.post.return_value = DummyResponse(201, self.session_create_json)
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
@@ -196,7 +197,7 @@ class TestLivySession:
http_client = MagicMock()
http_client.post.return_value = DummyResponse(201, self.session_create_json)
http_client.get.return_value = DummyResponse(200, self.ready_sessions_json)
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
@@ -217,12 +218,12 @@ class TestLivySession:
DummyResponse(200, self.ready_sessions_json)]
http_client.get.side_effect = self._next_response_get
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
session = self._create_session(http_client=http_client)
- conf.override_all({})
+ conf.override({})
session.start()
@@ -240,7 +241,7 @@ class TestLivySession:
DummyResponse(200, self.error_sessions_json)]
http_client.get.side_effect = self._next_response_get
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.011,
"statement_sleep_seconds": 6000
})
@@ -260,7 +261,7 @@ class TestLivySession:
DummyResponse(200, self.ready_sessions_json)]
http_client.get.side_effect = self._next_response_get
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.011,
"statement_sleep_seconds": 6000
})
@@ -274,7 +275,7 @@ class TestLivySession:
def test_delete_session_when_active(self):
http_client = MagicMock()
http_client.post.return_value = DummyResponse(201, self.session_create_json)
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
@@ -290,7 +291,7 @@ class TestLivySession:
def test_delete_session_when_not_started(self):
http_client = MagicMock()
http_client.post.return_value = DummyResponse(201, self.session_create_json)
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
@@ -306,7 +307,7 @@ class TestLivySession:
def test_delete_session_when_dead_throws(self):
http_client = MagicMock()
http_client.post.return_value = DummyResponse(201, self.session_create_json)
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
@@ -325,7 +326,7 @@ class TestLivySession:
self.get_responses = [DummyResponse(200, self.running_statement_json),
DummyResponse(200, self.ready_statement_json)]
http_client.get.side_effect = self._next_response_get
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
@@ -355,7 +356,7 @@ class TestLivySession:
DummyResponse(200, self.ready_sessions_json),
DummyResponse(200, self.ready_statement_json)]
http_client.get.side_effect = self._next_response_get
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
@@ -391,7 +392,7 @@ class TestLivySession:
DummyResponse(200, self.ready_sessions_json),
DummyResponse(200, self.ready_statement_json)]
http_client.get.side_effect = self._next_response_get
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
@@ -420,7 +421,7 @@ class TestLivySession:
DummyResponse(200, self.ready_sessions_json),
DummyResponse(200, self.ready_statement_json)]
http_client.get.side_effect = self._next_response_get
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
@@ -448,7 +449,7 @@ class TestLivySession:
DummyResponse(200, self.running_statement_json),
DummyResponse(200, self.ready_statement_json)]
http_client.get.side_effect = self._next_response_get
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
@@ -466,7 +467,7 @@ class TestLivySession:
http_client = MagicMock()
http_client.connection_string = connection_string
kind = Constants.session_kind_spark
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
diff --git a/tests/test_remotesparkmagics.py b/tests/test_remotesparkmagics.py
index eaffb2e..e8f2d8f 100644
--- a/tests/test_remotesparkmagics.py
+++ b/tests/test_remotesparkmagics.py
@@ -14,7 +14,7 @@ shell = None
def _setup():
global magic, spark_controller, shell
- conf.override_all({})
+ conf.override({})
shell = MagicMock()
magic = RemoteSparkMagics(shell=None)
@@ -31,7 +31,7 @@ def _teardown():
@with_setup(_setup, _teardown)
def test_info_command_parses():
print_info_mock = MagicMock()
- magic._print_info = print_info_mock
+ magic._print_local_info = print_info_mock
command = "info"
magic.spark(command)
@@ -39,6 +39,18 @@ def test_info_command_parses():
print_info_mock.assert_called_once_with()
+@with_setup(_setup, _teardown)
+def test_info_endpoint_command_parses():
+ print_info_mock = MagicMock()
+ magic._print_endpoint_info = print_info_mock
+ command = "info conn_str"
+ spark_controller.get_all_sessions_endpoint_info = MagicMock(return_value=None)
+
+ magic.spark(command)
+
+ print_info_mock.assert_called_once_with(None)
+
+
@with_setup(_setup, _teardown)
def test_add_sessions_command_parses():
# Do not skip and python
@@ -70,9 +82,8 @@ def test_add_sessions_command_parses():
@with_setup(_setup, _teardown)
def test_add_sessions_command_extra_properties():
- conf.override_all({})
magic.spark("config {\"extra\": \"yes\"}")
- assert conf.session_configs() == {"extra": "yes"}
+ assert magic.properties == {"extra": "yes"}
add_sessions_mock = MagicMock()
spark_controller.add_session = add_sessions_mock
@@ -85,7 +96,6 @@ def test_add_sessions_command_extra_properties():
magic.spark(line)
add_sessions_mock.assert_called_once_with(name, connection_string, False, {"kind": "spark", "extra": "yes"})
- conf.load()
@with_setup(_setup, _teardown)
@@ -101,6 +111,17 @@ def test_delete_sessions_command_parses():
mock_method.assert_called_once_with(name)
+@with_setup(_setup, _teardown)
+def test_delete_sessions_command_parses():
+ mock_method = MagicMock()
+ spark_controller.delete_session_by_id = mock_method
+ line = "delete conn_str 7"
+
+ magic.spark(line)
+
+ mock_method.assert_called_once_with("conn_str", "7")
+
+
@with_setup(_setup, _teardown)
def test_cleanup_command_parses():
mock_method = MagicMock()
@@ -112,6 +133,17 @@ def test_cleanup_command_parses():
mock_method.assert_called_once_with()
+@with_setup(_setup, _teardown)
+def test_cleanup_endpoint_command_parses():
+ mock_method = MagicMock()
+ spark_controller.cleanup_endpoint = mock_method
+ line = "cleanup conn_str"
+
+ magic.spark(line)
+
+ mock_method.assert_called_once_with("conn_str")
+
+
@raises(ValueError)
@with_setup(_setup, _teardown)
def test_bad_command_throws_exception():
diff --git a/tests/test_sparkcontroller.py b/tests/test_sparkcontroller.py
index 4b825d0..e522404 100644
--- a/tests/test_sparkcontroller.py
+++ b/tests/test_sparkcontroller.py
@@ -1,5 +1,6 @@
from mock import MagicMock
from nose.tools import with_setup
+import json
from remotespark.livyclientlib.sparkcontroller import SparkController
@@ -8,6 +9,19 @@ client_factory = None
controller = None
+class DummyResponse:
+ def __init__(self, status_code, json_text):
+ self._status_code = status_code
+ self._json_text = json_text
+
+ def json(self):
+ return json.loads(self._json_text)
+
+ @property
+ def status_code(self):
+ return self._status_code
+
+
def _setup():
global client_manager, client_factory, controller
@@ -78,7 +92,7 @@ def test_cleanup():
def test_run_cell():
default_client = MagicMock()
chosen_client = MagicMock()
- default_client.execute = chosen_client.execute = MagicMock(return_value=(True,""))
+ default_client.execute = chosen_client.execute = MagicMock(return_value=(True, ""))
client_manager.get_any_client = MagicMock(return_value=default_client)
client_manager.get_client = MagicMock(return_value=chosen_client)
name = "session_name"
@@ -102,7 +116,63 @@ def test_run_cell():
controller.run_cell_hive(cell, None)
default_client.execute_hive.assert_called_with(cell)
+
@with_setup(_setup, _teardown)
def test_get_client_keys():
controller.get_client_keys()
client_manager.get_sessions_list.assert_called_once_with()
+
+
+@with_setup(_setup, _teardown)
+def test_get_all_sessions():
+ http_client = MagicMock()
+ http_client.get.return_value = DummyResponse(200, '{"from":0,"total":2,"sessions":[{"id":0,"state":"idle","kind":'
+ '"spark","log":[""]}, {"id":1,"state":"busy","kind":"spark","log"'
+ ':[""]}]}')
+ client_factory.create_http_client.return_value = http_client
+
+ sessions = controller.get_all_sessions_endpoint("conn_str")
+
+ assert len(sessions) == 2
+
+
+@with_setup(_setup, _teardown)
+def test_cleanup_endpoint():
+ s0 = MagicMock()
+ s1 = MagicMock()
+ controller.get_all_sessions_endpoint = MagicMock(return_value=[s0, s1])
+
+ controller.cleanup_endpoint("conn_str")
+
+ s0.delete.assert_called_once_with()
+ s1.delete.assert_called_once_with()
+
+
+@with_setup(_setup, _teardown)
+def test_delete_session_by_id_existent():
+ http_client = MagicMock()
+ http_client.get.return_value = DummyResponse(200, '{"id":0,"state":"starting","kind":"spark","log":[]}')
+ client_factory.create_http_client.return_value = http_client
+ session = MagicMock()
+ create_session_method = MagicMock(return_value=session)
+ client_factory.create_session = create_session_method
+
+ controller.delete_session_by_id("conn_str", "0")
+
+ create_session_method.assert_called_once_with("conn_str", {"kind": "spark"}, "0", False)
+ session.delete.assert_called_once_with()
+
+
+@with_setup(_setup, _teardown)
+def test_delete_session_by_id_non_existent():
+ http_client = MagicMock()
+ http_client.get.return_value = DummyResponse(404, '')
+ client_factory.create_http_client.return_value = http_client
+ session = MagicMock()
+ create_session_method = MagicMock(return_value=session)
+ client_factory.create_session = create_session_method
+
+ controller.delete_session_by_id("conn_str", "0")
+
+ assert len(create_session_method.mock_calls) == 0
+ assert len(session.delete.mock_calls) == 0
diff --git a/tests/test_sparkkernelbase.py b/tests/test_sparkkernelbase.py
index 01bf903..15a9355 100644
--- a/tests/test_sparkkernelbase.py
+++ b/tests/test_sparkkernelbase.py
@@ -54,7 +54,7 @@ def test_get_config():
url = "url"
config = {"kernel_python_credentials": {user_ev: usr, pass_ev: pwd, url_ev: url}}
- conf.override_all(config)
+ conf.override(config)
u, p, r = kernel._get_configuration()
@@ -67,7 +67,7 @@ def test_get_config():
@with_setup(_setup, _teardown)
def test_get_config_not_set():
- conf.override_all({})
+ conf.override({})
try:
kernel._get_configuration()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 5
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | anyio==4.9.0
argon2-cffi==23.1.0
argon2-cffi-bindings==21.2.0
arrow==1.3.0
async-lru==2.0.5
attrs==25.3.0
babel==2.17.0
beautifulsoup4==4.13.3
bleach==6.2.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
comm==0.2.2
decorator==5.2.1
defusedxml==0.7.1
exceptiongroup==1.2.2
fastjsonschema==2.21.1
fqdn==1.5.1
h11==0.14.0
httpcore==1.0.7
httpx==0.28.1
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
ipykernel==4.1.1
ipython==4.0.0
ipython-genutils==0.2.0
ipywidgets==7.8.5
isoduration==20.11.0
Jinja2==3.1.6
json5==0.10.0
jsonpointer==3.0.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
jupyter-events==0.12.0
jupyter-lsp==2.2.5
jupyter_client==8.6.3
jupyter_core==5.7.2
jupyter_server==2.15.0
jupyter_server_terminals==0.5.3
jupyterlab==4.1.5
jupyterlab_pygments==0.3.0
jupyterlab_server==2.27.3
jupyterlab_widgets==1.1.11
MarkupSafe==3.0.2
mistune==3.1.3
mock==5.2.0
narwhals==1.32.0
nbclient==0.10.2
nbconvert==7.16.6
nbformat==5.10.4
nose==1.3.7
notebook==7.1.3
notebook_shim==0.2.4
numpy==2.0.2
overrides==7.7.0
packaging==24.2
pandas==2.2.3
pandocfilters==1.5.1
pexpect==4.9.0
pickleshare==0.7.5
platformdirs==4.3.7
plotly==6.0.1
pluggy==1.5.0
prometheus_client==0.21.1
ptyprocess==0.7.0
pycparser==2.22
Pygments==2.19.1
pytest==8.3.5
python-dateutil==2.9.0.post0
python-json-logger==3.3.0
pytz==2025.2
PyYAML==6.0.2
pyzmq==26.3.0
referencing==0.36.2
-e git+https://github.com/jupyter-incubator/sparkmagic.git@7025cb5b607abde80d5dd8a701c40c3598801e9d#egg=remotespark
requests==2.32.3
rfc3339-validator==0.1.4
rfc3986-validator==0.1.1
rpds-py==0.24.0
Send2Trash==1.8.3
simplegeneric==0.8.1
six==1.17.0
sniffio==1.3.1
soupsieve==2.6
terminado==0.18.1
tinycss2==1.4.0
tomli==2.2.1
tornado==6.4.2
traitlets==5.14.3
types-python-dateutil==2.9.0.20241206
typing_extensions==4.13.0
tzdata==2025.2
uri-template==1.3.0
urllib3==2.3.0
webcolors==24.11.1
webencodings==0.5.1
websocket-client==1.8.0
widgetsnbextension==3.6.10
zipp==3.21.0
| name: sparkmagic
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- anyio==4.9.0
- argon2-cffi==23.1.0
- argon2-cffi-bindings==21.2.0
- arrow==1.3.0
- async-lru==2.0.5
- attrs==25.3.0
- babel==2.17.0
- beautifulsoup4==4.13.3
- bleach==6.2.0
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- comm==0.2.2
- decorator==5.2.1
- defusedxml==0.7.1
- exceptiongroup==1.2.2
- fastjsonschema==2.21.1
- fqdn==1.5.1
- h11==0.14.0
- httpcore==1.0.7
- httpx==0.28.1
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- ipykernel==4.1.1
- ipython==4.0.0
- ipython-genutils==0.2.0
- ipywidgets==7.8.5
- isoduration==20.11.0
- jinja2==3.1.6
- json5==0.10.0
- jsonpointer==3.0.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- jupyter-client==8.6.3
- jupyter-core==5.7.2
- jupyter-events==0.12.0
- jupyter-lsp==2.2.5
- jupyter-server==2.15.0
- jupyter-server-terminals==0.5.3
- jupyterlab==4.1.5
- jupyterlab-pygments==0.3.0
- jupyterlab-server==2.27.3
- jupyterlab-widgets==1.1.11
- markupsafe==3.0.2
- mistune==3.1.3
- mock==5.2.0
- narwhals==1.32.0
- nbclient==0.10.2
- nbconvert==7.16.6
- nbformat==5.10.4
- nose==1.3.7
- notebook==7.1.3
- notebook-shim==0.2.4
- numpy==2.0.2
- overrides==7.7.0
- packaging==24.2
- pandas==2.2.3
- pandocfilters==1.5.1
- pexpect==4.9.0
- pickleshare==0.7.5
- platformdirs==4.3.7
- plotly==6.0.1
- pluggy==1.5.0
- prometheus-client==0.21.1
- ptyprocess==0.7.0
- pycparser==2.22
- pygments==2.19.1
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- python-json-logger==3.3.0
- pytz==2025.2
- pyyaml==6.0.2
- pyzmq==26.3.0
- referencing==0.36.2
- requests==2.32.3
- rfc3339-validator==0.1.4
- rfc3986-validator==0.1.1
- rpds-py==0.24.0
- send2trash==1.8.3
- simplegeneric==0.8.1
- six==1.17.0
- sniffio==1.3.1
- soupsieve==2.6
- terminado==0.18.1
- tinycss2==1.4.0
- tomli==2.2.1
- tornado==6.4.2
- traitlets==5.14.3
- types-python-dateutil==2.9.0.20241206
- typing-extensions==4.13.0
- tzdata==2025.2
- uri-template==1.3.0
- urllib3==2.3.0
- webcolors==24.11.1
- webencodings==0.5.1
- websocket-client==1.8.0
- widgetsnbextension==3.6.10
- zipp==3.21.0
prefix: /opt/conda/envs/sparkmagic
| [
"tests/test_clientmanager.py::test_serialize_periodically",
"tests/test_configuration.py::test_configuration_initialize_lazy",
"tests/test_configuration.py::test_configuration_load_not_lazy",
"tests/test_configuration.py::test_configuration_override",
"tests/test_configuration.py::test_configuration_decorator",
"tests/test_sparkkernelbase.py::test_get_config",
"tests/test_sparkkernelbase.py::test_get_config_not_set"
]
| [
"tests/test_configuration.py::test_configuration_initialize",
"tests/test_remotesparkmagics.py::test_info_command_parses",
"tests/test_remotesparkmagics.py::test_info_endpoint_command_parses",
"tests/test_remotesparkmagics.py::test_add_sessions_command_parses",
"tests/test_remotesparkmagics.py::test_add_sessions_command_extra_properties",
"tests/test_remotesparkmagics.py::test_delete_sessions_command_parses",
"tests/test_remotesparkmagics.py::test_cleanup_command_parses",
"tests/test_remotesparkmagics.py::test_cleanup_endpoint_command_parses",
"tests/test_remotesparkmagics.py::test_bad_command_throws_exception",
"tests/test_remotesparkmagics.py::test_run_cell_command_parses",
"tests/test_remotesparkmagics.py::test_run_cell_command_writes_to_err",
"tests/test_remotesparkmagics.py::test_run_sql_command_parses",
"tests/test_remotesparkmagics.py::test_run_hive_command_parses",
"tests/test_remotesparkmagics.py::test_run_sql_command_returns_none_when_exception",
"tests/test_remotesparkmagics.py::test_run_hive_command_returns_none_when_exception",
"tests/test_remotesparkmagics.py::test_run_sql_command_stores_variable_in_user_ns",
"tests/test_sparkcontroller.py::test_add_session",
"tests/test_sparkcontroller.py::test_add_session_skip",
"tests/test_sparkcontroller.py::test_delete_session",
"tests/test_sparkcontroller.py::test_cleanup",
"tests/test_sparkcontroller.py::test_run_cell",
"tests/test_sparkcontroller.py::test_get_client_keys",
"tests/test_sparkcontroller.py::test_get_all_sessions",
"tests/test_sparkcontroller.py::test_cleanup_endpoint",
"tests/test_sparkcontroller.py::test_delete_session_by_id_existent",
"tests/test_sparkcontroller.py::test_delete_session_by_id_non_existent",
"tests/test_sparkkernelbase.py::test_do_execute_initializes_magics_if_not_run",
"tests/test_sparkkernelbase.py::test_call_spark",
"tests/test_sparkkernelbase.py::test_execute_throws_if_fatal_error_happened",
"tests/test_sparkkernelbase.py::test_execute_throws_if_fatal_error_happens_for_execution",
"tests/test_sparkkernelbase.py::test_call_spark_sql_new_line",
"tests/test_sparkkernelbase.py::test_call_spark_hive_new_line"
]
| [
"tests/test_clientmanager.py::test_get_client_throws_when_client_not_exists",
"tests/test_clientmanager.py::test_deserialize_on_creation",
"tests/test_clientmanager.py::test_get_client",
"tests/test_clientmanager.py::test_delete_client",
"tests/test_clientmanager.py::test_delete_client_throws_when_client_not_exists",
"tests/test_clientmanager.py::test_add_client_throws_when_client_exists",
"tests/test_clientmanager.py::test_client_names_returned",
"tests/test_clientmanager.py::test_get_any_client",
"tests/test_clientmanager.py::test_get_any_client_raises_exception_with_no_client",
"tests/test_clientmanager.py::test_get_any_client_raises_exception_with_two_clients",
"tests/test_clientmanager.py::test_clean_up",
"tests/test_clientmanager.py::test_clean_up_serializer",
"tests/test_configuration.py::test_configuration_load",
"tests/test_sparkkernelbase.py::test_initialize_magics",
"tests/test_sparkkernelbase.py::test_start_session",
"tests/test_sparkkernelbase.py::test_delete_session",
"tests/test_sparkkernelbase.py::test_shutdown_cleans_up"
]
| [
"tests/test_sparkkernelbase.py::test_set_config",
"tests/test_sparkkernelbase.py::test_magic_not_supported"
]
| Modified BSD License | 349 | [
"remotespark/remotesparkmagics.py",
"remotespark/utils/configuration.py",
"remotespark/livyclientlib/livyclientfactory.py",
"remotespark/livyclientlib/reliablehttpclient.py",
"remotespark/livyclientlib/sparkcontroller.py"
]
| [
"remotespark/remotesparkmagics.py",
"remotespark/utils/configuration.py",
"remotespark/livyclientlib/livyclientfactory.py",
"remotespark/livyclientlib/reliablehttpclient.py",
"remotespark/livyclientlib/sparkcontroller.py"
]
|
|
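
The row above records a sparkmagic configuration refactor: its test hunks rename `conf.override_all(dict)` to a single `conf.override(obj)` that replaces the whole overrides dict. A minimal, self-contained sketch of that override pattern, with names taken from the hunks (the real module also lazily loads overrides from a config file first, which this sketch omits):

import functools

_overrides = None  # the real module populates this lazily from config.json


def override(obj):
    """Replace the whole overrides dict (formerly `override_all`)."""
    global _overrides
    _overrides = obj


def _override(f):
    """Decorator: prefer an overridden value over the function's default."""
    @functools.wraps(f)
    def wrapper():
        if _overrides is not None and f.__name__ in _overrides:
            return _overrides[f.__name__]
        return f()
    return wrapper


@_override
def status_sleep_seconds():
    return 5  # default used when no override is present


override({"status_sleep_seconds": 0.01})
assert status_sleep_seconds() == 0.01
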
jupyter-incubator__sparkmagic-94 | a2ed74cab292c2a462d2080914c5b61ec064e448 | 2015-12-23 01:52:07 | a2ed74cab292c2a462d2080914c5b61ec064e448 | diff --git a/remotespark/remotesparkmagics.py b/remotespark/remotesparkmagics.py
index dda3e53..705954f 100644
--- a/remotespark/remotesparkmagics.py
+++ b/remotespark/remotesparkmagics.py
@@ -43,6 +43,8 @@ class RemoteSparkMagics(Magics):
except KeyError:
self.logger.error("Could not read env vars for serialization.")
+ self.properties = conf.session_configs()
+
self.logger.debug("Initialized spark magics.")
@magic_arguments()
@@ -120,7 +122,7 @@ class RemoteSparkMagics(Magics):
elif subcommand == "config":
# Would normally do " ".join(args.command[1:]) but parse_argstring removes quotes...
rest_of_line = user_input[7:]
- conf.override(conf.session_configs.__name__, json.loads(rest_of_line))
+ self.properties = json.loads(rest_of_line)
# add
elif subcommand == "add":
if len(args.command) != 4 and len(args.command) != 5:
@@ -135,7 +137,7 @@ class RemoteSparkMagics(Magics):
else:
skip = False
- properties = copy.deepcopy(conf.session_configs())
+ properties = copy.deepcopy(self.properties)
properties["kind"] = self._get_livy_kind(language)
self.spark_controller.add_session(name, connection_string, skip, properties)
@@ -198,8 +200,7 @@ class RemoteSparkMagics(Magics):
{}
Session configs:
{}
-""".format("\n".join(sessions_info), conf.session_configs()))
-
+""".format("\n".join(sessions_info), self.properties))
def _print_endpoint_info(self, info_sessions):
sessions_info = [" {}".format(i) for i in info_sessions]
diff --git a/remotespark/sparkkernelbase.py b/remotespark/sparkkernelbase.py
index 8adbb03..7b700fd 100644
--- a/remotespark/sparkkernelbase.py
+++ b/remotespark/sparkkernelbase.py
@@ -13,6 +13,11 @@ class SparkKernelBase(IPythonKernel):
config_command = "config"
sql_command = "sql"
hive_command = "hive"
+ info_command = "info"
+ delete_command = "delete"
+ clean_up_command = "cleanup"
+
+ force_flag = "f"
def __init__(self, implementation, implementation_version, language, language_version, language_info,
kernel_conf_name, session_language, client_name, **kwargs):
@@ -61,7 +66,7 @@ class SparkKernelBase(IPythonKernel):
restart_session = False
if self.session_started:
- if "f" not in flags:
+ if self.force_flag not in flags:
self._show_user_error("A session has already been started. In order to modify the Spark configura"
"tion, please provide the '-f' flag at the beginning of the config magic:\n"
"\te.g. `%config -f {}`\n\nNote that this will kill the current session and"
@@ -76,6 +81,33 @@ class SparkKernelBase(IPythonKernel):
return self._run_restarting_session(code_to_run, silent, store_history, user_expressions, allow_stdin,
restart_session)
+ elif subcommand == self.info_command:
+ code_to_run = "%spark info {}".format(self.connection_string)
+ return self._run_without_session(code_to_run, silent, store_history, user_expressions, allow_stdin)
+ elif subcommand == self.delete_command:
+ if self.force_flag not in flags:
+ self._show_user_error("The session you are trying to delete could be this kernel's session. In order "
+ "to delete this session, please provide the '-f' flag at the beginning of the "
+ "delete magic:\n\te.g. `%delete -f id`\n\nAll previously run commands in the "
+ "session will be lost.")
+ code_to_run = ""
+ else:
+ self.session_started = False
+ code_to_run = "%spark delete {} {}".format(self.connection_string, code_to_run)
+
+ return self._run_without_session(code_to_run, silent, store_history, user_expressions, allow_stdin)
+ elif subcommand == self.clean_up_command:
+ if self.force_flag not in flags:
+ self._show_user_error("The sessions you are trying to delete could be this kernel's session or other "
+ "people's sessions. In order to delete them, please provide the '-f' flag at the "
+ "beginning of the cleanup magic:\n\te.g. `%cleanup -f`\n\nAll previously run "
+ "commands in the sessions will be lost.")
+ code_to_run = ""
+ else:
+ self.session_started = False
+ code_to_run = "%spark cleanup {}".format(self.connection_string)
+
+ return self._run_without_session(code_to_run, silent, store_history, user_expressions, allow_stdin)
else:
self._show_user_error("Magic '{}' not supported.".format(subcommand))
return self._run_without_session("", silent, store_history, user_expressions, allow_stdin)
@@ -153,14 +185,21 @@ class SparkKernelBase(IPythonKernel):
split_code = code.split(None, 1)
subcommand = split_code[0].lower()
flags = []
- rest = split_code[1]
+ if len(split_code) > 1:
+ rest = split_code[1]
+ else:
+ rest = ""
# Get all flags
flag_split = rest.split(None, 1)
- while len(flag_split) >= 2 and flag_split[0].startswith("-"):
- flags.append(flag_split[0][1:].lower())
- rest = flag_split[1]
- flag_split = rest.split(None, 1)
+ while len(flag_split) >= 1 and flag_split[0].startswith("-"):
+ if len(flag_split) >= 2:
+ flags.append(flag_split[0][1:].lower())
+ rest = flag_split[1]
+ flag_split = rest.split(None, 1)
+ if len(flag_split) == 1:
+ flags.append(flag_split[0][1:].lower())
+ flag_split = [""]
# flags to lower
flags = [i.lower() for i in flags]
diff --git a/remotespark/utils/configuration.py b/remotespark/utils/configuration.py
index 5668901..df09e52 100644
--- a/remotespark/utils/configuration.py
+++ b/remotespark/utils/configuration.py
@@ -36,23 +36,16 @@ def load(fsrw_class = None):
overrides = {}
else:
overrides = json.loads(line)
- override_all(overrides)
+ override(overrides)
-def override_all(obj):
+def override(obj):
"""Given a dictionary representing the overrided defaults for this
configuration, initialize the global configuration."""
global _overrides
_overrides = obj
-def override(config, value):
- """Given a string representing a configuration and a value for that configuration,
- override the configuration. Initialize the overrided configuration beforehand."""
- initialize()
- _overrides[config] = value
-
-
def _override(f):
"""A decorator which first initializes the overrided configurations,
then checks the global overrided defaults for the given configuration,
| Expose livy endpoint management through wrapper kernel | jupyter-incubator/sparkmagic | diff --git a/tests/test_clientmanager.py b/tests/test_clientmanager.py
index 400fadd..e088b35 100644
--- a/tests/test_clientmanager.py
+++ b/tests/test_clientmanager.py
@@ -30,8 +30,8 @@ def test_deserialize_on_creation():
def test_serialize_periodically():
- conf.override_all({conf.serialize_period_seconds.__name__: 0.1,
- conf.serialize_periodically.__name__: True})
+ conf.override({conf.serialize_period_seconds.__name__: 0.1,
+ conf.serialize_periodically.__name__: True})
serializer = MagicMock()
ClientManager(serializer)
diff --git a/tests/test_configuration.py b/tests/test_configuration.py
index 8cdac08..92fd020 100644
--- a/tests/test_configuration.py
+++ b/tests/test_configuration.py
@@ -27,7 +27,7 @@ def test_configuration_initialize():
@with_setup(_setup)
def test_configuration_initialize_lazy():
"""Tests that the initialize function has no behavior if the override dict is already initialized"""
- conf.override_all({})
+ conf.override({})
fsrw_class = MagicMock(side_effect=ValueError)
conf.initialize(fsrw_class)
@@ -56,7 +56,7 @@ def test_configuration_load_not_lazy():
read_lines = MagicMock(return_value=[json.dumps(config)])
fsrw.read_lines = read_lines
fsrw_class = MagicMock(return_value=fsrw)
- conf.override_all({conf.default_chart_type.__name__: "bar"})
+ conf.override({ conf.default_chart_type.__name__: "bar" })
conf.load(fsrw_class)
assert conf._overrides is not None
assert_equals(conf._overrides, config)
@@ -65,21 +65,9 @@ def test_configuration_load_not_lazy():
@with_setup(_setup)
def test_configuration_override():
- kpc = { 'username': 'U', 'password': 'P', 'url': 'L' }
- overrides = { conf.kernel_python_credentials.__name__: kpc }
- conf.override_all(overrides)
- conf.override(conf.execute_timeout_seconds.__name__, 1)
- assert_equals(conf._overrides, { conf.kernel_python_credentials.__name__: kpc,
- conf.execute_timeout_seconds.__name__: 1 })
- assert_equals(conf.execute_timeout_seconds(), 1)
- assert_equals(conf.kernel_python_credentials(), kpc)
-
-
-@with_setup(_setup)
-def test_configuration_override_all():
z = 1500
config = { conf.status_sleep_seconds.__name__: z }
- conf.override_all(config)
+ conf.override(config)
assert_equals(conf._overrides, config)
assert_equals(conf.status_sleep_seconds(), z)
@@ -88,7 +76,7 @@ def test_configuration_override_all():
def test_configuration_decorator():
def test_f():
return 0
- conf.override_all({test_f.__name__: -1})
+ conf.override({test_f.__name__: -1})
test_f_decorated = conf._override(test_f)
assert_not_equals(test_f_decorated(), test_f())
assert_equals(test_f_decorated(), -1)
\ No newline at end of file
diff --git a/tests/test_livysession.py b/tests/test_livysession.py
index da9168f..d406532 100644
--- a/tests/test_livysession.py
+++ b/tests/test_livysession.py
@@ -60,7 +60,7 @@ class TestLivySession:
@raises(AssertionError)
def test_constructor_throws_status_sleep_seconds(self):
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0,
"statement_sleep_seconds": 2,
"create_sql_context_timeout_seconds": 60
@@ -70,7 +70,7 @@ class TestLivySession:
@raises(AssertionError)
def test_constructor_throws_statement_sleep_seconds(self):
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 3,
"statement_sleep_seconds": 0,
"create_sql_context_timeout_seconds": 60
@@ -80,7 +80,7 @@ class TestLivySession:
@raises(AssertionError)
def test_constructor_throws_sql_create_timeout_seconds(self):
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 4,
"statement_sleep_seconds": 2,
"create_sql_context_timeout_seconds": 0
@@ -90,7 +90,7 @@ class TestLivySession:
@raises(ValueError)
def test_constructor_throws_invalid_session_sql_combo(self):
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 2,
"statement_sleep_seconds": 2,
"create_sql_context_timeout_seconds": 60
@@ -99,7 +99,7 @@ class TestLivySession:
conf.load()
def test_constructor_starts_with_existing_session(self):
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 4,
"statement_sleep_seconds": 2,
"create_sql_context_timeout_seconds": 60
@@ -112,7 +112,7 @@ class TestLivySession:
assert session.started_sql_context
def test_constructor_starts_with_no_session(self):
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 4,
"statement_sleep_seconds": 2,
"create_sql_context_timeout_seconds": 60
@@ -124,7 +124,7 @@ class TestLivySession:
assert not session.started_sql_context
def test_is_final_status(self):
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
@@ -142,7 +142,7 @@ class TestLivySession:
http_client = MagicMock()
http_client.post.return_value = DummyResponse(201, self.session_create_json)
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
@@ -161,7 +161,7 @@ class TestLivySession:
http_client = MagicMock()
http_client.post.return_value = DummyResponse(201, self.session_create_json)
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
@@ -180,7 +180,7 @@ class TestLivySession:
http_client = MagicMock()
http_client.post.return_value = DummyResponse(201, self.session_create_json)
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
@@ -197,7 +197,7 @@ class TestLivySession:
http_client = MagicMock()
http_client.post.return_value = DummyResponse(201, self.session_create_json)
http_client.get.return_value = DummyResponse(200, self.ready_sessions_json)
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
@@ -218,12 +218,12 @@ class TestLivySession:
DummyResponse(200, self.ready_sessions_json)]
http_client.get.side_effect = self._next_response_get
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
session = self._create_session(http_client=http_client)
- conf.override_all({})
+ conf.override({})
session.start()
@@ -241,7 +241,7 @@ class TestLivySession:
DummyResponse(200, self.error_sessions_json)]
http_client.get.side_effect = self._next_response_get
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.011,
"statement_sleep_seconds": 6000
})
@@ -261,7 +261,7 @@ class TestLivySession:
DummyResponse(200, self.ready_sessions_json)]
http_client.get.side_effect = self._next_response_get
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.011,
"statement_sleep_seconds": 6000
})
@@ -275,7 +275,7 @@ class TestLivySession:
def test_delete_session_when_active(self):
http_client = MagicMock()
http_client.post.return_value = DummyResponse(201, self.session_create_json)
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
@@ -291,7 +291,7 @@ class TestLivySession:
def test_delete_session_when_not_started(self):
http_client = MagicMock()
http_client.post.return_value = DummyResponse(201, self.session_create_json)
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
@@ -307,7 +307,7 @@ class TestLivySession:
def test_delete_session_when_dead_throws(self):
http_client = MagicMock()
http_client.post.return_value = DummyResponse(201, self.session_create_json)
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
@@ -326,7 +326,7 @@ class TestLivySession:
self.get_responses = [DummyResponse(200, self.running_statement_json),
DummyResponse(200, self.ready_statement_json)]
http_client.get.side_effect = self._next_response_get
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
@@ -356,7 +356,7 @@ class TestLivySession:
DummyResponse(200, self.ready_sessions_json),
DummyResponse(200, self.ready_statement_json)]
http_client.get.side_effect = self._next_response_get
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
@@ -392,7 +392,7 @@ class TestLivySession:
DummyResponse(200, self.ready_sessions_json),
DummyResponse(200, self.ready_statement_json)]
http_client.get.side_effect = self._next_response_get
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
@@ -421,7 +421,7 @@ class TestLivySession:
DummyResponse(200, self.ready_sessions_json),
DummyResponse(200, self.ready_statement_json)]
http_client.get.side_effect = self._next_response_get
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
@@ -449,7 +449,7 @@ class TestLivySession:
DummyResponse(200, self.running_statement_json),
DummyResponse(200, self.ready_statement_json)]
http_client.get.side_effect = self._next_response_get
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
@@ -467,7 +467,7 @@ class TestLivySession:
http_client = MagicMock()
http_client.connection_string = connection_string
kind = Constants.session_kind_spark
- conf.override_all({
+ conf.override({
"status_sleep_seconds": 0.01,
"statement_sleep_seconds": 0.01
})
diff --git a/tests/test_remotesparkmagics.py b/tests/test_remotesparkmagics.py
index b8f966f..e8f2d8f 100644
--- a/tests/test_remotesparkmagics.py
+++ b/tests/test_remotesparkmagics.py
@@ -14,7 +14,7 @@ shell = None
def _setup():
global magic, spark_controller, shell
- conf.override_all({})
+ conf.override({})
shell = MagicMock()
magic = RemoteSparkMagics(shell=None)
@@ -82,9 +82,8 @@ def test_add_sessions_command_parses():
@with_setup(_setup, _teardown)
def test_add_sessions_command_extra_properties():
- conf.override_all({})
magic.spark("config {\"extra\": \"yes\"}")
- assert conf.session_configs() == {"extra": "yes"}
+ assert magic.properties == {"extra": "yes"}
add_sessions_mock = MagicMock()
spark_controller.add_session = add_sessions_mock
@@ -97,7 +96,6 @@ def test_add_sessions_command_extra_properties():
magic.spark(line)
add_sessions_mock.assert_called_once_with(name, connection_string, False, {"kind": "spark", "extra": "yes"})
- conf.load()
@with_setup(_setup, _teardown)
diff --git a/tests/test_sparkkernelbase.py b/tests/test_sparkkernelbase.py
index 01bf903..78d43d4 100644
--- a/tests/test_sparkkernelbase.py
+++ b/tests/test_sparkkernelbase.py
@@ -54,7 +54,7 @@ def test_get_config():
url = "url"
config = {"kernel_python_credentials": {user_ev: usr, pass_ev: pwd, url_ev: url}}
- conf.override_all(config)
+ conf.override(config)
u, p, r = kernel._get_configuration()
@@ -67,7 +67,7 @@ def test_get_config():
@with_setup(_setup, _teardown)
def test_get_config_not_set():
- conf.override_all({})
+ conf.override({})
try:
kernel._get_configuration()
@@ -172,6 +172,82 @@ def test_magic_not_supported():
kernel.do_execute(code, False)
+@with_setup(_setup, _teardown)
+def test_info():
+ code = "%info"
+
+ # Call method
+ kernel.do_execute(code, False)
+
+ # Assertions
+ assert not kernel.session_started
+ assert call("%spark info {}".format(conn_str), False, True, None, False) in execute_cell_mock.mock_calls
+
+
+@with_setup(_setup, _teardown)
+def test_delete_force():
+ code = "%delete -f 9"
+ kernel.session_started = True
+ user_error = MagicMock()
+ kernel._show_user_error = user_error
+
+ # Call method
+ kernel.do_execute(code, False)
+
+ # Assertions
+ assert not kernel.session_started
+ assert call("%spark delete {} 9".format(conn_str), False, True, None, False) in execute_cell_mock.mock_calls
+ assert len(user_error.mock_calls) == 0
+
+
+@with_setup(_setup, _teardown)
+def test_delete_not_force():
+ code = "%delete 9"
+ kernel.session_started = True
+ user_error = MagicMock()
+ kernel._show_user_error = user_error
+
+ # Call method
+ kernel.do_execute(code, False)
+
+ # Assertions
+ assert kernel.session_started
+ assert not call("%spark delete {} 9".format(conn_str), False, True, None, False) in execute_cell_mock.mock_calls
+ assert len(user_error.mock_calls) == 1
+
+
+@with_setup(_setup, _teardown)
+def test_cleanup_force():
+ code = "%cleanup -f"
+ kernel.session_started = True
+ user_error = MagicMock()
+ kernel._show_user_error = user_error
+
+ # Call method
+ kernel.do_execute(code, False)
+
+ # Assertions
+ assert not kernel.session_started
+ assert call("%spark cleanup {}".format(conn_str), False, True, None, False) in execute_cell_mock.mock_calls
+ assert len(user_error.mock_calls) == 0
+
+
+@with_setup(_setup, _teardown)
+def test_cleanup_not_force():
+ code = "%cleanup"
+ kernel.session_started = True
+ user_error = MagicMock()
+ kernel._show_user_error = user_error
+
+ # Call method
+ kernel.do_execute(code, False)
+
+ # Assertions
+ assert kernel.session_started
+ assert not call("%spark cleanup {}".format(conn_str), False, True, None, False) in execute_cell_mock.mock_calls
+ assert len(user_error.mock_calls) == 1
+
+
@with_setup(_setup, _teardown)
def test_call_spark():
# Set up
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 3
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"mkdir ~/.sparkmagic",
"cp remotespark/default_config.json ~/.sparkmagic/config.json"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | anyio==4.9.0
argon2-cffi==23.1.0
argon2-cffi-bindings==21.2.0
arrow==1.3.0
async-lru==2.0.5
attrs==25.3.0
babel==2.17.0
beautifulsoup4==4.13.3
bleach==6.2.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
comm==0.2.2
decorator==5.2.1
defusedxml==0.7.1
exceptiongroup==1.2.2
fastjsonschema==2.21.1
fqdn==1.5.1
h11==0.14.0
httpcore==1.0.7
httpx==0.28.1
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
ipykernel==4.1.1
ipython==4.0.0
ipython-genutils==0.2.0
ipywidgets==7.8.5
isoduration==20.11.0
Jinja2==3.1.6
json5==0.10.0
jsonpointer==3.0.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
jupyter-events==0.12.0
jupyter-lsp==2.2.5
jupyter_client==8.6.3
jupyter_core==5.7.2
jupyter_server==2.15.0
jupyter_server_terminals==0.5.3
jupyterlab==4.1.5
jupyterlab_pygments==0.3.0
jupyterlab_server==2.27.3
jupyterlab_widgets==1.1.11
MarkupSafe==3.0.2
mistune==3.1.3
mock==5.2.0
narwhals==1.32.0
nbclient==0.10.2
nbconvert==7.16.6
nbformat==5.10.4
nose==1.3.7
notebook==7.1.3
notebook_shim==0.2.4
numpy==2.0.2
overrides==7.7.0
packaging==24.2
pandas==2.2.3
pandocfilters==1.5.1
pexpect==4.9.0
pickleshare==0.7.5
platformdirs==4.3.7
plotly==6.0.1
pluggy==1.5.0
prometheus_client==0.21.1
ptyprocess==0.7.0
pycparser==2.22
Pygments==2.19.1
pytest==8.3.5
python-dateutil==2.9.0.post0
python-json-logger==3.3.0
pytz==2025.2
PyYAML==6.0.2
pyzmq==26.3.0
referencing==0.36.2
-e git+https://github.com/jupyter-incubator/sparkmagic.git@a2ed74cab292c2a462d2080914c5b61ec064e448#egg=remotespark
requests==2.32.3
rfc3339-validator==0.1.4
rfc3986-validator==0.1.1
rpds-py==0.24.0
Send2Trash==1.8.3
simplegeneric==0.8.1
six==1.17.0
sniffio==1.3.1
soupsieve==2.6
terminado==0.18.1
tinycss2==1.4.0
tomli==2.2.1
tornado==6.4.2
traitlets==5.14.3
types-python-dateutil==2.9.0.20241206
typing_extensions==4.13.0
tzdata==2025.2
uri-template==1.3.0
urllib3==2.3.0
webcolors==24.11.1
webencodings==0.5.1
websocket-client==1.8.0
widgetsnbextension==3.6.10
zipp==3.21.0
| name: sparkmagic
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- anyio==4.9.0
- argon2-cffi==23.1.0
- argon2-cffi-bindings==21.2.0
- arrow==1.3.0
- async-lru==2.0.5
- attrs==25.3.0
- babel==2.17.0
- beautifulsoup4==4.13.3
- bleach==6.2.0
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- comm==0.2.2
- decorator==5.2.1
- defusedxml==0.7.1
- exceptiongroup==1.2.2
- fastjsonschema==2.21.1
- fqdn==1.5.1
- h11==0.14.0
- httpcore==1.0.7
- httpx==0.28.1
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- ipykernel==4.1.1
- ipython==4.0.0
- ipython-genutils==0.2.0
- ipywidgets==7.8.5
- isoduration==20.11.0
- jinja2==3.1.6
- json5==0.10.0
- jsonpointer==3.0.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- jupyter-client==8.6.3
- jupyter-core==5.7.2
- jupyter-events==0.12.0
- jupyter-lsp==2.2.5
- jupyter-server==2.15.0
- jupyter-server-terminals==0.5.3
- jupyterlab==4.1.5
- jupyterlab-pygments==0.3.0
- jupyterlab-server==2.27.3
- jupyterlab-widgets==1.1.11
- markupsafe==3.0.2
- mistune==3.1.3
- mock==5.2.0
- narwhals==1.32.0
- nbclient==0.10.2
- nbconvert==7.16.6
- nbformat==5.10.4
- nose==1.3.7
- notebook==7.1.3
- notebook-shim==0.2.4
- numpy==2.0.2
- overrides==7.7.0
- packaging==24.2
- pandas==2.2.3
- pandocfilters==1.5.1
- pexpect==4.9.0
- pickleshare==0.7.5
- platformdirs==4.3.7
- plotly==6.0.1
- pluggy==1.5.0
- prometheus-client==0.21.1
- ptyprocess==0.7.0
- pycparser==2.22
- pygments==2.19.1
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- python-json-logger==3.3.0
- pytz==2025.2
- pyyaml==6.0.2
- pyzmq==26.3.0
- referencing==0.36.2
- requests==2.32.3
- rfc3339-validator==0.1.4
- rfc3986-validator==0.1.1
- rpds-py==0.24.0
- send2trash==1.8.3
- simplegeneric==0.8.1
- six==1.17.0
- sniffio==1.3.1
- soupsieve==2.6
- terminado==0.18.1
- tinycss2==1.4.0
- tomli==2.2.1
- tornado==6.4.2
- traitlets==5.14.3
- types-python-dateutil==2.9.0.20241206
- typing-extensions==4.13.0
- tzdata==2025.2
- uri-template==1.3.0
- urllib3==2.3.0
- webcolors==24.11.1
- webencodings==0.5.1
- websocket-client==1.8.0
- widgetsnbextension==3.6.10
- zipp==3.21.0
prefix: /opt/conda/envs/sparkmagic
| [
"tests/test_clientmanager.py::test_serialize_periodically",
"tests/test_configuration.py::test_configuration_initialize_lazy",
"tests/test_configuration.py::test_configuration_load_not_lazy",
"tests/test_configuration.py::test_configuration_override",
"tests/test_configuration.py::test_configuration_decorator",
"tests/test_sparkkernelbase.py::test_get_config",
"tests/test_sparkkernelbase.py::test_get_config_not_set"
]
| [
"tests/test_configuration.py::test_configuration_initialize",
"tests/test_remotesparkmagics.py::test_info_command_parses",
"tests/test_remotesparkmagics.py::test_info_endpoint_command_parses",
"tests/test_remotesparkmagics.py::test_add_sessions_command_parses",
"tests/test_remotesparkmagics.py::test_add_sessions_command_extra_properties",
"tests/test_remotesparkmagics.py::test_delete_sessions_command_parses",
"tests/test_remotesparkmagics.py::test_cleanup_command_parses",
"tests/test_remotesparkmagics.py::test_cleanup_endpoint_command_parses",
"tests/test_remotesparkmagics.py::test_bad_command_throws_exception",
"tests/test_remotesparkmagics.py::test_run_cell_command_parses",
"tests/test_remotesparkmagics.py::test_run_cell_command_writes_to_err",
"tests/test_remotesparkmagics.py::test_run_sql_command_parses",
"tests/test_remotesparkmagics.py::test_run_hive_command_parses",
"tests/test_remotesparkmagics.py::test_run_sql_command_returns_none_when_exception",
"tests/test_remotesparkmagics.py::test_run_hive_command_returns_none_when_exception",
"tests/test_remotesparkmagics.py::test_run_sql_command_stores_variable_in_user_ns",
"tests/test_sparkkernelbase.py::test_do_execute_initializes_magics_if_not_run",
"tests/test_sparkkernelbase.py::test_info",
"tests/test_sparkkernelbase.py::test_delete_force",
"tests/test_sparkkernelbase.py::test_cleanup_force",
"tests/test_sparkkernelbase.py::test_cleanup_not_force",
"tests/test_sparkkernelbase.py::test_call_spark",
"tests/test_sparkkernelbase.py::test_execute_throws_if_fatal_error_happened",
"tests/test_sparkkernelbase.py::test_execute_throws_if_fatal_error_happens_for_execution",
"tests/test_sparkkernelbase.py::test_call_spark_sql_new_line",
"tests/test_sparkkernelbase.py::test_call_spark_hive_new_line"
]
| [
"tests/test_clientmanager.py::test_get_client_throws_when_client_not_exists",
"tests/test_clientmanager.py::test_deserialize_on_creation",
"tests/test_clientmanager.py::test_get_client",
"tests/test_clientmanager.py::test_delete_client",
"tests/test_clientmanager.py::test_delete_client_throws_when_client_not_exists",
"tests/test_clientmanager.py::test_add_client_throws_when_client_exists",
"tests/test_clientmanager.py::test_client_names_returned",
"tests/test_clientmanager.py::test_get_any_client",
"tests/test_clientmanager.py::test_get_any_client_raises_exception_with_no_client",
"tests/test_clientmanager.py::test_get_any_client_raises_exception_with_two_clients",
"tests/test_clientmanager.py::test_clean_up",
"tests/test_clientmanager.py::test_clean_up_serializer",
"tests/test_configuration.py::test_configuration_load",
"tests/test_sparkkernelbase.py::test_initialize_magics",
"tests/test_sparkkernelbase.py::test_start_session",
"tests/test_sparkkernelbase.py::test_delete_session",
"tests/test_sparkkernelbase.py::test_shutdown_cleans_up"
]
| [
"tests/test_sparkkernelbase.py::test_set_config",
"tests/test_sparkkernelbase.py::test_magic_not_supported",
"tests/test_sparkkernelbase.py::test_delete_not_force"
]
| Modified BSD License | 350 | [
"remotespark/utils/configuration.py",
"remotespark/sparkkernelbase.py",
"remotespark/remotesparkmagics.py"
]
| [
"remotespark/utils/configuration.py",
"remotespark/sparkkernelbase.py",
"remotespark/remotesparkmagics.py"
]
|
|
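
The sparkmagic-94 row above adds `%info`, `%delete`, and `%cleanup` magics to the wrapper kernel, each gated on a `-f` force flag, and rewrites the flag-splitting loop so a lone trailing flag (as in `%cleanup -f`) is still captured. A rough sketch of that loop; the function name here is illustrative (in the patch this logic is inline in `SparkKernelBase`, and in the lone-flag case the patch leaves `rest` holding the raw flag text rather than clearing it):

def parse_magic(code):
    """Split 'subcommand -f rest...' into (subcommand, flags, rest)."""
    split_code = code.split(None, 1)
    subcommand = split_code[0].lower()
    rest = split_code[1] if len(split_code) > 1 else ""
    flags = []
    flag_split = rest.split(None, 1)
    while flag_split and flag_split[0].startswith("-"):
        if len(flag_split) >= 2:
            flags.append(flag_split[0][1:].lower())
            rest = flag_split[1]
            flag_split = rest.split(None, 1)
        else:  # a lone trailing flag, e.g. "cleanup -f"
            flags.append(flag_split[0][1:].lower())
            rest = ""
            flag_split = []
    return subcommand, flags, rest


assert parse_magic("cleanup -f") == ("cleanup", ["f"], "")
assert parse_magic("delete -f 9") == ("delete", ["f"], "9")
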
mogproject__color-ssh-11 | 9adb19916b0205fd6a88beddcd8669114edc449c | 2015-12-23 11:42:42 | 8ef23299ceb4e19e5d33562edb0066686eead51d | diff --git a/src/color_ssh/__init__.py b/src/color_ssh/__init__.py
index b794fd4..df9144c 100644
--- a/src/color_ssh/__init__.py
+++ b/src/color_ssh/__init__.py
@@ -1,1 +1,1 @@
-__version__ = '0.1.0'
+__version__ = '0.1.1'
diff --git a/src/color_ssh/color_cat.py b/src/color_ssh/color_cat.py
index a9d1412..036ced8 100644
--- a/src/color_ssh/color_cat.py
+++ b/src/color_ssh/color_cat.py
@@ -68,8 +68,9 @@ def main(argv=sys.argv, stdin=io2bytes(sys.stdin), stdout=io2bytes(sys.stdout),
"""
setting = Setting().parse_args(argv, stdout)
- # Note: Do not use 'fileinput' module because it causes a buffering problem.
- try:
+ @exception_handler(lambda e: stderr.write(('%s: %s\n' % (e.__class__.__name__, e)).encode('utf-8', 'ignore')))
+ def f():
+ # Note: Do not use 'fileinput' module because it causes a buffering problem.
for path in setting.paths:
fh = stdin if path is None else io.open(path, 'rb', 0)
try:
@@ -79,9 +80,6 @@ def main(argv=sys.argv, stdin=io2bytes(sys.stdin), stdout=io2bytes(sys.stdout),
finally:
if fh is not stdin:
fh.close()
+ return 0
- except Exception as e:
- stderr.write(('%s: %s\n' % (e.__class__.__name__, e)).encode('utf-8', 'ignore'))
- return 1
-
- return 0
+ return f()
diff --git a/src/color_ssh/color_ssh.py b/src/color_ssh/color_ssh.py
index f8d82d8..1d2e798 100644
--- a/src/color_ssh/color_ssh.py
+++ b/src/color_ssh/color_ssh.py
@@ -106,7 +106,12 @@ def run_task(args):
prefix = ['color-cat', '-l', label]
- try:
+ def exc_func(e):
+ msg = '%s: %s\nlabel=%s, command=%s\n' % (e.__class__.__name__, e, label, command)
+ stderr.write(msg.encode('utf-8', 'ignore'))
+
+ @exception_handler(exc_func)
+ def f():
proc_stdout = subprocess.Popen(prefix, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr)
proc_stderr = subprocess.Popen(prefix + ['-s', '+'], stdin=subprocess.PIPE, stdout=stderr, stderr=stderr)
ret = subprocess.call(command, stdin=None, stdout=proc_stdout.stdin, stderr=proc_stderr.stdin)
@@ -116,11 +121,9 @@ def run_task(args):
proc_stdout.wait()
proc_stderr.wait()
- except Exception as e:
- msg = '%s: %s\nlabel=%s, command=%s\n' % (e.__class__.__name__, e, label, command)
- stderr.write(msg.encode('utf-8', 'ignore'))
- return 1
- return ret
+ return ret
+
+ return f()
def main(argv=sys.argv, stdout=io2bytes(sys.stdout), stderr=io2bytes(sys.stderr)):
@@ -128,7 +131,8 @@ def main(argv=sys.argv, stdout=io2bytes(sys.stdout), stderr=io2bytes(sys.stderr)
Main function
"""
- try:
+ @exception_handler(lambda e: stderr.write(('%s: %s\n' % (e.__class__.__name__, e)).encode('utf-8', 'ignore')))
+ def f():
setting = Setting().parse_args(argv, stdout)
n = min(len(setting.tasks), setting.parallelism)
if n <= 1:
@@ -136,9 +140,6 @@ def main(argv=sys.argv, stdout=io2bytes(sys.stdout), stderr=io2bytes(sys.stderr)
else:
pool = Pool(n)
ret = pool.map(run_task, setting.tasks)
- except Exception as e:
- msg = '%s: %s\n' % (e.__class__.__name__, e)
- stderr.write(msg.encode('utf-8', 'ignore'))
- return 1
+ return max(ret)
- return max(ret)
+ return f()
diff --git a/src/color_ssh/util/util.py b/src/color_ssh/util/util.py
index e1cdb8a..8bdea63 100644
--- a/src/color_ssh/util/util.py
+++ b/src/color_ssh/util/util.py
@@ -3,7 +3,7 @@ from __future__ import division, print_function, absolute_import, unicode_litera
import sys
import os
-__all__ = ['PY3', 'arg2bytes', 'io2bytes', 'distribute']
+__all__ = ['PY3', 'arg2bytes', 'io2bytes', 'distribute', 'exception_handler']
PY3 = sys.version_info >= (3,)
@@ -37,3 +37,24 @@ def distribute(num_workers, tasks):
ret.append(tasks[j:j + k])
j += k
return ret
+
+
+#
+# Decorators
+#
+def exception_handler(exception_func):
+ def f(func):
+ import functools
+
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except KeyboardInterrupt:
+ return 130
+ except Exception as e:
+ exception_func(e)
+ return 1
+
+ return wrapper
+ return f
| Kill all processes when interrupted by keyboard | mogproject/color-ssh | diff --git a/tests/color_ssh/util/__init__.py b/tests/color_ssh/util/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/color_ssh/util/test_util.py b/tests/color_ssh/util/test_util.py
new file mode 100644
index 0000000..50082dc
--- /dev/null
+++ b/tests/color_ssh/util/test_util.py
@@ -0,0 +1,13 @@
+from __future__ import division, print_function, absolute_import, unicode_literals
+
+from mog_commons.unittest import TestCase
+from color_ssh.util.util import exception_handler
+
+
+class TestUtil(TestCase):
+ def test_exception_handler(self):
+ @exception_handler(lambda e: e)
+ def f():
+ raise KeyboardInterrupt
+
+ self.assertEqual(f(), 130)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 3
},
"num_modified_files": 4
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pep8",
"coverage",
"six",
"mog-commons"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
-e git+https://github.com/mogproject/color-ssh.git@9adb19916b0205fd6a88beddcd8669114edc449c#egg=color_ssh
coverage==6.2
importlib-metadata==4.8.3
iniconfig==1.1.1
Jinja2==3.0.3
MarkupSafe==2.0.1
mog-commons==0.2.3
packaging==21.3
pep8==1.7.1
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: color-ssh
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jinja2==3.0.3
- markupsafe==2.0.1
- mog-commons==0.2.3
- packaging==21.3
- pep8==1.7.1
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/color-ssh
| [
"tests/color_ssh/util/test_util.py::TestUtil::test_exception_handler"
]
| []
| []
| []
| null | 351 | [
"src/color_ssh/util/util.py",
"src/color_ssh/color_ssh.py",
"src/color_ssh/color_cat.py",
"src/color_ssh/__init__.py"
]
| [
"src/color_ssh/util/util.py",
"src/color_ssh/color_ssh.py",
"src/color_ssh/color_cat.py",
"src/color_ssh/__init__.py"
]
|
|
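
The color-ssh-11 row above introduces an `exception_handler` decorator so that a `KeyboardInterrupt` anywhere in `color-cat` or `color-ssh` maps to exit status 130 (128 + SIGINT) instead of a traceback. The decorator as it appears in the patch, lightly reformatted (the import is hoisted to module level) and followed by a usage example matching the added test:

import functools


def exception_handler(exception_func):
    def f(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except KeyboardInterrupt:
                return 130  # conventional exit code for SIGINT
            except Exception as e:
                exception_func(e)  # e.g. format and write to stderr
                return 1
        return wrapper
    return f


@exception_handler(lambda e: e)
def interrupted():
    raise KeyboardInterrupt


assert interrupted() == 130
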
mogproject__color-ssh-12 | b39783565319ee50b34988c29193f02a90122e2c | 2015-12-26 06:06:56 | 8ef23299ceb4e19e5d33562edb0066686eead51d | diff --git a/src/color_ssh/__init__.py b/src/color_ssh/__init__.py
index df9144c..10939f0 100644
--- a/src/color_ssh/__init__.py
+++ b/src/color_ssh/__init__.py
@@ -1,1 +1,1 @@
-__version__ = '0.1.1'
+__version__ = '0.1.2'
diff --git a/src/color_ssh/color_ssh.py b/src/color_ssh/color_ssh.py
index 1d2e798..e575962 100644
--- a/src/color_ssh/color_ssh.py
+++ b/src/color_ssh/color_ssh.py
@@ -4,6 +4,7 @@ import sys
import io
import shlex
import subprocess
+import re
from optparse import OptionParser
from multiprocessing.pool import Pool
from color_ssh.util.util import *
@@ -64,20 +65,25 @@ class Setting(object):
stdout.write(arg2bytes(parser.format_help().encode('utf-8')))
parser.exit(2)
- prefix = shlex.split(option.ssh)
-
if not hosts:
hosts = args[:1]
del args[0]
- # distribute args
+ # parse hosts
+ parsed_hosts = [self._parse_host(h) for h in hosts]
+
+ tasks = []
if option.distribute:
+ # distribute args
dist_prefix = shlex.split(option.distribute)
d = distribute(len(hosts), args)
- tasks = [(option.label or self._extract_label(host),
- prefix + [host] + dist_prefix + d[i]) for i, host in enumerate(hosts) if d[i]]
+ for i, (user, host, port) in enumerate(parsed_hosts):
+ if d[i]:
+ label = option.label or host
+ tasks.append((label, self._ssh_args(option.ssh, user, host, port) + dist_prefix + d[i]))
else:
- tasks = [(option.label or self._extract_label(host), prefix + [host] + args) for host in hosts]
+ for user, host, port in parsed_hosts:
+ tasks.append((option.label or host, self._ssh_args(option.ssh, user, host, port) + args))
self.parallelism = option.parallelism
self.tasks = tasks
@@ -93,8 +99,20 @@ class Setting(object):
return list(filter(lambda x: x, (line.strip() for line in lines)))
@staticmethod
- def _extract_label(host):
- return host.rsplit('@', 1)[-1]
+ def _parse_host(s):
+ """
+ :param s: string : [user@]host[:port]
+ :return: tuple of (user, host, port)
+ """
+ ret = re.match('^(?:([^:@]+)@)?([^:@]+)(?::(\d+))?$', s)
+ if not ret:
+ raise ValueError('Illegal format: %s' % s)
+ return ret.groups()
+
+ @staticmethod
+ def _ssh_args(ssh_cmd, user, host, port):
+ user_host = [('' if user is None else '%s@' % user) + host]
+ return shlex.split(ssh_cmd) + ([] if port is None else ['-p', port]) + user_host
def run_task(args):
| Support port option in host list and host string | mogproject/color-ssh | diff --git a/tests/color_ssh/test_color_ssh.py b/tests/color_ssh/test_color_ssh.py
index 67684f8..e2fc45a 100644
--- a/tests/color_ssh/test_color_ssh.py
+++ b/tests/color_ssh/test_color_ssh.py
@@ -62,14 +62,15 @@ class TestSetting(TestCase):
('server-4', ['ssh', 'server-4', 'pwd']),
('server-5', ['ssh', 'server-5', 'pwd']),
('server-6', ['ssh', 'server-6', 'pwd']),
- ('server-7', ['ssh', 'server-7', 'pwd']),
- ('server-8', ['ssh', 'server-8', 'pwd']),
+ ('server-7', ['ssh', '-p', '22', 'server-7', 'pwd']),
+ ('server-8', ['ssh', '-p', '1022', 'server-8', 'pwd']),
('server-9', ['ssh', 'root@server-9', 'pwd']),
- ('server-10', ['ssh', 'root@server-10', 'pwd']),
+ ('server-10', ['ssh', '-p', '1022', 'root@server-10', 'pwd']),
])
- self._check(self._parse(['-H', 'server-11 root@server-12', 'pwd']), [
+ self._check(self._parse(['-H', 'server-11 root@server-12 root@server-13:1022', 'pwd']), [
('server-11', ['ssh', 'server-11', 'pwd']),
('server-12', ['ssh', 'root@server-12', 'pwd']),
+ ('server-13', ['ssh', '-p', '1022', 'root@server-13', 'pwd']),
])
self._check(self._parse(['--hosts', hosts_path, '--host', 'server-11 root@server-12', 'pwd']), [
('server-1', ['ssh', 'server-1', 'pwd']),
@@ -78,10 +79,10 @@ class TestSetting(TestCase):
('server-4', ['ssh', 'server-4', 'pwd']),
('server-5', ['ssh', 'server-5', 'pwd']),
('server-6', ['ssh', 'server-6', 'pwd']),
- ('server-7', ['ssh', 'server-7', 'pwd']),
- ('server-8', ['ssh', 'server-8', 'pwd']),
+ ('server-7', ['ssh', '-p', '22', 'server-7', 'pwd']),
+ ('server-8', ['ssh', '-p', '1022', 'server-8', 'pwd']),
('server-9', ['ssh', 'root@server-9', 'pwd']),
- ('server-10', ['ssh', 'root@server-10', 'pwd']),
+ ('server-10', ['ssh', '-p', '1022', 'root@server-10', 'pwd']),
('server-11', ['ssh', 'server-11', 'pwd']),
('server-12', ['ssh', 'root@server-12', 'pwd']),
])
@@ -103,6 +104,16 @@ class TestSetting(TestCase):
self.assertSystemExit(2, Setting().parse_args, ['color-ssh', '--label', 'x'], out)
self.assertSystemExit(2, Setting().parse_args, ['color-ssh', '--host', ' ', 'pwd'], out)
+ def test_parse_host_error(self):
+ self.assertRaises(ValueError, Setting._parse_host, '')
+ self.assertRaises(ValueError, Setting._parse_host, '@')
+ self.assertRaises(ValueError, Setting._parse_host, ':')
+ self.assertRaises(ValueError, Setting._parse_host, 'a:')
+ self.assertRaises(ValueError, Setting._parse_host, 'a:b')
+ self.assertRaises(ValueError, Setting._parse_host, '@a:0')
+ self.assertRaises(ValueError, Setting._parse_host, 'a:b@c:0')
+ self.assertRaises(ValueError, Setting._parse_host, 'a@@c:0')
+
class TestMain(TestCase):
def test_main_single_proc(self):
diff --git a/tests/resources/test_color_ssh_hosts.txt b/tests/resources/test_color_ssh_hosts.txt
index 3b9d2dd..f91ff9f 100644
--- a/tests/resources/test_color_ssh_hosts.txt
+++ b/tests/resources/test_color_ssh_hosts.txt
@@ -4,7 +4,7 @@ server-3
server-4
server-5
server-6
-server-7
-server-8
+server-7:22
+server-8:1022
root@server-9
-root@server-10
\ No newline at end of file
+root@server-10:1022
\ No newline at end of file
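
The updated fixture adds `:port` suffixes to the hosts file, and the expected commands show a parsed port surfacing as an `-p` flag. A small sketch of that argument assembly (a simplified standalone take on `_ssh_args`, with the ssh command fixed to `'ssh'`):

```python
def ssh_args(user, host, port):
    # a parsed port becomes "-p <port>"; a user is re-attached as "user@host"
    user_host = ('' if user is None else user + '@') + host
    return ['ssh'] + ([] if port is None else ['-p', port]) + [user_host]

assert ssh_args(None, 'server-7', '22') == ['ssh', '-p', '22', 'server-7']
assert ssh_args('root', 'server-10', '1022') == ['ssh', '-p', '1022', 'root@server-10']
```
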
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 3
},
"num_modified_files": 2
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"six",
"mog-commons>=0.2.2"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/mogproject/color-ssh.git@b39783565319ee50b34988c29193f02a90122e2c#egg=color_ssh
coverage==7.8.0
exceptiongroup==1.2.2
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==3.0.2
mog-commons==0.2.3
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-cov==6.0.0
six==1.17.0
tomli==2.2.1
| name: color-ssh
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==3.0.2
- mog-commons==0.2.3
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-cov==6.0.0
- six==1.17.0
- tomli==2.2.1
prefix: /opt/conda/envs/color-ssh
| [
"tests/color_ssh/test_color_ssh.py::TestSetting::test_parse_args",
"tests/color_ssh/test_color_ssh.py::TestSetting::test_parse_host_error"
]
| []
| [
"tests/color_ssh/test_color_ssh.py::TestSetting::test_parse_args_error",
"tests/color_ssh/test_color_ssh.py::TestMain::test_main_load_error",
"tests/color_ssh/test_color_ssh.py::TestMain::test_main_multi_proc",
"tests/color_ssh/test_color_ssh.py::TestMain::test_main_single_proc",
"tests/color_ssh/test_color_ssh.py::TestMain::test_main_task_error"
]
| []
| null | 352 | [
"src/color_ssh/color_ssh.py",
"src/color_ssh/__init__.py"
]
| [
"src/color_ssh/color_ssh.py",
"src/color_ssh/__init__.py"
]
|
|
mogproject__color-ssh-18 | 4c8d1949137759046ddff3b922eb84148c6ffa19 | 2015-12-27 13:52:04 | 8ef23299ceb4e19e5d33562edb0066686eead51d | diff --git a/src/color_ssh/__init__.py b/src/color_ssh/__init__.py
index 8ce9b36..7525d19 100644
--- a/src/color_ssh/__init__.py
+++ b/src/color_ssh/__init__.py
@@ -1,1 +1,1 @@
-__version__ = '0.1.3'
+__version__ = '0.1.4'
diff --git a/src/color_ssh/color_cat.py b/src/color_ssh/color_cat.py
index 036ced8..7509d49 100644
--- a/src/color_ssh/color_cat.py
+++ b/src/color_ssh/color_cat.py
@@ -75,7 +75,7 @@ def main(argv=sys.argv, stdin=io2bytes(sys.stdin), stdout=io2bytes(sys.stdout),
fh = stdin if path is None else io.open(path, 'rb', 0)
try:
for line in iter(fh.readline, b''):
- stdout.write(setting.prefix + line + RESET)
+ stdout.write(setting.prefix + line.rstrip(b'\n') + RESET + b'\n')
stdout.flush()
finally:
if fh is not stdin:
diff --git a/src/color_ssh/color_ssh.py b/src/color_ssh/color_ssh.py
index 6cfbb2d..5aaac08 100644
--- a/src/color_ssh/color_ssh.py
+++ b/src/color_ssh/color_ssh.py
@@ -131,7 +131,7 @@ class Setting(object):
def _build_host_string(user, host):
ret = host
if user:
- ret = '%s@' % user + ret
+ ret = str('%s@') % user + ret
return ret
@staticmethod
diff --git a/src/color_ssh/util/util.py b/src/color_ssh/util/util.py
index 8bdea63..ed7e846 100644
--- a/src/color_ssh/util/util.py
+++ b/src/color_ssh/util/util.py
@@ -2,6 +2,7 @@ from __future__ import division, print_function, absolute_import, unicode_litera
import sys
import os
+import errno
__all__ = ['PY3', 'arg2bytes', 'io2bytes', 'distribute', 'exception_handler']
@@ -52,6 +53,12 @@ def exception_handler(exception_func):
return func(*args, **kwargs)
except KeyboardInterrupt:
return 130
+ except IOError as e:
+ if e.errno == errno.EPIPE:
+ return 0
+ else:
+ exception_func(e)
+ return 1
except Exception as e:
exception_func(e)
return 1
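
The `color_cat.py` change is easiest to see at the byte level: the ANSI reset used to be written after the newline, so the colour escape spilled onto the following line. A tiny before/after sketch (the escape sequences match those in the tests below; the red prefix is chosen for illustration):

```python
RESET = b'\x1b[0m'
prefix = b'\x1b[31m'  # red
line = b'foo\n'

old = prefix + line + RESET                        # reset lands after the newline
new = prefix + line.rstrip(b'\n') + RESET + b'\n'  # reset now precedes the newline

assert old == b'\x1b[31mfoo\n\x1b[0m'
assert new == b'\x1b[31mfoo\x1b[0m\n'
```
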
| Reset color before newline | mogproject/color-ssh | diff --git a/tests/color_ssh/test_color_cat.py b/tests/color_ssh/test_color_cat.py
index 390d0b0..38d50cc 100644
--- a/tests/color_ssh/test_color_cat.py
+++ b/tests/color_ssh/test_color_cat.py
@@ -110,8 +110,8 @@ class TestMain(TestCase):
ret = color_cat.main(args, stdout=out, stderr=err)
self.assertEqual(ret, 0)
self.assertEqual(out.getvalue(),
- b'\x1b[31mfoo\n\x1b[0m\x1b[31mbar\n\x1b[0m\x1b[31mbaz\n\x1b[0m'
- b'\x1b[31m123\n\x1b[0m\x1b[31m456\n\x1b[0m\x1b[31m789\n\x1b[0m')
+ b'\x1b[31mfoo\x1b[0m\n\x1b[31mbar\x1b[0m\n\x1b[31mbaz\x1b[0m\n'
+ b'\x1b[31m123\x1b[0m\n\x1b[31m456\x1b[0m\n\x1b[31m789\x1b[0m\n')
self.assertEqual(err.getvalue(), b'')
def test_main_error(self):
diff --git a/tests/color_ssh/test_color_ssh.py b/tests/color_ssh/test_color_ssh.py
index 524e613..d035868 100644
--- a/tests/color_ssh/test_color_ssh.py
+++ b/tests/color_ssh/test_color_ssh.py
@@ -98,9 +98,10 @@ class TestSetting(TestCase):
])
# upload
- self._check(self._parse([
+ result = self._parse([
'-H', 'server-11 root@server-12', '--distribute', 'echo "foo bar"', '--upload', 'dir1/x', 'dir1/y', 'z'
- ]), [
+ ])
+ self._check(result, [
('server-11', ['ssh', 'server-11', 'echo', 'foo bar', 'dir1/x', 'dir1/y'], [
['ssh', 'server-11', 'mkdir', '-p', 'dir1'],
['scp', 'dir1/x', 'server-11:dir1/x'],
@@ -109,6 +110,10 @@ class TestSetting(TestCase):
('server-12', ['ssh', 'root@server-12', 'echo', 'foo bar', 'z'],
[['scp', 'z', 'root@server-12:z']]),
])
+ for _, cmd, setup in result.tasks:
+ self.assertTrue(all(isinstance(c, str) for c in cmd))
+ for xs in setup:
+ self.assertTrue(all(isinstance(c, str) for c in xs))
def test_parse_args_error(self):
with self.withBytesOutput() as (out, err):
@@ -132,10 +137,10 @@ class TestMain(TestCase):
def test_main_single_proc(self):
# requires: POSIX environment, color-cat command
def f(bs):
- return b'\x1b[7m\x1b[35mtests/resources/test_color_ssh_01.sh\x1b[0m|\x1b[0m\x1b[35m' + bs + b'\n\x1b[0m'
+ return b'\x1b[7m\x1b[35mtests/resources/test_color_ssh_01.sh\x1b[0m|\x1b[0m\x1b[35m' + bs + b'\x1b[0m\n'
def g(bs):
- return b'\x1b[7m\x1b[35mtests/resources/test_color_ssh_01.sh\x1b[0m+\x1b[0m\x1b[35m' + bs + b'\n\x1b[0m'
+ return b'\x1b[7m\x1b[35mtests/resources/test_color_ssh_01.sh\x1b[0m+\x1b[0m\x1b[35m' + bs + b'\x1b[0m\n'
with self.__with_temp_output() as (out, err):
args = ['color-ssh', '--ssh', str('bash'),
@@ -152,10 +157,10 @@ class TestMain(TestCase):
def test_main_multi_proc(self):
# requires: POSIX environment, color-cat command
def f(bs):
- return b'\x1b[7m\x1b[35mtests/resources/test_color_ssh_01.sh\x1b[0m|\x1b[0m\x1b[35m' + bs + b'\n\x1b[0m'
+ return b'\x1b[7m\x1b[35mtests/resources/test_color_ssh_01.sh\x1b[0m|\x1b[0m\x1b[35m' + bs + b'\x1b[0m\n'
def g(bs):
- return b'\x1b[7m\x1b[35mtests/resources/test_color_ssh_01.sh\x1b[0m+\x1b[0m\x1b[35m' + bs + b'\n\x1b[0m'
+ return b'\x1b[7m\x1b[35mtests/resources/test_color_ssh_01.sh\x1b[0m+\x1b[0m\x1b[35m' + bs + b'\x1b[0m\n'
with self.__with_temp_output() as (out, err):
path = os.path.join('tests', 'resources', 'test_color_ssh_01.sh')
@@ -199,6 +204,18 @@ class TestMain(TestCase):
self.assertEqual(out.read(), b'')
self.assertTrue(b'No such file or directory' in err.read())
+ def test_run_task_error(self):
+ with self.__with_temp_output() as (out, err):
+ ret = color_ssh.run_task(('lab', 'echo x', ['true', 'false']))
+ self.assertEqual(ret, 1)
+
+ out.seek(0)
+ err.seek(0)
+
+ self.assertEqual(out.read(), b'')
+ self.assertEqual(b'RuntimeError: Failed to execute setup command: false\nlabel=lab, command=echo x\n',
+ err.read())
+
@staticmethod
@contextmanager
def __with_temp_output():
diff --git a/tests/color_ssh/util/test_util.py b/tests/color_ssh/util/test_util.py
index 03f9fc3..9eb76ff 100644
--- a/tests/color_ssh/util/test_util.py
+++ b/tests/color_ssh/util/test_util.py
@@ -12,6 +12,12 @@ class TestUtil(TestCase):
self.assertEqual(f(), 130)
+ @exception_handler(lambda e: e)
+ def g():
+ raise IOError(32, '')
+
+ self.assertEqual(g(), 0)
+
def test_distribute(self):
self.assertEqual(distribute(0, []), [])
self.assertEqual(distribute(0, ['a']), [])
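
The new `test_exception_handler` case exercises the `EPIPE` branch: a decorated function that dies on a broken pipe should return exit status 0 instead of 1. A self-contained sketch of a decorator with that shape (an assumed re-creation; the real implementation lives in `color_ssh.util.util`):

```python
import errno
import functools

def exception_handler(exception_func):
    def deco(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except KeyboardInterrupt:
                return 130
            except IOError as e:
                if e.errno == errno.EPIPE:
                    return 0  # downstream pipe closed; treat as a clean exit
                exception_func(e)
                return 1
            except Exception as e:
                exception_func(e)
                return 1
        return wrapper
    return deco

@exception_handler(lambda e: e)
def g():
    raise IOError(32, '')  # 32 is errno.EPIPE on Linux

assert g() == 0
```
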
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 3,
"test_score": 3
},
"num_modified_files": 4
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"six",
"mog-commons>=0.2.2"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/mogproject/color-ssh.git@4c8d1949137759046ddff3b922eb84148c6ffa19#egg=color_ssh
coverage==7.8.0
exceptiongroup==1.2.2
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==3.0.2
mog-commons==0.2.3
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-cov==6.0.0
six==1.17.0
tomli==2.2.1
| name: color-ssh
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==3.0.2
- mog-commons==0.2.3
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-cov==6.0.0
- six==1.17.0
- tomli==2.2.1
prefix: /opt/conda/envs/color-ssh
| [
"tests/color_ssh/test_color_cat.py::TestMain::test_main",
"tests/color_ssh/test_color_ssh.py::TestMain::test_main_single_proc",
"tests/color_ssh/util/test_util.py::TestUtil::test_exception_handler"
]
| [
"tests/color_ssh/test_color_cat.py::TestSetting::test_parse_args_error"
]
| [
"tests/color_ssh/test_color_cat.py::TestSetting::test_parse_args",
"tests/color_ssh/test_color_cat.py::TestMain::test_main_error",
"tests/color_ssh/test_color_ssh.py::TestSetting::test_parse_args",
"tests/color_ssh/test_color_ssh.py::TestSetting::test_parse_args_error",
"tests/color_ssh/test_color_ssh.py::TestSetting::test_parse_host_error",
"tests/color_ssh/test_color_ssh.py::TestMain::test_main_load_error",
"tests/color_ssh/test_color_ssh.py::TestMain::test_main_multi_proc",
"tests/color_ssh/test_color_ssh.py::TestMain::test_main_task_error",
"tests/color_ssh/test_color_ssh.py::TestMain::test_run_task_error",
"tests/color_ssh/util/test_util.py::TestUtil::test_distribute",
"tests/color_ssh/util/test_util.py::TestUtil::test_distribute_error"
]
| []
| null | 353 | [
"src/color_ssh/util/util.py",
"src/color_ssh/color_ssh.py",
"src/color_ssh/color_cat.py",
"src/color_ssh/__init__.py"
]
| [
"src/color_ssh/util/util.py",
"src/color_ssh/color_ssh.py",
"src/color_ssh/color_cat.py",
"src/color_ssh/__init__.py"
]
|
|
mogproject__color-ssh-23 | 881561051ebae23a03aec3063877ee1cf21e24c0 | 2015-12-30 03:06:36 | 8ef23299ceb4e19e5d33562edb0066686eead51d | diff --git a/src/color_ssh/color_ssh.py b/src/color_ssh/color_ssh.py
index 0534891..294a553 100644
--- a/src/color_ssh/color_ssh.py
+++ b/src/color_ssh/color_ssh.py
@@ -21,6 +21,9 @@ class Setting(object):
' %prog [options...] -H "[user@]hostname [[user@]hostname]...]" command'
])
DEFAULT_PARALLELISM = 32
+ CMD_SSH = str('ssh')
+ CMD_UPLOAD = [str('rsync'), str('-a')]
+ CMD_MKDIR = [str('mkdir'), str('-p')]
def __init__(self, parallelism=None, tasks=None):
self.parallelism = parallelism
@@ -39,7 +42,7 @@ class Setting(object):
help='label name'
)
parser.add_option(
- '--ssh', dest='ssh', default=str('ssh'), type='string', metavar='SSH',
+ '--ssh', dest='ssh', default=self.CMD_SSH, type='string', metavar='SSH',
help='override ssh command line string'
)
parser.add_option(
@@ -62,11 +65,15 @@
'--upload', dest='upload', default=False, action='store_true',
help='upload files before executing a command (all args are regarded as paths)'
)
+ parser.add_option(
+ '--upload-with', dest='upload_with', default=None, type='string', metavar='PATH',
+ help='file paths to be uploaded before executing a command'
+ )
option, args = parser.parse_args(argv[1:])
hosts = self._load_hosts(option.host_file) + (option.host_string.split() if option.host_string else [])
if len(args) < (1 if hosts else 2):
stdout.write(arg2bytes(parser.format_help().encode('utf-8')))
parser.exit(2)
@@ -77,6 +85,9 @@ class Setting(object):
# parse hosts
parsed_hosts = [self._parse_host(h) for h in hosts]
+ # parse upload-with option
+ upload_with = [] if option.upload_with is None else shlex.split(option.upload_with)
+
tasks = []
if option.distribute:
# distribute args
@@ -84,24 +95,16 @@ class Setting(object):
d = distribute(len(hosts), args)
for i, (user, host, port) in enumerate(parsed_hosts):
if d[i]:
- setup_commands = []
- if option.upload:
- # create directories
- dirs = list(set(x for x in [os.path.dirname(arg) for arg in d[i]] if x != '' and x != '.'))
- if dirs:
- setup_commands.append(
- self._ssh_args(option.ssh, user, host, port) + [str('mkdir'), str('-p')] + dirs
- )
-
- # upload files before executing main commands
- setup_commands.extend([self._scp_args(str('rsync -a'), user, host, port, arg) for arg in d[i]])
-
+ upload_paths = upload_with + d[i] if option.upload else []
label = option.label or host
ssh_args = self._ssh_args(option.ssh, user, host, port)
- tasks.append((label, ssh_args + dist_prefix + d[i], setup_commands))
+ tasks.append((label, ssh_args + dist_prefix + d[i],
+ self._build_upload_commands(user, host, port, option.ssh, upload_paths)))
else:
for user, host, port in parsed_hosts:
- tasks.append((option.label or host, self._ssh_args(option.ssh, user, host, port) + args, []))
+ tasks.append((option.label or host,
+ self._ssh_args(option.ssh, user, host, port) + args,
+ self._build_upload_commands(user, host, port, option.ssh, upload_with)))
self.parallelism = option.parallelism
self.tasks = tasks
@@ -140,12 +143,25 @@ class Setting(object):
[] if port is None else [str('-p'), port]) + [Setting._build_host_string(user, host)]
@staticmethod
- def _scp_args(scp_cmd, user, host, port, path):
- return shlex.split(scp_cmd) + ([] if port is None else [str('-P'), port]) + [
+ def _upload_args(user, host, port, path):
+ return Setting.CMD_UPLOAD + ([] if port is None else [str('-P'), port]) + [
path,
Setting._build_host_string(user, host) + str(':') + path
]
+ @staticmethod
+ def _build_upload_commands(user, host, port, ssh_cmd, paths):
+ # create directories
+ dirs = list(set(x for x in [os.path.dirname(path) for path in paths] if x != '' and x != '.'))
+
+ ret = []
+ if dirs:
+ ret.append(Setting._ssh_args(ssh_cmd, user, host, port) + Setting.CMD_MKDIR + sorted(dirs))
+
+ # upload files
+ ret.extend([Setting._upload_args(user, host, port, path) for path in paths])
+ return ret
+
def run_task(args):
label, command, setup_commands = args
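
`_build_upload_commands` first issues one `mkdir -p` covering every unique remote parent directory, then one `rsync -a` per path. A condensed standalone sketch of that ordering (user/port handling collapsed into a plain host string for brevity):

```python
import os

def build_upload_commands(host, paths):
    # unique parent directories that must exist on the remote side
    dirs = sorted({d for d in (os.path.dirname(p) for p in paths) if d not in ('', '.')})
    cmds = []
    if dirs:
        cmds.append(['ssh', host, 'mkdir', '-p'] + dirs)
    cmds.extend(['rsync', '-a', p, '%s:%s' % (host, p)] for p in paths)
    return cmds

assert build_upload_commands('server-1', ['dir1/x']) == [
    ['ssh', 'server-1', 'mkdir', '-p', 'dir1'],
    ['rsync', '-a', 'dir1/x', 'server-1:dir1/x'],
]
```
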
| Add upload-with option | mogproject/color-ssh | diff --git a/tests/color_ssh/test_color_ssh.py b/tests/color_ssh/test_color_ssh.py
index 2c22fee..ccb7c34 100644
--- a/tests/color_ssh/test_color_ssh.py
+++ b/tests/color_ssh/test_color_ssh.py
@@ -115,6 +115,35 @@ class TestSetting(TestCase):
[['rsync', '-a', 'z', 'root@server-12:z']]),
])
+ # upload-with
+ self._check(self._parse(['--upload-with=dir1/x', 'server-1', 'pwd']),
+ [('server-1', ['ssh', 'server-1', 'pwd'], [
+ ['ssh', 'server-1', 'mkdir', '-p', 'dir1'],
+ ['rsync', '-a', 'dir1/x', 'server-1:dir1/x'],
+ ])])
+
+ self._check(
+ self._parse([
+ '--upload-with', 'dir2/c dir2/d dir3/e',
+ '-H', 'server-11 root@server-12', '--distribute', 'echo "foo bar"', '--upload', 'dir1/x', 'dir1/y', 'z'
+ ]), [
+ ('server-11', ['ssh', 'server-11', 'echo', 'foo bar', 'dir1/x', 'dir1/y'], [
+ ['ssh', 'server-11', 'mkdir', '-p', 'dir1', 'dir2', 'dir3'],
+ ['rsync', '-a', 'dir2/c', 'server-11:dir2/c'],
+ ['rsync', '-a', 'dir2/d', 'server-11:dir2/d'],
+ ['rsync', '-a', 'dir3/e', 'server-11:dir3/e'],
+ ['rsync', '-a', 'dir1/x', 'server-11:dir1/x'],
+ ['rsync', '-a', 'dir1/y', 'server-11:dir1/y']
+ ]),
+ ('server-12', ['ssh', 'root@server-12', 'echo', 'foo bar', 'z'], [
+ ['ssh', 'root@server-12', 'mkdir', '-p', 'dir2', 'dir3'],
+ ['rsync', '-a', 'dir2/c', 'root@server-12:dir2/c'],
+ ['rsync', '-a', 'dir2/d', 'root@server-12:dir2/d'],
+ ['rsync', '-a', 'dir3/e', 'root@server-12:dir3/e'],
+ ['rsync', '-a', 'z', 'root@server-12:z']
+ ]),
+ ])
+
def test_parse_args_error(self):
with self.withBytesOutput() as (out, err):
self.assertSystemExit(2, Setting().parse_args, ['color-ssh'], out)
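
Both `--distribute` and the new `--upload-with` values are split with `shlex`, so quoted arguments survive as single tokens; that is why the expected commands above contain `'foo bar'` as one list element:

```python
import shlex

assert shlex.split('echo "foo bar"') == ['echo', 'foo bar']
assert shlex.split('dir2/c dir2/d dir3/e') == ['dir2/c', 'dir2/d', 'dir3/e']
```
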
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 3,
"test_score": 3
},
"num_modified_files": 1
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov",
"six",
"mog-commons>=0.2.2"
],
"pre_install": null,
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
-e git+https://github.com/mogproject/color-ssh.git@881561051ebae23a03aec3063877ee1cf21e24c0#egg=color_ssh
coverage==6.2
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.0.3
MarkupSafe==2.0.1
mog-commons==0.2.3
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
six==1.17.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: color-ssh
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==6.2
- jinja2==3.0.3
- markupsafe==2.0.1
- mog-commons==0.2.3
- pytest-cov==4.0.0
- six==1.17.0
- tomli==1.2.3
prefix: /opt/conda/envs/color-ssh
| [
"tests/color_ssh/test_color_ssh.py::TestSetting::test_parse_args"
]
| []
| [
"tests/color_ssh/test_color_ssh.py::TestSetting::test_parse_args_error",
"tests/color_ssh/test_color_ssh.py::TestSetting::test_parse_host_error",
"tests/color_ssh/test_color_ssh.py::TestMain::test_main_load_error",
"tests/color_ssh/test_color_ssh.py::TestMain::test_main_multi_proc",
"tests/color_ssh/test_color_ssh.py::TestMain::test_main_single_proc",
"tests/color_ssh/test_color_ssh.py::TestMain::test_main_task_error",
"tests/color_ssh/test_color_ssh.py::TestMain::test_run_task_error"
]
| []
| null | 357 | [
"src/color_ssh/color_ssh.py"
]
| [
"src/color_ssh/color_ssh.py"
]
|
|
numberoverzero__bottom-17 | 4293d2726d2a7222faa55ca509871ee03f4e66e1 | 2016-01-02 04:11:06 | 4293d2726d2a7222faa55ca509871ee03f4e66e1 | diff --git a/.python-version b/.python-version
index fef12e2..d2286d0 100644
--- a/.python-version
+++ b/.python-version
@@ -1,1 +1,2 @@
bottom
+3.5.0
diff --git a/.travis.yml b/.travis.yml
index f37e3e8..a14cce9 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,8 +1,8 @@
language: python
-python: 3.5-dev
+python: 3.5
env:
- - TOXENV=py35
+ - TOX_ENV=py35
install: pip install tox coveralls
-script: tox -e $TOXENV
+script: tox -e $TOX_ENV
after_success:
- coveralls
diff --git a/MANIFEST.in b/MANIFEST.in
index 564aaf0..736edbe 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,2 +1,2 @@
-include README.markdown
+include README.rst
recursive-exclude tests *
diff --git a/README.markdown b/README.markdown
deleted file mode 100644
index 06f249c..0000000
--- a/README.markdown
+++ /dev/null
@@ -1,808 +0,0 @@
-# bottom 0.9.13
-
-[![Build Status]
-(https://travis-ci.org/numberoverzero/bottom.svg?branch=master)]
-(https://travis-ci.org/numberoverzero/bottom)[![Coverage Status]
-(https://coveralls.io/repos/numberoverzero/bottom/badge.png?branch=master)]
-(https://coveralls.io/r/numberoverzero/bottom?branch=master)
-
-Downloads https://pypi.python.org/pypi/bottom
-
-Source https://github.com/numberoverzero/bottom
-
-asyncio-based rfc2812-compliant IRC Client
-
-# Installation
-
-`pip install bottom`
-
-# Getting Started
-
-bottom isn't a kitchen-sink library. Instead, it provides a consistent API with a small surface area, tuned for performance and ease of extension. Similar to the routing style of bottle.py, hooking into events is one line.
-
-```python
-import bottom
-import asyncio
-
-NICK = 'bottom-bot'
-CHANNEL = '#python'
-
-bot = bottom.Client('localhost', 6697)
-
-
[email protected]('CLIENT_CONNECT')
-def connect():
- bot.send('NICK', nick=NICK)
- bot.send('USER', user=NICK, realname='Bot using bottom.py')
- bot.send('JOIN', channel=CHANNEL)
-
-
[email protected]('PING')
-def keepalive(message):
- bot.send('PONG', message=message)
-
-
[email protected]('PRIVMSG')
-def message(nick, target, message):
- ''' Echo all messages '''
-
- # Don't echo ourselves
- if nick == NICK:
- return
- # Direct message to bot
- if target == NICK:
- bot.send("PRIVMSG", target=nick, message=message)
- # Message in channel
- else:
- bot.send("PRIVMSG", target=target, message=message)
-
-asyncio.get_event_loop().run_until_complete(bot.run())
-```
-
-# Versioning and RFC2812
-
-* Bottom follows semver for its **public** API.
-
- * Currently, `Client` is the only public member of bottom.
- * IRC replies/codes which are not yet implemented may be added at any time, and will correspond to a patch - the function contract of `@on` method does not change.
- * You should not rely on the internal api staying the same between minor versions.
- * Over time, private apis may be raised to become public. The reverse will never occur.
-
-* There are a number of changes from RFC2812 - none should noticeably change how you interact with a standard IRC server. For specific adjustments, see the notes section of each command in [`Supported Commands`](#supported-commands).
-
-# Contributing
-Contributions welcome! When reporting issues, please provide enough detail to reproduce the bug - sample code is ideal. When submitting a PR, please make sure `tox` passes (including flake8).
-
-### Development
-bottom uses `tox`, `pytest` and `flake8`. To get everything set up:
-
-```
-# RECOMMENDED: create a virtualenv with:
-# mkvirtualenv bottom
-git clone https://github.com/numberoverzero/bottom.git
-pip install tox
-tox
-```
-
-### TODO
-* Better `Client` docstrings
-* Add missing replies/errors to `unpack.py:unpack_command`
- * Add reply/error parameters to `unpack.py:parameters`
- * Document [`Supported Events`](#supported-events)
-
-
-### Contributors
-* [fahhem](https://github.com/fahhem)
-* [thebigmunch](https://github.com/thebigmunch)
-* [tilal6991](https://github.com/tilal6991)
-
-# API
-
-### Client.run()
-
-*This is a coroutine.*
-
-Start the magic. This will connect the client, and then read until it disconnects. The `CLIENT_DISCONNECT` event will fire before the loop exits, allowing you to `yield from Client.connect()` and keep the client running.
-
-If you want to call this synchronously (block until it's complete) use the following:
-
-```python
-import asyncio
-# ... client is defined somewhere
-
-loop = asyncio.get_event_loop()
-task = client.run()
-loop.run_until_complete(task)
-```
-
-### Client.on(event)(func)
-
-This `@decorator` is the main way you'll interact with a `Client`. It takes a string, returning a function wrapper that validates the function and registers it for the given event. When that event occurs, the function will be called, mapping any arguments the function may expect from the set of available arguments for the event.
-
-Not all available arguments need to be used. For instance, both of the following are valid:
-
-```python
[email protected]('PRIVMSG')
-def event(nick, message, target):
- ''' Doesn't use user, host. argument order is different '''
- # message sent to bot - echo message
- if target == bot.nick:
- bot.send('PRIVMSG', target, message=message)
- # Some channel we're watching
- elif target == bot.monitored_channel:
- logger.info("{} -> {}: {}".format(nick, target, message))
-
-
[email protected]('PRIVMSG')
-def func(message, target):
- ''' Just waiting for the signal '''
-    if message == codeword and target == secret_channel:
- execute_heist()
-```
-
-VAR_KWARGS can be used, as long as the name doesn't mask an actual parameter. VAR_ARGS may not be used.
-
-```python
-# OK - kwargs, no masking
[email protected]('PRIVMSG')
-def event(message, **everything_else):
- logger.log(everything_else['nick'] + " said " + message)
-
-
-# NOT OK - kwargs, masking parameter <nick>
[email protected]('PRIVMSG')
-def event(message, **nick):
- logger.log(nick['target'])
-
-
-# NOT OK - uses VAR_ARGS
[email protected]('PRIVMSG')
-def event(message, *args):
- logger.log(args)
-```
-
-Decorated functions will be invoked asynchronously, and may optionally use the `yield from` syntax. Functions do not need to be wrapped with `@asyncio.coroutine` - this is handled as part of the function caching process.
-
-### Client.trigger(event, **kwargs)
-
-*This is a coroutine.*
-
-Manually inject a command or reply as if it came from the server. This is useful for invoking other handlers.
-
-```python
-# Manually trigger `PRIVMSG` handlers:
-yield from bot.trigger('privmsg', nick="always_says_no", message="yes")
-```
-
-```python
-# Rename !commands to !help
[email protected]('privmsg')
-def parse(nick, target, message):
- if message == '!commands':
- bot.send('privmsg', target=nick,
- message="!commands was renamed to !help in 1.2")
- # Don't make them retype it, just make it happen
- yield from bot.trigger('privmsg', nick=nick,
- target=target, message="!help")
-```
-
-```python
-# While testing the auto-reconnect module, simulate a disconnect:
-def test_reconnect(bot):
- loop = asyncio.get_event_loop()
- loop.run_until_complete(bot.trigger("client_disconnect"))
- assert bot.connected
-```
-
-### Client.connect()
-
-*This is a coroutine.*
-
-Attempt to reconnect using the client's host, port.
-
-```python
[email protected]('client_disconnect')
-def reconnect():
- # Wait a few seconds
- yield from asyncio.sleep(3)
- yield from bot.connect()
-```
-
-### Client.disconnect()
-
-*This is a coroutine.*
-
-Disconnect from the server if connected.
-
-```python
[email protected]('privmsg')
-def suicide_pill(nick, message):
- if nick == "spy_handler" and message == "last stop":
- yield from bot.disconnect()
-```
-
-### Client.send(command, **kwargs)
-
-Send a command to the server. See [`Supported Commands`](#supported-commands) for a detailed breakdown of available commands and their parameters.
-
-# Supported Commands
-
-These commands can be sent to the server using [`Client.send`](#clientsendcommand-kwargs).
-
-For incoming signals and messages, see [`Supported Events`](#supported-events) below.
-
-#### Documentation Layout
-There are three parts to each command's documentation:
-
-1. **Python syntax** - sample calls using available parameters
-2. **Normalized IRC wire format** - the normalized translation from python keywords to a literal string that will be constructed by the client and sent to the server. The following syntax is used:
- * `<parameter>` the location of the `parameter` passed to `send`. Literal `<>` are not transferred.
- * `[value]` an optional value, which may be excluded. In some cases, such as [`LINKS`](#links), an optional value may only be provided if another dependant value is present. Literal `[]` are not transferred.
- * `:` the start of a field which may contain spaces. This is always the last field of an IRC line.
- * `"value"` literal value as printed. Literal `""` are not transferred.
-3. **Notes** - additional options or restrictions on commands that do not fit a pre-defined convention. Common notes include keywords for ease of searching:
- * `RFC_DELTA` - Some commands have different parameters from their RFC2812 definitions. **Please pay attention to these notes, since they are the most likely to cause issues**. These changes can include:
- * Addition of new required or optional parameters
- * Default values for new or existing parameters
- * `CONDITIONAL_OPTION` - there are some commands whose values depend on each other. For example, [`LINKS`](#links), `<mask>` REQUIRES `<remote>`.
- * `MULTIPLE_VALUES` - Some commands can handle non-string iterables, such as [`WHOWAS`](#whowas) where `<nick>` can handle both `"WiZ"` and `["WiZ", "WiZ-friend"]`.
-   * `PARAM_RENAME` - Some commands have renamed parameters from their RFC2812 specification to improve consistency.
-
-## Local Events
-*(trigger only)*
-
-#### CLIENT_CONNECT
-```python
-yield from client.trigger('CLIENT_CONNECT', host='localhost', port=6697)
-```
-#### CLIENT_DISCONNECT
-```python
-yield from client.trigger('CLIENT_DISCONNECT', host='localhost', port=6697)
-```
-
-## Connection Registration
-#### [PASS]
-```python
-client.send('PASS', password='hunter2')
-```
-
- PASS <password>
-
-#### [NICK]
-```python
-client.send('nick', nick='WiZ')
-```
-
- NICK <nick>
-
-* PARAM_RENAME `nickname -> nick`
-
-#### [USER]
-```python
-client.send('USER', user='WiZ-user', realname='Ronnie')
-client.send('USER', user='WiZ-user', mode='8', realname='Ronnie')
-```
-
- USER <user> [<mode>] :<realname>
-
-* RFC_DELTA `mode` is optional - default is `0`
-
-#### [OPER]
-```python
-client.send('OPER', user='WiZ', password='hunter2')
-```
-
- OPER <user> <password>
-
-* PARAM_RENAME `name -> user`
-
-#### [USERMODE][USERMODE] (renamed from [MODE][USERMODE])
-```python
-client.send('USERMODE', nick='WiZ')
-client.send('USERMODE', nick='WiZ', modes='+io')
-```
-
- MODE <nick> [<modes>]
-
-* RFC_DELTA rfc did not name `modes` parameter
-
-#### [SERVICE]
-```python
-client.send('SERVICE', nick='CHANSERV', distribution='*.en',
- type='0', info='manages channels')
-```
-
- SERVICE <nick> <distribution> <type> :<info>
-
-* PARAM_RENAME `nickname -> nick`
-
-#### [QUIT]
-```python
-client.send('QUIT')
-client.send('QUIT', message='Gone to Lunch')
-```
-
- QUIT :[<message>]
-
-* PARAM_RENAME `Quit Message -> message`
-
-#### [SQUIT]
-```python
-client.send('SQUIT', server='tolsun.oulu.fi')
-client.send('SQUIT', server='tolsun.oulu.fi', message='Bad Link')
-```
-
- SQUIT <server> :[<message>]
-
-* PARAM_RENAME `Comment -> message`
-* RFC_DELTA `message` is optional - rfc says comment SHOULD be supplied; syntax shows required
-
-## Channel Operations
-
-#### [JOIN]
-```python
-client.send('JOIN', channel='0') # send PART to all joined channels
-client.send('JOIN', channel='#foo-chan')
-client.send('JOIN', channel='#foo-chan', key='foo-key')
-client.send('JOIN', channel=['#foo-chan', '#other'], key='key-for-both')
-client.send('JOIN', channel=['#foo-chan', '#other'], key=['foo-key', 'other-key'])
-```
-
- JOIN <channel> [<key>]
-
-* MULTIPLE_VALUES `channel` and `key`
-* If `channel` has n > 1 values, `key` MUST have 1 or n values
-
-#### [PART]
-```python
-client.send('PART', channel='#foo-chan')
-client.send('PART', channel=['#foo-chan', '#other'])
-client.send('PART', channel='#foo-chan', message='I lost')
-```
-
- PART <channel> :[<message>]
-
-* MULTIPLE_VALUES `channel`
-
-#### [CHANNELMODE][CHANNELMODE] (renamed from [MODE][CHANNELMODE])
-```python
-client.send('CHANNELMODE', channel='#foo-chan', modes='+b')
-client.send('CHANNELMODE', channel='#foo-chan', modes='+l', params='10')
-```
-
- MODE <channel> <modes> [<params>]
-
-* PARAM_RENAME `modeparams -> params`
-
-#### [TOPIC]
-```python
-client.send('TOPIC', channel='#foo-chan')
-client.send('TOPIC', channel='#foo-chan', message='') # Clear channel message
-client.send('TOPIC', channel='#foo-chan', message='Yes, this is dog')
-```
-
- TOPIC <channel> :[<message>]
-
-* PARAM_RENAME `topic -> message`
-
-#### [NAMES]
-```python
-client.send('NAMES')
-client.send('NAMES', channel='#foo-chan')
-client.send('NAMES', channel=['#foo-chan', '#other'])
-client.send('NAMES', channel=['#foo-chan', '#other'], target='remote.*.edu')
-```
-
- NAMES [<channel>] [<target>]
-
-* MULTIPLE_VALUES `channel`
-* CONDITIONAL_OPTION `target` requires `channel`
-
-#### [LIST]
-```python
-client.send('LIST')
-client.send('LIST', channel='#foo-chan')
-client.send('LIST', channel=['#foo-chan', '#other'])
-client.send('LIST', channel=['#foo-chan', '#other'], target='remote.*.edu')
-```
-
- LIST [<channel>] [<target>]
-
-* MULTIPLE_VALUES `channel`
-* CONDITIONAL_OPTION `target` requires `channel`
-
-#### [INVITE]
-```python
-client.send('INVITE', nick='WiZ-friend', channel='#bar-chan')
-```
-
- INVITE <nick> <channel>
-
-* PARAM_RENAME `nickname -> nick`
-
-#### [KICK]
-```python
-client.send('KICK', channel='#foo-chan', nick='WiZ')
-client.send('KICK', channel='#foo-chan', nick='WiZ', message='Spamming')
-client.send('KICK', channel='#foo-chan', nick=['WiZ', 'WiZ-friend'])
-client.send('KICK', channel=['#foo', '#bar'], nick=['WiZ', 'WiZ-friend'])
-```
-
- KICK <channel> <nick> :[<message>]
-
-* PARAM_RENAME `nickname -> nick`
-* PARAM_RENAME `comment -> message`
-* MULTIPLE_VALUES `channel` and `nick`
-* If `nick` has n > 1 values, channel MUST have 1 or n values
-* `channel` can have n > 1 values IFF `nick` has n values
-
-## Sending Messages
-#### [PRIVMSG]
-```python
-client.send('PRIVMSG', target='WiZ-friend', message='Hello, friend!')
-```
-
- PRIVMSG <target> :<message>
-
-* PARAM_RENAME `msgtarget -> target`
-* PARAM_RENAME `text to be sent -> message`
-
-#### [NOTICE]
-```python
-client.send('NOTICE', target='#foo-chan', message='Maintenance in 5 mins')
-```
-
- NOTICE <target> :<message>
-
-* PARAM_RENAME `msgtarget -> target`
-* PARAM_RENAME `text -> message`
-
-## Server Queries and Commands
-#### [MOTD]
-```python
-client.send('MOTD')
-client.send('MOTD', target='remote.*.edu')
-```
-
- MOTD [<target>]
-
-#### [LUSERS]
-```python
-client.send('LUSERS')
-client.send('LUSERS', mask='*.edu')
-client.send('LUSERS', mask='*.edu', target='remote.*.edu')
-```
-
- LUSERS [<mask>] [<target>]
-
-* CONDITIONAL_OPTION `target` requires `mask`
-
-#### [VERSION]
-```python
-client.send('VERSION')
-```
-
- VERSION [<target>]
-
-#### [STATS]
-```python
-client.send('STATS')
-client.send('STATS', query='m')
-client.send('STATS', query='m', target='remote.*.edu')
-```
-
- STATS [<query>] [<target>]
-
-* CONDITIONAL_OPTION `target` requires `query`
-
-#### [LINKS]
-```python
-client.send('LINKS')
-client.send('LINKS', mask='*.bu.edu')
-client.send('LINKS', remote='*.edu', mask='*.bu.edu')
-```
-
- LINKS [<remote>] [<mask>]
-
-* PARAM_RENAME `remote server -> remote`
-* PARAM_RENAME `server mask -> mask`
-* CONDITIONAL_OPTION `remote` requires `mask`
-
-#### [TIME]
-```python
-client.send('TIME')
-client.send('TIME', target='remote.*.edu')
-```
-
- TIME [<target>]
-
-#### [CONNECT]
-```python
-client.send('CONNECT', target='tolsun.oulu.fi', port=6667)
-client.send('CONNECT', target='tolsun.oulu.fi', port=6667, remote='*.edu')
-```
-
- CONNECT <target> <port> [<remote>]
-
-* PARAM_RENAME `target server -> target`
-* PARAM_RENAME `remote server -> remote`
-
-#### [TRACE]
-```python
-client.send('TRACE')
-client.send('TRACE', target='remote.*.edu')
-```
-
- TRACE [<target>]
-
-#### [ADMIN]
-```python
-client.send('ADMIN')
-client.send('ADMIN', target='remote.*.edu')
-```
-
- ADMIN [<target>]
-
-#### [INFO]
-```python
-client.send('INFO')
-client.send('INFO', target='remote.*.edu')
-```
-
- INFO [<target>]
-
-## Service Query and Commands
-#### [SERVLIST]
-```python
-client.send('SERVLIST', mask='*SERV')
-client.send('SERVLIST', mask='*SERV', type=3)
-```
-
- SERVLIST [<mask>] [<type>]
-
-* CONDITIONAL_OPTION `type` requires `mask`
-
-#### [SQUERY]
-```python
-client.send('SQUERY', target='irchelp', message='HELP privmsg')
-```
-
- SQUERY <target> :<message>
-
-* PARAM_RENAME `servicename -> target`
-* PARAM_RENAME `text -> message`
-
-## User Based Queries
-#### [WHO]
-```python
-client.send('WHO')
-client.send('WHO', mask='*.fi')
-client.send('WHO', mask='*.fi', o=True)
-```
-
- WHO [<mask>] ["o"]
-
-* Optional positional parameter "o" is included if the kwarg "o" is Truthy
-
-#### [WHOIS]
-```python
-client.send('WHOIS', mask='*.fi')
-client.send('WHOIS', mask=['*.fi', '*.edu'], target='remote.*.edu')
-```
-
- WHOIS <mask> [<target>]
-
-* MULTIPLE_VALUES `mask`
-
-#### [WHOWAS]
-```python
-client.send('WHOWAS', nick='WiZ')
-client.send('WHOWAS', nick='WiZ', count=10)
-client.send('WHOWAS', nick=['WiZ', 'WiZ-friend'], count=10)
-client.send('WHOWAS', nick='WiZ', count=10, target='remote.*.edu')
-```
-
- WHOWAS <nick> [<count>] [<target>]
-
-* PARAM_RENAME `nickname -> nick`
-* MULTIPLE_VALUES `nick`
-* CONDITIONAL_OPTION `target` requires `count`
-
-## Miscellaneous Messages
-#### [KILL]
-```python
-client.send('KILL', nick='WiZ', message='Spamming Joins')
-```
-
- KILL <nick> :<message>
-
-* PARAM_RENAME `nickname -> nick`
-* PARAM_RENAME `comment -> message`
-
-#### [PING]
-```python
-client.send('PING', message='Test..')
-client.send('PING', server2='tolsun.oulu.fi')
-client.send('PING', server1='WiZ', server2='tolsun.oulu.fi')
-```
-
- PING [<server1>] [<server2>] :[<message>]
-
-* RFC_DELTA `server1` is optional
-* RFC_DELTA `message` is new, and optional
-* CONDITIONAL_OPTION `server2` requires `server1`
-
-#### [PONG]
-```python
-client.send('PONG', message='Test..')
-client.send('PONG', server2='tolsun.oulu.fi')
-client.send('PONG', server1='WiZ', server2='tolsun.oulu.fi')
-```
-
- PONG [<server1>] [<server2>] :[<message>]
-
-* RFC_DELTA `server1` is optional
-* RFC_DELTA `message` is new, and optional
-* CONDITIONAL_OPTION `server2` requires `server1`
-
-## Optional Features
-#### [AWAY]
-```python
-client.send('AWAY')
-client.send('AWAY', message='Gone to Lunch')
-```
-
- AWAY :[<message>]
-
-* PARAM_RENAME `text -> message`
-
-#### [REHASH]
-```python
-client.send('REHASH')
-```
-
- REHASH
-
-#### [DIE]
-```python
-client.send('DIE')
-```
-
- DIE
-
-#### [RESTART]
-```python
-client.send('RESTART')
-```
-
- RESTART
-
-#### [SUMMON]
-```python
-client.send('SUMMON', nick='WiZ')
-client.send('SUMMON', nick='WiZ', target='remote.*.edu')
-client.send('SUMMON', nick='WiZ', target='remote.*.edu', channel='#foo-chan')
-```
-
- SUMMON <nick> [<target>] [<channel>]
-
-* PARAM_RENAME `user -> nick`
-* CONDITIONAL_OPTION `channel` requires `target`
-
-#### [USERS]
-```python
-client.send('USERS')
-client.send('USERS', target='remote.*.edu')
-```
-
- USERS [<target>]
-
-#### [WALLOPS]
-```python
-client.send('WALLOPS', message='Maintenance in 5 minutes')
-```
-
- WALLOPS :<message>
-
-* PARAM_RENAME `Text to be sent -> message`
-
-#### [USERHOST]
-```python
-client.send('USERHOST', nick='WiZ')
-client.send('USERHOST', nick=['WiZ', 'WiZ-friend'])
-```
-
- USERHOST <nick>
-
-* PARAM_RENAME `nickname -> nick`
-* MULTIPLE_VALUES `nick`
-
-#### [ISON]
-```python
-client.send('ISON', nick='WiZ')
-client.send('ISON', nick=['WiZ', 'WiZ-friend'])
-```
-
- ISON <nick>
-
-* PARAM_RENAME `nickname -> nick`
-* MULTIPLE_VALUES `nick`
-
-[PASS]: https://tools.ietf.org/html/rfc2812#section-3.1.1
-[NICK]: https://tools.ietf.org/html/rfc2812#section-3.1.2
-[USER]: https://tools.ietf.org/html/rfc2812#section-3.1.3
-[OPER]: https://tools.ietf.org/html/rfc2812#section-3.1.4
-[USERMODE]: https://tools.ietf.org/html/rfc2812#section-3.1.5
-[SERVICE]: https://tools.ietf.org/html/rfc2812#section-3.1.6
-[QUIT]: https://tools.ietf.org/html/rfc2812#section-3.1.7
-[SQUIT]: https://tools.ietf.org/html/rfc2812#section-3.1.8
-
-[JOIN]: https://tools.ietf.org/html/rfc2812#section-3.2.1
-[PART]: https://tools.ietf.org/html/rfc2812#section-3.2.2
-[CHANNELMODE]: https://tools.ietf.org/html/rfc2812#section-3.2.3
-[TOPIC]: https://tools.ietf.org/html/rfc2812#section-3.2.4
-[NAMES]: https://tools.ietf.org/html/rfc2812#section-3.2.5
-[LIST]: https://tools.ietf.org/html/rfc2812#section-3.2.6
-[INVITE]: https://tools.ietf.org/html/rfc2812#section-3.2.7
-[KICK]: https://tools.ietf.org/html/rfc2812#section-3.2.8
-
-[PRIVMSG]: https://tools.ietf.org/html/rfc2812#section-3.3.1
-[NOTICE]: https://tools.ietf.org/html/rfc2812#section-3.3.2
-
-[MOTD]: https://tools.ietf.org/html/rfc2812#section-3.4.1
-[LUSERS]: https://tools.ietf.org/html/rfc2812#section-3.4.2
-[VERSION]: https://tools.ietf.org/html/rfc2812#section-3.4.3
-[STATS]: https://tools.ietf.org/html/rfc2812#section-3.4.4
-[LINKS]: https://tools.ietf.org/html/rfc2812#section-3.4.5
-[TIME]: https://tools.ietf.org/html/rfc2812#section-3.4.6
-[CONNECT]: https://tools.ietf.org/html/rfc2812#section-3.4.7
-[TRACE]: https://tools.ietf.org/html/rfc2812#section-3.4.8
-[ADMIN]: https://tools.ietf.org/html/rfc2812#section-3.4.9
-[INFO]: https://tools.ietf.org/html/rfc2812#section-3.4.10
-
-[SERVLIST]: https://tools.ietf.org/html/rfc2812#section-3.5.1
-[SQUERY]: https://tools.ietf.org/html/rfc2812#section-3.5.2
-
-[WHO]: https://tools.ietf.org/html/rfc2812#section-3.6.1
-[WHOIS]: https://tools.ietf.org/html/rfc2812#section-3.6.2
-[WHOWAS]: https://tools.ietf.org/html/rfc2812#section-3.6.3
-
-[KILL]: https://tools.ietf.org/html/rfc2812#section-3.7.1
-[PING]: https://tools.ietf.org/html/rfc2812#section-3.7.2
-[PONG]: https://tools.ietf.org/html/rfc2812#section-3.7.3
-
-[AWAY]: https://tools.ietf.org/html/rfc2812#section-4.1
-[REHASH]: https://tools.ietf.org/html/rfc2812#section-4.2
-[DIE]: https://tools.ietf.org/html/rfc2812#section-4.3
-[RESTART]: https://tools.ietf.org/html/rfc2812#section-4.4
-[SUMMON]: https://tools.ietf.org/html/rfc2812#section-4.5
-[USERS]: https://tools.ietf.org/html/rfc2812#section-4.6
-[WALLOPS]: https://tools.ietf.org/html/rfc2812#section-4.7
-[USERHOST]: https://tools.ietf.org/html/rfc2812#section-4.8
-[ISON]: https://tools.ietf.org/html/rfc2812#section-4.9
-
-
-# Supported Events
-
-These commands are received from the server, or dispatched using `Client.trigger(...)`.
-
-For sending commands, see [`Supported Commands`](#supported-commands) above.
-
-* PING
-* JOIN
-* PART
-* PRIVMSG
-* NOTICE
-* RPL_WELCOME (001)
-* RPL_YOURHOST (002)
-* RPL_CREATED (003)
-* RPL_MYINFO (004)
-* RPL_BOUNCE (005)
-* RPL_MOTDSTART (375)
-* RPL_MOTD (372)
-* RPL_ENDOFMOTD (376)
-* RPL_LUSERCLIENT (251)
-* RPL_LUSERME (255)
-* RPL_LUSEROP (252)
-* RPL_LUSERUNKNOWN (253)
-* RPL_LUSERCHANNELS (254)
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..8331488
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,524 @@
+.. image:: https://img.shields.io/travis/numberoverzero/bottom/master.svg?style=flat-square
+ :target: https://travis-ci.org/numberoverzero/bottom
+.. image:: https://img.shields.io/coveralls/numberoverzero/bottom/master.svg?style=flat-square
+ :target: https://coveralls.io/github/numberoverzero/bottom
+.. image:: https://img.shields.io/pypi/v/bottom.svg?style=flat-square
+ :target: https://pypi.python.org/pypi/bottom
+.. image:: https://img.shields.io/github/issues-raw/numberoverzero/bottom.svg?style=flat-square
+ :target: https://github.com/numberoverzero/bottom/issues
+.. image:: https://img.shields.io/pypi/l/bottom.svg?style=flat-square
+ :target: https://github.com/numberoverzero/bottom/blob/master/LICENSE
+
+asyncio-based rfc2812-compliant IRC Client (3.5+)
+
+Installation
+============
+::
+
+ pip install bottom
+
+Getting Started
+===============
+
+bottom isn't a kitchen-sink library. Instead, it provides a consistent API with a small surface area, tuned for performance and ease of extension. Similar to the routing style of bottle.py, hooking into events is one line.
+
+::
+
+ import bottom
+ import asyncio
+
+ NICK = 'bottom-bot'
+ CHANNEL = '#python'
+
+ bot = bottom.Client('localhost', 6697)
+
+
+ @bot.on('CLIENT_CONNECT')
+ def connect():
+ bot.send('NICK', nick=NICK)
+ bot.send('USER', user=NICK, realname='Bot using bottom.py')
+ bot.send('JOIN', channel=CHANNEL)
+
+
+ @bot.on('PING')
+ def keepalive(message):
+ bot.send('PONG', message=message)
+
+
+ @bot.on('PRIVMSG')
+ def message(nick, target, message):
+ ''' Echo all messages '''
+
+ # Don't echo ourselves
+ if nick == NICK:
+ return
+ # Direct message to bot
+ if target == NICK:
+ bot.send("PRIVMSG", target=nick, message=message)
+ # Message in channel
+ else:
+ bot.send("PRIVMSG", target=target, message=message)
+
+ asyncio.get_event_loop().run_until_complete(bot.run())
+
+Versioning and RFC2812
+=======================
+
+* Bottom follows semver for its **public** API.
+
+ * Currently, ``Client`` is the only public member of bottom.
+ * IRC replies/codes which are not yet implemented may be added at any time, and will correspond to a patch - the function contract of ``@on`` method does not change.
+ * You should not rely on the internal api staying the same between minor versions.
+ * Over time, private apis may be raised to become public. The reverse will never occur.
+
+* There are a number of changes from RFC2812 - none should noticeably change how you interact with a standard IRC server. For specific adjustments, see the notes above each command in supported_commands_.
+
+Contributing
+============
+
+Contributions welcome! When reporting issues, please provide enough detail to reproduce the bug - sample code is ideal. When submitting a PR, please make sure ``tox`` passes (including flake8).
+
+Development
+-----------
+
+bottom uses ``tox``, ``pytest`` and ``flake8``. To get everything set up::
+
+ # RECOMMENDED: create a virtualenv with:
+ # mkvirtualenv bottom
+ git clone https://github.com/numberoverzero/bottom.git
+ pip install tox
+ tox
+
+
+TODO
+----
+
+* Better ``Client`` docstrings
+* Add missing replies/errors to ``unpack.py:unpack_command``
+
+  * Add reply/error parameters to ``unpack.py:parameters``
+ * Document supported_events_
+
+
+Contributors
+------------
+* `fahhem <https://github.com/fahhem>`_
+* `thebigmunch <https://github.com/thebigmunch>`_
+* `tilal6991 <https://github.com/tilal6991>`_
+* `AMorporkian <https://github.com/AMorporkian>`_
+
+API
+===
+
+Client.run()
+------------
+
+*This is a coroutine.*
+
+Start the magic. This will connect the client, and then read until it disconnects. The ``CLIENT_DISCONNECT`` event will fire before the loop exits, allowing you to ``await Client.connect()`` and keep the client running.
+
+If you want to call this synchronously (block until it's complete) use the following::
+
+ import asyncio
+ # ... client is defined somewhere
+
+ loop = asyncio.get_event_loop()
+ task = client.run()
+ loop.run_until_complete(task)
+
+
+Client.on(event)(func)
+----------------------
+
+This ``@decorator`` is the main way you'll interact with a ``Client``. It takes a string, returning a function wrapper that validates the function and registers it for the given event. When that event occurs, the function will be called, mapping any arguments the function may expect from the set of available arguments for the event.
+
+Not all available arguments need to be used. For instance, both of the following are valid::
+
+ @bot.on('PRIVMSG')
+ def event(nick, message, target):
+ ''' Doesn't use user, host. argument order is different '''
+ # message sent to bot - echo message
+ if target == bot.nick:
+ bot.send('PRIVMSG', target, message=message)
+ # Some channel we're watching
+ elif target == bot.monitored_channel:
+ logger.info("{} -> {}: {}".format(nick, target, message))
+
+
+ @bot.on('PRIVMSG')
+ def func(message, target):
+ ''' Just waiting for the signal '''
+        if message == codeword and target == secret_channel:
+ execute_heist()
+
+
+VAR_KWARGS can be used, as long as the name doesn't mask an actual parameter. VAR_ARGS may not be used.
+
+::
+
+ # OK - kwargs, no masking
+ @bot.on('PRIVMSG')
+ def event(message, **everything_else):
+ logger.log(everything_else['nick'] + " said " + message)
+
+
+ # NOT OK - kwargs, masking parameter <nick>
+ @bot.on('PRIVMSG')
+ def event(message, **nick):
+ logger.log(nick['target'])
+
+
+ # NOT OK - uses VAR_ARGS
+ @bot.on('PRIVMSG')
+ def event(message, *args):
+ logger.log(args)
+
+
+Decorated functions will be invoked asynchronously, and may optionally use the ``await`` syntax. Functions do not need to be wrapped with ``@asyncio.coroutine`` - this is handled as part of the function caching process.
+
+Client.trigger(event, \*\*kwargs)
+---------------------------------
+
+Manually inject a command or reply as if it came from the server. This is useful for invoking other handlers.
+Note that because trigger doesn't block, registered callbacks for the event won't run until
+the event loop yields to them.
+
+::
+
+ # Manually trigger `PRIVMSG` handlers:
+ bot.trigger('privmsg', nick="always_says_no", message="yes")
+
+::
+
+ # Rename !commands to !help
+ @bot.on('privmsg')
+ def parse(nick, target, message):
+ if message == '!commands':
+ bot.send('privmsg', target=nick,
+ message="!commands was renamed to !help in 1.2")
+ # Don't make them retype it, just make it happen
+ bot.trigger('privmsg', nick=nick,
+ target=target, message="!help")
+
+::
+
+ # While testing the auto-reconnect module, simulate a disconnect:
+ def test_reconnect(bot):
+ bot.trigger("client_disconnect")
+ # Clear out the pending callbacks
+ bot.loop.run_until_complete(asyncio.sleep(0, loop=bot.loop))
+ assert bot.connected
+
+Client.connect()
+----------------
+
+*This is a coroutine.*
+
+Attempt to reconnect using the client's host, port::
+
+ @bot.on('client_disconnect')
+ async def reconnect():
+ # Wait a few seconds
+ await asyncio.sleep(3)
+ await bot.connect()
+
+
+Client.disconnect()
+-------------------
+
+*This is a coroutine.*
+
+Disconnect from the server if connected::
+
+ @bot.on('privmsg')
+ async def suicide_pill(nick, message):
+ if nick == "spy_handler" and message == "last stop":
+ await bot.disconnect()
+
+Client.send(command, \*\*kwargs)
+--------------------------------
+
+Send a command to the server.
+
+.. _supported_commands:
+
+Supported Commands
+==================
+
+::
+
+ client.send('PASS', password='hunter2')
+
+::
+
+ client.send('NICK', nick='WiZ')
+
+::
+
+ # mode is optional, default is 0
+ client.send('USER', user='WiZ-user', realname='Ronnie')
+ client.send('USER', user='WiZ-user', mode='8', realname='Ronnie')
+
+::
+
+ client.send('OPER', user='WiZ', password='hunter2')
+
+::
+
+ # Renamed from MODE
+ client.send('USERMODE', nick='WiZ')
+ client.send('USERMODE', nick='WiZ', modes='+io')
+
+::
+
+ client.send('SERVICE', nick='CHANSERV', distribution='*.en',
+ type='0', info='manages channels')
+
+::
+
+ client.send('QUIT')
+ client.send('QUIT', message='Gone to Lunch')
+
+::
+
+ client.send('SQUIT', server='tolsun.oulu.fi')
+ client.send('SQUIT', server='tolsun.oulu.fi', message='Bad Link')
+
+::
+
+ # If channel has n > 1 values, key MUST have 1 or n values
+ client.send('JOIN', channel='0') # send PART to all joined channels
+ client.send('JOIN', channel='#foo-chan')
+ client.send('JOIN', channel='#foo-chan', key='foo-key')
+ client.send('JOIN', channel=['#foo-chan', '#other'], key='key-for-both')
+ client.send('JOIN', channel=['#foo-chan', '#other'], key=['foo-key', 'other-key'])
+
+::
+
+ client.send('PART', channel='#foo-chan')
+ client.send('PART', channel=['#foo-chan', '#other'])
+ client.send('PART', channel='#foo-chan', message='I lost')
+
+::
+
+ # Renamed from MODE
+ client.send('CHANNELMODE', channel='#foo-chan', modes='+b')
+ client.send('CHANNELMODE', channel='#foo-chan', modes='+l', params='10')
+
+::
+
+ client.send('TOPIC', channel='#foo-chan')
+ client.send('TOPIC', channel='#foo-chan', message='') # Clear channel message
+ client.send('TOPIC', channel='#foo-chan', message='Yes, this is dog')
+
+::
+
+ # target requires channel
+ client.send('NAMES')
+ client.send('NAMES', channel='#foo-chan')
+ client.send('NAMES', channel=['#foo-chan', '#other'])
+ client.send('NAMES', channel=['#foo-chan', '#other'], target='remote.*.edu')
+
+::
+
+ # target requires channel
+ client.send('LIST')
+ client.send('LIST', channel='#foo-chan')
+ client.send('LIST', channel=['#foo-chan', '#other'])
+ client.send('LIST', channel=['#foo-chan', '#other'], target='remote.*.edu')
+
+::
+
+ client.send('INVITE', nick='WiZ-friend', channel='#bar-chan')
+
+::
+
+ # nick and channel must have the same number of elements
+ client.send('KICK', channel='#foo-chan', nick='WiZ')
+ client.send('KICK', channel='#foo-chan', nick='WiZ', message='Spamming')
+ client.send('KICK', channel='#foo-chan', nick=['WiZ', 'WiZ-friend'])
+ client.send('KICK', channel=['#foo', '#bar'], nick=['WiZ', 'WiZ-friend'])
+
+::
+
+ client.send('PRIVMSG', target='WiZ-friend', message='Hello, friend!')
+
+::
+
+ client.send('NOTICE', target='#foo-chan', message='Maintenance in 5 mins')
+
+::
+
+ client.send('MOTD')
+ client.send('MOTD', target='remote.*.edu')
+
+::
+
+ client.send('LUSERS')
+ client.send('LUSERS', mask='*.edu')
+ client.send('LUSERS', mask='*.edu', target='remote.*.edu')
+
+::
+
+ client.send('VERSION')
+
+::
+
+ # target requires query
+ client.send('STATS')
+ client.send('STATS', query='m')
+ client.send('STATS', query='m', target='remote.*.edu')
+
+::
+
+ # remote requires mask
+ client.send('LINKS')
+ client.send('LINKS', mask='*.bu.edu')
+ client.send('LINKS', remote='*.edu', mask='*.bu.edu')
+
+::
+
+ client.send('TIME')
+ client.send('TIME', target='remote.*.edu')
+
+::
+
+ client.send('CONNECT', target='tolsun.oulu.fi', port=6667)
+ client.send('CONNECT', target='tolsun.oulu.fi', port=6667, remote='*.edu')
+
+::
+
+ client.send('TRACE')
+ client.send('TRACE', target='remote.*.edu')
+
+::
+
+ client.send('ADMIN')
+ client.send('ADMIN', target='remote.*.edu')
+
+::
+
+ client.send('INFO')
+ client.send('INFO', target='remote.*.edu')
+
+::
+
+ # type requires mask
+ client.send('SERVLIST', mask='*SERV')
+ client.send('SERVLIST', mask='*SERV', type=3)
+
+::
+
+ client.send('SQUERY', target='irchelp', message='HELP privmsg')
+
+::
+
+ client.send('WHO')
+ client.send('WHO', mask='*.fi')
+ client.send('WHO', mask='*.fi', o=True)
+
+::
+
+ client.send('WHOIS', mask='*.fi')
+ client.send('WHOIS', mask=['*.fi', '*.edu'], target='remote.*.edu')
+
+::
+
+ # target requires count
+ client.send('WHOWAS', nick='WiZ')
+ client.send('WHOWAS', nick='WiZ', count=10)
+ client.send('WHOWAS', nick=['WiZ', 'WiZ-friend'], count=10)
+ client.send('WHOWAS', nick='WiZ', count=10, target='remote.*.edu')
+
+::
+
+ client.send('KILL', nick='WiZ', message='Spamming Joins')
+
+::
+
+ # server2 requires server1
+ client.send('PING', message='Test..')
+ client.send('PING', server2='tolsun.oulu.fi')
+ client.send('PING', server1='WiZ', server2='tolsun.oulu.fi')
+
+::
+
+ # server2 requires server1
+ client.send('PONG', message='Test..')
+ client.send('PONG', server2='tolsun.oulu.fi')
+ client.send('PONG', server1='WiZ', server2='tolsun.oulu.fi')
+
+::
+
+ client.send('AWAY')
+ client.send('AWAY', message='Gone to Lunch')
+
+::
+
+ client.send('REHASH')
+
+::
+
+ client.send('DIE')
+
+::
+
+ client.send('RESTART')
+
+::
+
+ # target requires channel
+ client.send('SUMMON', nick='WiZ')
+ client.send('SUMMON', nick='WiZ', target='remote.*.edu')
+ client.send('SUMMON', nick='WiZ', target='remote.*.edu', channel='#foo-chan')
+
+::
+
+ client.send('USERS')
+ client.send('USERS', target='remote.*.edu')
+
+::
+
+ client.send('WALLOPS', message='Maintenance in 5 minutes')
+
+::
+
+ client.send('USERHOST', nick='WiZ')
+ client.send('USERHOST', nick=['WiZ', 'WiZ-friend'])
+
+::
+
+ client.send('ISON', nick='WiZ')
+ client.send('ISON', nick=['WiZ', 'WiZ-friend'])
+
+.. _supported_events:
+
+Supported Events
+================
+
+These commands are received from the server, or dispatched using ``Client.trigger(...)``.
+
+::
+
+ # Local only events
+ client.trigger('CLIENT_CONNECT', host='localhost', port=6697)
+ client.trigger('CLIENT_DISCONNECT', host='localhost', port=6697)
+
+* PING
+* JOIN
+* PART
+* PRIVMSG
+* NOTICE
+* RPL_WELCOME (001)
+* RPL_YOURHOST (002)
+* RPL_CREATED (003)
+* RPL_MYINFO (004)
+* RPL_BOUNCE (005)
+* RPL_MOTDSTART (375)
+* RPL_MOTD (372)
+* RPL_ENDOFMOTD (376)
+* RPL_LUSERCLIENT (251)
+* RPL_LUSERME (255)
+* RPL_LUSEROP (252)
+* RPL_LUSERUNKNOWN (253)
+* RPL_LUSERCHANNELS (254)
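+
+For example, a minimal keepalive handler for the ``PING`` event (a sketch; it
+assumes the ``PING`` event carries a ``message`` parameter)::
+
+    @bot.on('PING')
+    def keepalive(message):
+        bot.send('PONG', message=message)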
diff --git a/bottom/__init__.py b/bottom/__init__.py
index 90729cf..5974c2e 100644
--- a/bottom/__init__.py
+++ b/bottom/__init__.py
@@ -1,25 +1,22 @@
""" asyncio-based rfc2812-compliant IRC Client """
-import logging
import asyncio
from . import connection
from . import event
from . import pack
from . import unpack
__all__ = ["Client"]
-logger = logging.getLogger(__name__)
+__version__ = "1.0.0"
class Client(event.EventsMixin):
- __conn_cls__ = connection.Connection
+ __conn_cls = connection.Connection
- def __init__(self, host, port, encoding='UTF-8', ssl=True):
- # It's ok that unpack.parameters isn't cached, since it's only
- # called when adding an event handler (which should __usually__
- # only occur during setup)
- super().__init__(unpack.parameters)
- # trigger events on the client
- self.connection = self.__conn_cls__(host, port, self,
- encoding=encoding, ssl=ssl)
+ def __init__(self, host, port, *, encoding='UTF-8', ssl=True, loop=None):
+ if loop is None:
+ loop = asyncio.get_event_loop()
+ super().__init__(unpack.parameters, loop=loop)
+ self.connection = self.__conn_cls(host, port, self, ssl=ssl,
+ encoding=encoding, loop=loop)
def send(self, command, **kwargs):
'''
@@ -34,22 +31,19 @@ class Client(event.EventsMixin):
packed_command = pack.pack_command(command, **kwargs)
self.connection.send(packed_command)
- @asyncio.coroutine
- def connect(self):
- yield from self.connection.connect()
+ async def connect(self):
+ await self.connection.connect()
- @asyncio.coroutine
- def disconnect(self):
- yield from self.connection.disconnect()
+ async def disconnect(self):
+ await self.connection.disconnect()
@property
def connected(self):
return self.connection.connected
- @asyncio.coroutine
- def run(self, loop=None):
+ async def run(self):
''' Run the client until it disconnects (without reconnecting) '''
- yield from self.connection.run(loop=loop)
+ await self.connection.run()
def on(self, command):
'''
diff --git a/bottom/connection.py b/bottom/connection.py
index 72434fa..f50b2f3 100644
--- a/bottom/connection.py
+++ b/bottom/connection.py
@@ -3,65 +3,61 @@ from . import unpack
class Connection(object):
- def __init__(self, host, port, events, encoding, ssl):
+ def __init__(self, host, port, events, encoding, ssl, *, loop):
self.events = events
self._connected = False
self.host, self.port = host, port
self.reader, self.writer = None, None
self.encoding = encoding
self.ssl = ssl
+ self.loop = loop
- @asyncio.coroutine
- def connect(self, loop=None):
+ async def connect(self):
if self.connected:
return
- self.reader, self.writer = yield from asyncio.open_connection(
- self.host, self.port, ssl=self.ssl, loop=loop)
+ self.reader, self.writer = await asyncio.open_connection(
+ self.host, self.port, ssl=self.ssl, loop=self.loop)
self._connected = True
- yield from self.events.trigger(
- "CLIENT_CONNECT", host=self.host, port=self.port)
+ self.events.trigger("CLIENT_CONNECT", host=self.host, port=self.port)
- @asyncio.coroutine
- def disconnect(self):
+ async def disconnect(self):
if not self.connected:
return
self.writer.close()
self.writer = None
self.reader = None
self._connected = False
- yield from self.events.trigger(
+ self.events.trigger(
"CLIENT_DISCONNECT", host=self.host, port=self.port)
@property
def connected(self):
return self._connected
- @asyncio.coroutine
- def run(self, loop=None):
- yield from self.connect(loop=loop)
+ async def run(self):
+ await self.connect()
while self.connected:
- msg = yield from self.read()
+ msg = await self.read()
if msg:
try:
event, kwargs = unpack.unpack_command(msg)
except ValueError:
print("PARSE ERROR {}".format(msg))
else:
- yield from self.events.trigger(event, **kwargs)
+ self.events.trigger(event, **kwargs)
else:
# Lost connection
- yield from self.disconnect()
+ await self.disconnect()
def send(self, msg):
if self.writer:
self.writer.write((msg.strip() + '\n').encode(self.encoding))
- @asyncio.coroutine
- def read(self):
+ async def read(self):
if not self.reader:
return ''
try:
- msg = yield from self.reader.readline()
+ msg = await self.reader.readline()
return msg.decode(self.encoding, 'ignore').strip()
except EOFError:
return ''
diff --git a/bottom/event.py b/bottom/event.py
index 79370da..f94cbf1 100644
--- a/bottom/event.py
+++ b/bottom/event.py
@@ -5,7 +5,7 @@ missing = object()
class EventsMixin(object):
- def __init__(self, getparams):
+ def __init__(self, getparams, *, loop):
'''
getparams is a function that takes a single argument (event) and
returns a list of parameters for the event. It should raise on unknown
@@ -15,28 +15,25 @@ class EventsMixin(object):
# where event is a string, and list(func) is the list of functions
# (wrapped and decorated) that will be invoked when the given event
# is triggered.
- self.__partials__ = collections.defaultdict(list)
- self.__getparams__ = getparams
+ self._partials = collections.defaultdict(list)
+ self._getparams = getparams
+ self.loop = loop
- def __add_event__(self, event, func):
+ def _add_event(self, event, func):
'''
Validate the func's signature, then partial_bind the function to speed
up argument injection.
'''
- parameters = self.__getparams__(event)
+ parameters = self._getparams(event)
validate_func(event, func, parameters)
- self.__partials__[event].append(partial_bind(func))
+ self._partials[event].append(partial_bind(func))
return func
- @asyncio.coroutine
def trigger(self, event, **kwargs):
- ''' This is a coroutine so that we can `yield from` its execution '''
- partials = self.__partials__[event]
- tasks = [func(**kwargs) for func in partials]
- if not tasks:
- return
- yield from asyncio.wait(tasks)
+ partials = self._partials[event]
+ for func in partials:
+ self.loop.create_task(func(**kwargs))
def on(self, event):
'''
@@ -60,12 +57,15 @@ class EventsMixin(object):
event = 'test'
kwargs = {'one': 1, 'two': 2, 'arg': 'arg'}
+ events.trigger(event, **kwargs)
loop = asyncio.get_event_loop()
- loop.run_until_complete(events.trigger(event, **kwargs))
+ # Run all queued events
+ loop.stop()
+ loop.run_forever()
'''
def wrap_function(func):
- self.__add_event__(event, func)
+ self._add_event(event, func)
return func
return wrap_function
@@ -110,7 +110,7 @@ def validate_func(event, func, parameters):
def partial_bind(func):
sig = inspect.signature(func)
- # Wrap non-coroutines so we can always `yield from func(**kw)`
+ # Wrap non-coroutines so we can always `await func(**kw)`
if not asyncio.iscoroutinefunction(func):
func = asyncio.coroutine(func)
base = {}
@@ -122,8 +122,7 @@ def partial_bind(func):
else:
base[key] = default
- @asyncio.coroutine
- def wrapper(**kwargs):
+ async def wrapper(**kwargs):
unbound = base.copy()
# Only map params this function expects
for key in base:
@@ -131,6 +130,6 @@ def partial_bind(func):
if new_value is not missing:
unbound[key] = new_value
bound = sig.bind(**unbound)
- yield from func(*bound.args, **bound.kwargs)
+ await func(*bound.args, **bound.kwargs)
return wrapper
diff --git a/bottom/plugins/router.py b/bottom/plugins/router.py
index b345e25..cd367d5 100644
--- a/bottom/plugins/router.py
+++ b/bottom/plugins/router.py
@@ -33,13 +33,13 @@ class Router(object):
self.routes = {}
bot.on("PRIVMSG")(self.handle)
- async def handle(self, nick, target, message):
+ def handle(self, nick, target, message):
''' bot callback entrance '''
for regex, (func, pattern) in self.routes.items():
match = regex.match(message)
if match:
fields = match.groupdict()
- await func(nick, target, fields)
+ self.bot.loop.create_task(func(nick, target, fields))
def route(self, pattern, **kwargs):
'''
diff --git a/bottom/unpack.py b/bottom/unpack.py
index 9c481f2..d5ecc8d 100644
--- a/bottom/unpack.py
+++ b/bottom/unpack.py
@@ -1,7 +1,6 @@
""" Simplified support for rfc2812 """
# https://tools.ietf.org/html/rfc2812
import re
-missing = object()
RE_IRCLINE = re.compile(
"""
diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644
index 8f19ef8..0000000
--- a/setup.cfg
+++ /dev/null
@@ -1,6 +0,0 @@
-[run]
-source = bottom
-branch = True
-
-[flake8]
-exclude = .tox, dist, doc, build, *.egg
diff --git a/setup.py b/setup.py
index 34a90cc..f79ea10 100644
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,13 @@ import os
from setuptools import setup, find_packages
HERE = os.path.abspath(os.path.dirname(__file__))
-README = open(os.path.join(HERE, 'README.markdown')).read()
+README = open(os.path.join(HERE, 'README.rst')).read()
+
+def get_version():
+ with open("bottom/__init__.py") as f:
+ for line in f:
+ if line.startswith("__version__"):
+ return eval(line.split("=")[-1])
REQUIREMENTS = [
'simplex'
@@ -20,7 +26,7 @@ TEST_REQUIREMENTS = [
if __name__ == "__main__":
setup(
name='bottom',
- version='0.9.13',
+ version=get_version(),
description="asyncio-based rfc2812-compliant IRC Client",
long_description=README,
classifiers=[
diff --git a/tox.ini b/tox.ini
index 37f47e8..8dd916b 100644
--- a/tox.ini
+++ b/tox.ini
@@ -2,11 +2,10 @@
envlist = py35
[testenv]
-deps = git+https://github.com/pytest-dev/pytest
+deps = pytest
flake8
- coverage>=4.0a1
+ coverage
commands =
coverage run --branch --source=bottom -m py.test
coverage report -m
- # disabled until flake8 supports py3.5
- # flake8 bottom tests
+ flake8 bottom tests
| Dispatch blocks, incorrect async
After unpacking, the reading loop will [`yield from`](https://github.com/numberoverzero/bottom/blob/master/bottom/connection.py#L50) the event dispatch, which means it waits until all handlers run for the event, before continuing.
Instead, `events.trigger` should be a non-blocking call. That is, it should push the event into a queue, then return execution to the caller.
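
A minimal sketch of the non-blocking dispatch described above (the class and
attribute names are illustrative, not bottom's actual API):

```python
import asyncio

class Events:
    def __init__(self, loop: asyncio.AbstractEventLoop):
        self.loop = loop
        self.handlers = {}  # event name -> list of coroutine functions

    def trigger(self, event, **kwargs):
        # Schedule every handler as a task and return immediately,
        # instead of blocking on `yield from asyncio.wait(tasks)`.
        for func in self.handlers.get(event, []):
            self.loop.create_task(func(**kwargs))
```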
---
I'm working on [accordian](https://github.com/numberoverzero/accordian) to fix this - however, in trying to use the 3.5 syntax (`await` and `async` as keywords) I've run into other problems. Once I've finished testing the library, I'll integrate it into bottom. This will require 3.5+ to use bottom, but the migration from 3.4 (current requirement) to 3.5 is virtually painless (I haven't seen any reports of problems migrating). | numberoverzero/bottom | diff --git a/tests/conftest.py b/tests/conftest.py
index 03eea58..3bf0c0f 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,3 +1,5 @@
+from bottom import Client
+from bottom.connection import Connection
from bottom.event import EventsMixin
import pytest
import asyncio
@@ -5,38 +7,39 @@ import collections
@pytest.fixture
-def run():
+def loop():
'''
- Run a coro until it completes.
-
- Returns result from coro, if it produces one.
+ Keep things clean by using a new event loop
'''
- def run_in_loop(coro):
- # For more details on what's going on:
- # https://docs.python.org/3/library/asyncio-task.html\
- # #example-future-with-run-until-complete
- def capture_return(future):
- ''' Push coro result into future for return '''
- result = yield from coro
- future.set_result(result)
- # Kick off the coro, wrapped in the future above
- future = asyncio.Future()
- asyncio.async(capture_return(future))
-
- # Block until coro completes and dumps return in future
- loop = asyncio.get_event_loop()
- loop.run_until_complete(future)
-
- # Hand result back
- return future.result()
- return run_in_loop
+ loop = asyncio.new_event_loop()
+ loop.set_debug(True)
+ return loop
@pytest.fixture
-def loop():
- # TODO: fix to use a new event loop. Because the loop fix will require
- # touching a lot of code, this is an easy way to get the build green again
- return asyncio.new_event_loop()
+def flush(loop):
+ """Run loop once, to execute any pending tasks"""
+
+ async def sentinel():
+ pass
+
+ def _flush():
+ loop.run_until_complete(sentinel())
+ return _flush
+
+
[email protected]
+def schedule(loop):
+ def _schedule(*coros):
+ for coro in coros:
+ loop.create_task(coro)
+ return _schedule
+
+
[email protected]
+def connection(patch_connection, events, loop):
+ print("connection")
+ return Connection("host", "port", events, "UTF-8", True, loop=loop)
@pytest.fixture
@@ -45,9 +48,9 @@ def eventparams():
@pytest.fixture
-def events(eventparams):
+def events(eventparams, loop):
''' Return a no-op EventsMixin that tracks triggers '''
- return MockEvents(lambda e: eventparams[e])
+ return MockEvents(lambda e: eventparams[e], loop=loop)
@pytest.fixture
@@ -60,6 +63,17 @@ def writer():
return MockStreamWriter()
[email protected]
+def client(patch_connection, loop):
+ '''
+ Return a client with mocked out asyncio.
+
+ Pulling in patch_connection here mocks out asyncio.open_connection,
+ so that we can use reader, writer, run in tests.
+ '''
+ return Client("host", "port", loop=loop)
+
+
@pytest.fixture
def patch_connection(reader, writer, monkeypatch):
'''
@@ -75,13 +89,13 @@ def patch_connection(reader, writer, monkeypatch):
class MockEvents(EventsMixin):
- def __init__(self, getparams):
+ def __init__(self, getparams, *, loop=None):
self.triggered_events = collections.defaultdict(int)
- super().__init__(getparams)
+ super().__init__(getparams, loop=loop)
def trigger(self, event, **kwargs):
self.triggered_events[event] += 1
- yield from super().trigger(event, **kwargs)
+ super().trigger(event, **kwargs)
def triggered(self, event, n=1):
'''
@@ -105,8 +119,7 @@ class MockStreamReader():
self.encoding = encoding
self.used = False
- @asyncio.coroutine
- def readline(self):
+ async def readline(self):
self.used = True
try:
line = self.lines.pop(0)
diff --git a/tests/test_client.py b/tests/test_client.py
index 1b38970..bfda26b 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -1,36 +1,32 @@
-from bottom import Client
+import asyncio
import pytest
+from bottom import Client
[email protected]
-def client(patch_connection, run):
- '''
- Return a client with mocked out asyncio.
-
- Pulling in patch_connection here mocks out asyncio.open_connection,
- so that we can use reader, writer, run in tests.
- '''
- return Client("host", "port")
+def test_default_event_loop():
+ default_loop = asyncio.get_event_loop()
+ client = Client(host="host", port="port")
+ assert client.loop is default_loop
-def test_send_unknown_command(client, run):
+def test_send_unknown_command(client, loop):
''' Sending an unknown command raises '''
- run(client.connect())
+ loop.run_until_complete(client.connect())
assert client.connected
with pytest.raises(ValueError):
client.send("Unknown_Command")
-def test_send_before_connected(client, writer, run):
+def test_send_before_connected(client, writer):
''' Sending before connected does not invoke writer '''
client.send("PONG")
assert not writer.used
-def test_send_after_disconnected(client, writer, run):
+def test_send_after_disconnected(client, writer, loop):
''' Sending after disconnect does not invoke writer '''
- run(client.connect())
- run(client.disconnect())
+ loop.run_until_complete(client.connect())
+ loop.run_until_complete(client.disconnect())
client.send("PONG")
assert not writer.used
@@ -41,13 +37,13 @@ def test_on(client):
@client.on('privmsg')
def route(nick, target, message):
pass
- assert len(client.__partials__["PRIVMSG"]) == 1
+ assert len(client._partials["PRIVMSG"]) == 1
with pytest.raises(ValueError):
client.on("UNKNOWN_COMMAND")(route)
-def test_run_(client, reader, eventparams, run):
+def test_run_(client, reader, eventparams, loop):
''' run delegates to Connection, which triggers events on the Client '''
reader.push(":nick!user@host PRIVMSG #target :this is message")
received = []
@@ -56,7 +52,7 @@ def test_run_(client, reader, eventparams, run):
def receive(nick, user, host, target, message):
received.extend([nick, user, host, target, message])
- run(client.run())
+ loop.run_until_complete(client.run())
assert reader.has_read(":nick!user@host PRIVMSG #target :this is message")
assert received == ["nick", "user", "host", "#target", "this is message"]
diff --git a/tests/test_connection.py b/tests/test_connection.py
index 6eb4cb3..cce800e 100644
--- a/tests/test_connection.py
+++ b/tests/test_connection.py
@@ -1,130 +1,115 @@
-from bottom.connection import Connection
-import pytest
-
-
[email protected]
-def conn(patch_connection, events, run):
- ''' Generic connection that is ready to read/send '''
- conn = Connection("host", "port", events, "UTF-8", True)
- run(conn.connect())
- assert conn.connected
- return conn
-
-
-def test_connect(patch_connection, writer, events, run):
+def test_connect(connection, events, writer, schedule, flush):
''' Connection.Connect opens a writer, triggers CLIENT_CONNECT '''
- conn = Connection("host", "port", events, "UTF-8", True)
- run(conn.connect())
- assert conn.connected
+ schedule(connection.connect())
+ flush()
+ assert connection.connected
assert not writer.closed
assert events.triggered("CLIENT_CONNECT")
-def test_connect_already_connected(patch_connection, writer, events, run):
+def test_already_connected(connection, events, writer, schedule, flush):
''' Does not trigger CLIENT_CONNECT multiple times '''
- conn = Connection("host", "port", events, "UTF-8", True)
- run(conn.connect())
- run(conn.connect())
+ schedule(connection.connect(), connection.connect())
+ flush()
assert not writer.closed
assert events.triggered("CLIENT_CONNECT")
-def test_disconnect_before_connect(patch_connection, events, run):
+def test_disconnect_before_connect(connection, events, schedule, flush):
''' disconnect before connect does nothing '''
- conn = Connection("host", "port", events, "UTF-8", True)
- run(conn.disconnect())
- assert not conn.connected
+ schedule(connection.disconnect())
+ flush()
+ assert not connection.connected
assert not events.triggered("CLIENT_CONNECT")
assert not events.triggered("CLIENT_DISCONNECT")
-def test_disconnect(writer, patch_connection, events, run):
+def test_disconnect(writer, patch_connection, events, connection,
+ schedule, flush):
''' Connection.disconnect closes writer, triggers CLIENT_DISCONNECT '''
- conn = Connection("host", "port", events, "UTF-8", True)
- run(conn.connect())
- run(conn.disconnect())
- assert not conn.connected
+ schedule(connection.connect(), connection.disconnect())
+ flush()
+ assert not connection.connected
assert writer.closed
- assert conn.writer is None
+ assert connection.writer is None
assert events.triggered("CLIENT_CONNECT")
assert events.triggered("CLIENT_DISCONNECT")
-def test_disconnect_already_disconnected(patch_connection, events, run):
+def test_already_disconnected(connection, events, schedule, flush):
''' Does not trigger CLIENT_DISCONNECT multiple times '''
- conn = Connection("host", "port", events, "UTF-8", True)
- run(conn.connect())
- run(conn.disconnect())
- run(conn.disconnect())
+ schedule(connection.connect(),
+ connection.disconnect(),
+ connection.disconnect())
+ flush()
assert events.triggered("CLIENT_CONNECT")
assert events.triggered("CLIENT_DISCONNECT")
-def test_send_before_connected(patch_connection, writer, events, run):
+def test_send_before_connected(connection, writer):
''' Nothing happens when sending before connecting '''
- conn = Connection("host", "port", events, "UTF-8", True)
- assert not conn.connected
- conn.send("test")
+ assert not connection.connected
+ connection.send("test")
assert not writer.used
-def test_send_disconnected(patch_connection, writer, events, run):
+def test_send_disconnected(connection, writer, schedule, flush):
''' Nothing happens when sending after disconnecting '''
- conn = Connection("host", "port", events, "UTF-8", True)
- run(conn.connect())
- run(conn.disconnect())
- conn.send("test")
+ schedule(connection.connect(), connection.disconnect())
+ flush()
+ connection.send("test")
assert not writer.used
-def test_send_strips(conn, writer):
+def test_send_strips(connection, writer, loop):
''' Send strips whitespace from string '''
- conn.send(" a b c | @#$ d ")
+ loop.run_until_complete(connection.connect())
+ connection.send(" a b c | @#$ d ")
assert writer.used
assert writer.has_written("a b c | @#$ d\n")
-def test_read_before_connected(patch_connection, reader, events, run):
+def test_read_before_connected(connection, reader, loop):
''' Nothing happens when reading before connecting '''
- conn = Connection("host", "port", events, "UTF-8", True)
- value = run(conn.read())
+ value = loop.run_until_complete(connection.read())
assert not value
assert not reader.used
-def test_read_disconnected(patch_connection, reader, events, run):
+def test_read_disconnected(connection, reader, schedule, flush, loop):
''' Nothing happens when reading after disconnecting '''
- conn = Connection("host", "port", events, "UTF-8", True)
- run(conn.connect())
- run(conn.disconnect())
- value = run(conn.read())
+ schedule(connection.connect(), connection.disconnect())
+ flush()
+ value = loop.run_until_complete(connection.read())
assert not value
assert not reader.used
-def test_read_eoferror(conn, reader, run):
+def test_read_eoferror(connection, reader, loop):
''' Nothing to read '''
- value = run(conn.read())
+ loop.run_until_complete(connection.connect())
+ value = loop.run_until_complete(connection.read())
assert not value
assert reader.used
-def test_read_strips(conn, reader, run):
+def test_read_strips(connection, reader, loop):
''' newline and space characters are stripped off '''
reader.push(" a b c | @#$ d \n")
- value = run(conn.read())
+ loop.run_until_complete(connection.connect())
+ value = loop.run_until_complete(connection.read())
assert value == "a b c | @#$ d"
assert reader.has_read(" a b c | @#$ d \n")
-def test_run_without_message(conn, events, run):
+def test_run_without_message(connection, events, loop):
''' Connection.run should connect, read empty, disconnect, return '''
- run(conn.run())
+ loop.run_until_complete(connection.run())
assert events.triggered("CLIENT_CONNECT")
assert events.triggered("CLIENT_DISCONNECT")
-def test_run_trigger_command(conn, reader, events, eventparams, run):
+def test_run_trigger_command(connection, reader, events, eventparams, loop):
eventparams["PRIVMSG"] = ["nick", "user", "host", "target", "message"]
reader.push(":nick!user@host PRIVMSG #target :this is message")
received = []
@@ -133,16 +118,15 @@ def test_run_trigger_command(conn, reader, events, eventparams, run):
def receive(nick, user, host, target, message):
received.extend([nick, user, host, target, message])
- run(conn.run())
-
+ loop.run_until_complete(connection.run())
assert reader.has_read(":nick!user@host PRIVMSG #target :this is message")
assert events.triggered("PRIVMSG")
assert received == ["nick", "user", "host", "#target", "this is message"]
-def test_run_trigger_unknown_command(conn, reader, events, run):
+def test_run_trigger_unknown_command(connection, reader, events, loop):
reader.push("unknown_command")
- run(conn.run())
+ loop.run_until_complete(connection.run())
assert reader.has_read("unknown_command")
assert not events.triggered("unknown_command")
diff --git a/tests/test_event.py b/tests/test_event.py
index 1859eb3..5d496fe 100644
--- a/tests/test_event.py
+++ b/tests/test_event.py
@@ -14,14 +14,8 @@ def getparams():
@pytest.fixture
-def events(getparams):
- return event.EventsMixin(getparams)
-
-
[email protected]
-def run():
- loop = asyncio.get_event_loop()
- return lambda coro: loop.run_until_complete(coro)
+def events(getparams, loop):
+ return event.EventsMixin(getparams, loop=loop)
@pytest.fixture
@@ -43,7 +37,6 @@ def watch():
# EventsMixin.on
# ==============
-
def test_on_subset(events):
''' register a handler with a subset of available parameters '''
for e in ["0", "1", "2"]:
@@ -90,7 +83,8 @@ def test_defaults(events):
def test_on_coroutine(events):
''' coroutines are fine '''
- handle = asyncio.coroutine(lambda one: None)
+ async def handle(one):
+ pass
events.on("1")(handle)
@@ -98,95 +92,84 @@ def test_on_coroutine(events):
# EventsMixin.trigger
# ===================
-def test_trigger(events, run, watch):
+def test_trigger(events, watch, flush):
''' trigger calls registered handler '''
w = watch()
- # Register handler - increment call counter when called
events.on("0")(lambda: w.call())
- # Trigger handler
- run(events.trigger("0"))
- # Make sure we called once
+ events.trigger("0")
+ flush()
assert w.called
-def test_trigger_multiple_calls(events, run, watch):
+def test_trigger_multiple_calls(events, watch, flush):
''' trigger calls re-registered handler twice '''
w = watch()
- # Register handler twice - increment call counter when called
events.on("0")(lambda: w.call())
events.on("0")(lambda: w.call())
- # Trigger handler
- run(events.trigger("0"))
- # Make sure we called twice
+ events.trigger("0")
+ flush()
assert w.calls == 2
-def test_trigger_multiple_handlers(events, run, watch):
+def test_trigger_multiple_handlers(events, watch, flush):
''' trigger calls re-registered handler twice '''
w1 = watch()
w2 = watch()
- # Register two handlers
events.on("0")(lambda: w1.call())
events.on("0")(lambda: w2.call())
- # Trigger handler
- run(events.trigger("0"))
- # Make sure we called each once
+ events.trigger("0")
+ flush()
assert w1.calls == 1
assert w2.calls == 1
-def test_trigger_no_handlers(events, run):
+def test_trigger_no_handlers(events, flush):
''' trigger an event with no handlers '''
- run(events.trigger("some event"))
+ events.trigger("some event")
+ flush()
-def test_trigger_superset_params(events, run):
+def test_trigger_superset_params(events, flush):
''' trigger an event with kwarg keys that aren't in event params '''
params = {}
def func(one, two):
params["one"] = one
params["two"] = two
-
events.on("2")(func)
-
kwargs = {"one": 1, "two": 2, "unused": "value"}
- run(events.trigger("2", **kwargs))
-
+ events.trigger("2", **kwargs)
+ flush()
assert params["one"] == 1
assert params["two"] == 2
-def test_trigger_subset_params(events, run):
+def test_trigger_subset_params(events, flush):
''' trigger an event with missing kwargs pads with None '''
params = {}
def func(one, two):
params["one"] = one
params["two"] = two
-
events.on("2")(func)
-
kwargs = {"one": 1}
- run(events.trigger("2", **kwargs))
-
+ events.trigger("2", **kwargs)
+ flush()
assert params["one"] == 1
assert params["two"] is None
-def test_trigger_subset_params_with_defaults(events, run):
+def test_trigger_subset_params_with_defaults(events, flush):
''' trigger an event with missing kwargs uses function defaults '''
params = {}
def func(one, two="default"):
params["one"] = one
params["two"] = two
-
events.on("2")(func)
-
kwargs = {"one": 1}
- run(events.trigger("2", **kwargs))
-
+ events.trigger("2", **kwargs)
+ flush()
assert params["one"] == 1
assert params["two"] == "default"
@@ -196,7 +179,7 @@ def test_trigger_subset_params_with_defaults(events, run):
# ===================
-def test_bound_method_of_instance(events, run):
+def test_bound_method_of_instance(events, flush):
''' verify bound methods are correctly inspected '''
params = {}
@@ -207,9 +190,38 @@ def test_bound_method_of_instance(events, run):
instance = Class()
bound_method = instance.method
events.on("2")(bound_method)
-
kwargs = {"one": 1}
- run(events.trigger("2", **kwargs))
-
+ events.trigger("2", **kwargs)
+ flush()
assert params["one"] == 1
assert params["two"] == "default"
+
+
+# ===================
+# Ordering + Blocking
+# ===================
+
+
+def test_callback_ordering(events, flush, loop):
+ ''' Callbacks for a second event don't queue behind the first event '''
+ second_complete = asyncio.Event(loop=loop)
+ call_order = []
+ complete_order = []
+
+ async def first():
+ call_order.append("first")
+ await second_complete.wait()
+ complete_order.append("first")
+
+ async def second():
+ call_order.append("second")
+ complete_order.append("second")
+ second_complete.set()
+
+ events.on("0")(first)
+ events.on("0")(second)
+
+ events.trigger("0")
+ flush()
+ assert call_order == ["first", "second"]
+ assert complete_order == ["second", "first"]
diff --git a/tests/test_plugins/test_router.py b/tests/test_plugins/test_router.py
index eb6a434..955becb 100644
--- a/tests/test_plugins/test_router.py
+++ b/tests/test_plugins/test_router.py
@@ -1,42 +1,12 @@
import pytest
-from bottom import Client
from bottom.plugins.router import Router
-class MockConnection():
- def __init__(self, *a, **kw):
- pass
-
-
-class MockClient(Client):
- __conn_cls__ = MockConnection
-
- def __init__(self, *args, **kwargs):
- self.handlers = []
- super().__init__(*args, **kwargs)
-
- def on(self, command):
- def wrap(function):
- self.handlers.append((command, function))
- return function
- return wrap
-
-
[email protected]
-def client():
- return MockClient("host", "port")
-
-
@pytest.fixture
def router(client):
return Router(client)
-def test_init_registers_privmsg(client):
- router = Router(client)
- assert ("PRIVMSG", router.handle) in client.handlers
-
-
def test_decorator_returns_original(router):
def original_func(nick, target, fields):
pass
@@ -45,43 +15,42 @@ def test_decorator_returns_original(router):
assert wrapped_func is original_func
-def test_handle_no_routes(router, loop):
- loop.run_until_complete(
- router.handle("nick", "target", "message"))
+def test_handle_no_routes(router, loop, flush):
+ router.handle("nick", "target", "message")
+ flush()
-def test_handle_no_matching_route(router, loop):
+def test_handle_no_matching_route(router, loop, flush):
@router.route("hello, [name]")
async def handle(nick, target, fields):
# Should not be called
assert False
- loop.run_until_complete(
- router.handle("nick", "target", "does not match"))
+ router.handle("nick", "target", "does not match")
+ flush()
-def test_handle_with_matching_route(router, loop):
+def test_handle_with_matching_route(router, loop, flush):
names = []
@router.route("hello, [name]")
def handle(nick, target, fields):
names.append(fields['name'])
- loop.run_until_complete(
- router.handle("nick", "target", "hello, jack"))
- loop.run_until_complete(
- router.handle("nick", "target", "hello, hello, recursion"))
+ router.handle("nick", "target", "hello, jack")
+ router.handle("nick", "target", "hello, hello, recursion")
+ flush()
assert ["jack", "hello, recursion"] == names
-def test_back_reference(router, loop):
+def test_back_reference(router, loop, flush):
actual_fields = {}
@router.route("<[tag]>[field]</[:ref(tag)]>")
def handle(nick, target, fields):
actual_fields.update(fields)
- loop.run_until_complete(
- router.handle("nick", "target", "<element>some value here</element>"))
+ router.handle("nick", "target", "<element>some value here</element>")
+ flush()
assert {"field": "some value here", "tag": "element"} == actual_fields
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_removed_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 10
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"flake8"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.10",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/numberoverzero/bottom.git@4293d2726d2a7222faa55ca509871ee03f4e66e1#egg=bottom
certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
colorama==0.4.6
coverage==7.8.0
exceptiongroup==1.2.2
flake8==7.2.0
idna==3.10
iniconfig==2.1.0
mccabe==0.7.0
packaging==24.2
pluggy==1.5.0
pycodestyle==2.13.0
pyflakes==3.3.1
pytest==8.3.5
pytest-cov==6.0.0
python-dotenv==1.1.0
requests==2.32.3
simplex==1.2.55
tomli==2.2.1
urllib3==2.3.0
| name: bottom
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py310h06a4308_0
- python=3.10.16=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py310h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py310h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- click==8.1.8
- colorama==0.4.6
- coverage==7.8.0
- exceptiongroup==1.2.2
- flake8==7.2.0
- idna==3.10
- iniconfig==2.1.0
- mccabe==0.7.0
- packaging==24.2
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pytest==8.3.5
- pytest-cov==6.0.0
- python-dotenv==1.1.0
- requests==2.32.3
- simplex==1.2.55
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/bottom
| [
"tests/test_client.py::test_default_event_loop",
"tests/test_client.py::test_send_unknown_command",
"tests/test_client.py::test_send_before_connected",
"tests/test_client.py::test_send_after_disconnected",
"tests/test_client.py::test_on",
"tests/test_client.py::test_run_",
"tests/test_connection.py::test_connect",
"tests/test_connection.py::test_already_connected",
"tests/test_connection.py::test_disconnect_before_connect",
"tests/test_connection.py::test_disconnect",
"tests/test_connection.py::test_already_disconnected",
"tests/test_connection.py::test_send_before_connected",
"tests/test_connection.py::test_send_disconnected",
"tests/test_connection.py::test_send_strips",
"tests/test_connection.py::test_read_before_connected",
"tests/test_connection.py::test_read_disconnected",
"tests/test_connection.py::test_read_eoferror",
"tests/test_connection.py::test_read_strips",
"tests/test_connection.py::test_run_without_message",
"tests/test_connection.py::test_run_trigger_command",
"tests/test_connection.py::test_run_trigger_unknown_command",
"tests/test_event.py::test_on_subset",
"tests/test_event.py::test_on_all",
"tests/test_event.py::test_on_superset",
"tests/test_event.py::test_on_ordering",
"tests/test_event.py::test_with_kwargs",
"tests/test_event.py::test_with_kwargs_masking",
"tests/test_event.py::test_var_args",
"tests/test_event.py::test_defaults",
"tests/test_event.py::test_on_coroutine",
"tests/test_event.py::test_trigger",
"tests/test_event.py::test_trigger_multiple_calls",
"tests/test_event.py::test_trigger_multiple_handlers",
"tests/test_event.py::test_trigger_no_handlers",
"tests/test_event.py::test_trigger_superset_params",
"tests/test_event.py::test_trigger_subset_params",
"tests/test_event.py::test_trigger_subset_params_with_defaults",
"tests/test_event.py::test_bound_method_of_instance",
"tests/test_plugins/test_router.py::test_handle_no_routes"
]
| [
"tests/test_event.py::test_callback_ordering",
"tests/test_plugins/test_router.py::test_decorator_returns_original",
"tests/test_plugins/test_router.py::test_handle_no_matching_route",
"tests/test_plugins/test_router.py::test_handle_with_matching_route",
"tests/test_plugins/test_router.py::test_back_reference"
]
| []
| []
| MIT License | 363 | [
"README.rst",
"bottom/event.py",
"MANIFEST.in",
"bottom/connection.py",
"setup.py",
".python-version",
"bottom/__init__.py",
".travis.yml",
"bottom/plugins/router.py",
"setup.cfg",
"tox.ini",
"README.markdown",
"bottom/unpack.py"
]
| [
"README.rst",
"bottom/event.py",
"MANIFEST.in",
"bottom/connection.py",
"setup.py",
".python-version",
"bottom/__init__.py",
".travis.yml",
"bottom/plugins/router.py",
"setup.cfg",
"tox.ini",
"README.markdown",
"bottom/unpack.py"
]
|
|
cmc-python__modelmachine-20 | fa9275d64498c7cbe24f02357bbb1bc971670756 | 2016-01-02 19:08:24 | fa9275d64498c7cbe24f02357bbb1bc971670756 | diff --git a/Makefile b/Makefile
index ed9fdc1..f83193b 100644
--- a/Makefile
+++ b/Makefile
@@ -3,7 +3,7 @@ GENERATED = build dist *.egg-info
all : test lint pep257 dist
-twine :
+twine : dist
twine upload dist/*
clean :
diff --git a/README.md b/README.md
index 0c15024..5460313 100644
--- a/README.md
+++ b/README.md
@@ -1,22 +1,16 @@
# modelmachine
Model machine emulator
-[](https://travis-ci.org/cmc-python/modelmachine)
+[](https://travis-ci.org/vslutov/modelmachine)
## TODO
-* Register model machine (УМ-Р)
-* Model machine with address modification ???
-
-* Change the basic memory->integer concept and rewrite the modules
-* Add test.alu.swap
+* Check test.alu.swap
* Floating-point arithmetic
* Think some more about mocks in the tests
-* Think about the halt stop command
* Rework the module documentation
* Fix typos in the documentation
* Extend the howto
-* GUI
## Model machine
@@ -91,8 +85,8 @@ Model machine emulator
* `numeric.py` - integer arithmetic with a fixed number of
binary digits
* `alu.py` - the arithmetic logic unit; it works with a fixed set of
- dedicated registers: `R1`, `R2`, `S`, `FLAGS` and `IP`.
-* `cu.py` *work in progress* - the control unit, which performs
+ dedicated registers: `R1`, `R2`, `S`, `FLAGS` and `PC`.
+* `cu.py` - the control unit, which performs
instruction fetching from memory and calls the necessary methods of
the arithmetic logic unit
* `io.py` - the input/output unit
@@ -131,7 +125,7 @@ Model machine emulator
* `R1`, `R2`, `S` for arithmetic operations.
* `FLAGS` for storing the status flags.
-* `IP` *only* for receiving the address from register `R1` on conditional
+* `PC` *only* for receiving the address from register `R1` on conditional
jumps.
Operation scheme:
@@ -148,7 +142,7 @@ Model machine emulator
the remainder. `S := R1 / R2; R1 := R1 % R2`.
* The `move` transfer instruction: `S := R1`.
* The unconditional jump `jump` and the conditional jump `cond_jump` instructions
- work as `IP := R1`; the exact behavior of `cond_jump` depends on
+ work as `PC := R1`; the exact behavior of `cond_jump` depends on
which extra arguments are passed.
* The `halt` stop instruction simply sets the HALT stop flag in
the flags register
@@ -163,8 +157,8 @@ Model machine emulator
1. `fetch_and_decode` - fetch and decode the next instruction.
The contents of the RAM cell whose address is stored
- in register `IP` are loaded into register `IR`; the opcode and the
- operand addresses are extracted from it, and the `IP` counter is increased by
+ in register `PC` are loaded into register `RI`; the opcode and the
+ operand addresses are extracted from it, and the `PC` counter is increased by
the length of the instruction just read.
2. `load` - the data at the freshly decoded addresses are loaded into the
processor registers `R1` and `R2`
@@ -225,9 +219,9 @@ Model machine emulator
### Instruction table of the model machines
-|OPCODE|mm-3 |mm-2 |mm-v |mm-1 |mm-st |
+|OPCODE|mm-3 |mm-2 |mm-v |mm-1 |mm-m |
|:-----|:---:|:---:|:---:|:---:|:----:|
-|0x00 |move |move |move |load | |
+|0x00 |move |move |move |load | load |
|0x01 | add | add | add | add | add |
|0x02 | sub | sub | sub | sub | sub |
|0x03 |smul |smul |smul |smul | smul |
@@ -235,12 +229,15 @@ Model machine emulator
|0x05 | |comp |comp |comp | comp |
|0x13 |umul |umul |umul |umul | umul |
|0x14 |udiv |udiv |udiv |udiv | udiv |
-|0x10 | | | |store| |
-|0x20 | | | |swap | |
-|0x5A | | | | |stpush|
-|0x5B | | | | |stpop |
-|0x5C | | | | |stdup |
-|0x5D | | | | |stswap|
+|0x10 | | | |store|store |
+|0x20 | | | |swap | move |
+|0x21 | | | | | radd |
+|0x22 | | | | | rsub |
+|0x23 | | | | |rsmul |
+|0x24 | | | | |rsdiv |
+|0x25 | | | | |rcomp |
+|0x33 | | | | |rumul |
+|0x34 | | | | |rudiv |
|0x80 |jump |jump |jump |jump | jump |
|0x81 | jeq | jeq | jeq | jeq | jeq |
|0x82 |jneq |jneq |jneq |jneq | jneq |
@@ -273,7 +270,7 @@ Model machine emulator
|ujleq | <= u |unsigned jump if less or equal |
|ujg | > u |unsigned jump if greater |
-### mm3
+### mm-3
The architecture of the three-address model machine.
@@ -281,15 +278,15 @@ Model machine emulator
* Address size: 2 bytes.
* Arithmetic is performed on a single RAM cell.
* An instruction fits into a single RAM cell: `OPCODE A1 A2 A3`.
-* Registers: `S`, `R1`, `R2`, `FLAGS`, `IP`, `IR`, `ADDR`.
+* Registers: `S`, `R1`, `R2`, `FLAGS`, `PC`, `RI`, `ADDR`.
Register roles:
* `S` - the accumulator register; the result of an arithmetic operation is written to it.
* `R1`, `R2` - operand registers for arithmetic operations.
* `FLAGS` - the flags register.
-* `IP` - the instruction pointer register.
-* `IR` - the register that holds the current instruction.
+* `PC` - the instruction pointer register.
+* `RI` - the register that holds the current instruction.
* `ADDR` - the register that holds the address for jump instructions.
Processor actions for the arithmetic instructions (`add`, `sub`,
@@ -306,12 +303,12 @@ Model machine emulator
two results: the quotient goes into the cell at address `A3` and the remainder into the next
cell, at address `(A3+1) mod 16^4`.
-* `jump A1 A2 A3`: `IP := A3`
+* `jump A1 A2 A3`: `PC := A3`
* Conditional jumps: `R1` and `R2` are compared and, depending on the result,
- `IP := A3` is performed.
+ `PC := A3` is performed.
* The `move` transfer instruction: `[A3] := R1`.
-### mm2
+### mm-2
The architecture of the two-address model machine.
@@ -319,7 +316,7 @@ Model machine emulator
* Address size: 2 bytes.
* Arithmetic is performed on a single RAM cell.
* An instruction fits into a single RAM cell: `OPCODE A1 A2`.
-* Registers: `R1`, `R2`, `FLAGS`, `IP`, `IR`, `ADDR`.
+* Registers: `R1`, `R2`, `FLAGS`, `PC`, `RI`, `ADDR`.
Actions for the arithmetic instructions `add`, `sub`, `smul`, `sdiv`, `umul`,
`udiv`:
@@ -333,12 +330,12 @@ Model machine emulator
1. `R1 := [A1], R2 := [A2]`
2. Run the `sub` circuit in the ALU and set the `FLAGS` register
-* `jump A1 A2`: `IP := A2`
+* `jump A1 A2`: `PC := A2`
* Conditional jumps are decided from the `FLAGS` register
* `move A1 A2`: `[A1] := [A2]`
* The `halt` instruction raises the `HALT` flag in the `FLAGS` register
-### mmv
+### mm-v
The architecture of the model machine with a variable-length instruction format.
@@ -348,7 +345,7 @@ Model machine emulator
memory cell.
* An instruction occupies a varying number of cells depending on the
operation being performed.
-* Registers: `R1`, `R2`, `FLAGS`, `IP`, `IR`, `ADDR`.
+* Registers: `R1`, `R2`, `FLAGS`, `PC`, `RI`, `ADDR`.
Opcode table:
@@ -387,12 +384,12 @@ Model machine emulator
1. `R1 := [A1], R2 := [A2]`
2. Run the `sub` circuit in the ALU and set the `FLAGS` register
-* `jump A1`: `IP := A1`
+* `jump A1`: `PC := A1`
* Conditional jumps are decided from the `FLAGS` register
* `move A1 A2`: `[A1] := [A2]`
* The `halt` instruction raises the `HALT` flag in the `FLAGS` register
-### mm1
+### mm-1
The architecture of the one-address model machine.
@@ -400,7 +397,7 @@ Model machine emulator
* Address size: 2 bytes.
* Arithmetic is performed on a single RAM cell.
* An instruction fits into a single RAM cell: `OPCODE A`.
-* Registers: `S`, `R`, `S1`, `FLAGS`, `IP`, `IR`.
+* Registers: `S`, `R`, `S1`, `FLAGS`, `PC`, `RI`.
Registers `S` and `S1` keep their contents persistently instead of being cleared
by each executed instruction, as before. Register `R` is loaded with
@@ -422,108 +419,123 @@ Model machine emulator
1. `R := [A]`
2. Run the `sub` circuit in the ALU and set the `FLAGS` register
-* `jump A`: `IP := A`
+* `jump A`: `PC := A`
* Conditional jumps are decided from the `FLAGS` register
* `load A`: `S := [A]`
* `store A`: `[A] := S`
* `swap`: `S, S1 := S1, S`
* The `halt` instruction raises the `HALT` flag in the `FLAGS` register
-### mmst
+### mm-m
-The architecture of the stack model machine.
+The architecture of the model machine with address modification.
-* RAM cell size: 1 byte.
+* RAM cell size: 2 bytes.
* Address size: 2 bytes.
-* Arithmetic is performed on a 3-byte word.
+* Arithmetic is performed on a 4-byte word.
* An instruction occupies a varying number of cells depending on the
- operation being performed. Most instructions are addressless, have the format `OPCODE`
- and occupy 1 byte. Some stack-manipulation and jump instructions take one
- operand, have the format `OPCODE A` and occupy 3 bytes.
-* Registers: `R1`, `R2`, `FLAGS`, `IP`, `IR`, `SP`.
-
-Register `SP`, the stack pointer, points to the top of the stack.
+ operation being performed. Arithmetic instructions come in register-register
+ and register-memory forms. Register-register instructions have the format
+ `OPCODE RA1 RA2` and occupy 2 bytes. Register-memory instructions have the
+ format `OPCODE R M A` and occupy 4 bytes. Jump instructions have the format
+ `OPCODE 0 0 A` and occupy 4 bytes.
+* Registers: `R0-RF`, `S`, `RZ`, `FLAGS`, `PC`, `RI`.
+
+The main difference of this machine from the previous ones is the presence of
+addressable general-purpose registers `R0-RF`, which are used for arithmetic
+and for addressing memory. `S` and `RZ` are non-addressable registers used by
+the ALU.
+
+Data addressing is now performed by the following algorithm (a minimal Python
+sketch follows this list):
+
+1. Take the contents of the addressable register with number `M` (0x0 to 0xF): `[M]`.
+ If the register number `M` is zero, the value `[M]` is also zero regardless
+ of the contents of register `R0`.
+2. Add the address `A` (0x0000 to 0xFFFF) to it: `[M] + A`.
+3. Take this sum modulo 2^16: `([M] + A) % 2^16`.
+4. Fetch the data from RAM at the resulting address: `[[M] + A]`.
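+
+A minimal Python sketch of this addressing rule (`registers` and `ram` are
+illustrative stand-ins, not the emulator's actual API):
+
+```python
+def effective_load(registers, ram, m, a):
+    base = 0 if m == 0 else registers[m]  # [M]; R0 always reads as zero
+    address = (base + a) % 2 ** 16        # ([M] + A) % 2^16
+    return ram[address]                   # [[M] + A]
+```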
Opcode table:
-|Opcode|Mnemonic|Format   |Length (bytes)|
-|:----------|:------:|:--------|---------------:|
-|0x01 |add |add | 1|
-|0x02 |sub |sub | 1|
-|0x03 |smul |smul | 1|
-|0x04 |sdiv |sdiv | 1|
-|0x05 |comp |comp | 1|
-|0x13 |umul |umul | 1|
-|0x14 |udiv |udiv | 1|
-|0x5A |stpush |stpush A | 3|
-|0x5B |stpop |stpop A | 3|
-|0x5C |stdup |stdup | 1|
-|0x5D |stswap |stswap | 1|
-|0x80 |jump |jump A | 3|
-|0x81 |jeq |jeq A | 3|
-|0x82 |jneq |jneq A | 3|
-|0x83 |sjl |sjl A | 3|
-|0x84 |sjgeq |sjgeq A | 3|
-|0x85 |sjleq |sjleq A | 3|
-|0x86 |sjg |sjg A | 3|
-|0x93 |ujl |ujl A | 3|
-|0x94 |ujgeq |ujgeq A | 3|
-|0x95 |ujleq |ujleq A | 3|
-|0x96 |ujg |ujg A | 3|
-|0x99 |halt |halt | 1|
-
-How the `push(value)` method works:
-
-1. `SP -= value_size`
-2. `[SP] := value`
-
-How the `pop()` method works:
-
-1. `value := [SP]`
-2. `SP += value_size`
-3. `return value`
-
-How the stack instructions work:
-
-Push onto the stack, `stpush A`:
-
-1. `R1 := [A]`
-2. `push(R1)`
-
-Pop from the stack, `stpop A`:
-
-1. `R1 := pop()`
-2. `[A] := R1`
-
-Duplicate, `stdup`:
-
-1. `R1 := pop()`
-2. `push(R1); push(R1)`
-
-Swap, `stswap`:
-
-1. `R1 := pop(); R2 := pop()`
-2. `push(R1); push(R2)`
-
-Actions for the arithmetic instructions (except division) `add`, `sub`, `smul`,
-`umul`:
-
-1. `R2 := pop(); R1 := pop()`
-2. `R1 := R1 op R2`
-3. `push(R1)`
-
-Actions for the division instructions `sdivmod` and `udivmod`:
-
-1. `R2 := pop(); R1 := pop()`
-2. `R1, R2 := R1 / R2, R1 % R2`
-3. `push(R1); push(R2)`
-
-Actions for the comparison instruction `cmp`:
-
-1. `R2 := pop(), R1 := pop()`
-2. Run the `sub` circuit in the ALU and set the `FLAGS` register
-
-* `jump A1`: `IP := A1`
+|Opcode|Mnemonic|Format     |Length (bytes)|
+|:----------|:------:|:----------|---------------:|
+|0x00 |load |load R M A | 4|
+|0x01 |add |add R M A | 4|
+|0x02 |sub |sub R M A | 4|
+|0x03 |smul |smul R M A | 4|
+|0x04 |sdiv |sdiv R M A | 4|
+|0x05 |comp |comp R M A | 4|
+|0x13 |umul |umul R M A | 4|
+|0x14 |udiv |udiv R M A | 4|
+|0x10 |store |store R M A| 4|
+|0x20 |rmove |rmove RX RY| 2|
+|0x21 |radd |radd RX RY | 2|
+|0x22 |rsub |rsub RX RY | 2|
+|0x23 |rsmul |rsmul RX RY| 2|
+|0x24 |rsdiv |rsdiv RX RY| 2|
+|0x25 |rcomp |rcomp RX RY| 2|
+|0x33 |rumul |rumul RX RY| 2|
+|0x34 |rudiv |rudiv RX RY| 2|
+|0x80 |jump |jump 0 M A | 4|
+|0x81 |jeq |jeq 0 M A | 4|
+|0x82 |jneq |jneq 0 M A | 4|
+|0x83 |sjl |sjl 0 M A | 4|
+|0x84 |sjgeq |sjgeq 0 M A| 4|
+|0x85 |sjleq |sjleq 0 M A| 4|
+|0x86 |sjg |sjg 0 M A | 4|
+|0x93 |ujl |ujl 0 M A | 4|
+|0x94 |ujgeq |ujgeq 0 M A| 4|
+|0x95 |ujleq |ujleq 0 M A| 4|
+|0x96 |ujg |ujg 0 M A | 4|
+|0x99 |halt |halt 00 | 2|
+
+Actions for the register-memory arithmetic instructions (except division) `add`,
+`sub`, `smul`, `umul` (format `op R M A`; a sketch follows the list):
+
+1. `S, RZ := R, [[M] + A]`
+2. `S := S op RZ`
+3. `R := S`
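+
+The same three steps as a sketch, reusing `effective_load` from the sketch
+above (`alu_op` stands in for the ALU circuit; the data structures are
+illustrative):
+
+```python
+def reg_mem_op(registers, ram, alu_op, r, m, a):
+    s = registers[r]                           # 1. S := R
+    rz = effective_load(registers, ram, m, a)  # 1. RZ := [[M] + A]
+    s = alu_op(s, rz)                          # 2. S := S op RZ
+    registers[r] = s                           # 3. R := S
+```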
+
+Actions for the register-memory division instructions `sdivmod` and `udivmod`
+(format `op R M A`; `R_next` is the register that follows register `R`):
+
+1. `S, RZ := R, [[M] + A]`
+2. `S, RZ := S / RZ, S % RZ`
+3. `R, R_next := S, RZ`
+
+Actions for the comparison instruction `comp R M A`:
+
+1. `S, RZ := R, [[M] + A]`
+2. Run the `sub S RZ` circuit in the ALU and set the `FLAGS` register
+
+Actions for the load instruction `load R M A`:
+
+1. `R := [[M] + A]`
+
+Actions for the store instruction `store R M A`:
+
+1. `[[M] + A] := R`
+
+Actions for the register-register arithmetic instructions (except division) `radd`,
+`rsub`, `rsmul`, `rumul` (format `op RX RY`):
+
+1. `S, RZ := RX, RY`
+2. `S := S op RZ`
+3. `RX := S`
+
+Actions for the register-register division instructions `rsdiv` and `rudiv`
+(format `op RX RY`; `RX_next` is the register that follows `RX`; if
+`RX = RF`, then `RX_next = R0`):
+
+1. `S, RZ := RX, RY`
+2. `S, RZ := S / RZ, S % RZ`
+3. `RX, RX_next := S, RZ`
+
+Actions for the comparison instruction `rcomp RX RY`:
+
+1. `S, RZ := RX, RY`
+2. Run the `sub S RZ` circuit in the ALU and set the `FLAGS` register
+
+* `jump 00 A`: `PC := A`
* Conditional jumps are decided from the `FLAGS` register
* The `halt` instruction raises the `HALT` flag in the `FLAGS` register
diff --git a/modelmachine/__main__.py b/modelmachine/__main__.py
index 852943e..ffaaf6a 100644
--- a/modelmachine/__main__.py
+++ b/modelmachine/__main__.py
@@ -5,7 +5,7 @@
from modelmachine.ide import get_program, get_cpu, debug
import pytest, os, sys, argparse
-VERSION = "0.0.6" # Don't forget fix in setup.py
+VERSION = "0.1.0" # Don't forget fix in setup.py
def run_program(args):
cpu = get_program(args.filename, args.protect_memory)
@@ -32,7 +32,7 @@ def main(argv, stdout):
help='print version and exit')
parser.add_argument('-m', '--protect_memory', action='store_true', default=False,
- help='raise an error if try to read dirty memory')
+ help='raise an error if the program tries to read dirty memory')
subparsers = parser.add_subparsers(title='commands',
help='commands for model machine emulator')
diff --git a/modelmachine/cpu.py b/modelmachine/cpu.py
index 6eaec7e..6be0540 100644
--- a/modelmachine/cpu.py
+++ b/modelmachine/cpu.py
@@ -16,7 +16,7 @@ from modelmachine.cu import ControlUnit3 as BCU3
from modelmachine.cu import ControlUnit2 as BCU2
from modelmachine.cu import ControlUnitV as BCUV
from modelmachine.cu import ControlUnit1 as BCU1
-from modelmachine.cu import ControlUnitS as BCUS
+from modelmachine.cu import ControlUnitM as BCUM
from modelmachine.alu import ArithmeticLogicUnit
from modelmachine.io import InputOutputUnit
@@ -107,7 +107,7 @@ class CPUMM3(AbstractCPU):
register_names=self.register_names,
operand_size=word_size,
address_size=address_size)
- self.control_unit = BCU3(instruction_size=word_size,
+ self.control_unit = BCU3(ir_size=word_size,
registers=self.registers,
ram=self.ram,
alu=self.alu,
@@ -136,7 +136,7 @@ class CPUMM2(AbstractCPU):
register_names=self.register_names,
operand_size=word_size,
address_size=address_size)
- self.control_unit = BCU2(instruction_size=word_size,
+ self.control_unit = BCU2(ir_size=word_size,
registers=self.registers,
ram=self.ram,
alu=self.alu,
@@ -195,7 +195,7 @@ class CPUMM1(AbstractCPU):
register_names=self.register_names,
operand_size=word_size,
address_size=address_size)
- self.control_unit = BCU1(instruction_size=word_size,
+ self.control_unit = BCU1(ir_size=word_size,
registers=self.registers,
ram=self.ram,
alu=self.alu,
@@ -206,39 +206,39 @@ class CPUMM1(AbstractCPU):
word_size=word_size)
-class CPUMMS(AbstractCPU):
+class CPUMMM(AbstractCPU):
- """CPU stack model machine."""
+ """CPU address modification model machine."""
def __init__(self, protect_memory):
"""See help(type(x))."""
byte_size = 8
- word_size = 3 * byte_size
- address_size = 2 * byte_size
+ address_size = word_size = 2 * byte_size
+ operand_size = ir_size = 4 * byte_size
memory_size = 2 ** address_size
- self.ram = RandomAccessMemory(word_size=byte_size,
+ self.ram = RandomAccessMemory(word_size=word_size,
memory_size=memory_size,
endianess='big', # Unused
is_protected=protect_memory)
self.registers = RegisterMemory()
- self.register_names = BCUS.register_names
+ self.register_names = BCUM.register_names
self.alu = ArithmeticLogicUnit(registers=self.registers,
register_names=self.register_names,
- operand_size=word_size,
+ operand_size=operand_size,
address_size=address_size)
- self.control_unit = BCUS(ir_size=word_size,
+ self.control_unit = BCUM(ir_size=ir_size,
registers=self.registers,
ram=self.ram,
alu=self.alu,
- operand_size=word_size,
+ operand_size=operand_size,
address_size=address_size)
self.io_unit = InputOutputUnit(ram=self.ram,
start_address=0,
- word_size=word_size)
+ word_size=operand_size)
CPU_LIST = {'mm3': CPUMM3,
'mm2': CPUMM2,
'mmv': CPUMMV,
'mm1': CPUMM1,
- 'mms': CPUMMS}
+ 'mmm': CPUMMM}
diff --git a/modelmachine/cu.py b/modelmachine/cu.py
index 33ae4dc..86ee0c7 100644
--- a/modelmachine/cu.py
+++ b/modelmachine/cu.py
@@ -3,6 +3,7 @@
"""Control unit parse instruction and give the commands to another part of computer."""
from modelmachine.alu import HALT, LESS, GREATER, EQUAL
+from modelmachine.numeric import Integer
RUNNING = 1
HALTED = 2
@@ -80,6 +81,15 @@ class ControlUnit(AbstractControlUnit):
"udivmod": 0x14,
"swap": 0x20,
+ "rmove": 0x20,
+
+ "radd": 0x21,
+ "rsub": 0x22,
+ "rsmul": 0x23,
+ "rsdivmod": 0x24,
+ "rcomp": 0x25,
+ "rumul": 0x33,
+ "rudivmod": 0x34,
"stpush": 0x5A,
"stpop": 0x5B,
@@ -119,7 +129,6 @@ class ControlUnit(AbstractControlUnit):
OPCODES["stdup"], OPCODES["stswap"]}
BINAR_OPCODES = ARITHMETIC_OPCODES | {OPCODES["comp"]}
- MONAR_OPCODES = {OPCODES["halt"]}
register_names = {"PC": "PC", "ADDR": "ADDR", "RI": "RI"}
opcode = 0
@@ -231,10 +240,11 @@ class ControlUnit3(ControlUnit):
"R1": "R1", "R2": "R2", "S": "S", "RES": "R1",
"FLAGS": "FLAGS"}
- def __init__(self, instruction_size, *vargs, **kvargs):
+ def __init__(self, ir_size, *vargs, **kvargs):
"""See help(type(x))."""
- super().__init__(instruction_size, *vargs, **kvargs)
- self.instruction_size = instruction_size
+ super().__init__(ir_size, *vargs, **kvargs)
+
+ self.instruction_size = ir_size
self.opcodes = (self.ARITHMETIC_OPCODES | self.JUMP_OPCODES |
{self.OPCODES["move"],
self.OPCODES["halt"]})
@@ -309,12 +319,11 @@ class ControlUnit2(ControlUnit):
"R1": "R1", "R2": "R2", "S": "R1", "RES": "R2",
"FLAGS": "FLAGS"}
- def __init__(self, instruction_size, *vargs, **kvargs):
+ def __init__(self, ir_size, *vargs, **kvargs):
"""See help(type(x))."""
- super().__init__(instruction_size, *vargs, **kvargs)
- self.instruction_size = instruction_size
+ super().__init__(ir_size, *vargs, **kvargs)
- self.instruction_size = instruction_size
+ self.instruction_size = ir_size
self.opcodes = (self.ARITHMETIC_OPCODES | self.JUMP_OPCODES |
{self.OPCODES["move"],
self.OPCODES["halt"],
@@ -473,12 +482,11 @@ class ControlUnit1(ControlUnit):
"R1": "S", "R2": "R", "S": "S", "RES": "S1",
"FLAGS": "FLAGS"}
- def __init__(self, instruction_size, *vargs, **kvargs):
+ def __init__(self, ir_size, *vargs, **kvargs):
"""See help(type(x))."""
- super().__init__(instruction_size, *vargs, **kvargs)
- self.instruction_size = instruction_size
+ super().__init__(ir_size, *vargs, **kvargs)
- self.instruction_size = instruction_size
+ self.instruction_size = ir_size
self.opcodes = (self.ARITHMETIC_OPCODES | self.JUMP_OPCODES |
{self.OPCODES["load"],
self.OPCODES["store"],
@@ -537,137 +545,153 @@ class ControlUnit1(ControlUnit):
self.operand_size)
self.ram.put(self.address, value, self.operand_size)
-class ControlUnitS(ControlUnit):
+class ControlUnitM(ControlUnit):
- """Control unit for stack model machine."""
+ """Control unit for address modification model machine."""
- address = None
+ address = 0
+ register1 = ''
+ register2 = ''
- register_names = {"PC": "PC", "ADDR": "ADDR", "RI": "RI", "SP": "SP",
- "R1": "R1", "R2": "R2", "S": "R1", "RES": "R2",
+ register_names = {"PC": "PC", "ADDR": "ADDR", "RI": "RI",
+ "R1": "S", "R2": "RZ", "S": "S", "RES": "RZ",
"FLAGS": "FLAGS"}
+
+ REGISTER_OPCODES = {ControlUnit.OPCODES["radd"],
+ ControlUnit.OPCODES["rsub"],
+ ControlUnit.OPCODES["rsmul"],
+ ControlUnit.OPCODES["rsdivmod"],
+ ControlUnit.OPCODES["rumul"],
+ ControlUnit.OPCODES["rudivmod"],
+ ControlUnit.OPCODES["rmove"],
+ ControlUnit.OPCODES["rcomp"]}
+
+ ARITHMETIC_OPCODES = (ControlUnit.ARITHMETIC_OPCODES
+ | {ControlUnit.OPCODES["radd"],
+ ControlUnit.OPCODES["rsub"],
+ ControlUnit.OPCODES["rsmul"],
+ ControlUnit.OPCODES["rsdivmod"],
+ ControlUnit.OPCODES["rumul"],
+ ControlUnit.OPCODES["rudivmod"]})
+
def __init__(self, ir_size, *vargs, **kvargs):
"""See help(type(x))."""
# dynamic instruction size
super().__init__(ir_size, *vargs, **kvargs)
- self.opcodes = (self.ARITHMETIC_OPCODES | self.JUMP_OPCODES |
- {self.OPCODES["stpush"], self.OPCODES["stpop"],
- self.OPCODES["stdup"], self.OPCODES["stswap"],
- self.OPCODES["halt"],
- self.OPCODES["comp"]})
+ self.reg_addr_size = 4
- for reg in {"R1", "R2", "FLAGS"}:
+ self.opcodes = (self.ARITHMETIC_OPCODES
+ | self.JUMP_OPCODES
+ | self.REGISTER_OPCODES
+ | {self.OPCODES["load"],
+ self.OPCODES["store"],
+ self.OPCODES["halt"],
+ self.OPCODES["comp"]})
+
+ for reg in {"S", "RZ", "FLAGS", "R0", "R1", "R2", "R3", "R4",
+ "R5", "R6", "R7", "R8", "R9", "RA", "RB", "RC",
+ "RD", "RE", "RF"}:
self.registers.add_register(reg, self.operand_size)
- for reg in {"SP"}:
- self.registers.add_register(reg, self.address_size)
- self.registers.put("SP", 0, self.address_size)
-
- def push(self, value):
- """Push value to stack."""
- stack_pointer = self.registers.fetch(self.register_names["SP"],
- self.address_size)
- stack_pointer -= self.operand_size // self.ram.word_size
- stack_pointer %= self.ram.memory_size
- self.registers.put(self.register_names["SP"],
- stack_pointer,
- self.address_size)
- self.ram.put(stack_pointer, value, self.operand_size)
-
- def pop(self):
- """Pop value from the stack."""
- stack_pointer = self.registers.fetch(self.register_names["SP"],
- self.address_size)
- value = self.ram.fetch(stack_pointer, self.operand_size)
- stack_pointer += self.operand_size // self.ram.word_size
- stack_pointer %= self.ram.memory_size
- self.registers.put(self.register_names["SP"],
- stack_pointer,
- self.address_size)
- return value
def fetch_and_decode(self):
"""Fetch 3 addresses."""
- mask = 2 ** self.address_size - 1
- one_operand = self.JUMP_OPCODES | {self.OPCODES["stpush"],
- self.OPCODES["stpop"]}
+ addr_mask = 2 ** self.address_size - 1
+ reg_mask = 2 ** self.reg_addr_size - 1
instruction_pointer = self.registers.fetch(self.register_names["PC"],
self.address_size)
- self.opcode = self.ram.fetch(instruction_pointer, self.OPCODE_SIZE)
- if self.opcode in one_operand:
- instruction_size = self.OPCODE_SIZE + self.address_size
+ batch_size = max(self.ram.word_size, self.OPCODE_SIZE)
+ self.opcode = self.ram.fetch(instruction_pointer, batch_size)
+ space_size = batch_size - self.OPCODE_SIZE
+ self.opcode = Integer(self.opcode, batch_size, False)[space_size:].get_value()
+
+ if self.opcode in self.opcodes - (self.REGISTER_OPCODES | {self.OPCODES["halt"]}):
+ instruction_size = self.OPCODE_SIZE + 2 * self.reg_addr_size + self.address_size
else:
- instruction_size = self.OPCODE_SIZE
+ instruction_size = self.OPCODE_SIZE + 2 * self.reg_addr_size
instruction = self.fetch_instruction(instruction_size)
- if self.opcode in one_operand:
- self.address = instruction & mask
+ if self.opcode in self.REGISTER_OPCODES:
+ r_x = (instruction >> self.reg_addr_size) & reg_mask
+ self.register1 = "R" + hex(r_x).upper()[2:]
+
+ r_y = instruction & reg_mask
+ self.register2 = "R" + hex(r_y).upper()[2:]
+ elif self.opcode != self.OPCODES["halt"]:
+ r_x = (instruction >> (self.reg_addr_size + self.address_size)) & reg_mask
+ self.register1 = "R" + hex(r_x).upper()[2:]
+
+ modificator = "R" + hex((instruction >> self.address_size) & reg_mask).upper()[2:]
+ if modificator != "R0":
+ modificator = self.registers.fetch(modificator, self.operand_size)
+ else:
+ modificator = 0
+ self.address = (instruction + modificator) & addr_mask
def load(self):
"""Load registers R1 and R2."""
- if self.opcode in self.BINAR_OPCODES | {self.OPCODES["stswap"]}:
- operand2 = self.pop()
- self.registers.put(self.register_names["R2"],
- operand2,
- self.operand_size)
- operand1 = self.pop()
+ if self.opcode == self.OPCODES["store"]:
+ operand1 = self.registers.fetch(self.register1, self.operand_size)
self.registers.put(self.register_names["R1"],
operand1,
self.operand_size)
-
- elif self.opcode == self.OPCODES["stpush"]:
- operand = self.ram.fetch(self.address, self.operand_size)
+ elif self.opcode in self.REGISTER_OPCODES:
+ operand1 = self.registers.fetch(self.register1, self.operand_size)
self.registers.put(self.register_names["R1"],
- operand,
+ operand1,
self.operand_size)
- elif self.opcode in {self.OPCODES["stdup"], self.OPCODES["stpop"]}:
- operand = self.pop()
+ operand2 = self.registers.fetch(self.register2, self.operand_size)
+ self.registers.put(self.register_names["R2"],
+ operand2,
+ self.operand_size)
+ elif self.opcode in (self.ARITHMETIC_OPCODES |
+ {self.OPCODES["comp"], self.OPCODES["load"]}):
+ operand1 = self.registers.fetch(self.register1, self.operand_size)
self.registers.put(self.register_names["R1"],
- operand,
+ operand1,
+ self.operand_size)
+ operand2 = self.ram.fetch(self.address, self.operand_size)
+ self.registers.put(self.register_names["R2"],
+ operand2,
self.operand_size)
elif self.opcode in self.JUMP_OPCODES:
self.registers.put(self.register_names["ADDR"],
self.address,
self.address_size)
+ if self.opcode in self.REGISTER_OPCODES:
+ self.opcode ^= 0x20
+
def execute(self):
"""Add specific commands: conditional jumps and cmp."""
if self.opcode == self.OPCODES["comp"]:
self.alu.sub()
+ elif self.opcode == self.OPCODES["load"]:
+ self.alu.move("R2", "S")
+ elif self.opcode == self.OPCODES["store"]:
+ self.alu.move("R1", "S")
elif self.opcode in self.JUMP_OPCODES:
self.execute_jump()
- elif self.opcode == self.OPCODES["stswap"]:
- self.alu.swap()
- elif self.opcode == self.OPCODES["stdup"]:
- self.alu.move(source="R1", dest="R2")
- elif self.opcode in self.STACK_OPCODES:
- pass
- elif self.opcode == self.OPCODES["move"]:
- raise ValueError('Invalid opcode `{opcode}`'
- .format(opcode=hex(self.opcode)))
else:
super().execute()
def write_back(self):
"""Write result back."""
- if self.opcode in self.ARITHMETIC_OPCODES | {self.OPCODES["stpush"],
- self.OPCODES["stswap"],
- self.OPCODES["stdup"]}:
+ if self.opcode in self.ARITHMETIC_OPCODES | {self.OPCODES["load"]}:
value = self.registers.fetch(self.register_names["S"],
self.operand_size)
- self.push(value)
- if self.opcode in self.DIVMOD_OPCODES | {self.OPCODES["stswap"],
- self.OPCODES["stdup"]}:
+ self.registers.put(self.register1, value, self.operand_size)
+ if self.opcode in self.DIVMOD_OPCODES:
+ next_register = (int(self.register1[1:], 0x10) + 1) % 0x10
+ next_register = "R" + hex(next_register).upper()[2:]
value = self.registers.fetch(self.register_names["RES"],
self.operand_size)
- self.push(value)
- elif self.opcode == self.OPCODES["stpop"]:
- value = self.registers.fetch(self.register_names["R1"],
+ self.registers.put(next_register, value, self.operand_size)
+ elif self.opcode == self.OPCODES["store"]:
+ value = self.registers.fetch(self.register_names["S"],
self.operand_size)
self.ram.put(self.address, value, self.operand_size)
-
-
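
The heart of ControlUnitM's fetch_and_decode above is address modification: the nibble next to the address field names a modifier register whose contents are added to the literal address, with R0 hard-wired to zero. A self-contained sketch under the diff's sizes (16-bit addresses, 4-bit register fields); `effective_address` is an illustrative name, not part of the module:

    ADDRESS_SIZE = 16   # address_size = 2 * byte_size in the diff
    REG_FIELD = 4       # self.reg_addr_size

    def effective_address(instruction, registers):
        """Address field plus contents of the modifier register; R0 reads as 0."""
        addr_mask = 2 ** ADDRESS_SIZE - 1
        reg = (instruction >> ADDRESS_SIZE) & (2 ** REG_FIELD - 1)
        name = "R" + hex(reg).upper()[2:]
        offset = 0 if name == "R0" else registers[name]
        return (instruction + offset) & addr_mask

    # 0x02100102 encodes: opcode 0x02, target R1, modifier R0, address 0x0102
    assert effective_address(0x02100102, {}) == 0x0102
    # with modifier R2 = 42, the literal address 0x0014 becomes 0x0014 + 42
    assert effective_address(0x01120014, {"R2": 42}) == 0x0014 + 42
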
diff --git a/modelmachine/ide.py b/modelmachine/ide.py
index a10f040..a3fddad 100644
--- a/modelmachine/ide.py
+++ b/modelmachine/ide.py
@@ -136,6 +136,7 @@ def exec_continue(cpu, step):
def exec_print(cpu, step):
"""Print contents of registers."""
+ print("RAM access count:", cpu.ram.access_count)
print("Register states:")
registers = {cpu.register_names[name] for name in cpu.register_names}
for reg in sorted(list(registers)):
diff --git a/modelmachine/io.py b/modelmachine/io.py
index 3328907..0367def 100644
--- a/modelmachine/io.py
+++ b/modelmachine/io.py
@@ -44,11 +44,11 @@ class InputOutputUnit:
def store_hex(self, start, size):
"""Save data to string."""
- if size % self.word_size != 0:
+ if size % self.ram.word_size != 0:
raise KeyError('Cannot save {size} bits, word size is {word_size}'
- .format(size=size, word_size=self.word_size))
+ .format(size=size, word_size=self.ram.word_size))
result = []
- block_size = self.word_size
+ block_size = self.ram.word_size
size //= block_size
for i in range(start, start + size):
data = self.ram.fetch(i, block_size)
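
The store_hex fix matters once memory words (16 bits for mm-m) no longer match the operand size (32 bits): dump granularity must follow the RAM's own word size. A hypothetical standalone rendering of the same grouping; the real method reads cells from self.ram:

    def store_hex(cells, word_size, start, size):
        """Render `size` bits of memory as hex words of the RAM's word size."""
        if size % word_size != 0:
            raise KeyError('Cannot save {0} bits, word size is {1}'
                           .format(size, word_size))
        width = word_size // 4                 # hex digits per word
        return ' '.join('{0:0{1}x}'.format(cells[i], width)
                        for i in range(start, start + size // word_size))

    # two 16-bit words render as four hex digits each
    assert store_hex({0: 0x0210, 1: 0x0102}, 16, 0, 32) == '0210 0102'
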
diff --git a/modelmachine/memory.py b/modelmachine/memory.py
index c0ec03a..4fe3c7d 100644
--- a/modelmachine/memory.py
+++ b/modelmachine/memory.py
@@ -58,6 +58,7 @@ class AbstractMemory(dict):
super().__init__(addresses)
self.word_size = word_size
+ self.access_count = 0
if endianess == "big":
self.decode, self.encode = big_endian_decode, big_endian_encode
@@ -106,6 +107,8 @@ class AbstractMemory(dict):
self.check_address(address)
self.check_bits_count(address, bits)
+ self.access_count += 1
+
size = bits // self.word_size
if size == 1: # Address not always is integer, sometimes string
return self[address]
@@ -123,6 +126,8 @@ class AbstractMemory(dict):
enc_value = self.encode(value, self.word_size, bits)
+ self.access_count += 1
+
size = bits // self.word_size
if size == 1: # Address not always is integer, sometimes string
self[address] = value
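
The two hunks above make every fetch and put bump access_count once per call, which is what the new "RAM access count" line in the IDE reports. A toy stand-in with the same counting behaviour, assuming one increment per access rather than per word:

    class CountingMemory(dict):
        """Toy memory that counts accesses like the patched AbstractMemory."""
        def __init__(self):
            super().__init__()
            self.access_count = 0
        def fetch(self, address):
            self.access_count += 1
            return self.get(address, 0)
        def put(self, address, value):
            self.access_count += 1
            self[address] = value

    ram = CountingMemory()
    ram.put(0x0100, 42)
    assert ram.fetch(0x0100) == 42
    assert ram.access_count == 2     # one put + one fetch
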
diff --git a/modelmachine/numeric.py b/modelmachine/numeric.py
index 62058b1..54c2c7b 100644
--- a/modelmachine/numeric.py
+++ b/modelmachine/numeric.py
@@ -87,3 +87,20 @@ class Integer(Number):
"""Test if two integer is equal."""
self.check_compability(other)
return self.get_value() == other.get_value()
+
+ def __getitem__(self, key):
+ """Get bits of unsigned representation.
+
+ Zero-indexed bit is minor.
+ """
+ representation = [(self.value >> i) & 1 for i in range(self.size)]
+ representation = representation[key]
+ if isinstance(representation, int):
+ return Integer(representation, 1, False)
+ elif isinstance(representation, list):
+ value = 0
+ for i in range(len(representation)):
+ value += representation[i] << i
+ return Integer(value, len(representation), False)
+ else:
+ raise TypeError("Integer indices must be integers")
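
The new __getitem__ gives Integer little-endian bit indexing: index 0 is the least significant bit, and a slice repacks the selected bits into an unsigned Integer of the slice's width. The same semantics on a plain int, for reference (`bit_slice` is illustrative, not part of the module):

    def bit_slice(value, size, key):
        """Mirror Integer.__getitem__ on a plain int: bit 0 is the minor bit."""
        bits = [(value >> i) & 1 for i in range(size)]
        picked = bits[key]
        if isinstance(picked, int):          # single index -> one bit
            return picked, 1
        out = 0
        for i, bit in enumerate(picked):     # slice -> re-pack little-endian
            out |= bit << i
        return out, len(picked)

    assert bit_slice(10, 32, 1) == (1, 1)            # 10 = 0b1010, bit 1 is set
    assert bit_slice(10, 32, slice(0, 6)) == (10, 6)
    assert bit_slice(10, 32, slice(3, None)) == (1, 29)
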
diff --git a/samples/mmm_sample.mmach b/samples/mmm_sample.mmach
new file mode 100644
index 0000000..26516ea
--- /dev/null
+++ b/samples/mmm_sample.mmach
@@ -0,0 +1,21 @@
+mmm
+
+[config]
+input = 0x100,0x102
+output = 0x104
+
+[code]
+; x = ((a * -21) % 50 - b) ** 2 == 178929
+00 0 0 0100 ; R0 := a
+03 0 0 000C ; R0 := a * -21
+04 0 0 000E ; R0 := (a * -21) / 50, R1 := x = (a * -21) % 50
+02 1 0 0102 ; R1 := x - b
+23 1 1 ; R1 := (x - b) ** 2
+10 1 0 0104 ; [0104] := R1
+99 0 0 ; halt
+; ---------------------
+FFFFFFEB ; -21
+00000032 ; 50
+
+[input]
+-123 456
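
A quick consistency check on the sample's comment: with the [input] words -123 and 456, a * -21 is the positive 2583, so remainder conventions do not come into play and the expression does evaluate to 178929:

    a, b = -123, 456                # the two words read under [input]
    x = (a * -21) % 50 - b          # 2583 % 50 = 33, then minus b
    assert x ** 2 == 178929         # the value stored to 0x0104
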
diff --git a/setup.py b/setup.py
index 069ec0b..abf27a9 100644
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@ Read the doc: <https://github.com/vslutov/modelmachine>
from setuptools import setup, find_packages
-VERSION = "0.0.6" # Don't forget fix in __main__.py
+VERSION = "0.1.0" # Don't forget fix in __main__.py
setup(name='modelmachine',
version=VERSION,
| Register machine | cmc-python/modelmachine | diff --git a/modelmachine/tests/test_cpu.py b/modelmachine/tests/test_cpu.py
index 6976f72..b6cee81 100644
--- a/modelmachine/tests/test_cpu.py
+++ b/modelmachine/tests/test_cpu.py
@@ -5,7 +5,7 @@
from modelmachine.cpu import AbstractCPU
from modelmachine.cpu import CPUMM3, CPUMM2
from modelmachine.cpu import CPUMMV, CPUMM1
-from modelmachine.cpu import CPUMMS
+from modelmachine.cpu import CPUMMM
from modelmachine.memory import RandomAccessMemory, RegisterMemory
@@ -236,35 +236,30 @@ class TestCPUMM1:
assert out.read() == "298\n"
+class TestCPUMMM:
-class TestCPUMMS:
-
- """Smoke test for mm-s."""
+ """Smoke test for mm-m."""
cpu = None
source = None
def setup(self):
"""Init state."""
- self.cpu = CPUMMS(protect_memory=False)
+ self.cpu = CPUMMM(protect_memory=False)
self.source = ("[config]\n" +
- "input=0x100,0x103\n" +
- "output=0x106\n" +
+ "input=0x100,0x102\n" +
+ "output=0x104\n" +
"[code]\n" +
- "5A 0100\n" +
- "5A 0103\n" +
- "01\n" +
- "5C\n" +
- "5A 0103\n" +
- "05\n" +
- "86 0011\n" +
- "5C\n" +
- "02 ; never be used\n" +
- "5A 001b\n" +
- "02\n" +
- "5B 0106\n" +
- "99 0000\n" +
- "000002\n" +
+ "00 0 0 0100\n" +
+ "03 0 0 000C\n" +
+ "04 0 0 000E\n" +
+ "02 1 0 0102\n" +
+ "23 1 1; comment is never used\n" +
+ "10 1 0 0104\n" +
+ "99 0 0\n" +
+ "; -----------\n"
+ "ffffffeb\n" +
+ "00000032\n" +
"[input]\n" +
"100 200\n")
@@ -276,5 +271,5 @@ class TestCPUMMS:
with open(str(out), 'w') as output:
self.cpu.run_file(str(source), output=output)
- assert out.read() == "298\n"
+ assert out.read() == "40000\n"
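
The new expected output follows from the same sample program run on the smoke test's inputs 100 and 200: 100 * -21 = -2100 is an exact multiple of 50, so the remainder is 0 under either signed-divmod convention, leaving (0 - 200) ** 2 = 40000:

    a, b = 100, 200
    assert ((a * -21) % 50 - b) ** 2 == 40000   # matches out.read() above
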
diff --git a/modelmachine/tests/test_cu_abstract.py b/modelmachine/tests/test_cu_abstract.py
index 3abb258..6169d0e 100644
--- a/modelmachine/tests/test_cu_abstract.py
+++ b/modelmachine/tests/test_cu_abstract.py
@@ -21,6 +21,11 @@ OP_COMP = 0x05
OP_STORE = 0x10
OP_UMUL, OP_UDIVMOD = 0x13, 0x14
OP_SWAP = 0x20
+OP_RMOVE = 0x20
+OP_RADD, OP_RSUB = 0x21, 0x22
+OP_RSMUL, OP_RSDIVMOD = 0x23, 0x24
+OP_RCOMP = 0x25
+OP_RUMUL, OP_RUDIVMOD = 0x33, 0x34
OP_STPUSH, OP_STPOP, OP_STDUP, OP_STSWAP = 0x5A, 0x5B, 0x5C, 0x5D
OP_JUMP = 0x80
OP_JEQ, OP_JNEQ = 0x81, 0x82
@@ -32,33 +37,9 @@ ARITHMETIC_OPCODES = {OP_ADD, OP_SUB, OP_SMUL, OP_SDIVMOD, OP_UMUL, OP_UDIVMOD}
CONDJUMP_OPCODES = {OP_JEQ, OP_JNEQ,
OP_SJL, OP_SJGEQ, OP_SJLEQ, OP_SJG,
OP_UJL, OP_UJGEQ, OP_UJLEQ, OP_UJG}
-
-def run_fetch(test_case, value, opcode, instruction_size, and_decode=True):
- """Run one fetch test."""
- address = 10
- test_case.ram.put(address, value, instruction_size)
- increment = instruction_size // test_case.ram.word_size
-
- test_case.registers.fetch.reset_mock()
- test_case.registers.put.reset_mock()
-
- def get_register(name, size):
- """Get PC."""
- assert name == "PC"
- assert size == BYTE_SIZE
- return address
- test_case.registers.fetch.side_effect = get_register
-
- if and_decode:
- test_case.control_unit.fetch_and_decode()
- else:
- test_case.control_unit.fetch_instruction(instruction_size)
- test_case.registers.fetch.assert_any_call("PC", BYTE_SIZE)
- test_case.registers.put.assert_has_calls([call("RI", value, WORD_SIZE),
- call("PC", address + increment,
- BYTE_SIZE)])
- assert test_case.control_unit.opcode == opcode
-
+JUMP_OPCODES = CONDJUMP_OPCODES | {OP_JUMP}
+REGISTER_OPCODES = {OP_RMOVE, OP_RADD, OP_RSUB, OP_RSMUL,
+ OP_RSDIVMOD, OP_RCOMP, OP_RUMUL, OP_RUDIVMOD}
class TestAbstractControlUnit:
@@ -137,6 +118,10 @@ class TestControlUnit:
arithmetic_opcodes = None
condjump_opcodes = None
+ ir_size = 32
+ operand_size = WORD_SIZE
+ address_size = BYTE_SIZE
+
def setup(self):
"""Init state."""
self.ram = RandomAccessMemory(WORD_SIZE, 256, 'big')
@@ -154,9 +139,9 @@ class TestControlUnit:
"""Test internal constants."""
assert isinstance(self.control_unit, AbstractControlUnit)
assert isinstance(self.control_unit, ControlUnit)
- assert self.control_unit.ir_size == 32
- assert self.control_unit.operand_size == WORD_SIZE
- assert self.control_unit.address_size == BYTE_SIZE
+ assert self.control_unit.ir_size == self.ir_size
+ assert self.control_unit.operand_size == self.operand_size
+ assert self.control_unit.address_size == self.address_size
assert self.control_unit.OPCODE_SIZE == BYTE_SIZE
assert self.control_unit.OPCODES["move"] == OP_MOVE
assert self.control_unit.OPCODES["load"] == OP_LOAD
@@ -201,9 +186,36 @@ class TestControlUnit:
with raises(NotImplementedError):
self.control_unit.write_back()
+ def run_fetch(self, value, opcode, instruction_size, and_decode=True,
+ address_size=BYTE_SIZE, ir_size=WORD_SIZE):
+ """Run one fetch test."""
+ address = 10
+ self.ram.put(address, value, instruction_size)
+ increment = instruction_size // self.ram.word_size
+
+ self.registers.fetch.reset_mock()
+ self.registers.put.reset_mock()
+
+ def get_register(name, size):
+ """Get PC."""
+ assert name == "PC"
+ assert size == self.control_unit.address_size
+ return address
+ self.registers.fetch.side_effect = get_register
+
+ if and_decode:
+ self.control_unit.fetch_and_decode()
+ else:
+ self.control_unit.fetch_instruction(instruction_size)
+ self.registers.fetch.assert_any_call("PC", address_size)
+ self.registers.put.assert_has_calls([call("RI", value, ir_size),
+ call("PC", address + increment,
+ address_size)])
+ assert self.control_unit.opcode == opcode
+
def test_fetch_instruction(self):
"""Right fetch and decode is a half of business."""
- run_fetch(self, 0x01020304, 0x01, WORD_SIZE, False)
+ self.run_fetch(0x01020304, 0x01, WORD_SIZE, False)
def test_basic_execute(self, should_move=True):
"""Test basic operations."""
diff --git a/modelmachine/tests/test_cu_fixed.py b/modelmachine/tests/test_cu_fixed.py
index 7e4ace0..ba59481 100644
--- a/modelmachine/tests/test_cu_fixed.py
+++ b/modelmachine/tests/test_cu_fixed.py
@@ -17,7 +17,7 @@ from .test_cu_abstract import (BYTE_SIZE, WORD_SIZE, OP_MOVE, OP_SDIVMOD,
OP_LOAD, OP_STORE, OP_SWAP,
OP_JNEQ, OP_SJL, OP_SJGEQ, OP_SJLEQ, OP_SJG,
OP_UJL, OP_UJGEQ, OP_UJLEQ, OP_UJG, OP_HALT,
- ARITHMETIC_OPCODES, CONDJUMP_OPCODES, run_fetch)
+ ARITHMETIC_OPCODES, CONDJUMP_OPCODES)
from .test_cu_abstract import TestControlUnit as TBCU
class TestControlUnit3(TBCU):
@@ -45,13 +45,13 @@ class TestControlUnit3(TBCU):
"""Right fetch and decode is a half of business."""
for opcode in self.control_unit.opcodes:
self.control_unit.address1, self.control_unit.address2 = None, None
- run_fetch(self, opcode << 24 | 0x020304, opcode, WORD_SIZE)
+ self.run_fetch(opcode << 24 | 0x020304, opcode, WORD_SIZE)
assert self.control_unit.address1 == 0x02
assert self.control_unit.address2 == 0x03
assert self.control_unit.address3 == 0x04
for opcode in set(range(2 ** BYTE_SIZE)) - self.control_unit.opcodes:
with raises(ValueError):
- run_fetch(self, opcode << 24 | 0x020304, opcode, WORD_SIZE)
+ self.run_fetch(opcode << 24 | 0x020304, opcode, WORD_SIZE)
def test_load(self):
"""R1 := [A1], R2 := [A2]."""
@@ -131,14 +131,14 @@ class TestControlUnit3(TBCU):
self.run_cond_jump(OP_UJLEQ, False, LESS, True)
self.run_cond_jump(OP_UJG, False, GREATER, False)
- def test_jump_halt(self):
+ def test_execute_jump_halt(self):
"""Test for jump and halt."""
self.alu.cond_jump.reset_mock()
self.alu.sub.reset_mock()
self.registers.put.reset_mock()
+
self.control_unit.opcode = OP_JUMP
self.control_unit.execute()
-
assert not self.alu.sub.called
assert not self.registers.put.called
self.alu.jump.assert_called_once_with()
@@ -264,12 +264,12 @@ class TestControlUnit2(TestControlUnit3):
"""Right fetch and decode is a half of business."""
for opcode in self.control_unit.opcodes:
self.control_unit.address1, self.control_unit.address2 = None, None
- run_fetch(self, opcode << 24 | 0x0203, opcode, WORD_SIZE)
+ self.run_fetch(opcode << 24 | 0x0203, opcode, WORD_SIZE)
assert self.control_unit.address1 == 0x02
assert self.control_unit.address2 == 0x03
for opcode in set(range(2 ** BYTE_SIZE)) - self.control_unit.opcodes:
with raises(ValueError):
- run_fetch(self, opcode << 24 | 0x0203, opcode, WORD_SIZE)
+ self.run_fetch(opcode << 24 | 0x0203, opcode, WORD_SIZE)
def test_load(self):
"""R1 := [A1], R2 := [A2]."""
@@ -319,24 +319,6 @@ class TestControlUnit2(TestControlUnit3):
assert not self.registers.put.called
self.alu.cond_jump.assert_called_once_with(signed, mol, equal)
- def test_execute_jump_halt(self):
- """Test for jump and halt."""
- self.alu.cond_jump.reset_mock()
- self.alu.sub.reset_mock()
- self.registers.put.reset_mock()
-
- self.control_unit.opcode = OP_JUMP
- self.control_unit.execute()
- assert not self.alu.sub.called
- assert not self.registers.put.called
- self.alu.jump.assert_called_once_with()
-
- self.control_unit.opcode = OP_HALT
- self.control_unit.execute()
- assert not self.alu.sub.called
- assert not self.registers.put.called
- self.alu.halt.assert_called_once_with()
-
def test_execute_comp(self):
"""Test for comp."""
self.alu.cond_jump.reset_mock()
@@ -472,11 +454,11 @@ class TestControlUnit1(TestControlUnit2):
"""Right fetch and decode is a half of business."""
for opcode in set(range(2 ** BYTE_SIZE)) - self.control_unit.opcodes:
with raises(ValueError):
- run_fetch(self, opcode << 24, opcode, WORD_SIZE)
+ self.run_fetch(opcode << 24, opcode, WORD_SIZE)
for opcode in self.control_unit.opcodes:
self.control_unit.address = None
- run_fetch(self, opcode << 24 | 0x02, opcode, WORD_SIZE)
+ self.run_fetch(opcode << 24 | 0x02, opcode, WORD_SIZE)
assert self.control_unit.address == 0x02
def test_load(self):
diff --git a/modelmachine/tests/test_cu_variable.py b/modelmachine/tests/test_cu_variable.py
index f06a70c..4b500ca 100644
--- a/modelmachine/tests/test_cu_variable.py
+++ b/modelmachine/tests/test_cu_variable.py
@@ -4,7 +4,7 @@
from modelmachine.cu import RUNNING, HALTED
from modelmachine.cu import ControlUnitV
-from modelmachine.cu import ControlUnitS
+from modelmachine.cu import ControlUnitM
from modelmachine.memory import RegisterMemory, RandomAccessMemory
from modelmachine.alu import ArithmeticLogicUnit
@@ -14,9 +14,14 @@ from pytest import raises
from .test_cu_abstract import (BYTE_SIZE, WORD_SIZE, OP_MOVE, OP_COMP,
OP_SDIVMOD, OP_UDIVMOD,
OP_STPUSH, OP_STPOP,
+ OP_LOAD, OP_STORE, OP_RMOVE,
+ OP_RADD, OP_RSUB, OP_RSMUL, OP_RSDIVMOD,
+ OP_RCOMP, OP_RUMUL, OP_RUDIVMOD,
OP_STDUP, OP_STSWAP, OP_JUMP, OP_HALT,
- ARITHMETIC_OPCODES, CONDJUMP_OPCODES, run_fetch)
+ ARITHMETIC_OPCODES, CONDJUMP_OPCODES,
+ JUMP_OPCODES, REGISTER_OPCODES)
from .test_cu_fixed import TestControlUnit2 as TBCU2
+from .test_cu_abstract import TestControlUnit as TBCU
class TestControlUnitV(TBCU2):
@@ -45,23 +50,23 @@ class TestControlUnitV(TBCU2):
"""Right fetch and decode is a half of business."""
for opcode in set(range(2 ** BYTE_SIZE)) - self.control_unit.opcodes:
with raises(ValueError):
- run_fetch(self, opcode, opcode, BYTE_SIZE)
+ self.run_fetch(opcode, opcode, BYTE_SIZE)
for opcode in ARITHMETIC_OPCODES | {OP_COMP, OP_MOVE}:
self.control_unit.address1, self.control_unit.address2 = None, None
- run_fetch(self, opcode << 16 | 0x0203, opcode, 24)
+ self.run_fetch(opcode << 16 | 0x0203, opcode, 24)
assert self.control_unit.address1 == 0x02
assert self.control_unit.address2 == 0x03
for opcode in CONDJUMP_OPCODES | {OP_JUMP}:
self.control_unit.address1, self.control_unit.address2 = None, None
- run_fetch(self, opcode << 8 | 0x02, opcode, 16)
+ self.run_fetch(opcode << 8 | 0x02, opcode, 16)
assert self.control_unit.address1 == 0x02
assert self.control_unit.address2 == None
for opcode in {OP_HALT}:
self.control_unit.address1, self.control_unit.address2 = None, None
- run_fetch(self, opcode, opcode, 8)
+ self.run_fetch(opcode, opcode, 8)
assert self.control_unit.address1 == None
assert self.control_unit.address2 == None
@@ -175,322 +180,387 @@ class TestControlUnitV(TBCU2):
assert self.registers.fetch("PC", BYTE_SIZE) == 0x01
assert self.control_unit.get_status() == HALTED
+class TestControlUnitM(TBCU2):
-class TestControlUnitS(TBCU2):
-
- """Test case for Stack Model Machine Control Unit."""
+ """Test case for Address Modification Model Machine Control Unit."""
def setup(self):
"""Init state."""
super().setup()
- self.ram = RandomAccessMemory(BYTE_SIZE, 256, 'big', is_protected=True)
- self.control_unit = ControlUnitS(WORD_SIZE,
- BYTE_SIZE,
+ self.ram = RandomAccessMemory(2 * BYTE_SIZE, 2 ** WORD_SIZE, 'big', is_protected=True)
+ self.control_unit = ControlUnitM(WORD_SIZE,
+ 2 * BYTE_SIZE,
self.registers,
self.ram,
self.alu,
WORD_SIZE)
- assert self.control_unit.opcodes == {0x01, 0x02, 0x03, 0x04,
- 0x13, 0x14,
- 0x05,
- 0x5A, 0x5B, 0x5C, 0x5D,
+ self.operand_size = WORD_SIZE
+ self.address_size = 2 * BYTE_SIZE
+ assert self.control_unit.opcodes == {0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x10, 0x13, 0x14,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25,
+ 0x33, 0x34,
0x80, 0x81, 0x82,
0x83, 0x84, 0x85, 0x86,
0x93, 0x94, 0x95, 0x96,
0x99}
+ def test_const(self):
+ super().test_const()
+ assert self.control_unit.OPCODES["rmove"] == OP_RMOVE
+ assert self.control_unit.OPCODES["radd"] == OP_RADD
+ assert self.control_unit.OPCODES["rsub"] == OP_RSUB
+ assert self.control_unit.OPCODES["rsmul"] == OP_RSMUL
+ assert self.control_unit.OPCODES["rsdivmod"] == OP_RSDIVMOD
+ assert self.control_unit.OPCODES["rcomp"] == OP_RCOMP
+ assert self.control_unit.OPCODES["rumul"] == OP_RUMUL
+ assert self.control_unit.OPCODES["rudivmod"] == OP_RUDIVMOD
+
+ def run_fetch(self, value, opcode, instruction_size, r2=True):
+ """Run one fetch test."""
+ address1 = 10
+ address2 = 42
+ self.ram.put(address1, value, instruction_size)
+ increment = instruction_size // self.ram.word_size
+
+ self.registers.fetch.reset_mock()
+ self.registers.put.reset_mock()
+
+ def get_register(name, size):
+ """Get PC."""
+ if name == "PC":
+ assert size == 2 * BYTE_SIZE
+ return address1
+ elif name == "R2":
+ assert size == WORD_SIZE
+ return address2
+ else:
+ raise KeyError()
+
+ self.registers.fetch.side_effect = get_register
+
+ self.control_unit.fetch_and_decode()
+ if r2:
+ self.registers.fetch.assert_has_calls([call("PC", 2 * BYTE_SIZE),
+ call("R2", WORD_SIZE)])
+ else:
+ self.registers.fetch.assert_any_call("PC", 2 * BYTE_SIZE)
+ self.registers.put.assert_has_calls([call("RI", value, WORD_SIZE),
+ call("PC", address1 + increment,
+ 2 * BYTE_SIZE)])
+ assert self.control_unit.opcode == opcode
+
def test_fetch_and_decode(self):
"""Right fetch and decode is a half of business."""
for opcode in set(range(2 ** BYTE_SIZE)) - self.control_unit.opcodes:
with raises(ValueError):
- run_fetch(self, opcode, opcode, BYTE_SIZE)
+ self.run_fetch(opcode << BYTE_SIZE, opcode, 2 * BYTE_SIZE)
- for opcode in ARITHMETIC_OPCODES | {OP_COMP, OP_STDUP, OP_STSWAP,
- OP_HALT}:
+ for opcode in ARITHMETIC_OPCODES | JUMP_OPCODES | {OP_COMP, OP_LOAD, OP_STORE}:
+ self.control_unit.register1 = None
+ self.control_unit.register2 = None
+ self.control_unit.address = None
+ self.run_fetch(opcode << 24 | 0x120014, opcode, 32)
+
+ assert self.control_unit.register1 == 'R1'
+ assert self.control_unit.register2 is None
+ assert self.control_unit.address == 0x14 + 42
+
+ for opcode in REGISTER_OPCODES:
+ self.control_unit.register1 = None
+ self.control_unit.register2 = None
self.control_unit.address = None
- run_fetch(self, opcode, opcode, BYTE_SIZE)
- assert self.control_unit.address == None
- for opcode in CONDJUMP_OPCODES | {OP_STPUSH, OP_STPOP, OP_JUMP}:
+ self.run_fetch(opcode << 8 | 0x12, opcode, 16, r2=False)
+
+ assert self.control_unit.register1 == 'R1'
+ assert self.control_unit.register2 == 'R2'
+ assert self.control_unit.address is None
+
+ for opcode in {OP_HALT}:
+ self.control_unit.register1 = None
+ self.control_unit.register2 = None
self.control_unit.address = None
- run_fetch(self, opcode << 8 | 0x02, opcode, 16)
- assert self.control_unit.address == 0x02
- def test_push(self):
- """Test basic stack operation."""
- self.registers.put.reset_mock()
- self.registers.fetch.reset_mock()
- address, value, size = 10, 123, WORD_SIZE // self.ram.word_size
- self.registers.fetch.return_value = address
- self.control_unit.push(value)
- assert self.ram.fetch(address - size, WORD_SIZE) == value
- self.registers.fetch.assert_called_once_with("SP", BYTE_SIZE)
- self.registers.put.assert_called_once_with("SP",
- address - size,
- BYTE_SIZE)
-
- def test_pop(self):
- """Test basic stack operation."""
- self.registers.put.reset_mock()
- self.registers.fetch.reset_mock()
- address, value, size = 10, 123, WORD_SIZE // self.ram.word_size
- self.ram.put(address, value, WORD_SIZE)
- self.registers.fetch.return_value = address
- assert self.control_unit.pop() == value
- self.registers.fetch.assert_called_once_with("SP", BYTE_SIZE)
- self.registers.put.assert_called_once_with("SP",
- address + size,
- BYTE_SIZE)
+ self.run_fetch(opcode << 8 | 0x12, opcode, 16, r2=False)
+
+
+ assert self.control_unit.register1 is None
+ assert self.control_unit.register2 is None
+ assert self.control_unit.address is None
def test_load(self):
"""R1 := [A1], R2 := [A2]."""
- address, val1, val2, val3 = 10, 1, 2, 3
- stack = []
- def pop():
- """Pop mock."""
- return stack.pop()
- self.control_unit.pop = create_autospec(self.control_unit.pop)
- self.control_unit.pop.side_effect = pop
+ register1, val1 = 'R3', 123456
+ register2, val2 = 'R4', 654321
+ address, val3 = 10, 111111
+
+ def get_register(name, size):
+ """Get PC."""
+ assert size == WORD_SIZE
+ if name == register1:
+ return val1
+ elif name == register2:
+ return val2
+ else:
+ raise KeyError()
+
+ self.registers.fetch.side_effect = get_register
self.control_unit.address = address
+ self.control_unit.register1 = register1
+ self.control_unit.register2 = register2
self.ram.put(address, val3, WORD_SIZE)
- for opcode in ARITHMETIC_OPCODES | {OP_COMP, OP_STSWAP}:
+ for opcode in ARITHMETIC_OPCODES | {OP_LOAD, OP_COMP}:
+ self.registers.fetch.reset_mock()
self.registers.put.reset_mock()
- self.control_unit.pop.reset_mock()
- stack = [val1, val2]
+
self.control_unit.opcode = opcode
self.control_unit.load()
- self.control_unit.pop.assert_has_calls([call(), call()])
- self.registers.put.assert_has_calls([call("R1", val1, WORD_SIZE),
- call("R2", val2, WORD_SIZE)],
- True)
+ self.registers.fetch.assert_called_once_with(register1, WORD_SIZE)
+ self.registers.put.assert_has_calls([call("S", val1, WORD_SIZE),
+ call("RZ", val3, WORD_SIZE)])
- for opcode in {OP_STPOP, OP_STDUP}:
+ for opcode in {OP_STORE}:
+ self.registers.fetch.reset_mock()
self.registers.put.reset_mock()
- self.control_unit.pop.reset_mock()
- stack = [val1]
+
self.control_unit.opcode = opcode
self.control_unit.load()
- self.control_unit.pop.assert_called_once_with()
- self.registers.put.assert_called_once_with("R1", val1, WORD_SIZE)
+ self.registers.fetch.assert_called_once_with(register1, WORD_SIZE)
+ self.registers.put.assert_called_once_with("S", val1, WORD_SIZE)
- for opcode in CONDJUMP_OPCODES | {OP_JUMP}:
+ for opcode in REGISTER_OPCODES:
+ self.registers.fetch.reset_mock()
self.registers.put.reset_mock()
- self.control_unit.pop.reset_mock()
+
self.control_unit.opcode = opcode
self.control_unit.load()
- assert not self.control_unit.pop.called
- self.registers.put.assert_called_once_with("ADDR", address, BYTE_SIZE)
+ self.registers.fetch.assert_has_calls([call(register1, WORD_SIZE),
+ call(register2, WORD_SIZE)])
+ self.registers.put.assert_has_calls([call("S", val1, WORD_SIZE),
+ call("RZ", val2, WORD_SIZE)])
- for opcode in {OP_STPUSH}:
+ for opcode in CONDJUMP_OPCODES | {OP_JUMP}:
+ self.registers.fetch.reset_mock()
self.registers.put.reset_mock()
- self.control_unit.pop.reset_mock()
+
self.control_unit.opcode = opcode
self.control_unit.load()
- assert not self.control_unit.pop.called
- self.registers.put.assert_called_once_with("R1", val3, WORD_SIZE)
+
+ assert not self.registers.fetch.called
+ self.registers.put.assert_called_once_with("ADDR", address, 2 * BYTE_SIZE)
for opcode in {OP_HALT}:
+ self.registers.fetch.reset_mock()
self.registers.put.reset_mock()
- self.control_unit.pop.reset_mock()
+
self.control_unit.opcode = opcode
+
self.control_unit.load()
- assert not self.control_unit.pop.called
+ assert not self.registers.fetch.called
assert not self.registers.put.called
def test_basic_execute(self, should_move=None):
"""Test basic operations."""
- super().test_basic_execute(should_move)
+ super().test_basic_execute(should_move=should_move)
- def test_execute_stack(self):
- """stpush, stpop, stdup and stswap."""
- self.alu.cond_jump.reset_mock()
- self.alu.sub.reset_mock()
- self.registers.put.reset_mock()
-
- self.control_unit.opcode = OP_STPUSH
- self.control_unit.execute()
- self.control_unit.opcode = OP_STPOP
- self.control_unit.execute()
- assert not self.alu.move.called
- assert not self.alu.swap.called
-
- self.control_unit.opcode = OP_STDUP
- self.control_unit.execute()
- self.alu.move.assert_called_once_with(source="R1", dest="R2")
+ self.control_unit.opcode = OP_MOVE
self.alu.move.reset_mock()
- assert not self.alu.swap.called
-
- self.control_unit.opcode = OP_STSWAP
self.control_unit.execute()
- self.alu.swap.assert_called_once_with()
- assert not self.alu.move.called
-
- assert not self.alu.sub.called
- assert not self.registers.put.called
+ self.alu.move.assert_called_once_with('R2', 'S')
def run_write_back(self, should, opcode):
"""Run write back method for specific opcode."""
- first, second, third, address = 11111111, 22222222, 3333333, 10
+
+ print(hex(opcode))
+
+ register1, next_register1, register2 = 'R5', 'R6', 'R8'
+ res_register1, val1 = 'S', 123456
+ res_register2, val2 = 'RZ', 654321
+ address, canary = 10, 0
+
def get_register(name, size):
- """Get result."""
- assert name in {"R1", "R2"}
- assert size == WORD_SIZE
- if name == "R1":
- return second
- elif name == "R2":
- return third
+ """Get PC."""
+ assert size == self.operand_size
+ if name == res_register1:
+ return val1
+ elif name == res_register2:
+ return val2
+ else:
+ raise KeyError()
+
self.registers.fetch.side_effect = get_register
- self.registers.fetch.reset_mock()
- self.ram.put(address, first, WORD_SIZE)
self.control_unit.address = address
- self.control_unit.push.reset_mock()
+ self.control_unit.register1 = register1
+ self.control_unit.register2 = register2
+ self.ram.put(address, canary, self.operand_size)
+
+ self.registers.fetch.reset_mock()
+ self.registers.put.reset_mock()
self.control_unit.opcode = opcode
self.control_unit.write_back()
- if should:
- if opcode == OP_STPOP:
- assert self.ram.fetch(address, WORD_SIZE) == second
- elif opcode in {OP_SDIVMOD, OP_UDIVMOD, OP_STSWAP, OP_STDUP}:
- self.control_unit.push.assert_has_calls([call(second),
- call(third)])
- self.registers.fetch.assert_has_calls([call("R1", WORD_SIZE),
- call("R2", WORD_SIZE)])
- assert self.ram.fetch(address, WORD_SIZE) == first
- else:
- self.control_unit.push.assert_called_once_with(second)
- self.registers.fetch.assert_called_once_with("R1", WORD_SIZE)
- assert self.ram.fetch(address, WORD_SIZE) == first
+ if should == 'two_registers':
+ self.registers.fetch.assert_has_calls([call(res_register1, self.operand_size),
+ call(res_register2, self.operand_size)])
+ self.registers.put.assert_has_calls([call(register1, val1, self.operand_size),
+ call(next_register1, val2, self.operand_size)])
+ assert self.ram.fetch(address, self.operand_size) == canary
+
+ elif should == 'register':
+ self.registers.fetch.assert_called_once_with(res_register1, self.operand_size)
+ self.registers.put.assert_called_once_with(register1, val1, self.operand_size)
+ assert self.ram.fetch(address, self.operand_size) == canary
+
+ elif should == 'memory':
+ self.registers.fetch.assert_called_once_with(res_register1, self.operand_size)
+ assert not self.registers.put.called
+ assert self.ram.fetch(address, self.operand_size) == val1
+
else:
- assert not self.control_unit.push.called
assert not self.registers.fetch.called
- assert self.ram.fetch(address, WORD_SIZE) == first
+ assert not self.registers.put.called
+ assert self.ram.fetch(address, self.operand_size) == canary
def test_write_back(self):
"""Test write back result to the memory."""
- self.control_unit.push = create_autospec(self.control_unit.push)
- for opcode in ARITHMETIC_OPCODES | {OP_STPOP, OP_STPUSH, OP_STSWAP,
- OP_STDUP,}:
- self.run_write_back(True, opcode)
+ for opcode in {OP_SDIVMOD, OP_UDIVMOD}:
+ self.run_write_back('two_registers', opcode)
+
+ for opcode in (ARITHMETIC_OPCODES | {OP_LOAD}) - {OP_SDIVMOD, OP_UDIVMOD}:
+ self.run_write_back('register', opcode)
+
+ for opcode in {OP_STORE}:
+ self.run_write_back('memory', opcode)
for opcode in (CONDJUMP_OPCODES |
- {OP_HALT,
- OP_JUMP,
- OP_COMP}):
- self.run_write_back(False, opcode)
+ {OP_HALT, OP_JUMP, OP_COMP}):
+ self.run_write_back('nothing', opcode)
def test_step(self):
"""Test step cycle."""
- size = WORD_SIZE // 8
-
self.control_unit.registers = self.registers = RegisterMemory()
- self.registers.add_register("RI", WORD_SIZE)
- self.registers.add_register("SP", BYTE_SIZE)
- self.registers.put("SP", 0, BYTE_SIZE)
+ for register in {'RI', 'RZ', 'S', 'R0', 'R1', 'R2', 'R3', 'R4', 'R5', 'R6', 'R7', 'R8', 'R9', 'RA', 'RB', 'RC', 'RD', 'RE', 'RF'}:
+ self.registers.add_register(register, self.operand_size)
self.alu = ArithmeticLogicUnit(self.registers,
self.control_unit.register_names,
- WORD_SIZE,
- BYTE_SIZE)
+ self.operand_size,
+ self.address_size)
self.control_unit.alu = self.alu
- self.ram.put(0x00, 0x5a0b, 2 * BYTE_SIZE)
- self.ram.put(0x02, 0x5a0f, 2 * BYTE_SIZE)
- self.ram.put(0x04, 0x01, 1 * BYTE_SIZE)
- self.ram.put(0x05, 0x5c, 1 * BYTE_SIZE)
- self.ram.put(0x06, 0x5a13, 2 * BYTE_SIZE)
- self.ram.put(0x08, 0x05, 1 * BYTE_SIZE)
- self.ram.put(0x09, 0x8617, 2 * BYTE_SIZE)
- self.ram.put(0x0b, 12, WORD_SIZE)
- self.ram.put(0x0f, 10, WORD_SIZE)
- self.ram.put(0x13, 20, WORD_SIZE)
- self.ram.put(0x17, 0x5b0b, 2 * BYTE_SIZE)
- self.ram.put(0x19, 0x99, BYTE_SIZE)
- self.registers.put("PC", 0, BYTE_SIZE)
+ canary = 0
+ self.ram.put(0x0000, 0x00000100, WORD_SIZE)
+ self.ram.put(0x0002, 0x0300000C, WORD_SIZE)
+ self.ram.put(0x0004, 0x0400000E, WORD_SIZE)
+ self.ram.put(0x0006, 0x02100102, WORD_SIZE)
+ self.ram.put(0x0008, 0x2311, 2 * BYTE_SIZE)
+ self.ram.put(0x0009, 0x10100104, WORD_SIZE)
+ self.ram.put(0x000B, 0x9900, 2 * BYTE_SIZE)
+ self.ram.put(0x000C, 0xffffffeb, WORD_SIZE)
+ self.ram.put(0x000E, 0x00000032, WORD_SIZE)
+ self.ram.put(0x0100, -123 % 2 ** WORD_SIZE, WORD_SIZE)
+ self.ram.put(0x0102, 456, WORD_SIZE)
+ self.ram.put(0x0104, canary, WORD_SIZE)
+ self.registers.put("PC", 0, 2 * BYTE_SIZE)
self.control_unit.step()
- assert self.registers.fetch("PC", BYTE_SIZE) == 0x02
- assert self.registers.fetch("SP", BYTE_SIZE) == 2 ** BYTE_SIZE - size
- self.control_unit.step()
- assert self.registers.fetch("PC", BYTE_SIZE) == 0x04
- assert self.registers.fetch("SP", BYTE_SIZE) == 2 ** BYTE_SIZE - 2 * size
- self.control_unit.step()
- assert self.registers.fetch("PC", BYTE_SIZE) == 0x05
- assert self.registers.fetch("SP", BYTE_SIZE) == 2 ** BYTE_SIZE - size
+ assert self.registers.fetch("R0", WORD_SIZE) == -123 % 2 ** WORD_SIZE
+ assert self.registers.fetch("R1", WORD_SIZE) == 0
+ assert self.registers.fetch("PC", 2 * BYTE_SIZE) == 0x02
+ assert self.ram.fetch(0x0104, WORD_SIZE) == canary
+ assert self.control_unit.get_status() == RUNNING
+
self.control_unit.step()
- assert self.registers.fetch("PC", BYTE_SIZE) == 0x06
- assert self.registers.fetch("SP", BYTE_SIZE) == 2 ** BYTE_SIZE - 2 * size
+ assert self.registers.fetch("R0", WORD_SIZE) == (21 * 123)
+ assert self.registers.fetch("R1", WORD_SIZE) == 0
+ assert self.registers.fetch("PC", 2 * BYTE_SIZE) == 0x04
+ assert self.ram.fetch(0x0104, WORD_SIZE) == canary
+ assert self.control_unit.get_status() == RUNNING
+
self.control_unit.step()
- assert self.registers.fetch("PC", BYTE_SIZE) == 0x08
- assert self.registers.fetch("SP", BYTE_SIZE) == 2 ** BYTE_SIZE - 3 * size
+ assert self.registers.fetch("R0", WORD_SIZE) == (21 * 123) // 50
+ x = 21 * 123 % 50
+ assert self.registers.fetch("R1", WORD_SIZE) == x
+ assert self.registers.fetch("PC", 2 * BYTE_SIZE) == 0x06
+ assert self.ram.fetch(0x0104, WORD_SIZE) == canary
+ assert self.control_unit.get_status() == RUNNING
+
self.control_unit.step()
- assert self.registers.fetch("PC", BYTE_SIZE) == 0x09
- assert self.registers.fetch("SP", BYTE_SIZE) == 2 ** BYTE_SIZE - size
+ assert self.registers.fetch("R0", WORD_SIZE) == (21 * 123) // 50
+ assert self.registers.fetch("R1", WORD_SIZE) == (x - 456) % 2 ** WORD_SIZE
+ assert self.registers.fetch("PC", 2 * BYTE_SIZE) == 0x08
+ assert self.ram.fetch(0x0104, WORD_SIZE) == canary
+ assert self.control_unit.get_status() == RUNNING
+
self.control_unit.step()
- assert self.ram.fetch(0x0b, WORD_SIZE) == 12
- assert self.registers.fetch("PC", BYTE_SIZE) == 0x17
- assert self.registers.fetch("SP", BYTE_SIZE) == 2 ** BYTE_SIZE - size
+ assert self.registers.fetch("R0", WORD_SIZE) == (21 * 123) // 50
+ assert self.registers.fetch("R1", WORD_SIZE) == (x - 456) ** 2
+ assert self.registers.fetch("PC", 2 * BYTE_SIZE) == 0x09
+ assert self.ram.fetch(0x0104, WORD_SIZE) == canary
+ assert self.control_unit.get_status() == RUNNING
+
self.control_unit.step()
- assert self.ram.fetch(0x0b, WORD_SIZE) == 22
- assert self.registers.fetch("PC", BYTE_SIZE) == 0x19
- assert self.registers.fetch("SP", BYTE_SIZE) == 0
+ assert self.registers.fetch("R0", WORD_SIZE) == (21 * 123) // 50
+ assert self.registers.fetch("R1", WORD_SIZE) == (x - 456) ** 2
+ assert self.registers.fetch("PC", 2 * BYTE_SIZE) == 0x0b
+ assert self.ram.fetch(0x0104, WORD_SIZE) == (x - 456) ** 2
assert self.control_unit.get_status() == RUNNING
+
self.control_unit.step()
- assert self.ram.fetch(0x0b, WORD_SIZE) == 22
- assert self.registers.fetch("PC", BYTE_SIZE) == 0x1a
- assert self.registers.fetch("SP", BYTE_SIZE) == 0
+ assert self.registers.fetch("R0", WORD_SIZE) == (21 * 123) // 50
+ assert self.registers.fetch("R1", WORD_SIZE) == (x - 456) ** 2
+ assert self.registers.fetch("PC", 2 * BYTE_SIZE) == 0x0C
assert self.control_unit.get_status() == HALTED
def test_run(self):
"""Very simple program."""
self.control_unit.registers = self.registers = RegisterMemory()
- self.registers.add_register("RI", WORD_SIZE)
- self.registers.add_register("SP", BYTE_SIZE)
- self.registers.put("SP", 0, BYTE_SIZE)
+ for register in {'RI', 'RZ', 'S', 'R0', 'R1', 'R2', 'R3', 'R4', 'R5', 'R6', 'R7', 'R8', 'R9', 'RA', 'RB', 'RC', 'RD', 'RE', 'RF'}:
+ self.registers.add_register(register, self.operand_size)
+
self.alu = ArithmeticLogicUnit(self.registers,
self.control_unit.register_names,
- WORD_SIZE,
- BYTE_SIZE)
+ self.operand_size,
+ self.address_size)
self.control_unit.alu = self.alu
- self.ram.put(0x00, 0x5a0b, 2 * BYTE_SIZE)
- self.ram.put(0x02, 0x5a0f, 2 * BYTE_SIZE)
- self.ram.put(0x04, 0x01, 1 * BYTE_SIZE)
- self.ram.put(0x05, 0x5c, 1 * BYTE_SIZE)
- self.ram.put(0x06, 0x5a13, 2 * BYTE_SIZE)
- self.ram.put(0x08, 0x05, 1 * BYTE_SIZE)
- self.ram.put(0x09, 0x8617, 2 * BYTE_SIZE)
- self.ram.put(0x0b, 12, WORD_SIZE)
- self.ram.put(0x0f, 10, WORD_SIZE)
- self.ram.put(0x13, 20, WORD_SIZE)
- self.ram.put(0x17, 0x5b0b, 2 * BYTE_SIZE)
- self.ram.put(0x19, 0x99, BYTE_SIZE)
- self.registers.put("PC", 0, BYTE_SIZE)
+ self.ram.put(0x0000, 0x00000100, WORD_SIZE)
+ self.ram.put(0x0002, 0x0300000C, WORD_SIZE)
+ self.ram.put(0x0004, 0x0400000E, WORD_SIZE)
+ self.ram.put(0x0006, 0x02100102, WORD_SIZE)
+ self.ram.put(0x0008, 0x2311, 2 * BYTE_SIZE)
+ self.ram.put(0x0009, 0x10100104, WORD_SIZE)
+ self.ram.put(0x000B, 0x9900, 2 * BYTE_SIZE)
+ self.ram.put(0x000C, 0xffffffeb, WORD_SIZE)
+ self.ram.put(0x000E, 0x00000032, WORD_SIZE)
+ self.ram.put(0x0100, 0xffffff85, WORD_SIZE)
+ self.ram.put(0x0102, 0x000001c8, WORD_SIZE)
+ self.registers.put("PC", 0, 2 * BYTE_SIZE)
self.control_unit.run()
- assert self.ram.fetch(0x0b, WORD_SIZE) == 22
- assert self.registers.fetch("PC", BYTE_SIZE) == 0x1a
- assert self.registers.fetch("SP", BYTE_SIZE) == 0
+ assert self.ram.fetch(0x0104, WORD_SIZE) == 178929
+ assert self.registers.fetch("PC", 2 * BYTE_SIZE) == 0x000C
assert self.control_unit.get_status() == HALTED
def test_minimal_run(self):
- """Very simple program."""
+ """Minimal program."""
self.control_unit.registers = self.registers = RegisterMemory()
- self.registers.add_register("RI", WORD_SIZE)
- self.registers.add_register("SP", BYTE_SIZE)
- self.registers.put("SP", 0, BYTE_SIZE)
+ self.registers.add_register('RI', self.operand_size)
+
self.alu = ArithmeticLogicUnit(self.registers,
self.control_unit.register_names,
- WORD_SIZE,
- BYTE_SIZE)
+ self.operand_size,
+ self.address_size)
self.control_unit.alu = self.alu
- self.ram.put(0x00, 0x99, BYTE_SIZE)
- self.registers.put("PC", 0, BYTE_SIZE)
+ self.ram.put(0x00, 0x9900, 2 * BYTE_SIZE)
+ self.registers.put("PC", 0, 2 * BYTE_SIZE)
self.control_unit.run()
- assert self.registers.fetch("PC", BYTE_SIZE) == 0x01
- assert self.registers.fetch("SP", BYTE_SIZE) == 0
+ assert self.registers.fetch("PC", 2 * BYTE_SIZE) == 0x01
assert self.control_unit.get_status() == HALTED
+
diff --git a/modelmachine/tests/test_numeric.py b/modelmachine/tests/test_numeric.py
index df42766..b96f034 100644
--- a/modelmachine/tests/test_numeric.py
+++ b/modelmachine/tests/test_numeric.py
@@ -211,3 +211,21 @@ class TestNumeric:
assert dic[self.first] == 10
assert dic[self.second] == 11
assert dic[third] == 10
+
+ def test_getitem(self):
+ """Test if we can get Integer bits."""
+ assert self.first[0] == Integer(0, 1, False)
+ assert self.first[1] == Integer(1, 1, False)
+ assert self.first[2] == Integer(0, 1, False)
+ assert self.first[3] == Integer(1, 1, False)
+ assert self.first[4] == Integer(0, 1, False)
+ assert self.first[5] == Integer(0, 1, False)
+ assert self.second[0] == Integer(0, 1, False)
+ assert self.second[1] == Integer(0, 1, False)
+ assert self.second[2] == Integer(1, 1, False)
+ assert self.second[3] == Integer(1, 1, False)
+ assert self.second[4] == Integer(0, 1, False)
+ assert self.second[5] == Integer(0, 1, False)
+ assert self.first[0:6] == Integer(10, 6, False)
+ assert self.first[:6] == Integer(10, 6, False)
+ assert self.first[3:] == Integer(1, 32 - 3, False)
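
Taken together, the single-bit asserts pin the fixture down: only bits 1 and 3 are set, so self.first must be 10 (0b1010) in a 32-bit unsigned view, which is exactly why the slice asserts expect Integer(10, 6, False) and Integer(1, 29, False). A quick check, assuming first == 10:

    first = 10                       # implied by the single-bit asserts above
    assert [(first >> i) & 1 for i in range(6)] == [0, 1, 0, 1, 0, 0]
    assert first & (2 ** 6 - 1) == 10          # first[0:6]
    assert first >> 3 == 1                     # first[3:] over 29 bits
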
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 3,
"test_score": 3
},
"num_modified_files": 10
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
iniconfig==2.1.0
-e git+https://github.com/cmc-python/modelmachine.git@fa9275d64498c7cbe24f02357bbb1bc971670756#egg=modelmachine
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
tomli==2.2.1
| name: modelmachine
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/modelmachine
| [
"modelmachine/tests/test_numeric.py::TestNumeric::test_index"
]
| [
"modelmachine/tests/test_cpu.py::TestAbstractCPU::test_load_program",
"modelmachine/tests/test_cpu.py::TestAbstractCPU::test_print_result",
"modelmachine/tests/test_cpu.py::TestAbstractCPU::test_run_file",
"modelmachine/tests/test_cpu.py::TestCPUMM3::test_smoke",
"modelmachine/tests/test_cpu.py::TestCPUMM2::test_smoke",
"modelmachine/tests/test_cpu.py::TestCPUMMV::test_smoke",
"modelmachine/tests/test_cpu.py::TestCPUMM1::test_smoke",
"modelmachine/tests/test_cpu.py::TestCPUMMM::test_smoke",
"modelmachine/tests/test_cu_abstract.py::TestAbstractControlUnit::test_get_status",
"modelmachine/tests/test_cu_abstract.py::TestAbstractControlUnit::test_abstract_methods",
"modelmachine/tests/test_cu_abstract.py::TestAbstractControlUnit::test_step_and_run",
"modelmachine/tests/test_cu_abstract.py::TestControlUnit::test_const",
"modelmachine/tests/test_cu_abstract.py::TestControlUnit::test_fetch_and_decode",
"modelmachine/tests/test_cu_abstract.py::TestControlUnit::test_load",
"modelmachine/tests/test_cu_abstract.py::TestControlUnit::test_write_back",
"modelmachine/tests/test_cu_abstract.py::TestControlUnit::test_fetch_instruction",
"modelmachine/tests/test_cu_abstract.py::TestControlUnit::test_basic_execute",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit3::test_const",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit3::test_fetch_instruction",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit3::test_fetch_and_decode",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit3::test_load",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit3::test_basic_execute",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit3::test_execute_cond_jumps",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit3::test_execute_jump_halt",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit3::test_write_back",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit3::test_step",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit3::test_run",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit2::test_const",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit2::test_fetch_instruction",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit2::test_basic_execute",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit2::test_execute_cond_jumps",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit2::test_execute_jump_halt",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit2::test_fetch_and_decode",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit2::test_load",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit2::test_execute_comp",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit2::test_write_back",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit2::test_step",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit2::test_run",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit1::test_const",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit1::test_fetch_instruction",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit1::test_execute_cond_jumps",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit1::test_execute_jump_halt",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit1::test_fetch_and_decode",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit1::test_load",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit1::test_basic_execute",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit1::test_execute_comp",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit1::test_execute_load_store_swap",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit1::test_write_back",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit1::test_step",
"modelmachine/tests/test_cu_fixed.py::TestControlUnit1::test_run",
"modelmachine/tests/test_cu_variable.py::TestControlUnitV::test_const",
"modelmachine/tests/test_cu_variable.py::TestControlUnitV::test_fetch_instruction",
"modelmachine/tests/test_cu_variable.py::TestControlUnitV::test_basic_execute",
"modelmachine/tests/test_cu_variable.py::TestControlUnitV::test_execute_cond_jumps",
"modelmachine/tests/test_cu_variable.py::TestControlUnitV::test_execute_jump_halt",
"modelmachine/tests/test_cu_variable.py::TestControlUnitV::test_execute_comp",
"modelmachine/tests/test_cu_variable.py::TestControlUnitV::test_write_back",
"modelmachine/tests/test_cu_variable.py::TestControlUnitV::test_fetch_and_decode",
"modelmachine/tests/test_cu_variable.py::TestControlUnitV::test_load",
"modelmachine/tests/test_cu_variable.py::TestControlUnitV::test_step",
"modelmachine/tests/test_cu_variable.py::TestControlUnitV::test_run",
"modelmachine/tests/test_cu_variable.py::TestControlUnitV::test_minimal_run",
"modelmachine/tests/test_cu_variable.py::TestControlUnitM::test_fetch_instruction",
"modelmachine/tests/test_cu_variable.py::TestControlUnitM::test_execute_cond_jumps",
"modelmachine/tests/test_cu_variable.py::TestControlUnitM::test_execute_jump_halt",
"modelmachine/tests/test_cu_variable.py::TestControlUnitM::test_execute_comp",
"modelmachine/tests/test_cu_variable.py::TestControlUnitM::test_const",
"modelmachine/tests/test_cu_variable.py::TestControlUnitM::test_fetch_and_decode",
"modelmachine/tests/test_cu_variable.py::TestControlUnitM::test_load",
"modelmachine/tests/test_cu_variable.py::TestControlUnitM::test_basic_execute",
"modelmachine/tests/test_cu_variable.py::TestControlUnitM::test_write_back",
"modelmachine/tests/test_cu_variable.py::TestControlUnitM::test_step",
"modelmachine/tests/test_cu_variable.py::TestControlUnitM::test_run",
"modelmachine/tests/test_cu_variable.py::TestControlUnitM::test_minimal_run",
"modelmachine/tests/test_numeric.py::TestNumeric::test_init",
"modelmachine/tests/test_numeric.py::TestNumeric::test_check_compability",
"modelmachine/tests/test_numeric.py::TestNumeric::test_get_value",
"modelmachine/tests/test_numeric.py::TestNumeric::test_add",
"modelmachine/tests/test_numeric.py::TestNumeric::test_mul",
"modelmachine/tests/test_numeric.py::TestNumeric::test_sub",
"modelmachine/tests/test_numeric.py::TestNumeric::test_eq",
"modelmachine/tests/test_numeric.py::TestNumeric::test_divmod",
"modelmachine/tests/test_numeric.py::TestNumeric::test_get_data",
"modelmachine/tests/test_numeric.py::TestNumeric::test_hash",
"modelmachine/tests/test_numeric.py::TestNumeric::test_getitem"
]
| []
| []
| Do What The F*ck You Want To Public License | 364 | [
"Makefile",
"modelmachine/__main__.py",
"setup.py",
"samples/mmm_sample.mmach",
"modelmachine/ide.py",
"README.md",
"modelmachine/cu.py",
"modelmachine/memory.py",
"modelmachine/numeric.py",
"modelmachine/io.py",
"modelmachine/cpu.py"
]
| [
"Makefile",
"modelmachine/__main__.py",
"setup.py",
"samples/mmm_sample.mmach",
"modelmachine/ide.py",
"README.md",
"modelmachine/cu.py",
"modelmachine/memory.py",
"modelmachine/numeric.py",
"modelmachine/io.py",
"modelmachine/cpu.py"
]
|
|
Pylons__webob-229 | 87a1254c1818859c066268755621254d2ab086a0 | 2016-01-03 07:23:46 | 9400c049d05c8ba350daf119aa16ded24ece31f6 | diff --git a/contributing.md b/contributing.md
deleted file mode 100644
index ad0ae99..0000000
--- a/contributing.md
+++ /dev/null
@@ -1,111 +0,0 @@
-Contributing
-============
-
-All projects under the Pylons Projects, including this one, follow the
-guidelines established at [How to
-Contribute](http://www.pylonsproject.org/community/how-to-contribute) and
-[Coding Style and
-Standards](http://docs.pylonsproject.org/en/latest/community/codestyle.html).
-
-You can contribute to this project in several ways.
-
-* [File an Issue on GitHub](https://github.com/Pylons/webob/issues)
-* Fork this project and create a branch with your suggested change. When ready,
- submit a pull request for consideration. [GitHub
- Flow](https://guides.github.com/introduction/flow/index.html) describes the
- workflow process and why it's a good practice.
-* Join the IRC channel #pyramid on irc.freenode.net.
-
-
-Git Branches
-------------
-Git branches and their purpose and status at the time of this writing are
-listed below.
-
-* [master](https://github.com/Pylons/webob/) - The branch on which further
-development takes place. The default branch on GitHub.
-* [1.5-branch](https://github.com/Pylons/webob/tree/1.5-branch) - The branch
-classified as "stable" or "latest". Actively maintained.
-* [1.4-branch](https://github.com/Pylons/webob/tree/1.4-branch) - The oldest
-actively maintained and stable branch.
-
-Older branches are not actively maintained. In general, two stable branches and
-one or two development branches are actively maintained.
-
-
-Running Tests
--------------
-
-*Note:* This section needs better instructions.
-
-Run `tox` from within your checkout. This will run the tests across all
-supported systems and attempt to build the docs.
-
-To run the tests for Python 2.x only:
-
- $ tox py2-cover
-
-To build the docs for Python 3.x only:
-
- $ tox py3-docs
-
-See the `tox.ini` file for details.
-
-
-Building documentation for a Pylons Project project
----------------------------------------------------
-
-*Note:* These instructions might not work for Windows users. Suggestions to
-improve the process for Windows users are welcome by submitting an issue or a
-pull request.
-
-1. Fork the repo on GitHub by clicking the [Fork] button.
-2. Clone your fork into a workspace on your local machine.
-
- git clone [email protected]:<username>/webob.git
-
-3. Add a git remote "upstream" for the cloned fork.
-
- git remote add upstream [email protected]:Pylons/webob.git
-
-4. Set an environment variable to your virtual environment.
-
- # Mac and Linux
- $ export VENV=~/hack-on-webob/env
-
- # Windows
- set VENV=c:\hack-on-webob\env
-
-5. Try to build the docs in your workspace.
-
- # Mac and Linux
- $ make clean html SPHINXBUILD=$VENV/bin/sphinx-build
-
- # Windows
- c:\> make clean html SPHINXBUILD=%VENV%\bin\sphinx-build
-
- If successful, then you can make changes to the documentation. You can
- load the built documentation in the `/_build/html/` directory in a web
- browser.
-
-6. From this point forward, follow the typical [git
- workflow](https://help.github.com/articles/what-is-a-good-git-workflow/).
- Start by pulling from the upstream to get the most current changes.
-
- git pull upstream master
-
-7. Make a branch, make changes to the docs, and rebuild them as indicated in
- step 5. To speed up the build process, you can omit `clean` from the above
- command to rebuild only those pages that depend on the files you have
- changed.
-
-8. Once you are satisfied with your changes and the documentation builds
- successfully without errors or warnings, then git commit and push them to
- your "origin" repository on GitHub.
-
- git commit -m "commit message"
- git push -u origin --all # first time only, subsequent can be just 'git push'.
-
-9. Create a [pull request](https://help.github.com/articles/using-pull-requests/).
-
-10. Repeat the process starting from Step 6.
\ No newline at end of file
diff --git a/docs/do-it-yourself.txt b/docs/do-it-yourself.txt
index 3b65c7d..381051c 100644
--- a/docs/do-it-yourself.txt
+++ b/docs/do-it-yourself.txt
@@ -324,7 +324,7 @@ Now we'll show a basic application. Just a hello world application for now. No
... return 'Hello %s!' % req.params['name']
... elif req.method == 'GET':
... return '''<form method="POST">
- ... Your name: <input type="text" name="name">
+ ... You're name: <input type="text" name="name">
... <input type="submit">
... </form>'''
>>> hello_world = Router()
@@ -342,7 +342,7 @@ Now let's test that application:
Content-Length: 131
<BLANKLINE>
<form method="POST">
- Your name: <input type="text" name="name">
+ You're name: <input type="text" name="name">
<input type="submit">
</form>
>>> req.method = 'POST'
@@ -421,7 +421,7 @@ Here's the hello world:
... self.request = req
... def get(self):
... return '''<form method="POST">
- ... Your name: <input type="text" name="name">
+ ... You're name: <input type="text" name="name">
... <input type="submit">
... </form>'''
... def post(self):
@@ -442,7 +442,7 @@ We'll run the same test as before:
Content-Length: 131
<BLANKLINE>
<form method="POST">
- Your name: <input type="text" name="name">
+ You're name: <input type="text" name="name">
<input type="submit">
</form>
>>> req.method = 'POST'
@@ -462,7 +462,7 @@ You can use hard-coded links in your HTML, but this can have problems. Relative
The base URL using SCRIPT_NAME is ``req.application_url``. So, if we have access to the request we can make a URL. But what if we don't have access?
-We can use thread-local variables to make it easy for any function to get access to the current request. A "thread-local" variable is a variable whose value is tracked separately for each thread, so if there are multiple requests in different threads, their requests won't clobber each other.
+We can use thread-local variables to make it easy for any function to get access to the currect request. A "thread-local" variable is a variable whose value is tracked separately for each thread, so if there are multiple requests in different threads, their requests won't clobber each other.
The basic means of using a thread-local variable is ``threading.local()``. This creates a blank object that can have thread-local attributes assigned to it. I find the best way to get *at* a thread-local value is with a function, as this makes it clear that you are fetching the object, as opposed to getting at some global object.
diff --git a/docs/index.txt b/docs/index.txt
index 49575a8..d96240b 100644
--- a/docs/index.txt
+++ b/docs/index.txt
@@ -1,5 +1,3 @@
-.. _index:
-
WebOb
+++++
diff --git a/webob/descriptors.py b/webob/descriptors.py
index 505a2b6..5fd26eb 100644
--- a/webob/descriptors.py
+++ b/webob/descriptors.py
@@ -138,6 +138,9 @@ def header_getter(header, rfc_section):
def fset(r, value):
fdel(r)
if value is not None:
+ if '\n' in value or '\r' in value:
+ raise ValueError('Header value may not contain control characters')
+
if isinstance(value, text_type) and not PY3:
value = value.encode('latin-1')
r._headerlist.append((header, value))
diff --git a/webob/exc.py b/webob/exc.py
index a67a867..57a81b5 100644
--- a/webob/exc.py
+++ b/webob/exc.py
@@ -481,6 +481,9 @@ ${html_comment}''')
detail=detail, headers=headers, comment=comment,
body_template=body_template)
if location is not None:
+ if '\n' in location or '\r' in location:
+ raise ValueError('Control characters are not allowed in location')
+
self.location = location
if add_slash:
raise TypeError(
| Possible HTTP Response Splitting Vulnerability
Hi,
Please review the published advisory; the issue is probably in a part of the WebOb API that is not documented here: http://docs.webob.org/en/latest/api/exceptions.html
There are probably other WebOb applications with similar issues.
Here is the advisory: http://www.zeroscience.mk/en/vulnerabilities/ZSL-2015-5267.php
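For context, HTTP response splitting works by smuggling CR/LF into a header value built from attacker-controlled input, which lets the attacker terminate the header block and inject extra headers or even a second response. The patch above guards against this by rejecting control characters in `header_getter` and in `_HTTPMove.location`; below is a minimal standalone sketch of that class of check (illustrative only, not WebOb's actual API):

```python
def set_location_header(headers, value):
    """Append a Location header, rejecting CR/LF so attacker-supplied
    input cannot terminate the header block and inject extra headers."""
    if '\r' in value or '\n' in value:
        raise ValueError('Header value may not contain control characters')
    headers.append(('Location', value))

headers = []
set_location_header(headers, 'http://example.com/next')  # accepted
try:
    # A malicious redirect target attempting to inject a header:
    set_location_header(headers, 'http://example.com/\r\nSet-Cookie: x=1')
except ValueError as exc:
    print('rejected: %s' % exc)
```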
Thanks | Pylons/webob | diff --git a/tests/test_descriptors.py b/tests/test_descriptors.py
index 7bf229f..eb3d316 100644
--- a/tests/test_descriptors.py
+++ b/tests/test_descriptors.py
@@ -155,6 +155,14 @@ def test_header_getter_fset_text():
desc.fset(resp, text_('avalue'))
eq_(desc.fget(resp), 'avalue')
+def test_header_getter_fset_text_control_chars():
+ from webob.compat import text_
+ from webob.descriptors import header_getter
+ from webob import Response
+ resp = Response('aresp')
+ desc = header_getter('AHEADER', '14.3')
+ assert_raises(ValueError, desc.fset, resp, text_('\n'))
+
def test_header_getter_fdel():
from webob.descriptors import header_getter
from webob import Response
diff --git a/tests/test_exc.py b/tests/test_exc.py
index 4f7c238..dcb1fed 100644
--- a/tests/test_exc.py
+++ b/tests/test_exc.py
@@ -259,6 +259,17 @@ def test_HTTPMove_location_not_none():
m = webob_exc._HTTPMove(location='http://example.com')
assert_equal( m( environ, start_response ), [] )
+def test_HTTPMove_location_newlines():
+ environ = {
+ 'wsgi.url_scheme': 'HTTP',
+ 'SERVER_NAME': 'localhost',
+ 'SERVER_PORT': '80',
+ 'REQUEST_METHOD': 'HEAD',
+ 'PATH_INFO': '/',
+ }
+ assert_raises(ValueError, webob_exc._HTTPMove,
+ location='http://example.com\r\nX-Test: false')
+
def test_HTTPMove_add_slash_and_location():
def start_response(status, headers, exc_info=None):
pass
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_removed_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 4
} | 1.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"coverage",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
nose==1.3.7
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
-e git+https://github.com/Pylons/webob.git@87a1254c1818859c066268755621254d2ab086a0#egg=WebOb
| name: webob
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- nose==1.3.7
prefix: /opt/conda/envs/webob
| [
"tests/test_descriptors.py::test_header_getter_fset_text_control_chars",
"tests/test_exc.py::test_HTTPMove_location_newlines"
]
| []
| [
"tests/test_descriptors.py::test_environ_getter_docstring",
"tests/test_descriptors.py::test_environ_getter_nodefault_keyerror",
"tests/test_descriptors.py::test_environ_getter_nodefault_fget",
"tests/test_descriptors.py::test_environ_getter_nodefault_fdel",
"tests/test_descriptors.py::test_environ_getter_default_fget",
"tests/test_descriptors.py::test_environ_getter_default_fset",
"tests/test_descriptors.py::test_environ_getter_default_fset_none",
"tests/test_descriptors.py::test_environ_getter_default_fdel",
"tests/test_descriptors.py::test_environ_getter_rfc_section",
"tests/test_descriptors.py::test_upath_property_fget",
"tests/test_descriptors.py::test_upath_property_fset",
"tests/test_descriptors.py::test_header_getter_doc",
"tests/test_descriptors.py::test_header_getter_fget",
"tests/test_descriptors.py::test_header_getter_fset",
"tests/test_descriptors.py::test_header_getter_fset_none",
"tests/test_descriptors.py::test_header_getter_fset_text",
"tests/test_descriptors.py::test_header_getter_fdel",
"tests/test_descriptors.py::test_header_getter_unicode_fget_none",
"tests/test_descriptors.py::test_header_getter_unicode_fget",
"tests/test_descriptors.py::test_header_getter_unicode_fset_none",
"tests/test_descriptors.py::test_header_getter_unicode_fset",
"tests/test_descriptors.py::test_header_getter_unicode_fdel",
"tests/test_descriptors.py::test_converter_not_prop",
"tests/test_descriptors.py::test_converter_with_name_docstring",
"tests/test_descriptors.py::test_converter_with_name_fget",
"tests/test_descriptors.py::test_converter_with_name_fset",
"tests/test_descriptors.py::test_converter_without_name_fget",
"tests/test_descriptors.py::test_converter_without_name_fset",
"tests/test_descriptors.py::test_converter_none_for_wrong_type",
"tests/test_descriptors.py::test_converter_delete",
"tests/test_descriptors.py::test_list_header",
"tests/test_descriptors.py::test_parse_list_single",
"tests/test_descriptors.py::test_parse_list_multiple",
"tests/test_descriptors.py::test_parse_list_none",
"tests/test_descriptors.py::test_parse_list_unicode_single",
"tests/test_descriptors.py::test_parse_list_unicode_multiple",
"tests/test_descriptors.py::test_serialize_list",
"tests/test_descriptors.py::test_serialize_list_string",
"tests/test_descriptors.py::test_serialize_list_unicode",
"tests/test_descriptors.py::test_converter_date",
"tests/test_descriptors.py::test_converter_date_docstring",
"tests/test_descriptors.py::test_date_header_fget_none",
"tests/test_descriptors.py::test_date_header_fset_fget",
"tests/test_descriptors.py::test_date_header_fdel",
"tests/test_descriptors.py::test_deprecated_property",
"tests/test_descriptors.py::test_parse_etag_response",
"tests/test_descriptors.py::test_parse_etag_response_quoted",
"tests/test_descriptors.py::test_parse_etag_response_is_none",
"tests/test_descriptors.py::test_serialize_etag_response",
"tests/test_descriptors.py::test_serialize_if_range_string",
"tests/test_descriptors.py::test_serialize_if_range_unicode",
"tests/test_descriptors.py::test_serialize_if_range_datetime",
"tests/test_descriptors.py::test_serialize_if_range_other",
"tests/test_descriptors.py::test_parse_range_none",
"tests/test_descriptors.py::test_parse_range_type",
"tests/test_descriptors.py::test_parse_range_values",
"tests/test_descriptors.py::test_serialize_range_none",
"tests/test_descriptors.py::test_serialize_range",
"tests/test_descriptors.py::test_parse_int_none",
"tests/test_descriptors.py::test_parse_int_emptystr",
"tests/test_descriptors.py::test_parse_int",
"tests/test_descriptors.py::test_parse_int_invalid",
"tests/test_descriptors.py::test_parse_int_safe_none",
"tests/test_descriptors.py::test_parse_int_safe_emptystr",
"tests/test_descriptors.py::test_parse_int_safe",
"tests/test_descriptors.py::test_parse_int_safe_invalid",
"tests/test_descriptors.py::test_serialize_int",
"tests/test_descriptors.py::test_parse_content_range_none",
"tests/test_descriptors.py::test_parse_content_range_emptystr",
"tests/test_descriptors.py::test_parse_content_range_length",
"tests/test_descriptors.py::test_parse_content_range_start",
"tests/test_descriptors.py::test_parse_content_range_stop",
"tests/test_descriptors.py::test_serialize_content_range_none",
"tests/test_descriptors.py::test_serialize_content_range_emptystr",
"tests/test_descriptors.py::test_serialize_content_range_invalid",
"tests/test_descriptors.py::test_serialize_content_range_asterisk",
"tests/test_descriptors.py::test_serialize_content_range_defined",
"tests/test_descriptors.py::test_parse_auth_params_leading_capital_letter",
"tests/test_descriptors.py::test_parse_auth_params_trailing_capital_letter",
"tests/test_descriptors.py::test_parse_auth_params_doublequotes",
"tests/test_descriptors.py::test_parse_auth_params_multiple_values",
"tests/test_descriptors.py::test_parse_auth_params_truncate_on_comma",
"tests/test_descriptors.py::test_parse_auth_params_emptystr",
"tests/test_descriptors.py::test_parse_auth_params_bad_whitespace",
"tests/test_descriptors.py::test_authorization2",
"tests/test_descriptors.py::test_parse_auth_none",
"tests/test_descriptors.py::test_parse_auth_emptystr",
"tests/test_descriptors.py::test_parse_auth_basic",
"tests/test_descriptors.py::test_parse_auth_basic_quoted",
"tests/test_descriptors.py::test_parse_auth_basic_quoted_multiple_unknown",
"tests/test_descriptors.py::test_parse_auth_basic_quoted_known_multiple",
"tests/test_descriptors.py::test_serialize_auth_none",
"tests/test_descriptors.py::test_serialize_auth_emptystr",
"tests/test_descriptors.py::test_serialize_auth_basic_quoted",
"tests/test_descriptors.py::test_serialize_auth_digest_multiple",
"tests/test_descriptors.py::test_serialize_auth_digest_tuple",
"tests/test_descriptors.py::TestEnvironDecoder::test_default_fdel",
"tests/test_descriptors.py::TestEnvironDecoder::test_default_fget",
"tests/test_descriptors.py::TestEnvironDecoder::test_default_fset",
"tests/test_descriptors.py::TestEnvironDecoder::test_default_fset_none",
"tests/test_descriptors.py::TestEnvironDecoder::test_docstring",
"tests/test_descriptors.py::TestEnvironDecoder::test_fget_nonascii",
"tests/test_descriptors.py::TestEnvironDecoder::test_fset_nonascii",
"tests/test_descriptors.py::TestEnvironDecoder::test_nodefault_fdel",
"tests/test_descriptors.py::TestEnvironDecoder::test_nodefault_fget",
"tests/test_descriptors.py::TestEnvironDecoder::test_nodefault_keyerror",
"tests/test_descriptors.py::TestEnvironDecoder::test_rfc_section",
"tests/test_descriptors.py::TestEnvironDecoderLegacy::test_default_fdel",
"tests/test_descriptors.py::TestEnvironDecoderLegacy::test_default_fget",
"tests/test_descriptors.py::TestEnvironDecoderLegacy::test_default_fget_nonascii",
"tests/test_descriptors.py::TestEnvironDecoderLegacy::test_default_fset",
"tests/test_descriptors.py::TestEnvironDecoderLegacy::test_default_fset_none",
"tests/test_descriptors.py::TestEnvironDecoderLegacy::test_docstring",
"tests/test_descriptors.py::TestEnvironDecoderLegacy::test_fget_nonascii",
"tests/test_descriptors.py::TestEnvironDecoderLegacy::test_fset_nonascii",
"tests/test_descriptors.py::TestEnvironDecoderLegacy::test_nodefault_fdel",
"tests/test_descriptors.py::TestEnvironDecoderLegacy::test_nodefault_fget",
"tests/test_descriptors.py::TestEnvironDecoderLegacy::test_nodefault_keyerror",
"tests/test_descriptors.py::TestEnvironDecoderLegacy::test_rfc_section",
"tests/test_exc.py::test_noescape_null",
"tests/test_exc.py::test_noescape_not_basestring",
"tests/test_exc.py::test_noescape_unicode",
"tests/test_exc.py::test_strip_tags_empty",
"tests/test_exc.py::test_strip_tags_newline_to_space",
"tests/test_exc.py::test_strip_tags_zaps_carriage_return",
"tests/test_exc.py::test_strip_tags_br_to_newline",
"tests/test_exc.py::test_strip_tags_zaps_comments",
"tests/test_exc.py::test_strip_tags_zaps_tags",
"tests/test_exc.py::test_HTTPException",
"tests/test_exc.py::test_exception_with_unicode_data",
"tests/test_exc.py::test_WSGIHTTPException_headers",
"tests/test_exc.py::test_WSGIHTTPException_w_body_template",
"tests/test_exc.py::test_WSGIHTTPException_w_empty_body",
"tests/test_exc.py::test_WSGIHTTPException___str__",
"tests/test_exc.py::test_WSGIHTTPException_plain_body_no_comment",
"tests/test_exc.py::test_WSGIHTTPException_html_body_w_comment",
"tests/test_exc.py::test_WSGIHTTPException_generate_response",
"tests/test_exc.py::test_WSGIHTTPException_call_w_body",
"tests/test_exc.py::test_WSGIHTTPException_wsgi_response",
"tests/test_exc.py::test_WSGIHTTPException_exception_newstyle",
"tests/test_exc.py::test_WSGIHTTPException_exception_no_newstyle",
"tests/test_exc.py::test_HTTPOk_head_of_proxied_head",
"tests/test_exc.py::test_HTTPMove",
"tests/test_exc.py::test_HTTPMove_location_not_none",
"tests/test_exc.py::test_HTTPMove_add_slash_and_location",
"tests/test_exc.py::test_HTTPMove_call_add_slash",
"tests/test_exc.py::test_HTTPMove_call_query_string",
"tests/test_exc.py::test_HTTPExceptionMiddleware_ok",
"tests/test_exc.py::test_HTTPExceptionMiddleware_exception",
"tests/test_exc.py::test_HTTPExceptionMiddleware_exception_exc_info_none",
"tests/test_exc.py::test_status_map_is_deterministic"
]
| []
| null | 365 | [
"docs/do-it-yourself.txt",
"webob/descriptors.py",
"docs/index.txt",
"contributing.md",
"webob/exc.py"
]
| [
"docs/do-it-yourself.txt",
"webob/descriptors.py",
"docs/index.txt",
"contributing.md",
"webob/exc.py"
]
|
|
pika__pika-685 | 8be81a21d8b554ee9af4fae08907956e5b8b138f | 2016-01-04 01:56:37 | f73f9bbaddd90b03583a6693f6158e56fbede948 | vitaly-krugl: Can anyone help me figure out why `assert_any_call` is failing in the python 2.6 build https://travis-ci.org/pika/pika/jobs/100035773 ? Many thanks
CC @gst, @gmr
gst: having a look into that..
gst: can't reproduce directly on my side..
gst: the only error I can trigger (in python 2.6, but I think it's the same with other versions) is this one:
```
Test that poll() is properly restarted after receiving EINTR error. ... FAIL
======================================================================
FAIL: Test that poll() is properly restarted after receiving EINTR error.
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/mock.py", line 1201, in patched
return func(*args, **keywargs)
File "/home/gstarck/work/public/python/pika/tests/unit/select_connection_ioloop_tests.py", line 396, in test_eintr
self.assertEqual(is_resumable_mock.call_count, 1)
AssertionError: 0 != 1
-------------------- >> begin captured logging << --------------------
pika.adapters.select_connection: DEBUG: Using SelectPoller
pika.adapters.select_connection: DEBUG: Using SelectPoller
pika.adapters.select_connection: DEBUG: Starting IOLoop
pika.adapters.select_connection: DEBUG: Stopping IOLoop
--------------------- >> end captured logging << ---------------------
```
I can trigger it, "simply" by executing many instances of the test at the same time, with something like this :
`$ for i in $(seq 100) ; do ( nosetests -x tests/acceptance/blocking_adapter_test.py:TestUnroutableMessagesReturnedInNonPubackMode &>/tmp/res$i || echo res$i failed) & done`
this also triggers quite a lot of "Timed out" errors, which is expected given the load generated by executing so many tests in parallel..
gst: strange; when you take a look at the debug output:
```
root: DEBUG: ZZZ self.connection.callbacks.process.call_args_list: [call(0, '_on_connection_error', <pika.connection.Connection object at 0x2f92dd0>, <pika.connection.Connection object at 0x2f92dd0>, Client was disconnected at a connection stage indicating a probable denial of access to the specified virtual host: (1, 'error text')),
```
you can see the mock has indeed been called with, as far as I can see, the expected arguments..
strange..
gst: For reference/archival purposes, the actual error:
```
======================================================================
FAIL: on_disconnect invokes `ON_CONNECTION_ERROR` with `ProbableAccessDeniedError` and `ON_CONNECTION_CLOSED` callbacks
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/pika/pika/tests/unit/connection_tests.py", line 228, in test_on_disconnect_invokes_access_on_connection_error_and_closed
mock.ANY)
File "/home/travis/virtualenv/python2.6.9/lib/python2.6/site-packages/mock/mock.py", line 999, in assert_any_call
), cause)
File "/home/travis/virtualenv/python2.6.9/lib/python2.6/site-packages/six.py", line 718, in raise_from
raise value
AssertionError: mock(0, '_on_connection_error', <pika.connection.Connection object at 0x2ec4dd0>, <pika.connection.Connection object at 0x2ec4dd0>, <ANY>) call not found
-------------------- >> begin captured logging << --------------------
pika.callback: DEBUG: Added: {'callback': <bound method Connection._on_connection_error of <pika.connection.Connection object at 0x2ec4dd0>>, 'only': None, 'one_shot': False, 'arguments': None}
pika.callback: DEBUG: Added: {'callback': <bound method Connection._on_connection_start of <pika.connection.Connection object at 0x2ec4dd0>>, 'only': None, 'one_shot': True, 'arguments': None, 'calls': 1}
pika.callback: DEBUG: Added: {'callback': <bound method Connection._on_connection_closed of <pika.connection.Connection object at 0x2ec4dd0>>, 'only': None, 'one_shot': True, 'arguments': None, 'calls': 1}
pika.connection: WARNING: Disconnected from RabbitMQ at localhost:5672 from_adapter=True (1): error text
pika.connection: ERROR: Socket closed while tuning the connection indicating a probable permission error when accessing a virtual host
pika.connection: ERROR: Connection setup failed due to Client was disconnected at a connection stage indicating a probable denial of access to the specified virtual host: (1, 'error text')
pika.callback: DEBUG: Incremented callback reference counter: {'callback': <bound method Connection._on_connection_start of <pika.connection.Connection object at 0x2ec4dd0>>, 'only': None, 'one_shot': True, 'arguments': None, 'calls': 2}
pika.callback: DEBUG: Incremented callback reference counter: {'callback': <bound method Connection._on_connection_closed of <pika.connection.Connection object at 0x2ec4dd0>>, 'only': None, 'one_shot': True, 'arguments': None, 'calls': 2}
root: DEBUG: ZZZ self.connection.callbacks.process.call_args_list: [call(0, '_on_connection_error', <pika.connection.Connection object at 0x2ec4dd0>, <pika.connection.Connection object at 0x2ec4dd0>, Client was disconnected at a connection stage indicating a probable denial of access to the specified virtual host: (1, 'error text')),
call(0, '_on_connection_closed', <pika.connection.Connection object at 0x2ec4dd0>, <pika.connection.Connection object at 0x2ec4dd0>, 1, 'error text')]
--------------------- >> end captured logging << ---------------------
```
does it reproduce on Travis?
gst: could be due to the mock version..
gst: @vitaly-krugl
I'd like to know the mock version used by Travis; how can I find out?
In fact, it would be good if a "pip freeze" were executed before the tests run, so that we could see all the versions Travis is using..
vitaly-krugl: @gst, I was off the grid for a few days. Just got back today. Thanks for looking at this problem.
I don't know which version of mock it uses, but https://github.com/pika/pika/blob/master/test-requirements.txt specifies `mock` without a version. This would imply that it would load the most recent version. I like your idea about adding `pip freeze`. Perhaps this is something that could be added via https://github.com/pika/pika/blob/master/.travis.yml?
vitaly-krugl: @gst, I am able to reproduce these problems with mock 1.3.0 on python 2.7.10.
I've had nothing but trouble with the latest mock.
vitaly-krugl: @gst, getting rid of the spec arg in the mock allows the tests to pass now. I also switched to a context manager for patching, but I don't think the context manager is important in this case. However, spec/autospec/spec_set are really valuable, and it's a shame that I can't get the tests to pass using them.
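For illustration, a minimal sketch of the shape of that workaround (hypothetical names, not pika's actual test code): a plain patch through a context manager, with no spec argument, keeps `assert_any_call` working under mock 1.3.0.

```python
import mock  # on Python 3 this could be: from unittest import mock

class Processor(object):
    def process(self, *args):
        raise NotImplementedError

# Plain patch via a context manager; no spec/autospec/spec_set argument.
with mock.patch.object(Processor, 'process') as process_mock:
    Processor().process(0, '_on_connection_error', 'conn', 'error text')
    # Under mock 1.3.0 the equivalent assertion against a spec'ed mock
    # failed spuriously; without the spec argument it matches as expected.
    process_mock.assert_any_call(0, '_on_connection_error', 'conn', mock.ANY)
```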
gst: @vitaly-krugl: good to know you can reproduce it :)
and yes, for getting the pip freeze output the place is indeed `.travis.yml`; it could be added after the `- pip install -r test-requirements.txt` line in the install section..
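A minimal sketch of what that `.travis.yml` addition might look like (illustrative only; the actual change is in the PR linked below):

```yaml
install:
  - pip install -r test-requirements.txt
  # record the exact package versions Travis resolved
  - pip freeze
```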
vitaly-krugl: @gst, would you like to submit a PR with the `pip freeze` change? thx
gst: here it is.. https://github.com/pika/pika/pull/689 | diff --git a/docs/version_history.rst b/docs/version_history.rst
index 349fbe7..06530b7 100644
--- a/docs/version_history.rst
+++ b/docs/version_history.rst
@@ -10,6 +10,8 @@ Next Release
- In BaseConnection.close, call _handle_ioloop_stop only if the connection is
already closed to allow the asynchronous close operation to complete
gracefully.
+ - Pass error information from failed socket connection to user callbacks
+ on_open_error_callback and on_close_callback with result_code=-1.
0.10.0 2015-09-02
-----------------
diff --git a/pika/adapters/base_connection.py b/pika/adapters/base_connection.py
index 4bb436c..87afc99 100644
--- a/pika/adapters/base_connection.py
+++ b/pika/adapters/base_connection.py
@@ -10,7 +10,6 @@ import ssl
import pika.compat
from pika import connection
-from pika import exceptions
try:
SOL_TCP = socket.SOL_TCP
@@ -52,10 +51,10 @@ class BaseConnection(connection.Connection):
:param pika.connection.Parameters parameters: Connection parameters
:param method on_open_callback: Method to call on connection open
- :param on_open_error_callback: Method to call if the connection cant
- be opened
- :type on_open_error_callback: method
- :param method on_close_callback: Method to call on connection close
+ :param method on_open_error_callback: Called if the connection can't
+ be established: on_open_error_callback(connection, str|exception)
+ :param method on_close_callback: Called when the connection is closed:
+ on_close_callback(connection, reason_code, reason_text)
:param object ioloop: IOLoop object to use
:param bool stop_ioloop_on_close: Call ioloop.stop() if disconnected
:raises: RuntimeError
@@ -152,38 +151,9 @@ class BaseConnection(connection.Connection):
def _adapter_disconnect(self):
"""Invoked if the connection is being told to disconnect"""
try:
- self._remove_heartbeat()
self._cleanup_socket()
- self._check_state_on_disconnect()
finally:
- # Ensure proper cleanup since _check_state_on_disconnect may raise
- # an exception
self._handle_ioloop_stop()
- self._init_connection_state()
-
- def _check_state_on_disconnect(self):
- """Checks to see if we were in opening a connection with RabbitMQ when
- we were disconnected and raises exceptions for the anticipated
- exception types.
-
- """
- if self.connection_state == self.CONNECTION_PROTOCOL:
- LOGGER.error('Incompatible Protocol Versions')
- raise exceptions.IncompatibleProtocolError
- elif self.connection_state == self.CONNECTION_START:
- LOGGER.error("Socket closed while authenticating indicating a "
- "probable authentication error")
- raise exceptions.ProbableAuthenticationError
- elif self.connection_state == self.CONNECTION_TUNE:
- LOGGER.error("Socket closed while tuning the connection indicating "
- "a probable permission error when accessing a virtual "
- "host")
- raise exceptions.ProbableAccessDeniedError
- elif self.is_open:
- LOGGER.warning("Socket closed when connection was open")
- elif not self.is_closed and not self.is_closing:
- LOGGER.warning('Unknown state on disconnect: %i',
- self.connection_state)
def _cleanup_socket(self):
"""Close the socket cleanly"""
@@ -272,11 +242,14 @@ class BaseConnection(connection.Connection):
"""
if not error_value:
return None
+
if hasattr(error_value, 'errno'): # Python >= 2.6
return error_value.errno
- elif error_value is not None:
+ else:
+ # TODO: this doesn't look right; error_value.args[0] ??? Could
+ # probably remove this code path since pika doesn't test against
+ # Python 2.5
return error_value[0] # Python <= 2.5
- return None
def _flush_outbound(self):
"""Have the state manager schedule the necessary I/O.
@@ -291,21 +264,6 @@ class BaseConnection(connection.Connection):
# called), etc., etc., etc.
self._manage_event_state()
- def _handle_disconnect(self):
- """Called internally when the socket is disconnected already
- """
- try:
- self._adapter_disconnect()
- except (exceptions.ProbableAccessDeniedError,
- exceptions.ProbableAuthenticationError) as error:
- LOGGER.error('disconnected due to %r', error)
- self.callbacks.process(0,
- self.ON_CONNECTION_ERROR,
- self,
- self, error)
-
- self._on_connection_closed(None, True)
-
def _handle_ioloop_stop(self):
"""Invoked when the connection is closed to determine if the IOLoop
should be stopped or not.
@@ -323,9 +281,10 @@ class BaseConnection(connection.Connection):
:param int|object error_value: The inbound error
"""
- if 'timed out' in str(error_value):
- raise socket.timeout
+ # TODO: doesn't seem right: docstring defines error_value as int|object,
+ # but _get_error_code expects a falsie or an exception-like object
error_code = self._get_error_code(error_value)
+
if not error_code:
LOGGER.critical("Tried to handle an error where no error existed")
return
@@ -342,6 +301,8 @@ class BaseConnection(connection.Connection):
elif self.params.ssl and isinstance(error_value, ssl.SSLError):
if error_value.args[0] == ssl.SSL_ERROR_WANT_READ:
+ # TODO: doesn't seem right: this logic updates event state, but
+ # the logic at the bottom unconditionaly disconnects anyway.
self.event_state = self.READ
elif error_value.args[0] == ssl.SSL_ERROR_WANT_WRITE:
self.event_state = self.WRITE
@@ -353,20 +314,21 @@ class BaseConnection(connection.Connection):
LOGGER.error("Socket Error: %s", error_code)
# Disconnect from our IOLoop and let Connection know what's up
- self._handle_disconnect()
+ self._on_terminate(-1, repr(error_value))
def _handle_timeout(self):
"""Handle a socket timeout in read or write.
We don't do anything in the non-blocking handlers because we
only have the socket in a blocking state during connect."""
- pass
+ LOGGER.warning("Unexpected socket timeout")
def _handle_events(self, fd, events, error=None, write_only=False):
"""Handle IO/Event loop events, processing them.
:param int fd: The file descriptor for the events
:param int events: Events from the IO/Event loop
- :param int error: Was an error specified
+ :param int error: Was an error specified; TODO none of the current
+ adapters appear to be able to pass the `error` arg - is it needed?
:param bool write_only: Only handle write events
"""
@@ -382,10 +344,11 @@ class BaseConnection(connection.Connection):
self._handle_read()
if (self.socket and write_only and (events & self.READ) and
- (events & self.ERROR)):
- LOGGER.error('BAD libc: Write-Only but Read+Error. '
+ (events & self.ERROR)):
+ error_msg = ('BAD libc: Write-Only but Read+Error. '
'Assume socket disconnected.')
- self._handle_disconnect()
+ LOGGER.error(error_msg)
+ self._on_terminate(-1, error_msg)
if self.socket and (events & self.ERROR):
LOGGER.error('Error event %r, %r', events, error)
@@ -427,7 +390,7 @@ class BaseConnection(connection.Connection):
# Empty data, should disconnect
if not data or data == 0:
LOGGER.error('Read empty data, calling disconnect')
- return self._handle_disconnect()
+ return self._on_terminate(-1, "EOF")
# Pass the data into our top level frame dispatching method
self._on_data_available(data)
diff --git a/pika/adapters/blocking_connection.py b/pika/adapters/blocking_connection.py
index 1877dd6..f6881cd 100644
--- a/pika/adapters/blocking_connection.py
+++ b/pika/adapters/blocking_connection.py
@@ -394,7 +394,7 @@ class BlockingConnection(object): # pylint: disable=R0902
returning true when it's time to stop processing.
Their results are OR'ed together.
"""
- if self._impl.is_closed:
+ if self.is_closed:
raise exceptions.ConnectionClosed()
# Conditions for terminating the processing loop:
@@ -404,38 +404,35 @@ class BlockingConnection(object): # pylint: disable=R0902
# OR
# empty outbound buffer and any waiter is ready
is_done = (lambda:
- self._closed_result.ready or
- (not self._impl.outbound_buffer and
- (not waiters or any(ready() for ready in waiters))))
+ self._closed_result.ready or
+ (not self._impl.outbound_buffer and
+ (not waiters or any(ready() for ready in waiters))))
# Process I/O until our completion condition is satisified
while not is_done():
self._impl.ioloop.poll()
self._impl.ioloop.process_timeouts()
- if self._closed_result.ready:
+ if self._open_error_result.ready or self._closed_result.ready:
try:
- result = self._closed_result.value
- if result.reason_code not in [0, 200]:
- LOGGER.critical('Connection close detected; result=%r',
- result)
- raise exceptions.ConnectionClosed(result.reason_code,
- result.reason_text)
- elif not self._user_initiated_close:
- # NOTE: unfortunately, upon socket error, on_close_callback
- # presently passes reason_code=0, so we don't detect that as
- # an error
+ if not self._user_initiated_close:
if self._open_error_result.ready:
maybe_exception = self._open_error_result.value.error
- LOGGER.critical('Connection open failed - %r',
- maybe_exception)
+ LOGGER.error('Connection open failed - %r',
+ maybe_exception)
if isinstance(maybe_exception, Exception):
raise maybe_exception
-
- LOGGER.critical('Connection close detected')
- raise exceptions.ConnectionClosed()
+ else:
+ raise exceptions.ConnectionClosed(maybe_exception)
+ else:
+ result = self._closed_result.value
+ LOGGER.error('Connection close detected; result=%r',
+ result)
+ raise exceptions.ConnectionClosed(result.reason_code,
+ result.reason_text)
else:
- LOGGER.info('Connection closed; result=%r', result)
+ LOGGER.info('Connection closed; result=%r',
+ self._closed_result.value)
finally:
self._cleanup()
@@ -732,7 +729,8 @@ class BlockingConnection(object): # pylint: disable=R0902
@property
def is_closing(self):
"""
- Returns a boolean reporting the current connection state.
+ Returns True if connection is in the process of closing due to
+ client-initiated `close` request, but closing is not yet complete.
"""
return self._impl.is_closing
@@ -1143,7 +1141,8 @@ class BlockingChannel(object): # pylint: disable=R0904,R0902
@property
def is_closing(self):
- """Returns True if the channel is closing.
+ """Returns True if client-initiated closing of the channel is in
+ progress.
:rtype: bool
@@ -1173,7 +1172,7 @@ class BlockingChannel(object): # pylint: disable=R0904,R0902
returning true when it's time to stop processing.
Their results are OR'ed together.
"""
- if self._impl.is_closed:
+ if self.is_closed:
raise exceptions.ChannelClosed()
if not waiters:
diff --git a/pika/adapters/libev_connection.py b/pika/adapters/libev_connection.py
index ed3ec81..ce491c9 100644
--- a/pika/adapters/libev_connection.py
+++ b/pika/adapters/libev_connection.py
@@ -84,9 +84,10 @@ class LibevConnection(BaseConnection):
:param pika.connection.Parameters parameters: Connection parameters
:param on_open_callback: The method to call when the connection is open
:type on_open_callback: method
- :param on_open_error_callback: Method to call if the connection cannot
- be opened
- :type on_open_error_callback: method
+ :param method on_open_error_callback: Called if the connection can't
+ be established: on_open_error_callback(connection, str|exception)
+ :param method on_close_callback: Called when the connection is closed:
+ on_close_callback(connection, reason_code, reason_text)
:param bool stop_ioloop_on_close: Call ioloop.stop() if disconnected
:param custom_ioloop: Override using the default IOLoop in libev
:param on_signal_callback: Method to call if SIGINT or SIGTERM occur
diff --git a/pika/adapters/select_connection.py b/pika/adapters/select_connection.py
index 64e2bbe..645cb38 100644
--- a/pika/adapters/select_connection.py
+++ b/pika/adapters/select_connection.py
@@ -75,10 +75,10 @@ class SelectConnection(BaseConnection):
:param pika.connection.Parameters parameters: Connection parameters
:param method on_open_callback: Method to call on connection open
- :param on_open_error_callback: Method to call if the connection cant
- be opened
- :type on_open_error_callback: method
- :param method on_close_callback: Method to call on connection close
+ :param method on_open_error_callback: Called if the connection can't
+ be established: on_open_error_callback(connection, str|exception)
+ :param method on_close_callback: Called when the connection is closed:
+ on_close_callback(connection, reason_code, reason_text)
:param bool stop_ioloop_on_close: Call ioloop.stop() if disconnected
:param custom_ioloop: Override using the global IOLoop in Tornado
:raises: RuntimeError
diff --git a/pika/adapters/tornado_connection.py b/pika/adapters/tornado_connection.py
index 1c5c607..ce407d1 100644
--- a/pika/adapters/tornado_connection.py
+++ b/pika/adapters/tornado_connection.py
@@ -39,9 +39,10 @@ class TornadoConnection(base_connection.BaseConnection):
:param pika.connection.Parameters parameters: Connection parameters
:param on_open_callback: The method to call when the connection is open
:type on_open_callback: method
- :param on_open_error_callback: Method to call if the connection cant
- be opened
- :type on_open_error_callback: method
+ :param method on_open_error_callback: Called if the connection can't
+ be established: on_open_error_callback(connection, str|exception)
+ :param method on_close_callback: Called when the connection is closed:
+ on_close_callback(connection, reason_code, reason_text)
:param bool stop_ioloop_on_close: Call ioloop.stop() if disconnected
:param custom_ioloop: Override using the global IOLoop in Tornado
@@ -55,7 +56,7 @@ class TornadoConnection(base_connection.BaseConnection):
def _adapter_connect(self):
"""Connect to the remote socket, adding the socket to the IOLoop if
- connected.
+ connected.
:rtype: bool
diff --git a/pika/adapters/twisted_connection.py b/pika/adapters/twisted_connection.py
index 2ee65b2..62e595c 100644
--- a/pika/adapters/twisted_connection.py
+++ b/pika/adapters/twisted_connection.py
@@ -105,6 +105,9 @@ class TwistedChannel(object):
try:
consumer_tag = self.__channel.basic_consume(*args, **kwargs)
+ # TODO this except without types would suppress system-exiting
+ # exceptions, such as SystemExit and KeyboardInterrupt. It should be at
+ # least `except Exception` and preferably more specific.
except:
return defer.fail()
@@ -163,6 +166,9 @@ class TwistedChannel(object):
try:
method(*args, **kwargs)
+ # TODO this except without types would suppress system-exiting
+ # exceptions, such as SystemExit and KeyboardInterrupt. It should be
+ # at least `except Exception` and preferably more specific.
except:
return defer.fail()
return d
@@ -300,13 +306,6 @@ class TwistedConnection(base_connection.BaseConnection):
self.ioloop.remove_handler(None)
self._cleanup_socket()
- def _handle_disconnect(self):
- """Do not stop the reactor, this would cause the entire process to exit,
- just fire the disconnect callbacks
-
- """
- self._on_connection_closed(None, True)
-
def _on_connected(self):
"""Call superclass and then update the event state to flush the outgoing
frame out. Commit 50d842526d9f12d32ad9f3c4910ef60b8c301f59 removed a
@@ -339,7 +338,7 @@ class TwistedConnection(base_connection.BaseConnection):
if not reason.check(error.ConnectionDone):
log.err(reason)
- self._handle_disconnect()
+ self._on_terminate(-1, str(reason))
def doRead(self):
self._handle_read()
diff --git a/pika/channel.py b/pika/channel.py
index 5c67c49..4af9a6e 100644
--- a/pika/channel.py
+++ b/pika/channel.py
@@ -29,7 +29,7 @@ class Channel(object):
CLOSED = 0
OPENING = 1
OPEN = 2
- CLOSING = 3
+ CLOSING = 3 # client-initiated close in progress
_ON_CHANNEL_CLEANUP_CB_KEY = '_on_channel_cleanup'
@@ -615,7 +615,8 @@ class Channel(object):
@property
def is_closing(self):
- """Returns True if the channel is closing.
+ """Returns True if client-initiated closing of the channel is in
+ progress.
:rtype: bool
diff --git a/pika/connection.py b/pika/connection.py
index 6f59cd0..6288628 100644
--- a/pika/connection.py
+++ b/pika/connection.py
@@ -4,6 +4,7 @@ import sys
import collections
import logging
import math
+import numbers
import platform
import threading
import warnings
@@ -586,7 +587,7 @@ class Connection(object):
CONNECTION_START = 3
CONNECTION_TUNE = 4
CONNECTION_OPEN = 5
- CONNECTION_CLOSING = 6
+ CONNECTION_CLOSING = 6 # client-initiated close in progress
def __init__(self,
parameters=None,
@@ -602,9 +603,10 @@ class Connection(object):
:param pika.connection.Parameters parameters: Connection parameters
:param method on_open_callback: Called when the connection is opened
- :param method on_open_error_callback: Called if the connection cant
- be opened
- :param method on_close_callback: Called when the connection is closed
+ :param method on_open_error_callback: Called if the connection can't
+ be established: on_open_error_callback(connection, str|exception)
+ :param method on_close_callback: Called when the connection is closed:
+ on_close_callback(connection, reason_code, reason_text)
"""
self._write_lock = threading.Lock()
@@ -770,8 +772,9 @@ class Connection(object):
self.remaining_connection_attempts -= 1
LOGGER.warning('Could not connect, %i attempts left',
self.remaining_connection_attempts)
- if self.remaining_connection_attempts:
+ if self.remaining_connection_attempts > 0:
LOGGER.info('Retrying in %i seconds', self.params.retry_delay)
+ # TODO: remove timeout if connection is closed before timer fires
self.add_timeout(self.params.retry_delay, self.connect)
else:
self.callbacks.process(0, self.ON_CONNECTION_ERROR, self, self,
@@ -813,7 +816,8 @@ class Connection(object):
@property
def is_closing(self):
"""
- Returns a boolean reporting the current connection state.
+ Returns True if connection is in the process of closing due to
+ client-initiated `close` request, but closing is not yet complete.
"""
return self.connection_state == self.CONNECTION_CLOSING
@@ -1160,6 +1164,13 @@ class Connection(object):
# Our starting point once connected, first frame received
self._add_connection_start_callback()
+ # Add a callback handler for the Broker telling us to disconnect.
+ # NOTE: As of RabbitMQ 3.6.0, RabbitMQ broker may send Connection.Close
+ # to signal error during connection setup (and wait a longish time
+ # before closing the TCP/IP stream). Earlier RabbitMQ versions
+ # simply closed the TCP/IP stream.
+ self.callbacks.add(0, spec.Connection.Close, self._on_connection_close)
+
def _is_basic_deliver_frame(self, frame_value):
"""Returns true if the frame is a Basic.Deliver
@@ -1169,17 +1180,6 @@ class Connection(object):
"""
return isinstance(frame_value, spec.Basic.Deliver)
- def _is_connection_close_frame(self, value):
- """Returns true if the frame is a Connection.Close frame.
-
- :param pika.frame.Method value: The frame to check
- :rtype: bool
-
- """
- if not value:
- return False
- return isinstance(value.method, spec.Connection.Close)
-
def _is_method_frame(self, value):
"""Returns true if the frame is a method frame.
@@ -1250,32 +1250,29 @@ class Connection(object):
# Start the communication with the RabbitMQ Broker
self._send_frame(frame.ProtocolHeader())
- def _on_connection_closed(self, method_frame, from_adapter=False):
- """Called when the connection is closed remotely. The from_adapter value
- will be true if the connection adapter has been disconnected from
- the broker and the method was invoked directly instead of by receiving
- a Connection.Close frame.
+ def _on_connection_close(self, method_frame):
+ """Called when the connection is closed remotely via Connection.Close
+ frame from broker.
- :param pika.frame.Method: The Connection.Close frame
- :param bool from_adapter: Called by the connection adapter
+ :param pika.frame.Method method_frame: The Connection.Close frame
"""
- if method_frame and self._is_connection_close_frame(method_frame):
- self.closing = (method_frame.method.reply_code,
- method_frame.method.reply_text)
+ LOGGER.debug('_on_connection_close: frame=%s', method_frame)
- # Save the codes because self.closing gets reset by _adapter_disconnect
- reply_code, reply_text = self.closing
+ self.closing = (method_frame.method.reply_code,
+ method_frame.method.reply_text)
- # Stop the heartbeat checker if it exists
- self._remove_heartbeat()
+ self._on_terminate(self.closing[0], self.closing[1])
- # If this did not come from the connection adapter, close the socket
- if not from_adapter:
- self._adapter_disconnect()
+ def _on_connection_close_ok(self, method_frame):
+ """Called when Connection.CloseOk is received from remote.
- # Invoke a method frame neutral close
- self._on_disconnect(reply_code, reply_text)
+ :param pika.frame.Method method_frame: The Connection.CloseOk frame
+
+ """
+ LOGGER.debug('_on_connection_close_ok: frame=%s', method_frame)
+
+ self._on_terminate(self.closing[0], self.closing[1])
def _on_connection_error(self, connection_unused, error_message=None):
"""Default behavior when the connecting connection can not connect.
@@ -1294,9 +1291,6 @@ class Connection(object):
"""
self.known_hosts = method_frame.method.known_hosts
- # Add a callback handler for the Broker telling us to disconnect
- self.callbacks.add(0, spec.Connection.Close, self._on_connection_closed)
-
# We're now connected at the AMQP level
self._set_connection_state(self.CONNECTION_OPEN)
@@ -1368,27 +1362,89 @@ class Connection(object):
self._trim_frame_buffer(consumed_count)
self._process_frame(frame_value)
- def _on_disconnect(self, reply_code, reply_text):
- """Invoke passing in the reply_code and reply_text from internal
- methods to the adapter. Called from on_connection_closed and Heartbeat
- timeouts.
-
- :param str reply_code: The numeric close code
- :param str reply_text: The text close reason
+ def _on_terminate(self, reason_code, reason_text):
+ """Terminate the connection and notify registered ON_CONNECTION_ERROR
+ and/or ON_CONNECTION_CLOSED callbacks
+ :param integer reason_code: HTTP error code for AMQP-reported closures
+ or -1 for other errors (such as socket errors)
+ :param str reason_text: human-readable text message describing the error
"""
- LOGGER.warning('Disconnected from RabbitMQ at %s:%i (%s): %s',
- self.params.host, self.params.port, reply_code,
- reply_text)
+ LOGGER.warning(
+ 'Disconnected from RabbitMQ at %s:%i (%s): %s',
+ self.params.host, self.params.port, reason_code,
+ reason_text)
+
+ if not isinstance(reason_code, numbers.Integral):
+ raise TypeError('reason_code must be an integer, but got %r'
+ % (reason_code,))
+
+ # Stop the heartbeat checker if it exists
+ self._remove_heartbeat()
+
+ # Remove connection management callbacks
+ # TODO: This call was moved here verbatim from legacy code and the
+ # following doesn't seem to be right: `Connection.Open` here is
+ # unexpected, we don't appear to ever register it, and the broker
+ # shouldn't be sending `Connection.Open` to us, anyway.
+ self._remove_callbacks(0, [spec.Connection.Close, spec.Connection.Start,
+ spec.Connection.Open])
+
+ # Close the socket
+ self._adapter_disconnect()
+
+ # Determine whether this was an error during connection setup
+ connection_error = None
+
+ if self.connection_state == self.CONNECTION_PROTOCOL:
+ LOGGER.error('Incompatible Protocol Versions')
+ connection_error = exceptions.IncompatibleProtocolError(
+ reason_code,
+ reason_text)
+ elif self.connection_state == self.CONNECTION_START:
+ LOGGER.error('Connection closed while authenticating indicating a '
+ 'probable authentication error')
+ connection_error = exceptions.ProbableAuthenticationError(
+ reason_code,
+ reason_text)
+ elif self.connection_state == self.CONNECTION_TUNE:
+ LOGGER.error('Connection closed while tuning the connection '
+ 'indicating a probable permission error when '
+ 'accessing a virtual host')
+ connection_error = exceptions.ProbableAccessDeniedError(
+ reason_code,
+ reason_text)
+ elif self.connection_state not in [self.CONNECTION_OPEN,
+ self.CONNECTION_CLOSED,
+ self.CONNECTION_CLOSING]:
+ LOGGER.warning('Unexpected connection state on disconnect: %i',
+ self.connection_state)
+
+ # Transition to closed state
self._set_connection_state(self.CONNECTION_CLOSED)
+
+ # Inform our channel proxies
for channel in dictkeys(self._channels):
if channel not in self._channels:
continue
- method_frame = frame.Method(channel, spec.Channel.Close(reply_code,
- reply_text))
+ method_frame = frame.Method(channel, spec.Channel.Close(
+ reason_code,
+ reason_text))
self._channels[channel]._on_close(method_frame)
- self._process_connection_closed_callbacks(reply_code, reply_text)
- self._remove_connection_callbacks()
+
+ # Inform interested parties
+ if connection_error is not None:
+ LOGGER.error('Connection setup failed due to %r', connection_error)
+ self.callbacks.process(0,
+ self.ON_CONNECTION_ERROR,
+ self, self,
+ connection_error)
+
+ self.callbacks.process(0, self.ON_CONNECTION_CLOSED, self, self,
+ reason_code, reason_text)
+
+ # Reset connection properties
+ self._init_connection_state()
def _process_callbacks(self, frame_value):
"""Process the callbacks for the frame if the frame is a method frame
@@ -1407,17 +1463,6 @@ class Connection(object):
return True
return False
- def _process_connection_closed_callbacks(self, reason_code, reason_text):
- """Process any callbacks that should be called when the connection is
- closed.
-
- :param str reason_code: The numeric code from RabbitMQ for the close
- :param str reason_text: The text reason fro closing
-
- """
- self.callbacks.process(0, self.ON_CONNECTION_CLOSED, self, self,
- reason_code, reason_text)
-
def _process_frame(self, frame_value):
"""Process an inbound frame from the socket.
@@ -1489,11 +1534,6 @@ class Connection(object):
for method_frame in method_frames:
self._remove_callback(channel_number, method_frame)
- def _remove_connection_callbacks(self):
- """Remove all callbacks for the connection"""
- self._remove_callbacks(0, [spec.Connection.Close, spec.Connection.Start,
- spec.Connection.Open])
-
def _rpc(self, channel_number, method_frame,
callback_method=None,
acceptable_replies=None):
@@ -1530,7 +1570,7 @@ class Connection(object):
"""
self._rpc(0, spec.Connection.Close(reply_code, reply_text, 0, 0),
- self._on_connection_closed, [spec.Connection.CloseOk])
+ self._on_connection_close_ok, [spec.Connection.CloseOk])
def _send_connection_open(self):
"""Send a Connection.Open frame"""
diff --git a/pika/exceptions.py b/pika/exceptions.py
index c56f6a0..f219fbb 100644
--- a/pika/exceptions.py
+++ b/pika/exceptions.py
@@ -26,7 +26,8 @@ class AMQPConnectionError(AMQPError):
class IncompatibleProtocolError(AMQPConnectionError):
def __repr__(self):
- return 'The protocol returned by the server is not supported'
+ return ('The protocol returned by the server is not supported: %s' %
+ (self.args,))
class AuthenticationError(AMQPConnectionError):
@@ -40,14 +41,15 @@ class ProbableAuthenticationError(AMQPConnectionError):
def __repr__(self):
return ('Client was disconnected at a connection stage indicating a '
- 'probable authentication error')
+ 'probable authentication error: %s' % (self.args,))
class ProbableAccessDeniedError(AMQPConnectionError):
def __repr__(self):
return ('Client was disconnected at a connection stage indicating a '
- 'probable denial of access to the specified virtual host')
+ 'probable denial of access to the specified virtual host: %s' %
+ (self.args,))
class NoFreeChannels(AMQPConnectionError):
diff --git a/pika/heartbeat.py b/pika/heartbeat.py
index bbd5c1f..64026cc 100644
--- a/pika/heartbeat.py
+++ b/pika/heartbeat.py
@@ -115,10 +115,14 @@ class HeartbeatChecker(object):
self._idle_byte_intervals)
duration = self._max_idle_count * self._interval
text = HeartbeatChecker._STALE_CONNECTION % duration
+
+ # NOTE: this won't achieve the perceived effect of sending
+ # Connection.Close to broker, because the frame will only get buffered
+ # in memory before the next statement terminates the connection.
self._connection.close(HeartbeatChecker._CONNECTION_FORCED, text)
- self._connection._adapter_disconnect()
- self._connection._on_disconnect(HeartbeatChecker._CONNECTION_FORCED,
- text)
+
+ self._connection._on_terminate(HeartbeatChecker._CONNECTION_FORCED,
+ text)
@property
def _has_received_data(self):
| Pika drops Connection.Close from broker before Connection.Open-Ok
When running in Fedora with RabbitMQ 3.6.0, I noticed that the test async_adapter_tests.TestZ_AccessDenied times out. This test attempts to open a connection using a non-existent vhost. In this scenario, RabbitMQ eventually closes the socket connection (about 30 seconds after Connection.Open with the non-existent vhost from the client).
It turns out that RabbitMQ 3.6.0 sends a Connection.Close immediately after receiving a Connection.Open with a bad vhost from the client. However, there is a bug in pika: pika doesn't register a handler for Connection.Close from the broker until it receives Connection.Open-Ok, so pika just drops the Connection.Close frame in this scenario. Pika only detects the failure whenever RabbitMQ eventually decides to close the TCP connection.
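To make the ordering concrete, here is a toy model of the dropped-frame behaviour (hypothetical names, not pika's real API; the actual fix, visible in the patch above, registers `_on_connection_close` via `self.callbacks.add(0, spec.Connection.Close, ...)` inside `_init_connection_state`):

```python
# Toy model: a frame that arrives before its handler is registered is lost.
class CallbackManager(object):
    def __init__(self):
        self._handlers = {}

    def add(self, key, handler):
        self._handlers[key] = handler

    def process(self, key, *args):
        handler = self._handlers.get(key)
        if handler is None:
            print('dropped frame: %r %r' % (key, args))  # the bug
        else:
            handler(*args)

def on_connection_close(reply_code, reply_text):
    print('closed: %s %s' % (reply_code, reply_text))

callbacks = CallbackManager()

# Buggy ordering: Connection.Close arrives before Open-Ok, i.e. before the
# handler registration that used to happen in _on_connection_open.
callbacks.process('Connection.Close', 530, 'NOT_ALLOWED - vhost not found')

# Fixed ordering: register up front, during connection-state initialization.
callbacks.add('Connection.Close', on_connection_close)
callbacks.process('Connection.Close', 530, 'NOT_ALLOWED - vhost not found')
```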
pika needs to register its handler for Connection.Close before initiating the connection. `Connection._init_connection_state` might be a good place for it, right next to the call `self._add_connection_start_callback()`. | pika/pika | diff --git a/tests/unit/blocking_connection_tests.py b/tests/unit/blocking_connection_tests.py
index 70dc065..2888525 100644
--- a/tests/unit/blocking_connection_tests.py
+++ b/tests/unit/blocking_connection_tests.py
@@ -3,6 +3,10 @@
Tests for pika.adapters.blocking_connection.BlockingConnection
"""
+
+# Suppress pylint warnings concerning access to protected member
+# pylint: disable=W0212
+
import socket
from pika.exceptions import AMQPConnectionError
@@ -26,11 +30,11 @@ class BlockingConnectionMockTemplate(blocking_connection.BlockingConnection):
pass
class SelectConnectionTemplate(blocking_connection.SelectConnection):
- is_closed = False
- is_closing = False
- is_open = True
- outbound_buffer = []
- _channels = dict()
+ is_closed = None
+ is_closing = None
+ is_open = None
+ outbound_buffer = None
+ _channels = None
class BlockingConnectionTests(unittest.TestCase):
@@ -41,7 +45,7 @@ class BlockingConnectionTests(unittest.TestCase):
def test_constructor(self, select_connection_class_mock):
with mock.patch.object(blocking_connection.BlockingConnection,
'_process_io_for_connection_setup'):
- connection = blocking_connection.BlockingConnection('params')
+ blocking_connection.BlockingConnection('params')
select_connection_class_mock.assert_called_once_with(
parameters='params',
@@ -153,9 +157,7 @@ class BlockingConnectionTests(unittest.TestCase):
with self.assertRaises(pika.exceptions.ConnectionClosed) as cm:
connection._flush_output(lambda: False, lambda: True)
- self.assertSequenceEqual(
- cm.exception.args,
- ())
+ self.assertSequenceEqual(cm.exception.args, (200, 'ok'))
@patch.object(blocking_connection, 'SelectConnection',
spec_set=SelectConnectionTemplate)
@@ -190,7 +192,7 @@ class BlockingConnectionTests(unittest.TestCase):
blocking_connection.BlockingConnection,
'_flush_output',
spec_set=blocking_connection.BlockingConnection._flush_output):
- channel = connection.channel()
+ connection.channel()
@patch.object(blocking_connection, 'SelectConnection',
spec_set=SelectConnectionTemplate)
diff --git a/tests/unit/connection_tests.py b/tests/unit/connection_tests.py
index 7ef7afe..ed8b038 100644
--- a/tests/unit/connection_tests.py
+++ b/tests/unit/connection_tests.py
@@ -2,6 +2,18 @@
Tests for pika.connection.Connection
"""
+
+# Suppress pylint warnings concerning access to protected member
+# pylint: disable=W0212
+
+# Suppress pylint messages concerning missing docstrings
+# pylint: disable=C0111
+
+# Suppress pylint messages concerning invalid method name
+# pylint: disable=C0103
+
+
+
try:
import mock
except ImportError:
@@ -17,6 +29,7 @@ except ImportError:
from pika import connection
from pika import channel
from pika import credentials
+from pika import exceptions
from pika import frame
from pika import spec
from pika.compat import xrange, urlencode
@@ -97,15 +110,128 @@ class ConnectionTests(unittest.TestCase):
self.assertFalse(on_close_ready.called,
'_on_close_ready should not have been called')
- def test_on_disconnect(self):
- """if connection isn't closing _on_close_ready should not be called"""
- self.connection._on_disconnect(0, 'Undefined')
+ def test_on_terminate_cleans_up(self):
+ """_on_terminate cleans up heartbeat, adapter, and channels"""
+ heartbeat = mock.Mock()
+ self.connection.heartbeat = heartbeat
+ self.connection._adapter_disconnect = mock.Mock()
+
+ self.connection._on_terminate(0, 'Undefined')
+
+ heartbeat.stop.assert_called_once_with()
+ self.connection._adapter_disconnect.assert_called_once_with()
+
self.assertTrue(self.channel._on_close.called,
'channel._on_close should have been called')
method_frame = self.channel._on_close.call_args[0][0]
self.assertEqual(method_frame.method.reply_code, 0)
self.assertEqual(method_frame.method.reply_text, 'Undefined')
+ self.assertTrue(self.connection.is_closed)
+
+ def test_on_terminate_invokes_connection_closed_callback(self):
+ """_on_terminate invokes `Connection.ON_CONNECTION_CLOSED` callbacks"""
+ self.connection.callbacks.process = mock.Mock(
+ wraps=self.connection.callbacks.process)
+
+ self.connection._adapter_disconnect = mock.Mock()
+
+ self.connection._on_terminate(1, 'error text')
+
+ self.connection.callbacks.process.assert_called_once_with(
+ 0, self.connection.ON_CONNECTION_CLOSED,
+ self.connection, self.connection,
+ 1, 'error text')
+
+ with self.assertRaises(AssertionError):
+ self.connection.callbacks.process.assert_any_call(
+ 0, self.connection.ON_CONNECTION_ERROR,
+ self.connection, self.connection,
+ mock.ANY)
+
+ def test_on_terminate_invokes_protocol_on_connection_error_and_closed(self):
+ """_on_terminate invokes `ON_CONNECTION_ERROR` with `IncompatibleProtocolError` and `ON_CONNECTION_CLOSED` callbacks"""
+ with mock.patch.object(self.connection.callbacks, 'process'):
+
+ self.connection._adapter_disconnect = mock.Mock()
+
+ self.connection._set_connection_state(
+ self.connection.CONNECTION_PROTOCOL)
+
+ self.connection._on_terminate(1, 'error text')
+
+ self.assertEqual(self.connection.callbacks.process.call_count, 2)
+
+ self.connection.callbacks.process.assert_any_call(
+ 0, self.connection.ON_CONNECTION_ERROR,
+ self.connection, self.connection,
+ mock.ANY)
+
+ conn_exc = self.connection.callbacks.process.call_args_list[0][0][4]
+ self.assertIs(type(conn_exc), exceptions.IncompatibleProtocolError)
+ self.assertSequenceEqual(conn_exc.args, [1, 'error text'])
+
+ self.connection.callbacks.process.assert_any_call(
+ 0, self.connection.ON_CONNECTION_CLOSED,
+ self.connection, self.connection,
+ 1, 'error text')
+
+ def test_on_terminate_invokes_auth_on_connection_error_and_closed(self):
+ """_on_terminate invokes `ON_CONNECTION_ERROR` with `ProbableAuthenticationError` and `ON_CONNECTION_CLOSED` callbacks"""
+ with mock.patch.object(self.connection.callbacks, 'process'):
+
+ self.connection._adapter_disconnect = mock.Mock()
+
+ self.connection._set_connection_state(
+ self.connection.CONNECTION_START)
+
+ self.connection._on_terminate(1, 'error text')
+
+ self.assertEqual(self.connection.callbacks.process.call_count, 2)
+
+ self.connection.callbacks.process.assert_any_call(
+ 0, self.connection.ON_CONNECTION_ERROR,
+ self.connection, self.connection,
+ mock.ANY)
+
+ conn_exc = self.connection.callbacks.process.call_args_list[0][0][4]
+ self.assertIs(type(conn_exc),
+ exceptions.ProbableAuthenticationError)
+ self.assertSequenceEqual(conn_exc.args, [1, 'error text'])
+
+ self.connection.callbacks.process.assert_any_call(
+ 0, self.connection.ON_CONNECTION_CLOSED,
+ self.connection, self.connection,
+ 1, 'error text')
+
+ def test_on_terminate_invokes_access_denied_on_connection_error_and_closed(
+ self):
+ """_on_terminate invokes `ON_CONNECTION_ERROR` with `ProbableAccessDeniedError` and `ON_CONNECTION_CLOSED` callbacks"""
+ with mock.patch.object(self.connection.callbacks, 'process'):
+
+ self.connection._adapter_disconnect = mock.Mock()
+
+ self.connection._set_connection_state(
+ self.connection.CONNECTION_TUNE)
+
+ self.connection._on_terminate(1, 'error text')
+
+ self.assertEqual(self.connection.callbacks.process.call_count, 2)
+
+ self.connection.callbacks.process.assert_any_call(
+ 0, self.connection.ON_CONNECTION_ERROR,
+ self.connection, self.connection,
+ mock.ANY)
+
+ conn_exc = self.connection.callbacks.process.call_args_list[0][0][4]
+ self.assertIs(type(conn_exc), exceptions.ProbableAccessDeniedError)
+ self.assertSequenceEqual(conn_exc.args, [1, 'error text'])
+
+ self.connection.callbacks.process.assert_any_call(
+ 0, self.connection.ON_CONNECTION_CLOSED,
+ self.connection, self.connection,
+ 1, 'error text')
+
@mock.patch('pika.connection.Connection.connect')
def test_new_conn_should_use_first_channel(self, connect):
"""_next_channel_number in new conn should always be 1"""
@@ -124,12 +250,12 @@ class ConnectionTests(unittest.TestCase):
"""make sure the callback adding works"""
self.connection.callbacks = mock.Mock(spec=self.connection.callbacks)
for test_method, expected_key in (
- (self.connection.add_backpressure_callback,
- self.connection.ON_CONNECTION_BACKPRESSURE),
- (self.connection.add_on_open_callback,
- self.connection.ON_CONNECTION_OPEN),
- (self.connection.add_on_close_callback,
- self.connection.ON_CONNECTION_CLOSED)):
+ (self.connection.add_backpressure_callback,
+ self.connection.ON_CONNECTION_BACKPRESSURE),
+ (self.connection.add_on_open_callback,
+ self.connection.ON_CONNECTION_OPEN),
+ (self.connection.add_on_close_callback,
+ self.connection.ON_CONNECTION_CLOSED)):
self.connection.callbacks.reset_mock()
test_method(callback_method)
self.connection.callbacks.add.assert_called_once_with(
@@ -234,12 +360,13 @@ class ConnectionTests(unittest.TestCase):
}
#Test Type Errors
for bad_field, bad_value in (
- ('host', 15672), ('port', '5672'), ('virtual_host', True),
- ('channel_max', '4'), ('frame_max', '5'), ('credentials', 'bad'),
- ('locale', 1), ('heartbeat_interval', '6'),
- ('socket_timeout', '42'), ('retry_delay', 'two'),
- ('backpressure_detection', 'true'), ('ssl', {'ssl': 'dict'}),
- ('ssl_options', True), ('connection_attempts', 'hello')):
+ ('host', 15672), ('port', '5672'), ('virtual_host', True),
+ ('channel_max', '4'), ('frame_max', '5'),
+ ('credentials', 'bad'), ('locale', 1),
+ ('heartbeat_interval', '6'), ('socket_timeout', '42'),
+ ('retry_delay', 'two'), ('backpressure_detection', 'true'),
+ ('ssl', {'ssl': 'dict'}), ('ssl_options', True),
+ ('connection_attempts', 'hello')):
bkwargs = copy.deepcopy(kwargs)
bkwargs[bad_field] = bad_value
self.assertRaises(TypeError, connection.ConnectionParameters,
@@ -371,20 +498,28 @@ class ConnectionTests(unittest.TestCase):
self.assertEqual(['ab'], list(self.connection.outbound_buffer))
self.assertEqual('hearbeat obj', self.connection.heartbeat)
- def test_on_connection_closed(self):
- """make sure connection close sends correct frames"""
+ def test_on_connection_close(self):
+ """make sure _on_connection_close terminates connection"""
method_frame = mock.Mock()
method_frame.method = mock.Mock(spec=spec.Connection.Close)
method_frame.method.reply_code = 1
method_frame.method.reply_text = 'hello'
- heartbeat = mock.Mock()
- self.connection.heartbeat = heartbeat
- self.connection._adapter_disconnect = mock.Mock()
- self.connection._on_connection_closed(method_frame, from_adapter=False)
+ self.connection._on_terminate = mock.Mock()
+ self.connection._on_connection_close(method_frame)
#Check
- self.assertTupleEqual((1, 'hello'), self.connection.closing)
- heartbeat.stop.assert_called_once_with()
- self.connection._adapter_disconnect.assert_called_once_with()
+ self.connection._on_terminate.assert_called_once_with(1, 'hello')
+
+ def test_on_connection_close_ok(self):
+ """make sure _on_connection_close_ok terminates connection"""
+ method_frame = mock.Mock()
+ method_frame.method = mock.Mock(spec=spec.Connection.CloseOk)
+ self.connection.closing = (1, 'bye')
+ self.connection._on_terminate = mock.Mock()
+
+ self.connection._on_connection_close_ok(method_frame)
+
+ #Check
+ self.connection._on_terminate.assert_called_once_with(1, 'bye')
@mock.patch('pika.frame.decode_frame')
def test_on_data_available(self, decode_frame):
diff --git a/tests/unit/heartbeat_tests.py b/tests/unit/heartbeat_tests.py
index 62aa777..4149eef 100644
--- a/tests/unit/heartbeat_tests.py
+++ b/tests/unit/heartbeat_tests.py
@@ -2,6 +2,16 @@
Tests for pika.heartbeat
"""
+
+# Suppress pylint warnings concerning access to protected member
+# pylint: disable=W0212
+
+# Suppress pylint messages concerning missing docstring
+# pylint: disable=C0111
+
+# Suppress pylint messages concerning invalid method name
+# pylint: disable=C0103
+
try:
import mock
except ImportError:
@@ -58,7 +68,7 @@ class HeartbeatTests(unittest.TestCase):
@mock.patch('pika.heartbeat.HeartbeatChecker._setup_timer')
def test_constructor_called_setup_timer(self, timer):
- obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)
+ heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)
timer.assert_called_once_with()
def test_active_true(self):
@@ -135,9 +145,8 @@ class HeartbeatTests(unittest.TestCase):
self.obj._interval)
self.mock_conn.close.assert_called_once_with(
self.obj._CONNECTION_FORCED, reason)
- self.mock_conn._on_disconnect.assert_called_once_with(
+ self.mock_conn._on_terminate.assert_called_once_with(
self.obj._CONNECTION_FORCED, reason)
- self.mock_conn._adapter_disconnect.assert_called_once_with()
def test_has_received_data_false(self):
self.obj._bytes_received = 100
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 11
} | 0.10 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"coverage",
"codecov",
"mock",
"nose",
"tornado",
"twisted",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y libev-dev"
],
"python": "3.5",
"reqs_path": [
"test-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
codecov==2.1.13
coverage==6.2
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
mock==5.2.0
nose==1.3.7
packaging==21.3
-e git+https://github.com/pika/pika.git@8be81a21d8b554ee9af4fae08907956e5b8b138f#egg=pika
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
requests==2.27.1
tomli==1.2.3
tornado==6.1
Twisted==15.3.0
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
zope.interface==5.5.2
| name: pika
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- codecov==2.1.13
- coverage==6.2
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- mock==5.2.0
- nose==1.3.7
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- requests==2.27.1
- tomli==1.2.3
- tornado==6.1
- twisted==15.3.0
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
- zope-interface==5.5.2
prefix: /opt/conda/envs/pika
| [
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_flush_output_server_initiated_no_error_close",
"tests/unit/connection_tests.py::ConnectionTests::test_on_connection_close",
"tests/unit/connection_tests.py::ConnectionTests::test_on_connection_close_ok",
"tests/unit/connection_tests.py::ConnectionTests::test_on_terminate_cleans_up",
"tests/unit/connection_tests.py::ConnectionTests::test_on_terminate_invokes_access_denied_on_connection_error_and_closed",
"tests/unit/connection_tests.py::ConnectionTests::test_on_terminate_invokes_auth_on_connection_error_and_closed",
"tests/unit/connection_tests.py::ConnectionTests::test_on_terminate_invokes_connection_closed_callback",
"tests/unit/connection_tests.py::ConnectionTests::test_on_terminate_invokes_protocol_on_connection_error_and_closed",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_close"
]
| [
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_connection_attempts_with_timeout",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_setup_timer_called"
]
| [
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_channel",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_close",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_constructor",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_flush_output",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_flush_output_server_initiated_error_close",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_flush_output_user_initiated_close",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_process_io_for_connection_setup",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_process_io_for_connection_setup_fails_with_open_error",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_sleep",
"tests/unit/connection_tests.py::ConnectionTests::test_add_callbacks",
"tests/unit/connection_tests.py::ConnectionTests::test_add_on_close_callback",
"tests/unit/connection_tests.py::ConnectionTests::test_add_on_open_error_callback",
"tests/unit/connection_tests.py::ConnectionTests::test_bad_type_connection_parameters",
"tests/unit/connection_tests.py::ConnectionTests::test_channel",
"tests/unit/connection_tests.py::ConnectionTests::test_client_properties",
"tests/unit/connection_tests.py::ConnectionTests::test_close_channels",
"tests/unit/connection_tests.py::ConnectionTests::test_close_closes_open_channels",
"tests/unit/connection_tests.py::ConnectionTests::test_close_ignores_closed_channels",
"tests/unit/connection_tests.py::ConnectionTests::test_connect",
"tests/unit/connection_tests.py::ConnectionTests::test_connect_reconnect",
"tests/unit/connection_tests.py::ConnectionTests::test_good_connection_parameters",
"tests/unit/connection_tests.py::ConnectionTests::test_new_conn_should_use_first_channel",
"tests/unit/connection_tests.py::ConnectionTests::test_next_channel_number_returns_lowest_unused",
"tests/unit/connection_tests.py::ConnectionTests::test_on_channel_cleanup_no_open_channels",
"tests/unit/connection_tests.py::ConnectionTests::test_on_channel_cleanup_non_closing_state",
"tests/unit/connection_tests.py::ConnectionTests::test_on_channel_cleanup_open_channels",
"tests/unit/connection_tests.py::ConnectionTests::test_on_close_ready_no_open_channels",
"tests/unit/connection_tests.py::ConnectionTests::test_on_close_ready_open_channels",
"tests/unit/connection_tests.py::ConnectionTests::test_on_connection_start",
"tests/unit/connection_tests.py::ConnectionTests::test_on_connection_tune",
"tests/unit/connection_tests.py::ConnectionTests::test_on_data_available",
"tests/unit/connection_tests.py::ConnectionTests::test_process_url",
"tests/unit/connection_tests.py::ConnectionTests::test_set_backpressure_multiplier",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_active_false",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_active_true",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_bytes_received_on_connection",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_false",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_true",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_connection",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_heartbeat_interval",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_called_setup_timer",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_received",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_sent",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_received",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_sent",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_idle_byte_intervals",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_default_initialization_max_idle_count",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_false",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_true",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_new_heartbeat_frame",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_received",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_increment_bytes",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_increment_no_bytes",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_missed_bytes",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_not_closed",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_send_heartbeat_frame",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_start_timer",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_update_counters",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_counter_incremented",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_send_frame_called",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_start_timer_active",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_start_timer_not_active",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_received",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_sent"
]
| []
| BSD 3-Clause "New" or "Revised" License | 366 | [
"pika/adapters/blocking_connection.py",
"pika/heartbeat.py",
"pika/adapters/select_connection.py",
"pika/exceptions.py",
"pika/adapters/tornado_connection.py",
"pika/adapters/twisted_connection.py",
"pika/channel.py",
"pika/connection.py",
"pika/adapters/libev_connection.py",
"pika/adapters/base_connection.py",
"docs/version_history.rst"
]
| [
"pika/adapters/blocking_connection.py",
"pika/heartbeat.py",
"pika/adapters/select_connection.py",
"pika/exceptions.py",
"pika/adapters/tornado_connection.py",
"pika/adapters/twisted_connection.py",
"pika/channel.py",
"pika/connection.py",
"pika/adapters/libev_connection.py",
"pika/adapters/base_connection.py",
"docs/version_history.rst"
]
|
joke2k__faker-318 | 807bf01588fd5dd9f680d69d1c6ddd13c255136f | 2016-01-04 15:33:52 | 883576c2d718ad7f604415e02a898f1f917d5b86 | diff --git a/README.rst b/README.rst
index 4dc04a86..0941dbda 100644
--- a/README.rst
+++ b/README.rst
@@ -263,13 +263,26 @@ How to use with factory-boy
title = factory.LazyAttribute(lambda x: faker.sentence(nb_words=4))
author_name = factory.LazyAttribute(lambda x: faker.name())
+Accessing the ``random`` instance
+----------------------------------
+
+The ``.random`` property on the generator returns the instance of ``random.Random``
+used to generate the values:
+
+.. code:: python
+
+ from faker import Faker
+ fake = Faker()
+ fake.random
+ fake.random.getstate()
+
Seeding the Generator
---------------------
When using Faker for unit testing, you will often want to generate the same
-data set. The generator offers a ``seed()`` method, which seeds the random
-number generator. Calling the same script twice with the same seed produces the
-same results.
+data set. For convenience, the generator also provides a ``seed()`` method, which
+seeds the random number generator. Calling the same script twice with the same
+seed produces the same results.
.. code:: python
@@ -280,8 +293,20 @@ same results.
print fake.name()
> Margaret Boehm
+The code above is equivalent to the following:
+
+.. code:: python
+
+ from faker import Faker
+ fake = Faker()
+    fake.random.seed(4321)
+
+ print fake.name()
+ > Margaret Boehm
+
Tests
-----
+
Installing dependencies:
.. code:: bash
diff --git a/docs/index.rst b/docs/index.rst
index 601d474d..7e96203c 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -264,13 +264,26 @@ How to use with factory-boy
title = factory.LazyAttribute(lambda x: faker.sentence(nb_words=4))
author_name = factory.LazyAttribute(lambda x: faker.name())
+Accessing the ``random`` instance
+----------------------------------
+
+The ``.random`` property on the generator returns the instance of ``random.Random``
+used to generate the values:
+
+.. code:: python
+
+ from faker import Faker
+ fake = Faker()
+ fake.random
+ fake.random.getstate()
+
Seeding the Generator
---------------------
When using Faker for unit testing, you will often want to generate the same
-data set. The generator offers a ``seed()`` method, which seeds the random
-number generator. Calling the same script twice with the same seed produces the
-same results.
+data set. For convenience, the generator also provides a ``seed()`` method, which
+seeds the random number generator. Calling the same script twice with the same
+seed produces the same results.
.. code:: python
@@ -281,6 +294,17 @@ same results.
print fake.name()
> Margaret Boehm
+The code above is equivalent to the following:
+
+.. code:: python
+
+ from faker import Faker
+ fake = Faker()
+    fake.random.seed(4321)
+
+ print fake.name()
+ > Margaret Boehm
+
Tests
-----
diff --git a/faker/generator.py b/faker/generator.py
index 95dfac2a..74034cb4 100644
--- a/faker/generator.py
+++ b/faker/generator.py
@@ -50,6 +50,10 @@ class Generator(object):
"""Returns added providers."""
return self.providers
+ @property
+ def random(self):
+ return random
+
def seed(self, seed=None):
"""Calls random.seed"""
random.seed(seed)
| Access to the Generator.random
It would be nice if one could gain access to the Generator.random variable so that one could save/set the state. I realize I can pass in the seed, but one currently has no way of gathering what the seed/state is if using the automatically generated seed. I don't want to use a fixed seed, but I do want to log/print the seed used _if_ the tests fail.
That is, I'd like to be able to do something like: `faker.generator.getstate()` (which gets the random state w/o exposing random) or `faker.generator.random.getstate()` (which gives access to the random variable)
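With the ``random`` property added in the patch above, that usage could look like the following sketch (the ``fake`` name is illustrative):

```python
from faker import Faker

fake = Faker()
state = fake.random.getstate()  # capture the RNG state, e.g. to log on test failure
fake.random.setstate(state)     # restore it later to reproduce the same run
```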
For now, the workaround appears to be to create a Faker object with your own Generator. | joke2k/faker | diff --git a/faker/tests/__init__.py b/faker/tests/__init__.py
index 6502a448..5dc22528 100644
--- a/faker/tests/__init__.py
+++ b/faker/tests/__init__.py
@@ -518,6 +518,12 @@ class GeneratorTestCase(unittest.TestCase):
def setUp(self):
self.generator = Generator()
+ @patch('random.getstate')
+ def test_get_random(self, mock_system_random):
+ random_instance = self.generator.random
+ random_instance.getstate()
+ self.assertFalse(mock_system_random.called)
+
@patch('random.seed')
def test_random_seed_doesnt_seed_system_random(self, mock_system_random):
self.generator.seed(0)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 3
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup==1.2.2
-e git+https://github.com/joke2k/faker.git@807bf01588fd5dd9f680d69d1c6ddd13c255136f#egg=fake_factory
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
six==1.17.0
tomli==2.2.1
| name: faker
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- six==1.17.0
- tomli==2.2.1
prefix: /opt/conda/envs/faker
| [
"faker/tests/__init__.py::GeneratorTestCase::test_get_random"
]
| []
| [
"faker/tests/__init__.py::ShimsTestCase::test_counter",
"faker/tests/__init__.py::UtilsTestCase::test_add_dicts",
"faker/tests/__init__.py::UtilsTestCase::test_choice_distribution",
"faker/tests/__init__.py::UtilsTestCase::test_find_available_locales",
"faker/tests/__init__.py::UtilsTestCase::test_find_available_providers",
"faker/tests/__init__.py::FactoryTestCase::test_add_provider_gives_priority_to_newly_added_provider",
"faker/tests/__init__.py::FactoryTestCase::test_command",
"faker/tests/__init__.py::FactoryTestCase::test_command_custom_provider",
"faker/tests/__init__.py::FactoryTestCase::test_date_time_between_dates",
"faker/tests/__init__.py::FactoryTestCase::test_date_time_between_dates_with_tzinfo",
"faker/tests/__init__.py::FactoryTestCase::test_date_time_this_period",
"faker/tests/__init__.py::FactoryTestCase::test_date_time_this_period_with_tzinfo",
"faker/tests/__init__.py::FactoryTestCase::test_datetime_safe",
"faker/tests/__init__.py::FactoryTestCase::test_datetimes_with_and_without_tzinfo",
"faker/tests/__init__.py::FactoryTestCase::test_documentor",
"faker/tests/__init__.py::FactoryTestCase::test_format_calls_formatter_on_provider",
"faker/tests/__init__.py::FactoryTestCase::test_format_transfers_arguments_to_formatter",
"faker/tests/__init__.py::FactoryTestCase::test_get_formatter_returns_callable",
"faker/tests/__init__.py::FactoryTestCase::test_get_formatter_returns_correct_formatter",
"faker/tests/__init__.py::FactoryTestCase::test_get_formatter_throws_exception_on_incorrect_formatter",
"faker/tests/__init__.py::FactoryTestCase::test_magic_call_calls_format",
"faker/tests/__init__.py::FactoryTestCase::test_magic_call_calls_format_with_arguments",
"faker/tests/__init__.py::FactoryTestCase::test_no_words_paragraph",
"faker/tests/__init__.py::FactoryTestCase::test_no_words_sentence",
"faker/tests/__init__.py::FactoryTestCase::test_parse_returns_same_string_when_it_contains_no_curly_braces",
"faker/tests/__init__.py::FactoryTestCase::test_parse_returns_string_with_tokens_replaced_by_formatters",
"faker/tests/__init__.py::FactoryTestCase::test_password",
"faker/tests/__init__.py::FactoryTestCase::test_prefix_suffix_always_string",
"faker/tests/__init__.py::FactoryTestCase::test_random_element",
"faker/tests/__init__.py::FactoryTestCase::test_slugify",
"faker/tests/__init__.py::FactoryTestCase::test_timezone_conversion",
"faker/tests/__init__.py::FactoryTestCase::test_us_ssn_valid",
"faker/tests/__init__.py::GeneratorTestCase::test_random_seed_doesnt_seed_system_random"
]
| []
| MIT License | 367 | [
"README.rst",
"docs/index.rst",
"faker/generator.py"
]
| [
"README.rst",
"docs/index.rst",
"faker/generator.py"
]
|
|
ntoll__uflash-9 | 28bc481b67d67cc20aacc1191c87ac1e4c59bb34 | 2016-01-04 21:42:12 | 28bc481b67d67cc20aacc1191c87ac1e4c59bb34 | ntoll: I'll try to get this reviewed and merged this evening.
funkyHat: I've pushed another branch which fixes the test coverage (although one of the new tests is a bit of a beast...)
Also removed `uflash help` as mentioned above: https://github.com/funkyHat/uflash/tree/unhexlify-plus
ntoll: Can you merge the new branch into this one and I can do a final review..?
funkyHat: Done. Want me to squash it into a single commit?
ntoll: Please do... :-) | diff --git a/uflash.py b/uflash.py
index 61fe83d..f5cd8a6 100644
--- a/uflash.py
+++ b/uflash.py
@@ -3,6 +3,7 @@
This module contains functions for turning a Python script into a .hex file
and flashing it onto a BBC micro:bit.
"""
+import argparse
import sys
import os
import struct
@@ -19,8 +20,6 @@ _SCRIPT_ADDR = 0x3e000
_HELP_TEXT = """
Flash Python onto the BBC micro:bit
-Usage: uflash [path_to_script.py] [path_to_microbit]
-
If no path to the micro:bit is provided uflash will attempt to autodetect the
correct path to the device. If no path to the Python script is provided uflash
will flash the unmodified MicroPython firmware onto the device.
@@ -72,6 +71,23 @@ def hexlify(script):
return '\n'.join(output)
+def unhexlify(blob):
+ """
+ Takes a hexlified script and turns it back into Python code.
+ """
+ lines = blob.split('\n')[1:]
+ output = []
+ for line in lines:
+ # Discard the address, length etc. and reverse the hexlification
+ output.append(binascii.unhexlify(line[9:-2]))
+ # Strip off "MP<size>" from the start
+ output[0] = output[0][4:]
+ # and strip any null bytes from the end
+ output[-1] = output[-1].strip(b'\x00')
+ script = b''.join(output)
+ return script
+
+
def embed_hex(runtime_hex, python_hex=None):
"""
Given a string representing the MicroPython runtime hex, will embed a
@@ -98,6 +114,28 @@ def embed_hex(runtime_hex, python_hex=None):
return '\n'.join(embedded_list) + '\n'
+def extract_script(embedded_hex):
+ """
+ Given a hex file containing the MicroPython runtime and an embedded Python
+ script, will extract the original script.
+
+ Returns a string containing the original embedded script.
+ """
+ hex_lines = embedded_hex.split('\n')
+ # Find the marker in the hex that comes just before the script
+ try:
+ start_line = hex_lines.index(':08058000193901005D150000AE') + 1
+ except ValueError as e:
+ raise ValueError('Bad input hex file:', e)
+ # Recombine the lines after that, but leave out the last 3 lines
+ blob = '\n'.join(hex_lines[start_line:-3])
+ if blob == '':
+ # If the result is the empty string, there was no embedded script
+ return b''
+ # Pass the extracted hex through unhexlify
+ return unhexlify(blob)
+
+
def find_microbit():
"""
Returns a path on the filesystem that represents the plugged in BBC
@@ -179,6 +217,8 @@ def flash(path_to_python=None, path_to_microbit=None):
# Grab the Python script (if needed).
python_hex = ''
if path_to_python:
+ if not path_to_python.endswith('.py'):
+ raise ValueError('Python files must end in ".py".')
with open(path_to_python, 'rb') as python_script:
python_hex = hexlify(python_script.read())
# Generate the resulting hex file.
@@ -195,6 +235,20 @@ def flash(path_to_python=None, path_to_microbit=None):
raise IOError('Unable to find micro:bit. Is it plugged in?')
+def extract(path_to_hex=None, output_path=None):
+ """
+ Given a hex file this function will attempt to extract the embedded script
+ from it and save it either to output_path or stdout
+ """
+ with open(path_to_hex, 'r') as hex_file:
+ python_script = extract_script(hex_file.read())
+ if output_path is not None:
+ with open(output_path, 'w') as output_file:
+ output_file.write(python_script)
+ else:
+ print(python_script.decode('utf-8'))
+
+
def main(argv=None):
"""
Entry point for the command line tool 'uflash'.
@@ -210,20 +264,21 @@ def main(argv=None):
"""
if not argv:
argv = sys.argv[1:]
- arg_len = len(argv)
try:
- if arg_len == 0:
- flash()
- elif arg_len >= 1:
- if argv[0] == 'help':
- print(_HELP_TEXT)
- return
- if not argv[0].lower().endswith('.py'):
- raise ValueError('Python files must end in ".py".')
- if arg_len == 1:
- flash(argv[0])
- elif arg_len > 1:
- flash(argv[0], argv[1])
+ parser = argparse.ArgumentParser(description=_HELP_TEXT)
+ parser.add_argument('source', nargs='?', default=None)
+ parser.add_argument('target', nargs='?', default=None)
+ parser.add_argument('-e', '--extract',
+ action='store_true',
+ help="""Extract python source from a hex file
+ instead of creating the hex file""",
+ )
+ args = parser.parse_args(argv)
+
+ if args.extract:
+ extract(args.source, args.target)
+ else:
+ flash(args.source, args.target)
except Exception as ex:
# The exception of no return. Print the exception information.
print(ex)
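For illustration, the new extract path could be exercised like this (a sketch based on the ``extract()`` function defined above; the file names are hypothetical):

```python
import uflash

# With no output path the recovered script is printed to stdout;
# with one, it is written to that file.
uflash.extract('firmware.hex')
uflash.extract('firmware.hex', 'recovered.py')
```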
| Add ability to extract Python code from a .hex file.
Because sometimes, you don't save the source file... ;-) | ntoll/uflash | diff --git a/tests/test_uflash.py b/tests/test_uflash.py
index 5434084..eb0c87c 100644
--- a/tests/test_uflash.py
+++ b/tests/test_uflash.py
@@ -39,6 +39,15 @@ def test_hexlify():
assert len(lines) == 5
+def test_unhexlify():
+ """
+ Ensure that we can get the script back out using unhexlify
+ """
+ hexlified = uflash.hexlify(TEST_SCRIPT)
+ unhexlified = uflash.unhexlify(hexlified)
+ assert unhexlified == TEST_SCRIPT
+
+
def test_hexlify_empty_script():
"""
The function returns an empty string if the script is empty.
@@ -84,6 +93,32 @@ def test_embed_no_runtime():
assert ex.value.args[0] == 'MicroPython runtime hex required.'
+def test_extract():
+ """
+ The script should be returned if there is one
+ """
+ python = uflash.hexlify(TEST_SCRIPT)
+ result = uflash.embed_hex(uflash._RUNTIME, python)
+ extracted = uflash.extract_script(result)
+ assert extracted == TEST_SCRIPT
+
+
+def test_extract_not_valid_hex():
+ """
+ Return a sensible message if the hex file isn't valid
+ """
+ with pytest.raises(ValueError) as e:
+ uflash.extract_script('invalid input')
+ assert 'Bad input hex file' in e.value.args[0]
+
+
+def test_extract_no_python():
+ """
+ What to do here?
+ """
+ assert uflash.extract_script(uflash._RUNTIME) == b''
+
+
def test_find_microbit_posix_exists():
"""
Simulate being on os.name == 'posix' and a call to "mount" returns a
@@ -278,8 +313,7 @@ def test_main_no_args():
with mock.patch('sys.argv', ['uflash', ]):
with mock.patch('uflash.flash') as mock_flash:
uflash.main()
- assert mock_flash.call_count == 1
- assert mock_flash.call_args == ()
+ assert mock_flash.called_once_with(None, None)
def test_main_first_arg_python():
@@ -322,11 +356,55 @@ def test_main_two_args():
assert mock_flash.called_once_with('foo.py', '/media/foo/bar')
-def test_main_extra_args_ignored():
+def test_extract_command():
"""
- Any arguments more than two are ignored, with only the first two passed
- into the flash() function.
+ Test the command-line script extract feature
"""
- with mock.patch('uflash.flash') as mock_flash:
- uflash.main(argv=['foo.py', '/media/foo/bar', 'baz', 'quux'])
- assert mock_flash.called_once_with('foo.py', '/media/foo/bar')
+ with mock.patch('uflash.extract') as mock_extract:
+ uflash.main(argv=['-e', 'hex.hex', 'foo.py'])
+ assert mock_extract.called_once_with('hex.hex', 'foo.py')
+
+
+def test_extract_paths():
+ """
+ Test the different paths of the extract() function.
+ It should open and extract the contents of the file (input arg)
+ When called with only an input it should print the output of extract_script
+ When called with two arguments it should write the output to the output arg
+ """
+ mock_e = mock.MagicMock(return_value=mock.sentinel.script)
+ mock_o = mock.MagicMock()
+ mock_o.return_value.__enter__ = lambda s: s
+ mock_o.return_value.__exit__ = mock.Mock()
+ mock_o.return_value.read.return_value = 'script'
+ mock_o.return_value.write = mock.Mock()
+
+ with mock.patch('uflash.extract_script', mock_e) as mock_extract_script, \
+ mock.patch('builtins.print') as mock_print, \
+ mock.patch('builtins.open', mock_o) as mock_open:
+ uflash.extract('foo.hex')
+ assert mock_open.called_once_with('foo.hex')
+ assert mock_extract_script.called_once_with(mock.sentinel.file_handle)
+ assert mock_print.called_once_with(mock.sentinel.script)
+
+ uflash.extract('foo.hex', 'out.py')
+ assert mock_open.call_count == 3
+ assert mock_open.called_with('out.py', 'w')
+ assert mock_open.return_value.write.call_count == 1
+
+
+def test_extract_command_source_only():
+ """
+ If there is no target file the extract command should write to stdout
+ """
+ with mock.patch('uflash.extract') as mock_extract:
+ uflash.main(argv=['hex.hex'])
+ assert mock_extract.called_once_with('hex.hex')
+
+
+def test_extract_command_no_source():
+ """
+ If there is no source file the extract command should complain
+ """
+ with pytest.raises(TypeError):
+ uflash.extract(None, None)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 0
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
babel==2.17.0
certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
docutils==0.21.2
exceptiongroup==1.2.2
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==3.0.2
packaging==24.2
pep8==1.7.1
pluggy==1.5.0
pyflakes==3.3.1
Pygments==2.19.1
pytest==8.3.5
pytest-cov==6.0.0
requests==2.32.3
snowballstemmer==2.2.0
Sphinx==7.4.7
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
tomli==2.2.1
-e git+https://github.com/ntoll/uflash.git@28bc481b67d67cc20aacc1191c87ac1e4c59bb34#egg=uflash
urllib3==2.3.0
zipp==3.21.0
| name: uflash
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- babel==2.17.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- docutils==0.21.2
- exceptiongroup==1.2.2
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==3.0.2
- packaging==24.2
- pep8==1.7.1
- pluggy==1.5.0
- pyflakes==3.3.1
- pygments==2.19.1
- pytest==8.3.5
- pytest-cov==6.0.0
- requests==2.32.3
- snowballstemmer==2.2.0
- sphinx==7.4.7
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- tomli==2.2.1
- urllib3==2.3.0
- zipp==3.21.0
prefix: /opt/conda/envs/uflash
| [
"tests/test_uflash.py::test_unhexlify",
"tests/test_uflash.py::test_extract",
"tests/test_uflash.py::test_extract_not_valid_hex",
"tests/test_uflash.py::test_extract_no_python",
"tests/test_uflash.py::test_extract_command",
"tests/test_uflash.py::test_extract_paths",
"tests/test_uflash.py::test_extract_command_source_only",
"tests/test_uflash.py::test_extract_command_no_source"
]
| []
| [
"tests/test_uflash.py::test_get_version",
"tests/test_uflash.py::test_hexlify",
"tests/test_uflash.py::test_hexlify_empty_script",
"tests/test_uflash.py::test_embed_hex",
"tests/test_uflash.py::test_embed_no_python",
"tests/test_uflash.py::test_embed_no_runtime",
"tests/test_uflash.py::test_find_microbit_posix_exists",
"tests/test_uflash.py::test_find_microbit_posix_missing",
"tests/test_uflash.py::test_find_microbit_nt_exists",
"tests/test_uflash.py::test_find_microbit_nt_missing",
"tests/test_uflash.py::test_find_microbit_unknown_os",
"tests/test_uflash.py::test_save_hex",
"tests/test_uflash.py::test_save_hex_no_hex",
"tests/test_uflash.py::test_save_hex_path_not_to_hex_file",
"tests/test_uflash.py::test_flash_no_args",
"tests/test_uflash.py::test_flash_has_python_no_path_to_microbit",
"tests/test_uflash.py::test_flash_with_paths",
"tests/test_uflash.py::test_flash_cannot_find_microbit",
"tests/test_uflash.py::test_flash_wrong_python",
"tests/test_uflash.py::test_main_no_args",
"tests/test_uflash.py::test_main_first_arg_python",
"tests/test_uflash.py::test_main_first_arg_help",
"tests/test_uflash.py::test_main_first_arg_not_python",
"tests/test_uflash.py::test_main_two_args"
]
| []
| MIT License | 368 | [
"uflash.py"
]
| [
"uflash.py"
]
|
networkx__networkx-1908 | e0479d2e090ec301de9612330585e9bc8d1f967c | 2016-01-05 18:21:56 | e0479d2e090ec301de9612330585e9bc8d1f967c | diff --git a/networkx/algorithms/matching.py b/networkx/algorithms/matching.py
index afc505e5f..6bb064dee 100644
--- a/networkx/algorithms/matching.py
+++ b/networkx/algorithms/matching.py
@@ -46,7 +46,7 @@ def maximal_matching(G):
for u,v in G.edges():
# If the edge isn't covered, add it to the matching
# then remove neighborhood of u and v from consideration.
- if u not in nodes and v not in nodes:
+ if u not in nodes and v not in nodes and u!=v:
matching.add((u,v))
nodes.add(u)
nodes.add(v)
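An illustrative check of the patched behaviour (hypothetical session; the self-loop edge is now skipped):

```py
>>> import networkx as nx
>>> G = nx.Graph([(1, 1), (1, 2)])
>>> nx.maximal_matching(G)
{(1, 2)}
```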
| maximal_matching and self loops
`maximal_matching` does allow self-loops
```py
>>> G = nx.Graph([[1,1]])
>>> nx.matching.maximal_matching(G)
{(1, 1)}
```
whereas `max_weight_matching` does not
```py
>>> nx.matching.max_weight_matching(G)
{}
```
Is this expected behaviour? If not, a simple `u != v` check should fix it. | networkx/networkx | diff --git a/networkx/algorithms/tests/test_matching.py b/networkx/algorithms/tests/test_matching.py
index 05fa8c1b1..ac86da9bc 100644
--- a/networkx/algorithms/tests/test_matching.py
+++ b/networkx/algorithms/tests/test_matching.py
@@ -247,6 +247,20 @@ def test_maximal_matching():
vset = set(u for u, v in matching)
vset = vset | set(v for u, v in matching)
+ for edge in graph.edges():
+ u, v = edge
+ ok_(len(set([v]) & vset) > 0 or len(set([u]) & vset) > 0, \
+ "not a proper matching!")
+ graph = nx.Graph()
+ graph.add_edge(1, 1)
+ graph.add_edge(1, 2)
+ graph.add_edge(2, 2)
+ graph.add_edge(2, 3)
+ matching = nx.maximal_matching(graph)
+ assert(len(matching)==1)
+ vset = set(u for u, v in matching)
+ vset = vset | set(v for u, v in matching)
+
for edge in graph.edges():
u, v = edge
ok_(len(set([v]) & vset) > 0 or len(set([u]) & vset) > 0, \
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 1
} | 1.111 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y libgdal-dev graphviz"
],
"python": "3.6",
"reqs_path": [
"requirements/default.txt",
"requirements/test.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
decorator==5.1.1
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/networkx/networkx.git@e0479d2e090ec301de9612330585e9bc8d1f967c#egg=networkx
nose==1.3.7
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: networkx
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- decorator==5.1.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/networkx
| [
"networkx/algorithms/tests/test_matching.py::test_maximal_matching"
]
| []
| [
"networkx/algorithms/tests/test_matching.py::TestMatching::test_trivial1",
"networkx/algorithms/tests/test_matching.py::TestMatching::test_trivial2",
"networkx/algorithms/tests/test_matching.py::TestMatching::test_trivial3",
"networkx/algorithms/tests/test_matching.py::TestMatching::test_trivial4",
"networkx/algorithms/tests/test_matching.py::TestMatching::test_trivial5",
"networkx/algorithms/tests/test_matching.py::TestMatching::test_trivial6",
"networkx/algorithms/tests/test_matching.py::TestMatching::test_floating_point_weights",
"networkx/algorithms/tests/test_matching.py::TestMatching::test_negative_weights",
"networkx/algorithms/tests/test_matching.py::TestMatching::test_s_blossom",
"networkx/algorithms/tests/test_matching.py::TestMatching::test_s_t_blossom",
"networkx/algorithms/tests/test_matching.py::TestMatching::test_nested_s_blossom",
"networkx/algorithms/tests/test_matching.py::TestMatching::test_nested_s_blossom_relabel",
"networkx/algorithms/tests/test_matching.py::TestMatching::test_nested_s_blossom_expand",
"networkx/algorithms/tests/test_matching.py::TestMatching::test_s_blossom_relabel_expand",
"networkx/algorithms/tests/test_matching.py::TestMatching::test_nested_s_blossom_relabel_expand",
"networkx/algorithms/tests/test_matching.py::TestMatching::test_nasty_blossom1",
"networkx/algorithms/tests/test_matching.py::TestMatching::test_nasty_blossom2",
"networkx/algorithms/tests/test_matching.py::TestMatching::test_nasty_blossom_least_slack",
"networkx/algorithms/tests/test_matching.py::TestMatching::test_nasty_blossom_augmenting",
"networkx/algorithms/tests/test_matching.py::TestMatching::test_nasty_blossom_expand_recursively",
"networkx/algorithms/tests/test_matching.py::test_maximal_matching_ordering"
]
| []
| BSD 3-Clause | 369 | [
"networkx/algorithms/matching.py"
]
| [
"networkx/algorithms/matching.py"
]
|
|
geowurster__tinymr-13 | a387cf72cfc2a18978b77058e1e28f532258ae49 | 2016-01-07 05:12:23 | a387cf72cfc2a18978b77058e1e28f532258ae49 | diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000..9c9f9fb
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,2 @@
+[run]
+omit: tinymr/_backport_heapq.py
diff --git a/README.rst b/README.rst
index 853c011..d653294 100644
--- a/README.rst
+++ b/README.rst
@@ -105,7 +105,7 @@ implementation with parallelized map and reduce phases will be added.
yield key, sum(values)
- def final_reducer(self, pairs):
+ def output(self, pairs):
"""
Normally this phase is where the final dataset is written to disk,
@@ -186,52 +186,24 @@ that appear below match the ``word`` only because a ``sort`` key was not given.
Words that appear in the input text on multiple lines have multiple
``(word, count)`` pairs. A ``count`` of ``2`` would indicate a word that
appeared twice on a single line, but our input data does not have this
-condition.
+condition. Truncated output below. The dictionary values are lists containing
+tuples to allow for a sort key, which is explained elsewhere.
.. code-block:: python
{
- 'use': [('use', 1)],
- 'new': [('new', 1)],
- 'above': [('above', 1)],
- 'redistributions': [('redistributions', 1)],
- 'source': [('source', 1), ('source', 1)],
- 'without': [('without', 1)],
- 'notice': [('notice', 1)],
- 'redistribution': [('redistribution', 1)],
- 'bsd': [('bsd', 1)],
- 'that': [('that', 1)],
- 'permitted': [('permitted', 1)],
- 'forms': [('forms', 1)],
- 'rights': [('rights', 1)],
- 'must': [('must', 1)],
- 'list': [('list', 1)],
- 'are': [('are', 1), ('are', 1)],
- 'with': [('with', 1)],
- 'd': [('d', 1)],
- 'license': [('license', 1)],
- 'binary': [('binary', 1)],
- 'reserved': [('reserved', 1)],
- 'or': [('or', 1)],
- 'the': [('the', 1), ('the', 1), ('the', 1)],
- 'and': [('and', 1), ('and', 1), ('and', 1)],
- 'all': [('all', 1)],
- 'met': [('met', 1)],
- 'this': [('this', 1)],
- 'provided': [('provided', 1)],
- 'of': [('of', 1), ('of', 1)],
- 'c': [('c', 1)],
- 'wurster': [('wurster', 1)],
- 'code': [('code', 1)],
- 'disclaimer': [('disclaimer', 1)],
- 'modification': [('modification', 1)],
- 'copyright': [('copyright', 1), ('copyright', 1)],
- 'retain': [('retain', 1)], 'kevin': [('kevin', 1)],
- 'conditions': [('conditions', 1), ('conditions', 1)],
- 'following': [('following', 1), ('following', 1)],
- 'in': [('in', 1)], '2015': [('2015', 1)]
+ '2015': [(1,)]
+ 'above': [(1,)]
+ 'all': [(1,)]
+ 'and': [(1,), (1,), (1,)]
+ 'are': [(1,), (1,)]
+ 'binary': [(1,)]
+ 'bsd': [(1,)]
+ 'c': [(1,)]
+ 'code': [(1,)]
}
+
**Reduce**
Sum ``count`` for each ``word``.
@@ -241,62 +213,31 @@ Sum ``count`` for each ``word``.
# The ``reducer()`` receives a key and an iterator of values
key = 'the'
values = (1, 1, 1)
- yield key, sum(values)
+ def reducer(key, values):
+ yield key, sum(values)
**Partition**
The reducer does not _have_ to produce the same key it was given, so the data
is partitioned by key again, which is superfluous for this wordcount example.
Again the keys are kept in case the data is sorted and only match ``word``
-because an optional ``sort`` key was not given.
+because an optional ``sort`` key was not given. Truncated output below.
.. code-block:: python
{
- 'following': [('following', 2)],
- '2015': [('2015', 1)],
- 'reserved': [('reserved', 1)],
- 'permitted': [('permitted', 1)],
- 'forms': [('forms', 1)],
- 'are': [('are', 2)],
- 'license': [('license', 1)],
- 'c': [('c', 1)],
- 'kevin': [('kevin', 1)],
- 'without': [('without', 1)],
- 'redistribution': [('redistribution', 1)],
- 'copyright': [('copyright', 2)],
- 'met': [('met', 1)],
- 'use': [('use', 1)],
- 'the': [('the', 3)],
- 'rights': [('rights', 1)],
- 'that': [('that', 1)],
- 'or': [('or', 1)],
- 'this': [('this', 1)],
- 'with': [('with', 1)],
- 'source': [('source', 2)],
- 'new': [('new', 1)],
- 'binary': [('binary', 1)],
- 'wurster': [('wurster', 1)],
- 'list': [('list', 1)],
- 'must': [('must', 1)],
- 'of': [('of', 2)],
- 'retain': [('retain', 1)],
- 'modification': [('modification', 1)],
- 'and': [('and', 3)],
- 'above': [('above', 1)],
- 'all': [('all', 1)],
- 'redistributions': [('redistributions', 1)],
- 'bsd': [('bsd', 1)],
- 'in': [('in', 1)],
- 'conditions': [('conditions', 2)],
- 'disclaimer': [('disclaimer', 1)],
- 'd': [('d', 1)],
- 'code': [('code', 1)],
- 'provided': [('provided', 1)],
- 'notice': [('notice', 1)]
+ '2015': [(1,)]
+ 'above': [(1,)]
+ 'all': [(1,)]
+ 'and': [(3,)]
+ 'are': [(2,)]
+ 'binary': [(1,)]
+ 'bsd': [(1,)]
+ 'c': [(1,)]
+ 'code': [(1,)]
}
-**Final Reduce**
+**Output**
The default implementation is to return ``(key, iter(values))`` pairs from the
``final_reducer()``, which would look something like:
diff --git a/tinymr/__init__.py b/tinymr/__init__.py
index 5ccf28f..f283135 100644
--- a/tinymr/__init__.py
+++ b/tinymr/__init__.py
@@ -5,6 +5,10 @@ Heavily inspired by Spotify's Luigi framework - github.com/Spotify/Luigi
"""
+import logging
+logging.basicConfig()
+
+
__version__ = '0.1'
__author__ = 'Kevin Wurster'
__email__ = '[email protected]'
diff --git a/tinymr/_backport_heapq.py b/tinymr/_backport_heapq.py
new file mode 100644
index 0000000..a574351
--- /dev/null
+++ b/tinymr/_backport_heapq.py
@@ -0,0 +1,620 @@
+# encoding: utf-8
+
+
+# This module was copied from the cpython source code and maintains its
+# original license. Modifications were limited to the changes required to
+# support Python 2 + 3 and are provided without any guarantee. For example,
+# `yield from generator` calls were replaced with:
+#
+# for value in generator:
+# yield value
+#
+# and `iterator.__next__` were replaced with:
+#
+# getattr(iterator, '__next__', getattr(iterator, 'next'))
+#
+# Python 3's `heapq.merge()` accepts a keyfunc for determining how to sort a
+# given object. Code was copied from:
+# https://github.com/python/cpython/tree/15572204799fb8506645dc1c448135f4e4ffde00
+
+
+"""Heap queue algorithm (a.k.a. priority queue).
+Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
+all k, counting elements from 0. For the sake of comparison,
+non-existing elements are considered to be infinite. The interesting
+property of a heap is that a[0] is always its smallest element.
+Usage:
+heap = [] # creates an empty heap
+heappush(heap, item) # pushes a new item on the heap
+item = heappop(heap) # pops the smallest item from the heap
+item = heap[0] # smallest item on the heap without popping it
+heapify(x) # transforms list into a heap, in-place, in linear time
+item = heapreplace(heap, item) # pops and returns smallest item, and adds
+ # new item; the heap size is unchanged
+Our API differs from textbook heap algorithms as follows:
+- We use 0-based indexing. This makes the relationship between the
+ index for a node and the indexes for its children slightly less
+ obvious, but is more suitable since Python uses 0-based indexing.
+- Our heappop() method returns the smallest item, not the largest.
+These two make it possible to view the heap as a regular Python list
+without surprises: heap[0] is the smallest item, and heap.sort()
+maintains the heap invariant!
+"""
+
+# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
+
+__about__ = """Heap queues
+[explanation by François Pinard]
+Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
+all k, counting elements from 0. For the sake of comparison,
+non-existing elements are considered to be infinite. The interesting
+property of a heap is that a[0] is always its smallest element.
+The strange invariant above is meant to be an efficient memory
+representation for a tournament. The numbers below are `k', not a[k]:
+ 0
+ 1 2
+ 3 4 5 6
+ 7 8 9 10 11 12 13 14
+ 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
+In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In
+an usual binary tournament we see in sports, each cell is the winner
+over the two cells it tops, and we can trace the winner down the tree
+to see all opponents s/he had. However, in many computer applications
+of such tournaments, we do not need to trace the history of a winner.
+To be more memory efficient, when a winner is promoted, we try to
+replace it by something else at a lower level, and the rule becomes
+that a cell and the two cells it tops contain three different items,
+but the top cell "wins" over the two topped cells.
+If this heap invariant is protected at all time, index 0 is clearly
+the overall winner. The simplest algorithmic way to remove it and
+find the "next" winner is to move some loser (let's say cell 30 in the
+diagram above) into the 0 position, and then percolate this new 0 down
+the tree, exchanging values, until the invariant is re-established.
+This is clearly logarithmic on the total number of items in the tree.
+By iterating over all items, you get an O(n ln n) sort.
+A nice feature of this sort is that you can efficiently insert new
+items while the sort is going on, provided that the inserted items are
+not "better" than the last 0'th element you extracted. This is
+especially useful in simulation contexts, where the tree holds all
+incoming events, and the "win" condition means the smallest scheduled
+time. When an event schedule other events for execution, they are
+scheduled into the future, so they can easily go into the heap. So, a
+heap is a good structure for implementing schedulers (this is what I
+used for my MIDI sequencer :-).
+Various structures for implementing schedulers have been extensively
+studied, and heaps are good for this, as they are reasonably speedy,
+the speed is almost constant, and the worst case is not much different
+than the average case. However, there are other representations which
+are more efficient overall, yet the worst cases might be terrible.
+Heaps are also very useful in big disk sorts. You most probably all
+know that a big sort implies producing "runs" (which are pre-sorted
+sequences, which size is usually related to the amount of CPU memory),
+followed by a merging passes for these runs, which merging is often
+very cleverly organised[1]. It is very important that the initial
+sort produces the longest runs possible. Tournaments are a good way
+to that. If, using all the memory available to hold a tournament, you
+replace and percolate items that happen to fit the current run, you'll
+produce runs which are twice the size of the memory for random input,
+and much better for input fuzzily ordered.
+Moreover, if you output the 0'th item on disk and get an input which
+may not fit in the current tournament (because the value "wins" over
+the last output value), it cannot fit in the heap, so the size of the
+heap decreases. The freed memory could be cleverly reused immediately
+for progressively building a second heap, which grows at exactly the
+same rate the first heap is melting. When the first heap completely
+vanishes, you switch heaps and start a new run. Clever and quite
+effective!
+In a word, heaps are useful memory structures to know. I use them in
+a few applications, and I think it is good to keep a `heap' module
+around. :-)
+--------------------
+[1] The disk balancing algorithms which are current, nowadays, are
+more annoying than clever, and this is a consequence of the seeking
+capabilities of the disks. On devices which cannot seek, like big
+tape drives, the story was quite different, and one had to be very
+clever to ensure (far in advance) that each tape movement will be the
+most effective possible (that is, will best participate at
+"progressing" the merge). Some tapes were even able to read
+backwards, and this was also used to avoid the rewinding time.
+Believe me, real good tape sorts were quite spectacular to watch!
+From all times, sorting has always been a Great Art! :-)
+"""
+
+
+import functools
+
+
+__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
+ 'nlargest', 'nsmallest', 'heappushpop']
+
+
+builtin_next = next
+
+
+def heappush(heap, item):
+ """Push item onto heap, maintaining the heap invariant."""
+ heap.append(item)
+ _siftdown(heap, 0, len(heap)-1)
+
+
+def heappop(heap):
+ """Pop the smallest item off the heap, maintaining the heap invariant."""
+ lastelt = heap.pop() # raises appropriate IndexError if heap is empty
+ if heap:
+ returnitem = heap[0]
+ heap[0] = lastelt
+ _siftup(heap, 0)
+ return returnitem
+ return lastelt
+
+
+def heapreplace(heap, item):
+ """Pop and return the current smallest value, and add the new item.
+ This is more efficient than heappop() followed by heappush(), and can be
+ more appropriate when using a fixed-size heap. Note that the value
+ returned may be larger than item! That constrains reasonable uses of
+ this routine unless written as part of a conditional replacement:
+ if item > heap[0]:
+ item = heapreplace(heap, item)
+ """
+ returnitem = heap[0] # raises appropriate IndexError if heap is empty
+ heap[0] = item
+ _siftup(heap, 0)
+ return returnitem
+
+
+def heappushpop(heap, item):
+ """Fast version of a heappush followed by a heappop."""
+ if heap and heap[0] < item:
+ item, heap[0] = heap[0], item
+ _siftup(heap, 0)
+ return item
+
+
+def heapify(x):
+ """Transform list into a heap, in-place, in O(len(x)) time."""
+ n = len(x)
+ # Transform bottom-up. The largest index there's any point to looking at
+ # is the largest with a child index in-range, so must have 2*i + 1 < n,
+ # or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so
+ # j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is
+ # (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1.
+ for i in reversed(range(n//2)):
+ _siftup(x, i)
+
+
+def _heappop_max(heap):
+ """Maxheap version of a heappop."""
+ lastelt = heap.pop() # raises appropriate IndexError if heap is empty
+ if heap:
+ returnitem = heap[0]
+ heap[0] = lastelt
+ _siftup_max(heap, 0)
+ return returnitem
+ return lastelt
+
+
+def _heapreplace_max(heap, item):
+ """Maxheap version of a heappop followed by a heappush."""
+ returnitem = heap[0] # raises appropriate IndexError if heap is empty
+ heap[0] = item
+ _siftup_max(heap, 0)
+ return returnitem
+
+
+def _heapify_max(x):
+ """Transform list into a maxheap, in-place, in O(len(x)) time."""
+ n = len(x)
+ for i in reversed(range(n//2)):
+ _siftup_max(x, i)
+
+
+# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
+# is the index of a leaf with a possibly out-of-order value. Restore the
+# heap invariant.
+def _siftdown(heap, startpos, pos):
+ newitem = heap[pos]
+ # Follow the path to the root, moving parents down until finding a place
+ # newitem fits.
+ while pos > startpos:
+ parentpos = (pos - 1) >> 1
+ parent = heap[parentpos]
+ if newitem < parent:
+ heap[pos] = parent
+ pos = parentpos
+ continue
+ break
+ heap[pos] = newitem
+
+
+# The child indices of heap index pos are already heaps, and we want to make
+# a heap at index pos too. We do this by bubbling the smaller child of
+# pos up (and so on with that child's children, etc) until hitting a leaf,
+# then using _siftdown to move the oddball originally at index pos into place.
+#
+# We *could* break out of the loop as soon as we find a pos where newitem <=
+# both its children, but turns out that's not a good idea, and despite that
+# many books write the algorithm that way. During a heap pop, the last array
+# element is sifted in, and that tends to be large, so that comparing it
+# against values starting from the root usually doesn't pay (= usually doesn't
+# get us out of the loop early). See Knuth, Volume 3, where this is
+# explained and quantified in an exercise.
+#
+# Cutting the # of comparisons is important, since these routines have no
+# way to extract "the priority" from an array element, so that intelligence
+# is likely to be hiding in custom comparison methods, or in array elements
+# storing (priority, record) tuples. Comparisons are thus potentially
+# expensive.
+#
+# On random arrays of length 1000, making this change cut the number of
+# comparisons made by heapify() a little, and those made by exhaustive
+# heappop() a lot, in accord with theory. Here are typical results from 3
+# runs (3 just to demonstrate how small the variance is):
+#
+# Compares needed by heapify Compares needed by 1000 heappops
+# -------------------------- --------------------------------
+# 1837 cut to 1663 14996 cut to 8680
+# 1855 cut to 1659 14966 cut to 8678
+# 1847 cut to 1660 15024 cut to 8703
+#
+# Building the heap by using heappush() 1000 times instead required
+# 2198, 2148, and 2219 compares: heapify() is more efficient, when
+# you can use it.
+#
+# The total compares needed by list.sort() on the same lists were 8627,
+# 8627, and 8632 (this should be compared to the sum of heapify() and
+# heappop() compares): list.sort() is (unsurprisingly!) more efficient
+# for sorting.
+
+
+def _siftup(heap, pos):
+ endpos = len(heap)
+ startpos = pos
+ newitem = heap[pos]
+ # Bubble up the smaller child until hitting a leaf.
+ childpos = 2*pos + 1 # leftmost child position
+ while childpos < endpos:
+ # Set childpos to index of smaller child.
+ rightpos = childpos + 1
+ if rightpos < endpos and not heap[childpos] < heap[rightpos]:
+ childpos = rightpos
+ # Move the smaller child up.
+ heap[pos] = heap[childpos]
+ pos = childpos
+ childpos = 2*pos + 1
+ # The leaf at pos is empty now. Put newitem there, and bubble it up
+ # to its final resting place (by sifting its parents down).
+ heap[pos] = newitem
+ _siftdown(heap, startpos, pos)
+
+
+def _siftdown_max(heap, startpos, pos):
+ 'Maxheap variant of _siftdown'
+ newitem = heap[pos]
+ # Follow the path to the root, moving parents down until finding a place
+ # newitem fits.
+ while pos > startpos:
+ parentpos = (pos - 1) >> 1
+ parent = heap[parentpos]
+ if parent < newitem:
+ heap[pos] = parent
+ pos = parentpos
+ continue
+ break
+ heap[pos] = newitem
+
+
+def _siftup_max(heap, pos):
+ 'Maxheap variant of _siftup'
+ endpos = len(heap)
+ startpos = pos
+ newitem = heap[pos]
+ # Bubble up the larger child until hitting a leaf.
+ childpos = 2*pos + 1 # leftmost child position
+ while childpos < endpos:
+ # Set childpos to index of larger child.
+ rightpos = childpos + 1
+ if rightpos < endpos and not heap[rightpos] < heap[childpos]:
+ childpos = rightpos
+ # Move the larger child up.
+ heap[pos] = heap[childpos]
+ pos = childpos
+ childpos = 2*pos + 1
+ # The leaf at pos is empty now. Put newitem there, and bubble it up
+ # to its final resting place (by sifting its parents down).
+ heap[pos] = newitem
+ _siftdown_max(heap, startpos, pos)
+
+
+def merge(*iterables, **kwargs):
+ '''Merge multiple sorted inputs into a single sorted output.
+ Similar to sorted(itertools.chain(*iterables)) but returns a generator,
+ does not pull the data into memory all at once, and assumes that each of
+ the input streams is already sorted (smallest to largest).
+ >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
+ [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]
+ If *key* is not None, applies a key function to each element to determine
+ its sort order.
+ >>> list(merge(['dog', 'horse'], ['cat', 'fish', 'kangaroo'], key=len))
+ ['dog', 'cat', 'fish', 'horse', 'kangaroo']
+ '''
+
+ key = kwargs.pop('key', None)
+ reverse = kwargs.pop('reverse', False)
+
+ assert not kwargs, "Unrecognized kwargs: {}".format(kwargs)
+
+ h = []
+ h_append = h.append
+
+ if reverse:
+ _heapify = _heapify_max
+ _heappop = _heappop_max
+ _heapreplace = _heapreplace_max
+ direction = -1
+ else:
+ _heapify = heapify
+ _heappop = heappop
+ _heapreplace = heapreplace
+ direction = 1
+
+ if key is None:
+ for order, it in enumerate(map(iter, iterables)):
+ try:
+ next = functools.partial(builtin_next, it)
+ # next = getattr(it, '__next__', getattr(it, 'next'))
+ h_append([next(), order * direction, next])
+ except StopIteration:
+ pass
+ _heapify(h)
+ while len(h) > 1:
+ try:
+ while True:
+ value, order, next = s = h[0]
+ yield value
+ s[0] = next() # raises StopIteration when exhausted
+ _heapreplace(h, s) # restore heap condition
+ except StopIteration:
+ _heappop(h) # remove empty iterator
+ if h:
+ # fast case when only a single iterator remains
+ value, order, next = h[0]
+ yield value
+ while True:
+ yield next()
+ return
+
+ for order, it in enumerate(map(iter, iterables)):
+ try:
+ next = functools.partial(builtin_next, it)
+ # next = getattr(it, '__next__', getattr(it, 'next'))
+ value = next()
+ h_append([key(value), order * direction, value, next])
+ except StopIteration:
+ pass
+ _heapify(h)
+ while len(h) > 1:
+ try:
+ while True:
+ key_value, order, value, next = s = h[0]
+ yield value
+ value = next()
+ s[0] = key(value)
+ s[2] = value
+ _heapreplace(h, s)
+ except StopIteration:
+ _heappop(h)
+ if h:
+ key_value, order, value, next = h[0]
+ yield value
+ while True:
+ yield next()
+
+
+# Algorithm notes for nlargest() and nsmallest()
+# ==============================================
+#
+# Make a single pass over the data while keeping the k most extreme values
+# in a heap. Memory consumption is limited to keeping k values in a list.
+#
+# Measured performance for random inputs:
+#
+# number of comparisons
+# n inputs k-extreme values (average of 5 trials) % more than min()
+# ------------- ---------------- --------------------- -----------------
+# 1,000 100 3,317 231.7%
+# 10,000 100 14,046 40.5%
+# 100,000 100 105,749 5.7%
+# 1,000,000 100 1,007,751 0.8%
+# 10,000,000 100 10,009,401 0.1%
+#
+# Theoretical number of comparisons for k smallest of n random inputs:
+#
+# Step Comparisons Action
+# ---- -------------------------- ---------------------------
+# 1 1.66 * k heapify the first k-inputs
+# 2 n - k compare remaining elements to top of heap
+# 3 k * (1 + lg2(k)) * ln(n/k) replace the topmost value on the heap
+# 4 k * lg2(k) - (k/2) final sort of the k most extreme values
+#
+# Combining and simplifying for a rough estimate gives:
+#
+# comparisons = n + k * (log(k, 2) * log(n/k) + log(k, 2) + log(n/k))
+#
+# Computing the number of comparisons for step 3:
+# -----------------------------------------------
+# * For the i-th new value from the iterable, the probability of being in the
+# k most extreme values is k/i. For example, the probability of the 101st
+# value seen being in the 100 most extreme values is 100/101.
+# * If the value is a new extreme value, the cost of inserting it into the
+# heap is 1 + log(k, 2).
+# * The probability times the cost gives:
+# (k/i) * (1 + log(k, 2))
+# * Summing across the remaining n-k elements gives:
+# sum((k/i) * (1 + log(k, 2)) for i in range(k+1, n+1))
+# * This reduces to:
+# (H(n) - H(k)) * k * (1 + log(k, 2))
+# * Where H(n) is the n-th harmonic number estimated by:
+# gamma = 0.5772156649
+# H(n) = log(n, e) + gamma + 1 / (2 * n)
+# http://en.wikipedia.org/wiki/Harmonic_series_(mathematics)#Rate_of_divergence
+# * Substituting the H(n) formula:
+# comparisons = k * (1 + log(k, 2)) * (log(n/k, e) + (1/n - 1/k) / 2)
+#
+# Worst-case for step 3:
+# ----------------------
+# In the worst case, the input data is reversed sorted so that every new element
+# must be inserted in the heap:
+#
+# comparisons = 1.66 * k + log(k, 2) * (n - k)
+#
+# Alternative Algorithms
+# ----------------------
+# Other algorithms were not used because they:
+# 1) Took much more auxiliary memory,
+# 2) Made multiple passes over the data.
+# 3) Made more comparisons in common cases (small k, large n, semi-random input).
+# See the more detailed comparison of approach at:
+# http://code.activestate.com/recipes/577573-compare-algorithms-for-heapqsmallest
+
+def nsmallest(n, iterable, key=None):
+ """Find the n smallest elements in a dataset.
+ Equivalent to: sorted(iterable, key=key)[:n]
+ """
+
+ # Short-cut for n==1 is to use min()
+ if n == 1:
+ it = iter(iterable)
+ sentinel = object()
+ if key is None:
+ result = min(it, default=sentinel)
+ else:
+ result = min(it, default=sentinel, key=key)
+ return [] if result is sentinel else [result]
+
+ # When n>=size, it's faster to use sorted()
+ try:
+ size = len(iterable)
+ except (TypeError, AttributeError):
+ pass
+ else:
+ if n >= size:
+ return sorted(iterable, key=key)[:n]
+
+ # When key is none, use simpler decoration
+ if key is None:
+ it = iter(iterable)
+ # put the range(n) first so that zip() doesn't
+ # consume one too many elements from the iterator
+ result = [(elem, i) for i, elem in zip(range(n), it)]
+ if not result:
+ return result
+ _heapify_max(result)
+ top = result[0][0]
+ order = n
+ _heapreplace = _heapreplace_max
+ for elem in it:
+ if elem < top:
+ _heapreplace(result, (elem, order))
+ top = result[0][0]
+ order += 1
+ result.sort()
+ return [r[0] for r in result]
+
+ # General case, slowest method
+ it = iter(iterable)
+ result = [(key(elem), i, elem) for i, elem in zip(range(n), it)]
+ if not result:
+ return result
+ _heapify_max(result)
+ top = result[0][0]
+ order = n
+ _heapreplace = _heapreplace_max
+ for elem in it:
+ k = key(elem)
+ if k < top:
+ _heapreplace(result, (k, order, elem))
+ top = result[0][0]
+ order += 1
+ result.sort()
+ return [r[2] for r in result]
+
+
+def nlargest(n, iterable, key=None):
+ """Find the n largest elements in a dataset.
+ Equivalent to: sorted(iterable, key=key, reverse=True)[:n]
+ """
+
+ # Short-cut for n==1 is to use max()
+ if n == 1:
+ it = iter(iterable)
+ sentinel = object()
+ if key is None:
+ result = max(it, default=sentinel)
+ else:
+ result = max(it, default=sentinel, key=key)
+ return [] if result is sentinel else [result]
+
+ # When n>=size, it's faster to use sorted()
+ try:
+ size = len(iterable)
+ except (TypeError, AttributeError):
+ pass
+ else:
+ if n >= size:
+ return sorted(iterable, key=key, reverse=True)[:n]
+
+ # When key is none, use simpler decoration
+ if key is None:
+ it = iter(iterable)
+ result = [(elem, i) for i, elem in zip(range(0, -n, -1), it)]
+ if not result:
+ return result
+ heapify(result)
+ top = result[0][0]
+ order = -n
+ _heapreplace = heapreplace
+ for elem in it:
+ if top < elem:
+ _heapreplace(result, (elem, order))
+ top = result[0][0]
+ order -= 1
+ result.sort(reverse=True)
+ return [r[0] for r in result]
+
+ # General case, slowest method
+ it = iter(iterable)
+ result = [(key(elem), i, elem) for i, elem in zip(range(0, -n, -1), it)]
+ if not result:
+ return result
+ heapify(result)
+ top = result[0][0]
+ order = -n
+ _heapreplace = heapreplace
+ for elem in it:
+ k = key(elem)
+ if top < k:
+ _heapreplace(result, (k, order, elem))
+ top = result[0][0]
+ order -= 1
+ result.sort(reverse=True)
+ return [r[2] for r in result]
+
+
+# If available, use C implementation
+try:
+ from _heapq import *
+except ImportError:
+ pass
+try:
+ from _heapq import _heapreplace_max
+except ImportError:
+ pass
+try:
+ from _heapq import _heapify_max
+except ImportError:
+ pass
+try:
+ from _heapq import _heappop_max
+except ImportError:
+ pass
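This backport exists because Python 2's `heapq.merge()` has no `key` argument, which `tinymr.tools.merge_partitions()` relies on. A quick sanity check, assuming the patch is applied:

    from tinymr._backport_heapq import merge

    # Two pre-sorted lists of (sort, data) pairs, merged by their sort key.
    a = [(1, 'data1'), (3, 'data3')]
    b = [(2, 'data2'), (4, 'data4')]
    list(merge(a, b, key=lambda x: x[0]))
    # [(1, 'data1'), (2, 'data2'), (3, 'data3'), (4, 'data4')]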
diff --git a/tinymr/_mrtools.py b/tinymr/_mrtools.py
new file mode 100644
index 0000000..1a200db
--- /dev/null
+++ b/tinymr/_mrtools.py
@@ -0,0 +1,100 @@
+"""
+Helpers for MapReduce implementations.
+"""
+
+
+from collections import namedtuple
+
+import six
+
+from tinymr.tools import sorter
+
+
+def strip_sort_key(kv_stream):
+
+ """
+    Given a stream of `(key, [(sort, data), (sort, data)])` with the sort
+    key intact, remove the sort key from the values.
+
+ Example:
+
+ [
+ ('key1', [(10, data1), (3, data25)]),
+        ('key2', [(200, data100), (250, data67)])
+ ]
+
+ Produces:
+
+ [
+ ('key1', [data1, data25]),
+        ('key2', [data100, data67])
+ ]
+
+ Parameters
+ ----------
+ kv_stream : dict or iter
+ Dictionary like `{key: [(sort, data)]}` or a stream of tuples like
+        `(key, [(sort, data)])`.
+
+ Yields
+ ------
+ tuple
+ `(key, [data, data, ...])`
+ """
+
+ kv_stream = six.iteritems(kv_stream) if isinstance(kv_stream, dict) else kv_stream
+ return ((k, tuple(i[-1] for i in v)) for k, v in kv_stream)
+
+
+def sort_partitioned_values(kv_stream):
+
+ """
+    Given a stream of `(key, [(sort, data), (sort, data)])`, sort the
+    values for every key.
+
+ Example:
+
+ [
+ ('key1', [(10, data), (3, data)]),
+        ('key2', [(200, data), (250, data)])
+ ]
+
+ Produces:
+
+ [
+ ('key1', [(3, data), (10, data)]),
+        ('key2', [(200, data), (250, data)])
+ ]
+
+ Parameters
+ ----------
+ kv_stream : dict or iter
+        Dictionary like `{key: [(sort, data)]}` or a stream of tuples like
+        `(key, [(sort, data)])`.
+
+ Yields
+ ------
+ tuple
+ `(key, [(sort, data), (sort, data), ...])`
+ """
+
+ kv_stream = six.iteritems(kv_stream) if isinstance(kv_stream, dict) else kv_stream
+ return ((k, sorter(v, key=lambda x: x[0])) for k, v in kv_stream)
+
+
+class ReduceJob(namedtuple('ReduceJob', ['reducer', 'sort', 'jobs', 'chunksize'])):
+
+ """
+ Describes a reduce job. Makes keeping track of multiple reducers easier.
+
+ Parameters
+ ----------
+ reducer : callable
+ Does the reducing. Has a signature like `reducer(key, iter(values))`.
+ sort : bool
+ Determines if the partitioned values should be sorted.
+ jobs : int
+ Number of jobs to run in parallel.
+ chunksize : int
+ Amount of data to pass to one `job`.
+ """
diff --git a/tinymr/base.py b/tinymr/base.py
index 5c5c07f..eba8905 100644
--- a/tinymr/base.py
+++ b/tinymr/base.py
@@ -3,30 +3,30 @@ Base classes. Subclass away!
"""
-from collections import defaultdict
-from contextlib import contextmanager
from itertools import chain
import six
+from tinymr import _mrtools
+from tinymr import errors
+from tinymr import tools
-class MRBase(object):
+
+class BaseMapReduce(object):
"""
Base class for various MapReduce implementations. Not all methods are
used by every implementation.
"""
- _closed = False
-
def __enter__(self):
"""
See `__exit__` for context manager usage.
"""
- if self.closed:
- raise IOError("MapReduce task is closed - cannot reuse.")
+ # if self.closed:
+ # raise IOError("MapReduce task is closed - cannot reuse.")
return self
@@ -44,10 +44,109 @@ class MRBase(object):
self.close()
@property
- def sort_map(self):
+ def jobs(self):
"""
- Sort the output from the map phase before the combine phase.
+ Default number of jobs to run in parallel for each phase.
+
+ Returns
+ -------
+ int
+ """
+
+ return 1
+
+ @property
+ def map_jobs(self):
+
+ """
+ Number of jobs to run in parallel during the map phase. Defaults
+ to `jobs` property.
+
+ Returns
+ -------
+ int
+ """
+
+ return self.jobs
+
+ @property
+ def sort_jobs(self):
+
+ """
+ Number of jobs to run in parallel during the sort phases. Defaults
+ to `jobs` property.
+
+ Returns
+ -------
+ int
+ """
+
+ return self.jobs
+
+ @property
+ def reduce_jobs(self):
+
+ """
+ Number of jobs to run in parallel during the reduce phase. If `None`,
+ defaults to `jobs` property.
+
+ Returns
+ -------
+ int
+ """
+
+ return self.jobs
+
+ @property
+ def chunksize(self):
+
+ """
+ Amount of data to process in each `job`.
+
+ Returns
+ -------
+ int
+ """
+
+ return 1
+
+ @property
+ def map_chunksize(self):
+
+ """
+ Amount of data to process in each `job` during the map phase.
+ Defaults to `chunksize`.
+ """
+
+ return self.chunksize
+
+ @property
+ def sort_chunksize(self):
+
+ """
+ Amount of data to process in each `job` during the sort phase.
+ Defaults to `chunksize`.
+ """
+
+ return self.chunksize
+
+ @property
+ def reduce_chunksize(self):
+
+ """
+ Amount of data to process in each `job` during the reduce phase.
+ Defaults to `chunksize`.
+ """
+
+ return self.chunksize
+
+ @property
+ def sort(self):
+
+ """
+    Master switch for all sorting phases.  Setting an individual `sort_*`
+    property overrides this value for that phase.
Returns
-------
@@ -57,56 +156,59 @@ class MRBase(object):
return True
@property
- def sort_combine(self):
+ def sort_map(self):
"""
- Sort the output from the combine phase before the partition phase.
+ Sort the output from the map phase before the combine phase.
Returns
-------
bool
"""
- return True
+ return self.sort
@property
- def sort_final_reduce(self):
+ def sort_combine(self):
"""
- Pass data to the `final_reducer()` sorted by key.
+ Sort the output from the combine phase before the partition phase.
Returns
-------
bool
"""
- return True
+ return self.sort
@property
- def sort_reduce(self):
+ def sort_output(self):
"""
- Sort the output from the `reducer()` phase before the `final_reducer().
+ Pass data to `output()` sorted by key.
Returns
-------
bool
"""
- return True
+ return self.sort
@property
- def closed(self):
+ def sort_reduce(self):
"""
- Indicates whether or not the MapReduce task is closed.
+ Sort the output from each `reducer()` before executing the next or
+ before being passed to `output()`.
+
+ Define one property per reducer, so `reducer2()` would be `sort_reduce2`.
Returns
-------
bool
"""
- return self._closed
+ return self.sort
def close(self):
@@ -158,7 +260,10 @@ class MRBase(object):
See `reducer()` for more information.
"""
- raise NotImplementedError
+ # Not required so we raise a special exception that we can catch later
+ # Raising NotImplementedError also causes linters and code inspectors
+ # to prompt the user to implement this method when it is not required.
+ raise errors._CombinerNotImplemented
def init_reduce(self):
@@ -189,7 +294,7 @@ class MRBase(object):
raise NotImplementedError
- def final_reducer(self, pairs):
+ def output(self, pairs):
"""
Receives `(key, value)` pairs from each reducer. The output of this
@@ -212,89 +317,94 @@ class MRBase(object):
return ((key, tuple(values)) for key, values in pairs)
- @contextmanager
- def _partition(self, psd_stream):
+ @property
+ def _reduce_jobs(self):
+
+ reducers = tools.sorter(filter(
+ lambda x: not x.startswith('_') and 'reducer' in x,
+ dir(self)))
+
+ for r in reducers:
+ yield _mrtools.ReduceJob(
+ reducer=getattr(self, r),
+ sort=getattr(self, 'sort_{}'.format(r.replace('reducer', 'reduce'))),
+ jobs=getattr(self, '{}_jobs'.format(r.replace('reducer', 'reduce'))),
+                chunksize=getattr(self, '{}_chunksize'.format(r.replace('reducer', 'reduce'))))
+
+ def _map_combine_partition(self, stream):
"""
- Context manager to partition data and destroy it when finished.
+ Run `mapper()`, partition, `combiner()` (if implemented) and partition
+ on a chunk of input data. Data may be sorted between each phase
+ according to the control properties.
+
+ Produces a dictionary of partitioned data with sort keys intact.
Parameters
----------
- psd_stream : iter
- Produces `(partition, sort, data)` tuples.
- sort_key : bool, optional
- Some MapReduce implementations don't benefit from sorting, and
- therefore do not pass around a sort key. Set to `False` in this
- case.
+ stream : iter
+ Input data passed to the MapReduce task.
Returns
-------
- defaultdict
- Keys are partition keys and values are lists of `(sort, data)` tuples.
+ dict
+ {key: [(sort, data), (sort, data), ...]}
"""
- partitioned = defaultdict(list)
-
- try:
-
- for kv_data in psd_stream:
- partitioned[kv_data[0]].append(kv_data)
-
- yield partitioned
-
- finally:
- partitioned = None
-
- def _sorter(self, key_values, fake=False):
-
- """
- Produces sorted data without any keys.
+ # Map, partition, and convert back to a `(key, [v, a, l, u, e, s])` stream
+ mapped = chain(*(self.mapper(item) for item in stream))
+ map_partitioned = tools.partition(mapped)
+ map_partitioned = six.iteritems(map_partitioned)
- Parameters
- ----------
- data : iter
- Produces tuples from the map phase.
- fake : bool, optional
- Don't do the sort - just strip off the data key.
+ if self.sort_map:
+ map_partitioned = _mrtools.sort_partitioned_values(map_partitioned)
- Yields
- ------
- iter
- Sorted data without keys.
- """
+ try:
- for key, values in key_values:
- values = iter(values) if fake else sorted(values, key=lambda x: x[-2])
- yield key, (v[-1] for v in values)
+ # The generators within this method get weird and don't break
+ # properly when wrapped in this try/except
+ # Instead we just kinda probe the `combiner()` to see if it exists
+ # and hope it doesn't do any setup.
+ self.combiner(None, None)
+ has_combiner = True
- def _map(self, stream):
+ except errors.CombinerNotImplemented:
+ has_combiner = False
- """
- Apply `mapper()` across the input stream.
- """
+ if has_combiner:
+ map_partitioned = _mrtools.strip_sort_key(map_partitioned)
+ combined = chain(*(self.combiner(k, v) for k, v in map_partitioned))
+ combine_partitioned = tools.partition(combined)
+ combine_partitioned = six.iteritems(combine_partitioned)
- return chain(*(self.mapper(item) for item in stream))
+ # If we don't have a combiner then we don't need to partition either
+ # because we're already dealing with partitioned output from the
+ # map phase
+ else:
+ combine_partitioned = map_partitioned
- def _reduce(self, kv_stream):
+ # If we don't have a combiner or if we're not sorting, then whatever
+ # we got from the mapper is good enough
+ if has_combiner and self.sort_combine:
+ combine_partitioned = _mrtools.sort_partitioned_values(combine_partitioned)
- """
- Apply the `reducer()` across a stream of `(key, values)` pairs.
- """
+ return dict(combine_partitioned)
- return chain(*(self.reducer(key, values) for key, values in kv_stream))
+ def _reduce_partition(self, stream, reducer, sort):
- def _combine(self, kv_stream):
+ reduced = chain(*(reducer(k, v) for k, v in stream))
+ partitioned = tools.partition(reduced)
+ partitioned = six.iteritems(partitioned)
- """
- Apply the `combiner()` across a stream of `(key, values)` pairs.
- """
+ if sort:
+ partitioned = _mrtools.sort_partitioned_values(partitioned)
- return chain(*(self.combiner(key, values) for key, values in kv_stream))
+ return tuple(partitioned)
- def _final_reducer_sorter(self, kv_stream):
+ def _output_sorter(self, kv_stream):
"""
- Sort data by key before it enters the `final_reducer()`.
+ Sort data by key before it enters `output()`.
Parameters
----------
@@ -306,4 +416,4 @@ class MRBase(object):
tuple
"""
- return ((k, v) for k, v in sorted(kv_stream, key=lambda x: x[0]))
+ return ((k, v) for k, v in tools.sorter(kv_stream, key=lambda x: x[0]))
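`_reduce_jobs` discovers every public method whose name contains `reducer` and looks up matching `sort_*`, `*_jobs`, and `*_chunksize` controls. A hedged sketch of a two-phase task under that convention (the `reduce2_*` names are inferred from the diff, not documented):

    from tinymr import memory

    class TwoPhase(memory.MapReduce):

        # Control attributes for the second reducer, following the naming
        # convention used by `_reduce_jobs()`.
        sort_reduce2 = True
        reduce2_jobs = 1
        reduce2_chunksize = 1

        def mapper(self, item):
            key = 'evens' if item % 2 == 0 else 'odds'
            yield key, item, item       # (partition key, sort key, data)

        def reducer(self, key, values):
            # First pass: total per partition.
            yield key, 1, sum(values)

        def reducer2(self, key, values):
            # Second pass: runs on the re-partitioned output of reducer().
            yield key, 1, max(values)

    dict(TwoPhase()(range(6)))
    # {'evens': (6,), 'odds': (9,)}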
diff --git a/tinymr/errors.py b/tinymr/errors.py
new file mode 100644
index 0000000..ee546cc
--- /dev/null
+++ b/tinymr/errors.py
@@ -0,0 +1,38 @@
+"""
+tinymr specific exceptions.
+"""
+
+
+class MRException(Exception):
+
+ """
+ Base exception for tinymr.
+ """
+
+
+class UnorderableKeys(MRException):
+
+ """
+ Encountered keys during a sort operation that could not be ordered. This
+ could mean that some keys are `str`, some are `int`, some are `None`, etc.
+ """
+
+
+class CombinerNotImplemented(MRException):
+
+ """
+ MapReduce task does not implement a `combiner()`.
+ """
+
+
+class ClosedTask(MRException):
+
+ """
+ Cannot re-use closed MapReduce tasks.
+ """
+
+
+# Instantiated exceptions to make sure we get a clear message
+_UnorderableKeys = UnorderableKeys(UnorderableKeys.__doc__.strip())
+_CombinerNotImplemented = CombinerNotImplemented(CombinerNotImplemented.__doc__.strip())
+_ClosedTask = ClosedTask(ClosedTask.__doc__.strip())
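The pre-instantiated exceptions carry their class docstring as the message, so a bare `raise` still produces a readable error. A small sketch:

    from tinymr import errors

    try:
        raise errors._UnorderableKeys
    except errors.UnorderableKeys as e:
        # Prints the class docstring, e.g. "Encountered keys during a sort
        # operation that could not be ordered. ..."
        print(e)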
diff --git a/tinymr/memory.py b/tinymr/memory.py
index a136a8e..74245f2 100644
--- a/tinymr/memory.py
+++ b/tinymr/memory.py
@@ -3,81 +3,47 @@ In-memory MapReduce - for those weird use cases ...
"""
-from itertools import chain
-import multiprocessing as mp
-
-import six
-
-import tinymr as mr
-import tinymr.base
-import tinymr.tools
-
-
-# class MRParallelNoSort(mr.base.MRBase):
-#
-# @property
-# def jobs(self):
-#
-# """
-# Number of tasks to execute in parallel.
-# """
-#
-# return 1
-#
-# @property
-# def map_size(self):
-#
-# """
-# Number of items from the input data stream to hand to each mapper.
-# """
-#
-# return 1
-#
-# def __call__(self, stream):
-#
-# stream = mr.tools.slicer(stream, self.map_size)
-#
-# combined = chain(
-# *mp.Pool(self.jobs).imap_unordered(self._map_partition_combine, stream))
-#
-# with self._partition_no_sort(combined) as partitioned:
-# partitioned = tuple(six.iteritems(partitioned))
-#
-# reduced = tuple(mp.Pool(self.jobs).imap_unordered(self._imap_reducer, partitioned))
-#
-# with self._partition_no_sort(reduced) as partitioned:
-# return self.final_reducer(six.iteritems(partitioned))
-#
-# def _imap_reducer(self, pair):
-#
-# """
-# Adapter to integrate `reducer()` into the `imap_unordered()` API.
-# """
-#
-# return tuple(self.reducer(*pair))
-
-
-class MRSerial(mr.base.MRBase):
-
- """
- For MapReduce operations that don't benefit from sorting or parallelism.
-
- The `mapper()` and `reducer()` must yield 2 element tuples. The first
- element is used for partitioning and the second is data.
- """
+import functools
+import logging
+
+from tinymr import _mrtools
+from tinymr import base
+from tinymr import tools
+from tinymr.tools import runner
+
+
+logger = logging.getLogger('tinymr')
+logger.setLevel(logging.DEBUG)
+
+
+class MapReduce(base.BaseMapReduce):
def __call__(self, stream):
- with self._partition(self._map(stream)) as partitioned:
+ sliced = tools.slicer(stream, self.map_chunksize)
+
+ # Map, partition, combine, partition
+ with runner(self._map_combine_partition, sliced, self.map_jobs) as mcp:
+ partitioned = tools.merge_partitions(*mcp, sort=self.sort_combine)
+
+ self.init_reduce()
+
+ # Run all partition jobs
+ reducer_input = partitioned
+ for rj in self._reduce_jobs:
+
+ func = functools.partial(
+ self._reduce_partition, reducer=rj.reducer, sort=rj.sort)
- sorted_data = self._sorter(six.iteritems(partitioned), fake=self.sort_map)
+ reducer_input = _mrtools.strip_sort_key(reducer_input)
+ sliced = tools.slicer(reducer_input, rj.chunksize)
- with self._partition(self._reduce(sorted_data)) as partitioned:
+ with runner(func, sliced, rj.jobs) as reduced:
+ partitioned = tools.merge_partitions(*reduced, sort=rj.sort)
- sorted_data = self._sorter(six.iteritems(partitioned), fake=self.sort_reduce)
+ partitioned = _mrtools.strip_sort_key(partitioned)
- if self.sort_final_reduce:
- sorted_data = self._final_reducer_sorter(sorted_data)
+ if self.sort_output:
+ partitioned = self._output_sorter(partitioned)
- self.init_reduce()
- return self.final_reducer(sorted_data)
+ return self.output(partitioned)
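For reference, a minimal word-count run through the rewritten `MapReduce`, loosely following the fixture in `tests/conftest.py` (a sketch, not the canonical example):

    from tinymr import memory

    class WordCount(memory.MapReduce):

        def mapper(self, item):
            for word in item.split():
                yield word, 1       # (partition key, data)

        def reducer(self, key, values):
            yield key, sum(values)

        def output(self, pairs):
            return {k: tuple(v)[0] for k, v in pairs}

    WordCount()(['tinymr is tiny', 'tinymr maps and reduces'])
    # {'tinymr': 2, 'is': 1, 'tiny': 1, 'maps': 1, 'and': 1, 'reduces': 1}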
diff --git a/tinymr/tools.py b/tinymr/tools.py
index 422faa3..eddc9e5 100644
--- a/tinymr/tools.py
+++ b/tinymr/tools.py
@@ -3,12 +3,37 @@ Tools for building MapReduce implementations.
"""
-from collections import OrderedDict
+from collections import defaultdict
import itertools as it
import multiprocessing as mp
+import six
from six.moves import zip
+from tinymr._backport_heapq import merge as heapq_merge
+from tinymr import errors
+
+
+# Make instance methods pickle-able in Python 2
+# Instance methods are not available as a type, so we have to create a tiny
+# class so we can grab an instance method
+# We then register our improved _reduce_method() with copy_reg so pickle knows
+# what to do.
+if six.PY2: # pragma: no cover
+ import copy_reg
+
+ class _I:
+ def m(self):
+ pass
+
+ def _reduce_method(m):
+ if m.im_self is None:
+ return getattr, (m.im_class, m.im_func.func_name)
+ else:
+ return getattr, (m.im_self, m.im_func.func_name)
+
+ copy_reg.pickle(type(_I().m), _reduce_method)
+
def slicer(iterable, chunksize):
@@ -46,53 +71,73 @@ def slicer(iterable, chunksize):
raise StopIteration
-def runner(func, iterable, jobs):
+class runner(object):
"""
The `multiprocessing` module can be difficult to debug and introduces some
overhead that isn't needed when only running one job. Use a generator in
this case instead.
- Parameters
- ----------
- func : callable
- Callable object to map across `iterable`.
- iterable : iter
- Data to process.
- jobs : int
+    Wrapped in a class to make the context manager syntax optional.
"""
- if jobs < 1:
- raise ValueError("jobs must be >= 1, not: {}".format(jobs))
- elif jobs == 1:
- return (func(i) for i in iterable)
- else:
- return mp.Pool(jobs).imap_unordered(func, iterable)
+ def __init__(self, func, iterable, jobs):
+
+ """
+ Parameters
+ ----------
+ func : callable
+ Callable object to map across `iterable`.
+ iterable : iter
+ Data to process.
+ jobs : int
+ Number of `multiprocessing` jobs.
+ """
+
+ self._func = func
+ self._iterable = iterable
+ self._jobs = jobs
+ self._closed = False
+
+ if jobs < 1:
+ raise ValueError("jobs must be >= 1, not: {}".format(jobs))
+ elif jobs == 1:
+ self._pool = None
+ self._proc = (func(i) for i in iterable)
+ else:
+ self._pool = mp.Pool(jobs)
+ self._proc = self._pool.imap_unordered(func, iterable)
+
+ def __enter__(self):
+ return self
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.close()
-class DefaultOrderedDict(OrderedDict):
+ def __repr__(self):
+ return "{cname}(func={func}, iterable={iterable}, jobs={jobs})".format(
+ cname=self.__class__.__name__,
+ func=repr(self._func),
+ iterable=repr(self._iterable),
+ jobs=self._jobs)
- def __init__(self, default_factory, *args, **kwargs):
+ def __iter__(self):
+ return self._proc
- if not callable(default_factory):
- raise TypeError("default_factory must be callable")
+ def __next__(self):
+ return next(self._proc)
- super(DefaultOrderedDict, self).__init__(*args, **kwargs)
- self.default_factory = default_factory
+ next = __next__
- def __missing__(self, key):
- v = self.default_factory()
- super(DefaultOrderedDict, self).__setitem__(key, v)
- return v
+ def close(self):
- def __repr__(self):
- return "{cname}({df}, {dr})".format(
- cname=self.__class__.__name__,
- df=self.default_factory,
- dr=super(DefaultOrderedDict, self).__repr__())
+ """
+ Close the `multiprocessing` pool if we're using it.
+ """
- def copy(self):
- return self.__class__(self.default_factory, self)
+ if self._pool is not None:
+ self._pool.close()
+ self._closed = True
def mapkey(key, values):
@@ -121,4 +166,205 @@ def mapkey(key, values):
iter
"""
- return zip(it.cycle([key]), values)
+ return zip(it.repeat(key), values)
+
+
+def sorter(*args, **kwargs):
+
+ """
+ Wrapper for the builtin `sorted()` that produces a better error when
+ unorderable types are encountered.
+
+ Instead of:
+
+ >>> sorted(['1', 1])
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ TypeError: unorderable types: int() < str()
+
+ we get a `tinymr.errors.UnorderableKeys` exception.
+
+ Python 2 is much more forgiving of unorderable types so the example above
+ does not raise an exception.
+
+ Parameters
+ ----------
+ *args : *args
+ Positional arguments for `sorted()`.
+ **kwargs : **kwargs
+ Keyword arguments for `sorted()`.
+
+ Raises
+ ------
+ tinymr.errors.UnorderableKeys
+
+ Returns
+ -------
+ list
+ Output from `sorted()`.
+ """
+
+ try:
+ return sorted(*args, **kwargs)
+ except TypeError as e:
+ if 'unorderable' in str(e):
+ raise errors._UnorderableKeys
+ else:
+ raise e
+
+
+def partition(key_values):
+
+ """
+ Given a stream of `(key, value)` tuples, group them by key into a dict.
+    Roughly equivalent to the code below, but faster (and, unlike
+    `groupby()`, correct for input that is not pre-grouped by key):
+
+ >>> from itertools import groupby
+ >>> {k: list(v) for k, v in groupby(key_values, key=lambda x: x[0])}
+
+ Example:
+
+ >>> data = [('key1', 1), ('key1', 2), ('key2', None)]
+ >>> partition(data)
+ {
+ 'key1': [('key1', 1), ('key1', 2)],
+ 'key2': [('key2', None)]
+ }
+
+ Parameters
+ ----------
+ key_values : iter
+ Tuples - typically `(key, value)`, although only the first key is
+
+ Returns
+ -------
+ dict
+ """
+
+ out = defaultdict(list)
+ for data in key_values:
+ out[data[0]].append(data[1:])
+
+ return dict(out)
+
+
+class Orderable(object):
+
+ """
+ Make any object orderable.
+ """
+
+ __slots__ = ['_obj', '_lt', '_le', '_gt', '_ge', '_eq']
+
+ def __init__(self, obj, lt=True, le=True, gt=False, ge=False, eq=False):
+
+ """
+ Default parameters make the object sort as less than or equal to.
+
+ Parameters
+ ----------
+ obj : object
+ The object being made orderable.
+ lt : bool, optional
+ Set `__lt__()` evaluation.
+ le : bool, optional
+ Set `__le__()` evaluation.
+ gt : bool, optional
+ Set `__gt__()` evaluation.
+ ge : bool, optional
+ Set `__ge__()` evaluation.
+ eq : bool or None, optional
+ Set `__eq__()` evaluation. Set to `None` to enable a real
+ equality check.
+ """
+
+ self._obj = obj
+ self._lt = lt
+ self._le = le
+ self._gt = gt
+ self._ge = ge
+ self._eq = eq
+
+ @property
+ def obj(self):
+
+ """
+ Handle to the object being made orderable.
+ """
+
+ return self._obj
+
+ def __lt__(self, other):
+ return self._lt
+
+ def __le__(self, other):
+ return self._le
+
+ def __gt__(self, other):
+ return self._gt
+
+ def __ge__(self, other):
+ return self._ge
+
+ def __eq__(self, other):
+ if self._eq is None:
+ return isinstance(other, self.__class__) and other.obj == self.obj
+ else:
+ return self._eq
+
+
+class _OrderableNone(Orderable):
+
+ """
+ Like `None` but orderable.
+ """
+
+ def __init__(self):
+
+ """
+ Use the instantiated `OrderableNone` variable.
+ """
+
+ super(_OrderableNone, self).__init__(None, eq=None)
+
+
+# Instantiate so we can make it more None-like
+OrderableNone = _OrderableNone()
+
+
+def merge_partitions(*partitions, **kwargs):
+
+ """
+ Merge data from multiple `partition()` operations into one dictionary.
+
+ Parameters
+ ----------
+ partitions : *args
+ Dictionaries from `partition()`.
+ sort : bool, optional
+ Sort partitioned data as it is merged. Uses `heapq.merge()` so within
+ each partition's key, all values must be sorted smallest to largest.
+
+ Returns
+ -------
+ dict
+ {key: [values]}
+ """
+
+ sort = kwargs.pop('sort', False)
+ assert not kwargs, "Unrecognized kwargs: {}".format(kwargs)
+
+ partitions = (six.iteritems(ptn) if isinstance(ptn, dict) else ptn for ptn in partitions)
+
+ out = defaultdict(list)
+
+ if not sort:
+ for ptn in partitions:
+ for key, values in ptn:
+ out[key].extend(values)
+ else:
+ for ptn in partitions:
+ for key, values in ptn:
+ out[key] = tuple(heapq_merge(out[key], values, key=lambda x: x[0]))
+
+ return dict(out)
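A short sketch of the partition/merge round trip, the same shape the map phase produces when each input chunk is partitioned separately:

    from tinymr import tools

    a = tools.partition([('key', 1, 'data1'), ('key', 3, 'data3')])
    b = tools.partition([('key', 2, 'data2')])

    # Per-key value lists are merged with the heapq backport when sort=True.
    tools.merge_partitions(a, b, sort=True)
    # {'key': ((1, 'data1'), (2, 'data2'), (3, 'data3'))}

    # OrderableNone sorts below anything, which makes it a handy sort-key
    # sentinel for values with no natural ordering.
    tools.sorter([1, tools.OrderableNone])  # [OrderableNone, 1]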
| Parallel mapping with a chunksize
Reading the input iterable in chunks and shoving each chunk off into `mapper()` subprocesses would be extremely helpful. | geowurster/tinymr | diff --git a/tests/conftest.py b/tests/conftest.py
index c9ec77e..569196d 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -39,7 +39,7 @@ def tiny_text_wc_output():
@pytest.fixture(scope='function')
def mr_wordcount_memory_no_sort():
- class WordCount(mr.memory.MRSerial):
+ class WordCount(mr.memory.MapReduce):
def mapper(self, item):
for word in item.split():
@@ -48,7 +48,7 @@ def mr_wordcount_memory_no_sort():
def reducer(self, key, values):
yield key, sum(values)
- def final_reducer(self, pairs):
+ def output(self, pairs):
return {k: tuple(v)[0] for k, v in pairs}
return WordCount
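As an aside on the issue above: a rough sketch of the requested chunked, parallel mapping, built from the `slicer()` and `runner()` helpers this patch adds (`map_chunk` is a hypothetical stand-in for a real mapper):

    from tinymr import tools

    def map_chunk(chunk):
        # Hypothetical mapper; receives one chunk, possibly in a subprocess.
        return [len(item) for item in chunk]

    data = ['one', 'two', 'three', 'four']

    # Slice the input into chunks of 2 and map them across 2 worker jobs.
    # Results arrive unordered across chunks; run under a __main__ guard on
    # platforms that spawn rather than fork.
    with tools.runner(map_chunk, tools.slicer(data, 2), jobs=2) as proc:
        results = [length for chunk in proc for length in chunk]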
diff --git a/tests/test_base.py b/tests/test_base.py
index 0ce44ee..7468888 100644
--- a/tests/test_base.py
+++ b/tests/test_base.py
@@ -6,14 +6,15 @@ Unittests for tinymr.base
import pytest
from tinymr import base
+from tinymr import errors
def test_not_implemented_methods():
- mr = base.MRBase()
+ mr = base.BaseMapReduce()
with pytest.raises(NotImplementedError):
mr.mapper(None)
- with pytest.raises(NotImplementedError):
+ with pytest.raises(errors.CombinerNotImplemented):
mr.combiner(None, None)
with pytest.raises(NotImplementedError):
mr.reducer(None, None)
@@ -21,59 +22,68 @@ def test_not_implemented_methods():
def test_default_settings():
- mr = base.MRBase()
+ mr = base.BaseMapReduce()
assert mr.sort_map
assert mr.sort_combine
assert mr.sort_reduce
- assert mr.sort_final_reduce
+ assert mr.sort_output
def test_default_methods():
- mr = base.MRBase()
+ mr = base.BaseMapReduce()
expected = [(i, tuple(range(i))) for i in range(1, 10)]
- assert list(mr.final_reducer(expected)) == expected
-
-
-def test_context_manager():
-
- class MR(base.MRBase):
-
- def __init__(self):
- self._closed = False
-
- def close(self):
- self._closed = True
-
- with MR() as mr:
- assert not mr.closed
- assert mr.closed
-
-
-def test_no_context_manager():
-
- class MR(base.MRBase):
-
- def close(self):
- self._closed = True
-
- mr = MR()
- assert not mr.closed
- mr.close()
- assert mr.closed
- assert not MR._closed
- assert not MR().closed
-
-
-def test_cant_reuse_tasks():
-
- class MR(base.MRBase):
- pass
-
- with MR() as mr:
- pass
-
- assert mr.closed
- with pytest.raises(IOError):
- with mr as c:
- pass
+ assert list(mr.output(expected)) == expected
+
+
+# def test_context_manager():
+#
+# class MapReduce(base.BaseMapReduce):
+#
+# def __init__(self):
+# self._closed = False
+#
+# def close(self):
+# self._closed = True
+#
+# with MapReduce() as mr:
+# assert not mr.closed
+# assert mr.closed
+
+
+# def test_no_context_manager():
+#
+# class MapReduce(base.BaseMapReduce):
+#
+# def close(self):
+# self._closed = True
+#
+# mr = MapReduce()
+# assert not mr.closed
+# mr.close()
+# assert mr.closed
+# assert not MapReduce._closed
+# assert not MapReduce().closed
+
+
+# def test_cannot_reuse_tasks():
+#
+# class MapReduce(base.BaseMapReduce):
+# pass
+#
+# with MapReduce() as mr:
+# pass
+#
+# # assert mr.closed
+# with pytest.raises(IOError):
+# with mr as c:
+# pass
+
+
+# def test_runtime_validate():
+#
+# class MapReduce(base.BaseMapReduce):
+# closed = True
+#
+# with pytest.raises(errors.ClosedTask):
+# MapReduce()._runtime_validate()
diff --git a/tests/test_memory.py b/tests/test_memory.py
index 39314a0..59078cd 100644
--- a/tests/test_memory.py
+++ b/tests/test_memory.py
@@ -7,14 +7,14 @@ import tinymr as mr
import tinymr.memory
-def test_MRSerial_no_sort(tiny_text, tiny_text_wc_output, mr_wordcount_memory_no_sort):
-
- with mr_wordcount_memory_no_sort() as wc:
- actual = wc(tiny_text.splitlines())
- assert actual == tiny_text_wc_output
+# def test_MapReduce_no_sort(tiny_text, tiny_text_wc_output, mr_wordcount_memory_no_sort):
+#
+# with mr_wordcount_memory_no_sort() as wc:
+# actual = wc(tiny_text.splitlines())
+# assert actual == tiny_text_wc_output
-def test_MRSerial_init_reduce(tiny_text, tiny_text_wc_output, mr_wordcount_memory_no_sort):
+def test_MapReduce_init_reduce(tiny_text, tiny_text_wc_output, mr_wordcount_memory_no_sort):
class WCInitReduce(mr_wordcount_memory_no_sort):
@@ -23,3 +23,175 @@ def test_MRSerial_init_reduce(tiny_text, tiny_text_wc_output, mr_wordcount_memor
with WCInitReduce() as wc:
actual = wc(tiny_text.splitlines())
+
+
+def test_MapReduce_sort():
+
+ """
+ Make sure enabling sorting actually sorts.
+ """
+
+ text = [
+ 'key2 sort2 data2',
+ 'key2 sort1 data1',
+ 'key3 sort2 data2',
+ 'key3 sort1 data1',
+ 'key1 sort2 data2',
+ 'key1 sort1 data1'
+ ]
+
+ class WordCount(mr.memory.MapReduce):
+
+ # Make sure everything gets sent to a single map + combine
+ chunksize = 10
+
+ def mapper(self, item):
+ yield item.split()
+
+ def combiner(self, key, values):
+
+ d1, d2 = list(values)
+ assert [d1, d2] == ['data1', 'data2']
+
+ yield key, 'sort2', d2
+ yield key, 'sort1', d1
+
+ def reducer(self, key, values):
+
+ d1, d2 = list(values)
+ assert [d1, d2] == ['data1', 'data2']
+
+ yield key, 'sort2', d2
+ yield key, 'sort1', d1
+
+ wc = WordCount()
+
+ for attr in ('jobs', 'map_jobs', 'sort_jobs', 'reduce_jobs'):
+ assert getattr(wc, attr) == 1
+ for attr in ('sort', 'sort_map', 'sort_combine', 'sort_reduce', 'sort_output'):
+ assert getattr(wc, attr)
+ for attr in ('chunksize', 'map_chunksize', 'reduce_chunksize', 'sort_chunksize'):
+ assert getattr(wc, attr) == 10
+
+ assert tuple(wc(text)) == (
+ ('key1', ('data1', 'data2')),
+ ('key2', ('data1', 'data2')),
+ ('key3', ('data1', 'data2')))
+
+
+def test_MapReduce_no_sort():
+
+ """
+ Make sure that disabling sorting actually disables sorting.
+ """
+
+ text = [
+ 'key2 sort2 data2',
+ 'key2 sort1 data1',
+ 'key3 sort2 data2',
+ 'key3 sort1 data1',
+ 'key1 sort2 data2',
+ 'key1 sort1 data1'
+ ]
+
+ class WordCount(mr.memory.MapReduce):
+
+ # Make sure everything gets sent to a single map + combine
+ chunksize = 10
+ sort = False
+
+ def mapper(self, item):
+ yield item.split()
+
+ def combiner(self, key, values):
+
+ d2, d1 = list(values)
+ assert [d2, d1] == ['data2', 'data1']
+
+ yield key, 'sort2', d2
+ yield key, 'sort1', d1
+
+ def _final_reducer_sorter(self, kv_stream):
+ raise Exception("Shouldn't hit this.")
+
+ def reducer(self, key, values):
+
+ d2, d1 = list(values)
+ assert [d2, d1] == ['data2', 'data1']
+
+ yield key, 'sort2', d2
+ yield key, 'sort1', d1
+
+ wc = WordCount()
+ for attr in ('jobs', 'map_jobs', 'sort_jobs', 'reduce_jobs'):
+ assert getattr(wc, attr) == 1
+ for attr in ('sort', 'sort_map', 'sort_combine', 'sort_reduce', 'sort_output'):
+ assert getattr(wc, attr) is False
+ for attr in ('chunksize', 'map_chunksize', 'reduce_chunksize', 'sort_chunksize'):
+ assert getattr(wc, attr) == 10
+
+    # Can't really check key order here, so we're just going to compare dicts.
+ assert dict(wc(text)) == {
+ 'key2': ('data2', 'data1'),
+ 'key3': ('data2', 'data1'),
+ 'key1': ('data2', 'data1')}
+
+
+class _WCParallelSort(mr.memory.MapReduce):
+
+ """
+ Define out here so we can pickle it in multiprocessing
+ """
+
+ # Make sure everything gets sent to a single map + combine
+ chunksize = 10
+ jobs = 4
+
+ def mapper(self, item):
+ yield item.split()
+
+ def combiner(self, key, values):
+
+ d1, d2 = list(values)
+ assert [d1, d2] == ['data1', 'data2']
+
+ yield key, 'sort2', d2
+ yield key, 'sort1', d1
+
+ def reducer(self, key, values):
+
+ d1, d2 = list(values)
+ assert [d1, d2] == ['data1', 'data2']
+
+ yield key, 'sort2', d2
+ yield key, 'sort1', d1
+
+
+def test_MapReduce_parallel_sort():
+
+ """
+ Process in parallel with sorting.
+ """
+
+ text = [
+ 'key2 sort2 data2',
+ 'key2 sort1 data1',
+ 'key3 sort2 data2',
+ 'key3 sort1 data1',
+ 'key1 sort2 data2',
+ 'key1 sort1 data1'
+ ]
+
+ wc = _WCParallelSort()
+
+ for attr in ('jobs', 'map_jobs', 'sort_jobs', 'reduce_jobs'):
+ assert getattr(wc, attr) == 4
+ for attr in ('sort', 'sort_map', 'sort_combine', 'sort_reduce', 'sort_output'):
+ assert getattr(wc, attr)
+ for attr in ('chunksize', 'map_chunksize', 'reduce_chunksize', 'sort_chunksize'):
+ assert getattr(wc, attr) == 10
+
+ assert tuple(wc(text)) == (
+ ('key1', ('data1', 'data2')),
+ ('key2', ('data1', 'data2')),
+ ('key3', ('data1', 'data2')))
diff --git a/tests/test_tools.py b/tests/test_tools.py
index 7d17b63..4b66402 100644
--- a/tests/test_tools.py
+++ b/tests/test_tools.py
@@ -3,13 +3,14 @@ Unittests for tinymr.tools
"""
-import pickle
+from collections import defaultdict
from multiprocessing.pool import IMapUnorderedIterator
from types import GeneratorType
import pytest
import six
+from tinymr import errors
from tinymr import tools
@@ -45,104 +46,179 @@ def _func(v):
return v + 1
-def test_runner():
+def test_runner_1job():
input = list(range(10))
expected = tuple(i + 1 for i in input)
j1 = tools.runner(_func, input, 1)
- assert isinstance(j1, GeneratorType)
+ assert isinstance(j1, tools.runner)
+ assert isinstance(iter(j1), GeneratorType)
assert tuple(j1) == expected
- j2 = tools.runner(_func, input, 2)
- assert isinstance(j2, IMapUnorderedIterator)
- assert tuple(sorted(j2)) == expected
+def test_runner_2job():
+
+ input = list(range(10))
+ expected = tuple(i + 1 for i in input)
+
+ # Also tests context manager
+ with tools.runner(_func, input, 2) as j2:
+ assert not j2._closed
+ assert isinstance(j2, tools.runner)
+ assert isinstance(iter(j2), IMapUnorderedIterator)
+ assert tuple(sorted(j2)) == expected
+ assert j2._closed
+
+
+def test_runner_next():
+
+ input = list(range(10))
+ expected = list(i + 1 for i in input)
+
+ r = tools.runner(_func, input, 1)
+ assert next(r) == _func(input[0])
+
+ # Multiple jobs - have to pretty much run the whole thing and sort to compare
+ results = []
+ with tools.runner(_func, input, 2) as proc:
+ for i in input:
+ results.append(next(proc))
+
+ assert sorted(results) == expected
+
+
+def test_runner_attrs_and_exceptions():
+
+ # repr
+ r = tools.runner(_func, range(10), 2)
+ assert repr(r).startswith(r.__class__.__name__)
+ assert 'jobs=2' in repr(r)
+ assert 'iterable={}'.format(repr(range(10))) in repr(r)
+
+ # Bad values
with pytest.raises(ValueError):
tools.runner(None, None, -1)
-class TestDefaultUnorderedDict:
+def test_mapkey():
+
+ actual = tools.mapkey('key', range(5))
+ expected = [('key', 0), ('key', 1), ('key', 2), ('key', 3), ('key', 4)]
+
+ assert not isinstance(actual, (list, tuple)) # Make sure we get an iterator
+ assert list(actual) == expected
- def setup_method(self, method):
- self.d = tools.DefaultOrderedDict(list)
- def test_repr(self):
- assert self.d.__class__.__name__ in repr(self.d)
+def test_sorter():
- def test_present_key(self):
- self.d['key'] = 'value'
- assert self.d['key'] == 'value'
+ items = [1, 6, 3, 5, 9, 10]
+ assert sorted(items) == tools.sorter(items)
- def test_missing_key(self):
- assert self.d['missing'] == []
- def test_bool_true(self):
- self.d[None] = 'word'
- assert self.d
+# Python 2 isn't very forgiving when it comes to sorting.
+# Make sure a useful error is raised for unorderable types
+if six.PY3:
+ def test_sorter_unorderable():
+ # Unorderable types
+ with pytest.raises(errors.UnorderableKeys):
+ tools.sorter(['2', 1])
- def test_bool_false(self):
- assert not self.d
- def test_not_present(self):
- d = tools
+def test_sorter_exceptions():
- def test_exceptions(self):
- with pytest.raises(TypeError):
- tools.DefaultOrderedDict(None)
+ if not six.PY2:
+ with pytest.raises(errors.UnorderableKeys):
+ tools.sorter(['1', 1])
- def test_copy(self):
- self.d['key1'] = 'v1'
- self.d['key2'] = 'v2'
- c = self.d.copy()
- assert isinstance(c, tools.DefaultOrderedDict)
- assert self.d['key1'] == 'v1'
- assert self.d['key2'] == 'v2'
- self.d['key1'] = None
- assert c['key1'] == 'v1'
- assert len(c) == 2
- assert list(c.keys()) == ['key1', 'key2']
- assert list(c.values()) == ['v1', 'v2']
- assert c.default_factory is list
+ def _k(v):
+ raise TypeError('bad')
- def test_sorted_keys(self):
+ with pytest.raises(TypeError):
+ tools.sorter([2, 1], key=_k)
- """
- Verify that keys maintain their insert position.
- """
- # Set values
- it = list(range(10))
- for i in it:
- self.d[i] = i + 1
+def test_Orderable():
- # Check values
- for k, v in self.d.items():
- assert k + 1 == v
+ on = tools.Orderable(None)
+ for v in (-1, 0, 1):
+ assert on < v
+ assert on <= v
+ assert not on > v
+ assert not on >= v
+ assert on != v
+ assert on.obj is None
- # Check sorting
- assert list(self.d.keys()) == it
- assert sorted(self.d.keys()) == it
+ on = tools.Orderable(None, lt=False, le=False, gt=True, ge=True)
+ for v in (-1, 0, 1):
+ assert on > v
+ assert on >= v
+ assert not on < v
+ assert not on <= v
+ assert on != v
+ assert on.obj is None
- def test_unsorted_keys(self):
+ # Actually perform equality test
+ on = tools.Orderable(None, eq=None)
+ assert on == on
+ assert not on is False
+ assert not on == 67
- """
- Verify that unsorted keys remain the the same unsorted order.
- """
+ # Never equal to a type
+ on = tools.Orderable(None, eq=False)
+ assert not on == on
+ assert not on == on
+ assert not on == 'True'
+ assert not on == 21
- for i in range(5):
- self.d[i] = i + 1
- for i in reversed(range(30, 35)):
- self.d[i] = i + 1
+ # Always equal to any type
+ on = tools.Orderable(None, eq=True)
+ assert on == on
+ assert on == 'False'
+ assert on == 10
- assert list(self.d.keys()) == [0, 1, 2, 3, 4, 34, 33, 32, 31, 30]
- assert len(self.d.keys()) == 10
+def test_OrderableNone():
-def test_mapkey():
+ assert isinstance(tools.OrderableNone, tools._OrderableNone)
+ assert tools.OrderableNone.obj is None
- actual = tools.mapkey('key', range(5))
- expected = [('key', 0), ('key', 1), ('key', 2), ('key', 3), ('key', 4)]
- assert not isinstance(actual, (list, tuple)) # Make sure we get an iterator
- assert list(actual) == expected
+def test_partition():
+
+ data = [
+ (1, 2),
+ (1, 1),
+ (2, 1),
+ (3, 1),
+ ('ptn', 'sort', 'data')]
+
+ expected = {
+ 1: [(2,), (1,)],
+ 2: [(1,)],
+ 3: [(1,)],
+ 'ptn': [('sort', 'data')]}
+
+ ptn = tools.partition(data)
+
+ assert isinstance(ptn, dict)
+ assert not isinstance(ptn, defaultdict)
+ assert ptn == expected
+
+
+def test_merge_partitions():
+
+ dptn = {
+ 1: [(1, 2), (1, 1)],
+ 2: [(2, 1)],
+ 3: [(3, 1)],
+ 'ptn': [('ptn', 'sort', 'data')]}
+
+ expected = {
+ 1: [(1, 2), (1, 1), (1, 2), (1, 1)],
+ 2: [(2, 1), (2, 1)],
+ 3: [(3, 1), (3, 1)],
+ 'ptn': [('ptn', 'sort', 'data'), ('ptn', 'sort', 'data')]}
+
+ actual = tools.merge_partitions(dptn, dptn)
+ assert expected == actual
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 5
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
coveralls==3.3.1
docopt==0.6.2
idna==3.10
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
requests==2.27.1
six==1.17.0
-e git+https://github.com/geowurster/tinymr.git@a387cf72cfc2a18978b77058e1e28f532258ae49#egg=tinymr
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: tinymr
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- charset-normalizer==2.0.12
- coverage==6.2
- coveralls==3.3.1
- docopt==0.6.2
- idna==3.10
- pytest-cov==4.0.0
- requests==2.27.1
- six==1.17.0
- tomli==1.2.3
- urllib3==1.26.20
prefix: /opt/conda/envs/tinymr
| [
"tests/test_base.py::test_not_implemented_methods",
"tests/test_base.py::test_default_settings",
"tests/test_base.py::test_default_methods",
"tests/test_memory.py::test_MapReduce_init_reduce",
"tests/test_memory.py::test_MapReduce_sort",
"tests/test_memory.py::test_MapReduce_no_sort",
"tests/test_memory.py::test_MapReduce_parallel_sort",
"tests/test_tools.py::test_slicer_even",
"tests/test_tools.py::test_slicer_odd",
"tests/test_tools.py::test_runner_1job",
"tests/test_tools.py::test_runner_2job",
"tests/test_tools.py::test_runner_next",
"tests/test_tools.py::test_runner_attrs_and_exceptions",
"tests/test_tools.py::test_mapkey",
"tests/test_tools.py::test_sorter",
"tests/test_tools.py::test_Orderable",
"tests/test_tools.py::test_OrderableNone",
"tests/test_tools.py::test_partition",
"tests/test_tools.py::test_merge_partitions"
]
| [
"tests/test_tools.py::test_sorter_unorderable",
"tests/test_tools.py::test_sorter_exceptions"
]
| []
| []
| New BSD License | 370 | [
"README.rst",
"tinymr/errors.py",
"tinymr/base.py",
"tinymr/tools.py",
"tinymr/memory.py",
"tinymr/__init__.py",
".coveragerc",
"tinymr/_mrtools.py",
"tinymr/_backport_heapq.py"
]
| [
"README.rst",
"tinymr/errors.py",
"tinymr/base.py",
"tinymr/tools.py",
"tinymr/memory.py",
"tinymr/__init__.py",
".coveragerc",
"tinymr/_mrtools.py",
"tinymr/_backport_heapq.py"
]
|
|
m-lab__bigsanity-17 | 7bb4df0ddb204026af693e91f33e13a039f66a3d | 2016-01-11 18:53:45 | 7bb4df0ddb204026af693e91f33e13a039f66a3d | diff --git a/bigsanity/query_construct.py b/bigsanity/query_construct.py
index 58e1b2d..cead3cd 100644
--- a/bigsanity/query_construct.py
+++ b/bigsanity/query_construct.py
@@ -102,6 +102,26 @@ def _project_has_intermediate_snapshots(project):
project == constants.PROJECT_ID_NPAD)
+def _project_to_time_field(project):
+ """Returns the appropriate test log time field for the project type.
+
+ Returns the appropriate test log time field for a test given its project
+ type. All web100 M-Lab tests use 'web100_log_entry.log_time', while Paris
+ Traceroute uses 'log_time'.
+
+ Args:
+ project: The numeric ID of the project (e.g. NDT = 0).
+
+ Returns:
+ The string name of the log time field for the given project in the
+ BigQuery dataset schema.
+ """
+ if project == constants.PROJECT_ID_PARIS_TRACEROUTE:
+ return 'log_time'
+ else:
+ return 'web100_log_entry.log_time'
+
+
class TableEquivalenceQueryGenerator(object):
"""Generates queries to test the equivalence of two M-Lab tables."""
@@ -153,17 +173,18 @@ class TableEquivalenceQueryGenerator(object):
return _construct_test_id_subquery(tables, conditions)
def _format_time_range_condition(self):
+ time_field = _project_to_time_field(self._project)
start_time = _to_unix_timestamp(self._time_range_start)
start_time_human = _to_human_readable_date(self._time_range_start)
end_time = _to_unix_timestamp(self._time_range_end)
end_time_human = _to_human_readable_date(self._time_range_end)
- return (
- '((web100_log_entry.log_time >= {start_time}) AND -- {start_time_human}'
- '\n (web100_log_entry.log_time < {end_time})) -- {end_time_human}'
- ).format(start_time=start_time,
- start_time_human=start_time_human,
- end_time=end_time,
- end_time_human=end_time_human)
+ return ('(({time_field} >= {start_time}) AND -- {start_time_human}'
+ '\n ({time_field} < {end_time})) -- {end_time_human}'
+ ).format(time_field=time_field,
+ start_time=start_time,
+ start_time_human=start_time_human,
+ end_time=end_time,
+ end_time_human=end_time_human)
class TableEquivalenceQueryGeneratorFactory(object):
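The net effect of the helper above on the generated time-range condition, sketched with the timestamps used in the updated tests (editor's illustration, not code from the repository):

```python
# Condition emitted for web100-based projects (NDT, NPAD, SideStream):
web100_condition = ("((web100_log_entry.log_time >= 1419724800) AND"
                    " (web100_log_entry.log_time < 1420243200))")

# Condition emitted for Paris Traceroute after this change:
paris_traceroute_condition = ("((log_time >= 1419724800) AND"
                              " (log_time < 1420243200))")
```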
| paris_traceroute uses log_time
paris_traceroute data uses `log_time` rather than `web100_log_entry.log_time` since Paris Traceroute is not web100 based. We need to adjust the query construction to address this. | m-lab/bigsanity | diff --git a/tests/test_query_construct.py b/tests/test_query_construct.py
index 22441d3..a64f523 100644
--- a/tests/test_query_construct.py
+++ b/tests/test_query_construct.py
@@ -284,8 +284,8 @@ class TableEquivalenceQueryGeneratorTest(unittest.TestCase):
plx.google:m_lab.2015_01.all
WHERE
project = 3
- AND ((web100_log_entry.log_time >= 1419724800) AND -- 2014-12-28
- (web100_log_entry.log_time < 1420243200)) -- 2015-01-03
+ AND ((log_time >= 1419724800) AND -- 2014-12-28
+ (log_time < 1420243200)) -- 2015-01-03
) AS per_month
FULL OUTER JOIN EACH
(
@@ -294,8 +294,8 @@ class TableEquivalenceQueryGeneratorTest(unittest.TestCase):
FROM
plx.google:m_lab.paris_traceroute.all
WHERE
- ((web100_log_entry.log_time >= 1419724800) AND -- 2014-12-28
- (web100_log_entry.log_time < 1420243200)) -- 2015-01-03
+ ((log_time >= 1419724800) AND -- 2014-12-28
+ (log_time < 1420243200)) -- 2015-01-03
) AS per_project
ON
per_month.test_id=per_project.test_id
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -r requirements.txt && pip install -r test-requirements.txt",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt",
"test-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup==1.2.2
iniconfig==2.1.0
mock==5.2.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
six==1.17.0
tomli==2.2.1
| name: bigsanity
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- mock==5.2.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- six==1.17.0
- tomli==2.2.1
prefix: /opt/conda/envs/bigsanity
| [
"tests/test_query_construct.py::TableEquivalenceQueryGeneratorTest::test_correct_query_generation_for_paris_traceroute"
]
| []
| [
"tests/test_query_construct.py::TableEquivalenceQueryGeneratorTest::test_correct_query_generation_for_ndt_across_months",
"tests/test_query_construct.py::TableEquivalenceQueryGeneratorTest::test_correct_query_generation_for_ndt_full_month",
"tests/test_query_construct.py::TableEquivalenceQueryGeneratorTest::test_correct_query_generation_for_ndt_within_single_month",
"tests/test_query_construct.py::TableEquivalenceQueryGeneratorTest::test_correct_query_generation_for_npad",
"tests/test_query_construct.py::TableEquivalenceQueryGeneratorTest::test_correct_query_generation_for_sidestream"
]
| []
| Apache License 2.0 | 373 | [
"bigsanity/query_construct.py"
]
| [
"bigsanity/query_construct.py"
]
|
|
scrapy__scrapy-1671 | f01fd076420f0e58a1a165be31ec505eeb561ef4 | 2016-01-12 09:51:05 | 6aa85aee2a274393307ac3e777180fcbdbdc9848 | diff --git a/scrapy/utils/iterators.py b/scrapy/utils/iterators.py
index ce59c9719..b0688791e 100644
--- a/scrapy/utils/iterators.py
+++ b/scrapy/utils/iterators.py
@@ -17,7 +17,7 @@ logger = logging.getLogger(__name__)
def xmliter(obj, nodename):
"""Return a iterator of Selector's over all nodes of a XML document,
- given tha name of the node to iterate. Useful for parsing XML feeds.
+ given the name of the node to iterate. Useful for parsing XML feeds.
obj can be:
- a Response object
@@ -35,7 +35,7 @@ def xmliter(obj, nodename):
header_end = re_rsearch(HEADER_END_RE, text)
header_end = text[header_end[1]:].strip() if header_end else ''
- r = re.compile(r"<{0}[\s>].*?</{0}>".format(nodename_patt), re.DOTALL)
+ r = re.compile(r'<%(np)s[\s>].*?</%(np)s>' % {'np': nodename_patt}, re.DOTALL)
for match in r.finditer(text):
nodetext = header_start + match.group() + header_end
yield Selector(text=nodetext, type='xml').xpath('//' + nodename)[0]
@@ -48,7 +48,7 @@ def xmliter_lxml(obj, nodename, namespace=None, prefix='x'):
iterable = etree.iterparse(reader, tag=tag, encoding=reader.encoding)
selxpath = '//' + ('%s:%s' % (prefix, nodename) if namespace else nodename)
for _, node in iterable:
- nodetext = etree.tostring(node)
+ nodetext = etree.tostring(node, encoding='unicode')
node.clear()
xs = Selector(text=nodetext, type='xml')
if namespace:
@@ -128,8 +128,11 @@ def csviter(obj, delimiter=None, headers=None, encoding=None, quotechar=None):
def _body_or_str(obj, unicode=True):
- assert isinstance(obj, (Response, six.string_types, bytes)), \
- "obj must be Response or basestring, not %s" % type(obj).__name__
+ expected_types = (Response, six.text_type, six.binary_type)
+ assert isinstance(obj, expected_types), \
+ "obj must be %s, not %s" % (
+ " or ".join(t.__name__ for t in expected_types),
+ type(obj).__name__)
if isinstance(obj, Response):
if not unicode:
return obj.body
| XMLFeedSpider encoding issue
Scrapy version: 1.04
My spider (from the XMLFeedSpider example in the docs) doesn't seem to read the defined itertag, since it contains the ISO-8859-1 letter "Þ":
http://www.w3schools.com/charsets/ref_html_8859.asp
I've tried my code on a different URL with English XML tags and it works fine.
```python
# -*- coding: utf-8 -*-
import scrapy
from scrapy.spiders import XMLFeedSpider
from althingi_scraper.items import PartyItem
class PartySpider(XMLFeedSpider):
    name = 'party'
    allowed_domains = ['http://www.althingi.is']
    #session = '145'
    start_urls = [
        #'http://www.althingi.is/altext/xml/thingflokkar/?lthing=%s' % session,
        'http://www.althingi.is/altext/xml/thingflokkar/'
    ]
    itertag = 'þingflokkar'

    def parse_node(self, response, node):
        item = PartyItem()
        item['party_id'] = node.xpath('@id').extract()
        item['name'] = node.xpath('heiti').extract()
        #item['short_abbr'] = node.xpath('stuttskammstöfun').extract()
        #item['long_abbr'] = node.xpath('löngskammstöfun').extract()
        return item
```
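The fix in the patch above hinges on `etree.tostring(node, encoding='unicode')` returning text rather than bytes. A minimal sketch of that distinction (editor's illustration, not code from the report or the patch):

```python
from lxml import etree

# A node whose tag name contains a non-ASCII character, as in the feed above.
node = etree.fromstring(u'<þing>x</þing>'.encode('utf-8'))

raw = etree.tostring(node, encoding='utf-8')     # bytes
text = etree.tostring(node, encoding='unicode')  # text (str on Python 3)
assert isinstance(raw, bytes)
assert not isinstance(text, bytes)
```

Feeding the text form to `Selector(text=...)` avoids a second decode step that could disagree with the feed's declared encoding.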
Any thoughts? | scrapy/scrapy | diff --git a/tests/test_utils_iterators.py b/tests/test_utils_iterators.py
index d42ed2c91..b2e3889a4 100644
--- a/tests/test_utils_iterators.py
+++ b/tests/test_utils_iterators.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
import os
import six
from twisted.trial import unittest
@@ -46,6 +47,60 @@ class XmliterTestCase(unittest.TestCase):
for e in self.xmliter(response, 'matchme...')]
self.assertEqual(nodenames, [['matchme...']])
+ def test_xmliter_unicode(self):
+ # example taken from https://github.com/scrapy/scrapy/issues/1665
+ body = u"""<?xml version="1.0" encoding="UTF-8"?>
+ <þingflokkar>
+ <þingflokkur id="26">
+ <heiti />
+ <skammstafanir>
+ <stuttskammstöfun>-</stuttskammstöfun>
+ <löngskammstöfun />
+ </skammstafanir>
+ <tímabil>
+ <fyrstaþing>80</fyrstaþing>
+ </tímabil>
+ </þingflokkur>
+ <þingflokkur id="21">
+ <heiti>Alþýðubandalag</heiti>
+ <skammstafanir>
+ <stuttskammstöfun>Ab</stuttskammstöfun>
+ <löngskammstöfun>Alþb.</löngskammstöfun>
+ </skammstafanir>
+ <tímabil>
+ <fyrstaþing>76</fyrstaþing>
+ <síðastaþing>123</síðastaþing>
+ </tímabil>
+ </þingflokkur>
+ <þingflokkur id="27">
+ <heiti>Alþýðuflokkur</heiti>
+ <skammstafanir>
+ <stuttskammstöfun>A</stuttskammstöfun>
+ <löngskammstöfun>Alþfl.</löngskammstöfun>
+ </skammstafanir>
+ <tímabil>
+ <fyrstaþing>27</fyrstaþing>
+ <síðastaþing>120</síðastaþing>
+ </tímabil>
+ </þingflokkur>
+ </þingflokkar>"""
+
+ for r in (
+ # with bytes
+ XmlResponse(url="http://example.com", body=body.encode('utf-8')),
+ # Unicode body needs encoding information
+ XmlResponse(url="http://example.com", body=body, encoding='utf-8')):
+
+ attrs = []
+ for x in self.xmliter(r, u'þingflokkur'):
+ attrs.append((x.xpath('@id').extract(),
+ x.xpath(u'./skammstafanir/stuttskammstöfun/text()').extract(),
+ x.xpath(u'./tímabil/fyrstaþing/text()').extract()))
+
+ self.assertEqual(attrs,
+ [([u'26'], [u'-'], [u'80']),
+ ([u'21'], [u'Ab'], [u'76']),
+ ([u'27'], [u'A'], [u'27'])])
def test_xmliter_text(self):
body = u"""<?xml version="1.0" encoding="UTF-8"?><products><product>one</product><product>two</product></products>"""
@@ -96,6 +151,10 @@ class XmliterTestCase(unittest.TestCase):
self.assertRaises(StopIteration, next, iter)
+ def test_xmliter_objtype_exception(self):
+ i = self.xmliter(42, 'product')
+ self.assertRaises(AssertionError, next, i)
+
def test_xmliter_encoding(self):
body = b'<?xml version="1.0" encoding="ISO-8859-9"?>\n<xml>\n <item>Some Turkish Characters \xd6\xc7\xde\xdd\xd0\xdc \xfc\xf0\xfd\xfe\xe7\xf6</item>\n</xml>\n\n'
response = XmlResponse('http://www.example.com', body=body)
@@ -169,6 +228,9 @@ class LxmlXmliterTestCase(XmliterTestCase):
node = next(my_iter)
self.assertEqual(node.xpath('f:name/text()').extract(), ['African Coffee Table'])
+ def test_xmliter_objtype_exception(self):
+ i = self.xmliter(42, 'product')
+ self.assertRaises(TypeError, next, i)
class UtilsCsvTestCase(unittest.TestCase):
sample_feeds_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'sample_data', 'feeds')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 1
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
Automat==24.8.1
cffi==1.17.1
constantly==23.10.4
coverage==7.8.0
cryptography==44.0.2
cssselect==1.3.0
exceptiongroup==1.2.2
hyperlink==21.0.0
idna==3.10
incremental==24.7.2
iniconfig==2.1.0
jmespath==1.0.1
lxml==5.3.1
packaging==24.2
parsel==1.10.0
pluggy==1.5.0
pyasn1==0.6.1
pyasn1_modules==0.4.2
pycparser==2.22
PyDispatcher==2.0.7
pyOpenSSL==25.0.0
pytest==8.3.5
pytest-cov==6.0.0
queuelib==1.7.0
-e git+https://github.com/scrapy/scrapy.git@f01fd076420f0e58a1a165be31ec505eeb561ef4#egg=Scrapy
service-identity==24.2.0
six==1.17.0
tomli==2.2.1
Twisted==24.11.0
typing_extensions==4.13.0
w3lib==2.3.1
zope.interface==7.2
| name: scrapy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- automat==24.8.1
- cffi==1.17.1
- constantly==23.10.4
- coverage==7.8.0
- cryptography==44.0.2
- cssselect==1.3.0
- exceptiongroup==1.2.2
- hyperlink==21.0.0
- idna==3.10
- incremental==24.7.2
- iniconfig==2.1.0
- jmespath==1.0.1
- lxml==5.3.1
- packaging==24.2
- parsel==1.10.0
- pluggy==1.5.0
- pyasn1==0.6.1
- pyasn1-modules==0.4.2
- pycparser==2.22
- pydispatcher==2.0.7
- pyopenssl==25.0.0
- pytest==8.3.5
- pytest-cov==6.0.0
- queuelib==1.7.0
- service-identity==24.2.0
- six==1.17.0
- tomli==2.2.1
- twisted==24.11.0
- typing-extensions==4.13.0
- w3lib==2.3.1
- zope-interface==7.2
prefix: /opt/conda/envs/scrapy
| [
"tests/test_utils_iterators.py::LxmlXmliterTestCase::test_xmliter_unicode"
]
| [
"tests/test_utils_iterators.py::UtilsCsvTestCase::test_csviter_defaults",
"tests/test_utils_iterators.py::UtilsCsvTestCase::test_csviter_delimiter",
"tests/test_utils_iterators.py::UtilsCsvTestCase::test_csviter_delimiter_binary_response_assume_utf8_encoding",
"tests/test_utils_iterators.py::UtilsCsvTestCase::test_csviter_encoding",
"tests/test_utils_iterators.py::UtilsCsvTestCase::test_csviter_exception",
"tests/test_utils_iterators.py::UtilsCsvTestCase::test_csviter_falserow",
"tests/test_utils_iterators.py::UtilsCsvTestCase::test_csviter_headers",
"tests/test_utils_iterators.py::UtilsCsvTestCase::test_csviter_quotechar",
"tests/test_utils_iterators.py::UtilsCsvTestCase::test_csviter_wrong_quotechar"
]
| [
"tests/test_utils_iterators.py::XmliterTestCase::test_xmliter",
"tests/test_utils_iterators.py::XmliterTestCase::test_xmliter_encoding",
"tests/test_utils_iterators.py::XmliterTestCase::test_xmliter_exception",
"tests/test_utils_iterators.py::XmliterTestCase::test_xmliter_namespaces",
"tests/test_utils_iterators.py::XmliterTestCase::test_xmliter_objtype_exception",
"tests/test_utils_iterators.py::XmliterTestCase::test_xmliter_text",
"tests/test_utils_iterators.py::XmliterTestCase::test_xmliter_unicode",
"tests/test_utils_iterators.py::XmliterTestCase::test_xmliter_unusual_node",
"tests/test_utils_iterators.py::LxmlXmliterTestCase::test_xmliter",
"tests/test_utils_iterators.py::LxmlXmliterTestCase::test_xmliter_encoding",
"tests/test_utils_iterators.py::LxmlXmliterTestCase::test_xmliter_exception",
"tests/test_utils_iterators.py::LxmlXmliterTestCase::test_xmliter_iterate_namespace",
"tests/test_utils_iterators.py::LxmlXmliterTestCase::test_xmliter_namespaces",
"tests/test_utils_iterators.py::LxmlXmliterTestCase::test_xmliter_namespaces_prefix",
"tests/test_utils_iterators.py::LxmlXmliterTestCase::test_xmliter_objtype_exception",
"tests/test_utils_iterators.py::LxmlXmliterTestCase::test_xmliter_text",
"tests/test_utils_iterators.py::LxmlXmliterTestCase::test_xmliter_unusual_node",
"tests/test_utils_iterators.py::TestHelper::test_body_or_str"
]
| []
| BSD 3-Clause "New" or "Revised" License | 374 | [
"scrapy/utils/iterators.py"
]
| [
"scrapy/utils/iterators.py"
]
|
|
ARMmbed__yotta-656 | 16cc2baeba653dc77e3ce32c20018b32ab108bf4 | 2016-01-12 15:29:25 | 16cc2baeba653dc77e3ce32c20018b32ab108bf4 | diff --git a/yotta/lib/pack.py b/yotta/lib/pack.py
index ade5aae..f7ef9d0 100644
--- a/yotta/lib/pack.py
+++ b/yotta/lib/pack.py
@@ -257,6 +257,12 @@ class Pack(object):
else:
return None
+ def getKeywords(self):
+ if self.description:
+ return self.description.get('keywords', [])
+ else:
+ return []
+
def _parseIgnoreFile(self, f):
r = []
for l in f:
diff --git a/yotta/options/__init__.py b/yotta/options/__init__.py
index 1482b43..aa66e80 100644
--- a/yotta/options/__init__.py
+++ b/yotta/options/__init__.py
@@ -11,6 +11,7 @@ from . import noninteractive
from . import registry
from . import target
from . import config
+from . import force
# this modifies argparse when it's imported:
from . import parser
diff --git a/yotta/options/force.py b/yotta/options/force.py
new file mode 100644
index 0000000..33c8e6e
--- /dev/null
+++ b/yotta/options/force.py
@@ -0,0 +1,12 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0
+# See LICENSE file for details.
+
+def addTo(parser):
+ parser.add_argument('-f', '--force', action='store_true', dest="force",
+ help='Force the operation to (try to) continue even in situations which '+
+ 'would be an error.'
+ )
+
+
diff --git a/yotta/publish.py b/yotta/publish.py
index 28d999b..721d932 100644
--- a/yotta/publish.py
+++ b/yotta/publish.py
@@ -8,11 +8,50 @@ import logging
# validate, , validate things, internal
from .lib import validate
+# options, , shared options, internal
+import yotta.options as options
def addOptions(parser):
- # no options
+ options.force.addTo(parser)
+
+# python 2 + 3 compatibility
+try:
+ global input
+ input = raw_input
+except NameError:
pass
+def prePublishCheck(p, force=False, interactive=True):
+ need_ok = False
+ if p.description.get('bin', None) is not None:
+ logging.warning(
+ 'This is an executable application, not a re-usable library module. Other modules will not be able to depend on it!'
+ )
+ need_ok = True
+
+ official_keywords = [x for x in p.getKeywords() if x.endswith('-official')]
+ if len(official_keywords):
+ need_ok = True
+ for k in official_keywords:
+ prefix = k[:-len('-official')]
+ logging.warning(
+ ('You\'re publishing with the %s tag. Is this really an '+
+ 'officially supported %s module? If not, please remove the %s '+
+ 'tag from your %s file. If you are unsure, please ask on the '+
+ 'issue tracker.') % (
+ k, prefix, k, p.description_filename
+ )
+ )
+
+ if need_ok and not interactive:
+ logging.error('--noninteractive prevents user confirmation. Please re-run with --force')
+ return 1
+
+ if need_ok and not force:
+ input("If you still want to publish, press [enter] to continue.")
+
+ return 0
+
def execCommand(args, following_args):
p = validate.currentDirectoryModuleOrTarget()
if not p:
@@ -22,17 +61,9 @@ def execCommand(args, following_args):
logging.error('The working directory is not clean. Commit before publishing!')
return 1
- if p.description.get('bin', None) is not None:
- logging.warning(
- 'This is an executable application, not a re-usable library module. Other modules will not be able to depend on it!'
- )
- # python 2 + 3 compatibility
- try:
- global input
- input = raw_input
- except NameError:
- pass
- raw_input("If you still want to publish it, press [enter] to continue.")
+ errcode = prePublishCheck(p, args.force, args.interactive)
+ if errcode and not args.force:
+ return errcode
error = p.publish(args.registry)
if error:
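The check above keys off any keyword ending in `-official`; a sketch of the matching logic in isolation (editor's illustration, extracted and simplified from the patch):

```python
keywords = ["mbed-official", "gcc"]
official = [k for k in keywords if k.endswith('-official')]
for k in official:
    prefix = k[:-len('-official')]   # 'mbed'
    print("You're publishing with the %s tag. Is this really an "
          "officially supported %s module?" % (k, prefix))
```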
| Discourage accidental use of mbed-official keyword
There have been several cases where target descriptions have been published with descriptions and keywords indicating that they are officially supported, when in fact they aren't.
yotta should warn when these keywords are used, to make sure that their use is intentional. | ARMmbed/yotta | diff --git a/yotta/test/cli/test_publish.py b/yotta/test/cli/test_publish.py
index de93d77..11b713e 100644
--- a/yotta/test/cli/test_publish.py
+++ b/yotta/test/cli/test_publish.py
@@ -13,7 +13,7 @@ import tempfile
# internal modules:
from yotta.lib.fsutils import rmRf
from . import cli
-
+from . import util
Test_Target = "x86-osx-native,*"
@@ -54,6 +54,26 @@ Public_Module_JSON = '''{
}'''
+Test_Publish = {
+'module.json':'''{
+ "name": "test-publish",
+ "version": "0.0.0",
+ "description": "Test yotta publish",
+ "author": "James Crosby <[email protected]>",
+ "license": "Apache-2.0",
+ "keywords": ["mbed-official"],
+ "dependencies":{
+ }
+}''',
+'readme.md':'''##This is a test module used in yotta's test suite.''',
+'source/foo.c':'''#include "stdio.h"
+int foo(){
+ printf("foo!\\n");
+ return 7;
+}'''
+}
+
+
class TestCLIPublish(unittest.TestCase):
@classmethod
def setUpClass(cls):
@@ -89,6 +109,15 @@ class TestCLIPublish(unittest.TestCase):
else:
del os.environ['YOTTA_USER_SETTINGS_DIR']
+ def test_warnOfficialKeywords(self):
+ path = util.writeTestFiles(Test_Publish, True)
+
+ stdout, stderr, statuscode = cli.run(['-t', 'x86-linux-native', '--noninteractive', 'publish'], cwd=path)
+ self.assertNotEqual(statuscode, 0)
+ self.assertIn('Is this really an officially supported mbed module', stdout + stderr)
+
+ util.rmRf(path)
+
if __name__ == '__main__':
unittest.main()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 3
} | 0.12 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc cmake ninja-build"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | argcomplete==1.0.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
colorama==0.3.9
cryptography==44.0.2
Deprecated==1.2.18
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
future==1.0.0
hgapi==1.7.4
idna==3.10
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
intelhex==2.3.0
intervaltree==3.1.0
Jinja2==2.11.3
jsonpointer==2.0
jsonschema==2.6.0
MarkupSafe==3.0.2
mbed_test_wrapper==0.0.3
packaging @ file:///croot/packaging_1734472117206/work
pathlib==1.0.1
pluggy @ file:///croot/pluggy_1733169602837/work
project-generator-definitions==0.2.46
project_generator==0.8.17
pycparser==2.22
pyelftools==0.23
PyGithub==1.54.1
PyJWT==1.7.1
pyocd==0.15.0
pytest @ file:///croot/pytest_1738938843180/work
pyusb==1.3.1
PyYAML==3.13
requests==2.32.3
semantic-version==2.10.0
six==1.17.0
sortedcontainers==2.4.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
urllib3==2.3.0
valinor==0.0.15
websocket-client==1.8.0
wrapt==1.17.2
xmltodict==0.14.2
-e git+https://github.com/ARMmbed/yotta.git@16cc2baeba653dc77e3ce32c20018b32ab108bf4#egg=yotta
| name: yotta
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- argcomplete==1.0.0
- argparse==1.4.0
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- colorama==0.3.9
- cryptography==44.0.2
- deprecated==1.2.18
- future==1.0.0
- hgapi==1.7.4
- idna==3.10
- intelhex==2.3.0
- intervaltree==3.1.0
- jinja2==2.11.3
- jsonpointer==2.0
- jsonschema==2.6.0
- markupsafe==3.0.2
- mbed-test-wrapper==0.0.3
- pathlib==1.0.1
- project-generator==0.8.17
- project-generator-definitions==0.2.46
- pycparser==2.22
- pyelftools==0.23
- pygithub==1.54.1
- pyjwt==1.7.1
- pyocd==0.15.0
- pyusb==1.3.1
- pyyaml==3.13
- requests==2.32.3
- semantic-version==2.10.0
- six==1.17.0
- sortedcontainers==2.4.0
- urllib3==2.3.0
- valinor==0.0.15
- websocket-client==1.8.0
- wrapt==1.17.2
- xmltodict==0.14.2
prefix: /opt/conda/envs/yotta
| [
"yotta/test/cli/test_publish.py::TestCLIPublish::test_warnOfficialKeywords"
]
| [
"yotta/test/cli/test_publish.py::TestCLIPublish::test_publishNotAuthed"
]
| [
"yotta/test/cli/test_publish.py::TestCLIPublish::test_publishPrivate"
]
| []
| Apache License 2.0 | 375 | [
"yotta/options/force.py",
"yotta/publish.py",
"yotta/options/__init__.py",
"yotta/lib/pack.py"
]
| [
"yotta/options/force.py",
"yotta/publish.py",
"yotta/options/__init__.py",
"yotta/lib/pack.py"
]
|
|
Pylons__webob-230 | 9400c049d05c8ba350daf119aa16ded24ece31f6 | 2016-01-12 17:50:01 | 9400c049d05c8ba350daf119aa16ded24ece31f6 | diff --git a/webob/exc.py b/webob/exc.py
index 57a81b5..044c00a 100644
--- a/webob/exc.py
+++ b/webob/exc.py
@@ -165,10 +165,12 @@ References:
"""
+import json
from string import Template
import re
import sys
+from webob.acceptparse import Accept
from webob.compat import (
class_types,
text_,
@@ -250,7 +252,7 @@ ${body}''')
empty_body = False
def __init__(self, detail=None, headers=None, comment=None,
- body_template=None, **kw):
+ body_template=None, json_formatter=None, **kw):
Response.__init__(self,
status='%s %s' % (self.code, self.title),
**kw)
@@ -265,6 +267,8 @@ ${body}''')
if self.empty_body:
del self.content_type
del self.content_length
+ if json_formatter is not None:
+ self.json_formatter = json_formatter
def __str__(self):
return self.detail or self.explanation
@@ -300,14 +304,31 @@ ${body}''')
return self.html_template_obj.substitute(status=self.status,
body=body)
+ def json_formatter(self, body, status, title, environ):
+ return {'message': body,
+ 'code': status,
+ 'title': title}
+
+ def json_body(self, environ):
+ body = self._make_body(environ, no_escape)
+ jsonbody = self.json_formatter(body=body, status=self.status,
+ title=self.title, environ=environ)
+ return json.dumps(jsonbody)
+
def generate_response(self, environ, start_response):
if self.content_length is not None:
del self.content_length
headerlist = list(self.headerlist)
- accept = environ.get('HTTP_ACCEPT', '')
- if accept and 'html' in accept or '*/*' in accept:
+ accept_value = environ.get('HTTP_ACCEPT', '')
+ accept = Accept(accept_value)
+ match = accept.best_match(['application/json', 'text/html',
+ 'text/plain'], default_match='text/plain')
+ if match == 'text/html':
content_type = 'text/html'
body = self.html_body(environ)
+ elif match == 'application/json':
+ content_type = 'application/json'
+ body = self.json_body(environ)
else:
content_type = 'text/plain'
body = self.plain_body(environ)
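A quick sketch of how the new hook is meant to be used (editor's illustration based on the patch above; it assumes a WebOb build with this change, and `my_formatter` is a hypothetical name):

```python
import webob.exc

def my_formatter(body, status, title, environ):
    # Hypothetical custom shape for the JSON error document.
    return {'error': {'status': status, 'title': title, 'detail': body}}

exc = webob.exc.HTTPBadRequest(detail='missing "q" parameter',
                               json_formatter=my_formatter)
# When the request carries 'Accept: application/json',
# generate_response() now serializes my_formatter's dict as the body.
```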
| Allow for JSON Exception Bodies
I'm currently working on several projects that provide a JSON API using WebOb. Currently, however, whenever we use a `webob.exc` exception to return an error to the user (e.g., `webob.exc.HTTPBadRequest`), the body of that message is always in a content type other than what they're expecting (HTML if they don't specify an Accept header, plain text otherwise). There doesn't seem to be a pleasant, convenient, or simple way to make it use JSON beyond something like the following (untested) code:
```py
import string
import webob.exc
class WSGIHTTPException(webob.exc.WSGIHTTPException):
    body_template_obj = string.Template(
        '{"code": ${status}, "message": "${body}", "title": "${title}"}')
    plain_template_obj = string.Template('{"error": ${body}}')

class HTTPBadRequest(webob.exc.HTTPBadRequest, WSGIHTTPException):
    pass

class HTTPUnauthorized(webob.exc.HTTPUnauthorized, WSGIHTTPException):
    pass

# etc.
```
This is particularly problematic because we have to redefine all of the exceptions we want to use to doubly inherit from our new sub-classed `WSGIHTTPException` and the original. It also doesn't handle the fact that we have to basically copy and paste [generate_response][] into our subclass so that we set the appropriate content-type header.
Is it too much to ask to either:
A) Add support for JSON response bodies in `WSGIHTTPException`s, or
B) Make `WSGIHTTPException` slightly more modular so we can only override parts we need?
[generate_response]: https://github.com/Pylons/webob/blob/7f98f694e7c1a569f53fb4085d084430ee8b2cc2/webob/exc.py#L302..L323
Thanks in advance, | Pylons/webob | diff --git a/tests/test_exc.py b/tests/test_exc.py
index dcb1fed..8204783 100644
--- a/tests/test_exc.py
+++ b/tests/test_exc.py
@@ -1,3 +1,5 @@
+import json
+
from webob.request import Request
from webob.dec import wsgify
from webob import exc as webob_exc
@@ -119,6 +121,57 @@ def test_WSGIHTTPException_html_body_w_comment():
'</html>'
)
+def test_WSGIHTTPException_json_body_no_comment():
+ class ValidationError(webob_exc.WSGIHTTPException):
+ code = '422'
+ title = 'Validation Failed'
+ explanation = 'Validation of an attribute failed.'
+
+ exc = ValidationError(detail='Attribute "xyz" is invalid.')
+ body = exc.json_body({})
+ eq_(json.loads(body), {
+ "code": "422 Validation Failed",
+ "title": "Validation Failed",
+ "message": "Validation of an attribute failed.<br /><br />\nAttribute"
+ ' "xyz" is invalid.\n\n',
+ })
+
+def test_WSGIHTTPException_respects_application_json():
+ class ValidationError(webob_exc.WSGIHTTPException):
+ code = '422'
+ title = 'Validation Failed'
+ explanation = 'Validation of an attribute failed.'
+ def start_response(status, headers, exc_info=None):
+ pass
+
+ exc = ValidationError(detail='Attribute "xyz" is invalid.')
+ resp = exc.generate_response(environ={
+ 'wsgi.url_scheme': 'HTTP',
+ 'SERVER_NAME': 'localhost',
+ 'SERVER_PORT': '80',
+ 'REQUEST_METHOD': 'PUT',
+ 'HTTP_ACCEPT': 'application/json',
+ }, start_response=start_response)
+ eq_(json.loads(resp[0].decode('utf-8')), {
+ "code": "422 Validation Failed",
+ "title": "Validation Failed",
+ "message": "Validation of an attribute failed.<br /><br />\nAttribute"
+ ' "xyz" is invalid.\n\n',
+ })
+
+def test_WSGIHTTPException_allows_custom_json_formatter():
+ def json_formatter(body, status, title, environ):
+ return {"fake": True}
+ class ValidationError(webob_exc.WSGIHTTPException):
+ code = '422'
+ title = 'Validation Failed'
+ explanation = 'Validation of an attribute failed.'
+
+ exc = ValidationError(detail='Attribute "xyz" is invalid.',
+ json_formatter=json_formatter)
+ body = exc.json_body({})
+ eq_(json.loads(body), {"fake": True})
+
def test_WSGIHTTPException_generate_response():
def start_response(status, headers, exc_info=None):
pass
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 1
} | 1.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[testing]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
nose==1.3.7
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
-e git+https://github.com/Pylons/webob.git@9400c049d05c8ba350daf119aa16ded24ece31f6#egg=WebOb
| name: webob
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- nose==1.3.7
prefix: /opt/conda/envs/webob
| [
"tests/test_exc.py::test_WSGIHTTPException_json_body_no_comment",
"tests/test_exc.py::test_WSGIHTTPException_respects_application_json",
"tests/test_exc.py::test_WSGIHTTPException_allows_custom_json_formatter"
]
| []
| [
"tests/test_exc.py::test_noescape_null",
"tests/test_exc.py::test_noescape_not_basestring",
"tests/test_exc.py::test_noescape_unicode",
"tests/test_exc.py::test_strip_tags_empty",
"tests/test_exc.py::test_strip_tags_newline_to_space",
"tests/test_exc.py::test_strip_tags_zaps_carriage_return",
"tests/test_exc.py::test_strip_tags_br_to_newline",
"tests/test_exc.py::test_strip_tags_zaps_comments",
"tests/test_exc.py::test_strip_tags_zaps_tags",
"tests/test_exc.py::test_HTTPException",
"tests/test_exc.py::test_exception_with_unicode_data",
"tests/test_exc.py::test_WSGIHTTPException_headers",
"tests/test_exc.py::test_WSGIHTTPException_w_body_template",
"tests/test_exc.py::test_WSGIHTTPException_w_empty_body",
"tests/test_exc.py::test_WSGIHTTPException___str__",
"tests/test_exc.py::test_WSGIHTTPException_plain_body_no_comment",
"tests/test_exc.py::test_WSGIHTTPException_html_body_w_comment",
"tests/test_exc.py::test_WSGIHTTPException_generate_response",
"tests/test_exc.py::test_WSGIHTTPException_call_w_body",
"tests/test_exc.py::test_WSGIHTTPException_wsgi_response",
"tests/test_exc.py::test_WSGIHTTPException_exception_newstyle",
"tests/test_exc.py::test_WSGIHTTPException_exception_no_newstyle",
"tests/test_exc.py::test_HTTPOk_head_of_proxied_head",
"tests/test_exc.py::test_HTTPMove",
"tests/test_exc.py::test_HTTPMove_location_not_none",
"tests/test_exc.py::test_HTTPMove_location_newlines",
"tests/test_exc.py::test_HTTPMove_add_slash_and_location",
"tests/test_exc.py::test_HTTPMove_call_add_slash",
"tests/test_exc.py::test_HTTPMove_call_query_string",
"tests/test_exc.py::test_HTTPExceptionMiddleware_ok",
"tests/test_exc.py::test_HTTPExceptionMiddleware_exception",
"tests/test_exc.py::test_HTTPExceptionMiddleware_exception_exc_info_none",
"tests/test_exc.py::test_status_map_is_deterministic"
]
| []
| null | 376 | [
"webob/exc.py"
]
| [
"webob/exc.py"
]
|
|
jupyter-incubator__sparkmagic-115 | e29c15bf11fbada311796c36e1f5c9d7091b2667 | 2016-01-13 04:06:38 | e29c15bf11fbada311796c36e1f5c9d7091b2667 | diff --git a/remotespark/datawidgets/autovizwidget.py b/remotespark/datawidgets/autovizwidget.py
index f0b6405..f166c2e 100644
--- a/remotespark/datawidgets/autovizwidget.py
+++ b/remotespark/datawidgets/autovizwidget.py
@@ -3,20 +3,14 @@
import pandas as pd
from ipywidgets import FlexBox
-from IPython.display import display
+from remotespark.utils.ipythondisplay import IpythonDisplay
from .encoding import Encoding
from .encodingwidget import EncodingWidget
from .ipywidgetfactory import IpyWidgetFactory
from .plotlygraphs.graphrenderer import GraphRenderer
-class IpythonDisplay(object):
- @staticmethod
- def display_to_ipython(to_display):
- display(to_display)
-
-
class AutoVizWidget(FlexBox):
def __init__(self, df, encoding, renderer=None, ipywidget_factory=None, encoding_widget=None, ipython_display=None,
nested_widget_mode=False, testing=False, **kwargs):
@@ -74,14 +68,14 @@ class AutoVizWidget(FlexBox):
# self.controls.children
self.to_display.clear_output()
- self.renderer.render(self.df, self.encoding, self.to_display)
-
self.encoding_widget.show_x(self.renderer.display_x(self.encoding.chart_type))
self.encoding_widget.show_y(self.renderer.display_y(self.encoding.chart_type))
self.encoding_widget.show_controls(self.renderer.display_controls(self.encoding.chart_type))
self.encoding_widget.show_logarithmic_x_axis(self.renderer.display_logarithmic_x_axis(self.encoding.chart_type))
self.encoding_widget.show_logarithmic_y_axis(self.renderer.display_logarithmic_y_axis(self.encoding.chart_type))
+ self.renderer.render(self.df, self.encoding, self.to_display)
+
def _create_controls_widget(self):
# Create types of viz hbox
viz_types_widget = self._create_viz_types_buttons()
diff --git a/remotespark/datawidgets/plotlygraphs/datagraph.py b/remotespark/datawidgets/plotlygraphs/datagraph.py
index 130791b..a38a89a 100644
--- a/remotespark/datawidgets/plotlygraphs/datagraph.py
+++ b/remotespark/datawidgets/plotlygraphs/datagraph.py
@@ -1,17 +1,39 @@
# Copyright (c) 2015 [email protected]
# Distributed under the terms of the Modified BSD License.
-from plotly.tools import FigureFactory as FigFac
-from plotly.offline import iplot
+import pandas as pd
+from remotespark.utils.ipythondisplay import IpythonDisplay
-class DataGraph(object):
- @staticmethod
- def render(df, encoding, output):
- table = FigFac.create_table(df)
+class DataGraph(object):
+ """This does not use the table version of plotly because it freezes up the browser for >60 rows. Instead, we use
+ pandas df HTML representation."""
+ def __init__(self, display=None):
+ if display is None:
+ self.display = IpythonDisplay()
+ else:
+ self.display = display
+
+ def render(self, df, encoding, output):
with output:
- iplot(table, show_link=False)
+ max_rows = pd.get_option("display.max_rows")
+ max_cols = pd.get_option("display.max_columns")
+ show_dimensions = pd.get_option("display.show_dimensions")
+
+ # This will hide the index column for pandas df.
+ self.display.html_to_ipython("""
+<style>
+ table.dataframe.hideme thead th:first-child {
+ display: none;
+ }
+ table.dataframe.hideme tbody th {
+ display: none;
+ }
+</style>
+""")
+ self.display.html_to_ipython(df.to_html(max_rows=max_rows, max_cols=max_cols,
+ show_dimensions=show_dimensions, notebook=True, classes="hideme"))
@staticmethod
def display_logarithmic_x_axis():
diff --git a/remotespark/datawidgets/plotlygraphs/piegraph.py b/remotespark/datawidgets/plotlygraphs/piegraph.py
index d8caf93..ecd881c 100644
--- a/remotespark/datawidgets/plotlygraphs/piegraph.py
+++ b/remotespark/datawidgets/plotlygraphs/piegraph.py
@@ -4,16 +4,29 @@
from plotly.graph_objs import Pie, Figure, Data
from plotly.offline import iplot
+import remotespark.utils.configuration as conf
+
class PieGraph(object):
@staticmethod
def render(df, encoding, output):
- series = df.groupby([encoding.x]).size()
- data = [Pie(values=series.values.tolist(), labels=series.index.tolist())]
+ values, labels = PieGraph._get_x_values_labels(df, encoding)
+ max_slices_pie_graph = conf.max_slices_pie_graph()
with output:
- fig = Figure(data=Data(data))
- iplot(fig, show_link=False)
+ # There's performance issues with a large amount of slices.
+ # 1500 rows crash the browser.
+ # 500 rows take ~15 s.
+ # 100 rows is almost automatic.
+ if len(values) > max_slices_pie_graph:
+ print("There's {} values in your pie graph, which would render the graph unresponsive.\n"
+ "Please select another X with at most {} possible values."
+ .format(len(values), max_slices_pie_graph))
+ else:
+ data = [Pie(values=values, labels=labels)]
+
+ fig = Figure(data=Data(data))
+ iplot(fig, show_link=False)
@staticmethod
def display_logarithmic_x_axis():
@@ -32,5 +45,6 @@ class PieGraph(object):
return False
@staticmethod
- def _get_x_values(df, encoding):
- return df[encoding.x].tolist()
+ def _get_x_values_labels(df, encoding):
+ series = df.groupby([encoding.x]).size()
+ return series.values.tolist(), series.index.tolist()
diff --git a/remotespark/default_config.json b/remotespark/default_config.json
index 9f9a410..5a9d09b 100644
--- a/remotespark/default_config.json
+++ b/remotespark/default_config.json
@@ -55,5 +55,6 @@
},
"use_auto_viz": true,
- "max_results_sql": 800
+ "max_results_sql": 2500,
+ "max_slices_pie_graph": 100
}
diff --git a/remotespark/utils/configuration.py b/remotespark/utils/configuration.py
index 0d1a311..e9abc9e 100644
--- a/remotespark/utils/configuration.py
+++ b/remotespark/utils/configuration.py
@@ -176,9 +176,14 @@ def ignore_ssl_errors():
@_override
def use_auto_viz():
- return False
+ return True
@_override
def max_results_sql():
- return 800
+ return 2500
+
+
+@_override
+def max_slices_pie_graph():
+ return 100
diff --git a/remotespark/utils/ipythondisplay.py b/remotespark/utils/ipythondisplay.py
new file mode 100644
index 0000000..0615aea
--- /dev/null
+++ b/remotespark/utils/ipythondisplay.py
@@ -0,0 +1,11 @@
+from IPython.core.display import display, HTML
+
+
+class IpythonDisplay(object):
+ @staticmethod
+ def display_to_ipython(to_display):
+ display(to_display)
+
+ @staticmethod
+ def html_to_ipython(to_display):
+ IpythonDisplay.display_to_ipython(HTML(to_display))
| Incorrect visualizations on some sample data
Ran the following code:

```python
hvac = sc.textFile('wasb:///HdiSamples/HdiSamples/SensorSampleData/hvac/HVAC.csv')

from pyspark.sql import Row
Doc = Row("TargetTemp", "ActualTemp", "System", "SystemAge", "BuildingID")

def parseDocument(line):
    values = [str(x) for x in line.split(',')]
    return Doc(values[2], values[3], values[4], values[5], values[6])

documents = hvac.filter(lambda s: "Date" not in s).map(parseDocument)
df = sqlContext.createDataFrame(documents)
df.registerTempTable('data')
```

and then

```
%select * from data limit 100
```
The visualizations, at least for the pie graphs, are wrong. Screenshot:
[screenshot omitted: pie graph rendered from the query above]
Clearly there is no building where the desired target temperature is 1.
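The consolidated helper in the patch above computes one slice per distinct X value, sized by its count; a minimal pandas sketch of that aggregation (editor's illustration, data made up):

```python
import pandas as pd

df = pd.DataFrame({'TargetTemp': ['70', '70', '65']})
series = df.groupby(['TargetTemp']).size()
values, labels = series.values.tolist(), series.index.tolist()
# values == [1, 2], labels == ['65', '70']
```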
| jupyter-incubator/sparkmagic | diff --git a/tests/datawidgetstests/test_plotlygraphs.py b/tests/datawidgetstests/test_plotlygraphs.py
index 6506b65..72b21b0 100644
--- a/tests/datawidgetstests/test_plotlygraphs.py
+++ b/tests/datawidgetstests/test_plotlygraphs.py
@@ -1,4 +1,5 @@
import pandas as pd
+from mock import MagicMock
from remotespark.datawidgets.plotlygraphs.graphbase import GraphBase
from remotespark.datawidgets.plotlygraphs.piegraph import PieGraph
@@ -65,6 +66,39 @@ def test_pie_graph_display_methods():
assert not PieGraph.display_logarithmic_y_axis()
+def test_pie_graph_get_values_labels():
+ records = [{u'buildingID': 0, u'date': u'6/1/13', u'temp_diff': 12},
+ {u'buildingID': 1, u'date': u'6/1/13', u'temp_diff': 0},
+ {u'buildingID': 2, u'date': u'6/1/14', u'temp_diff': 11},
+ {u'buildingID': 0, u'date': u'6/1/15', u'temp_diff': 5},
+ {u'buildingID': 1, u'date': u'6/1/16', u'temp_diff': 19},
+ {u'buildingID': 2, u'date': u'6/1/17', u'temp_diff': 32}]
+ df = pd.DataFrame(records)
+ encoding = Encoding(chart_type=Encoding.chart_type_line, x="date", y="temp_diff", y_aggregation=Encoding.y_agg_sum)
+
+ values, labels = PieGraph._get_x_values_labels(df, encoding)
+
+ assert values == [2, 1, 1, 1, 1]
+ assert labels == ["6/1/13", "6/1/14", "6/1/15", "6/1/16", "6/1/17"]
+
+
+def test_data_graph_render():
+ records = [{u'buildingID': 0, u'date': u'6/1/13', u'temp_diff': 12},
+ {u'buildingID': 1, u'date': u'6/1/13', u'temp_diff': 0},
+ {u'buildingID': 2, u'date': u'6/1/14', u'temp_diff': 11},
+ {u'buildingID': 0, u'date': u'6/1/15', u'temp_diff': 5},
+ {u'buildingID': 1, u'date': u'6/1/16', u'temp_diff': 19},
+ {u'buildingID': 2, u'date': u'6/1/17', u'temp_diff': 32}]
+ df = pd.DataFrame(records)
+ encoding = Encoding(chart_type=Encoding.chart_type_line, x="date", y="temp_diff", y_aggregation=Encoding.y_agg_sum)
+ display = MagicMock()
+
+ data = DataGraph(display)
+ data.render(df, encoding, MagicMock())
+
+ assert display.html_to_ipython.call_count == 2
+
+
def test_data_graph_display_methods():
assert not DataGraph.display_x()
assert not DataGraph.display_y()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 5
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | anyio==4.9.0
argon2-cffi==23.1.0
argon2-cffi-bindings==21.2.0
arrow==1.3.0
async-lru==2.0.5
attrs==25.3.0
babel==2.17.0
beautifulsoup4==4.13.3
bleach==6.2.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
comm==0.2.2
decorator==5.2.1
defusedxml==0.7.1
exceptiongroup==1.2.2
fastjsonschema==2.21.1
fqdn==1.5.1
h11==0.14.0
httpcore==1.0.7
httpx==0.28.1
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
ipykernel==4.1.1
ipython==4.0.0
ipython-genutils==0.2.0
ipywidgets==7.8.5
isoduration==20.11.0
Jinja2==3.1.6
json5==0.10.0
jsonpointer==3.0.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
jupyter-events==0.12.0
jupyter-lsp==2.2.5
jupyter_client==8.6.3
jupyter_core==5.7.2
jupyter_server==2.15.0
jupyter_server_terminals==0.5.3
jupyterlab==4.1.5
jupyterlab_pygments==0.3.0
jupyterlab_server==2.27.3
jupyterlab_widgets==1.1.11
MarkupSafe==3.0.2
mistune==3.1.3
mock==5.2.0
narwhals==1.32.0
nbclient==0.10.2
nbconvert==7.16.6
nbformat==5.10.4
nose==1.3.7
notebook==7.1.3
notebook_shim==0.2.4
numpy==2.0.2
overrides==7.7.0
packaging==24.2
pandas==2.2.3
pandocfilters==1.5.1
pexpect==4.9.0
pickleshare==0.7.5
platformdirs==4.3.7
plotly==6.0.1
pluggy==1.5.0
prometheus_client==0.21.1
ptyprocess==0.7.0
pycparser==2.22
Pygments==2.19.1
pytest==8.3.5
python-dateutil==2.9.0.post0
python-json-logger==3.3.0
pytz==2025.2
PyYAML==6.0.2
pyzmq==26.3.0
referencing==0.36.2
-e git+https://github.com/jupyter-incubator/sparkmagic.git@e29c15bf11fbada311796c36e1f5c9d7091b2667#egg=remotespark
requests==2.32.3
rfc3339-validator==0.1.4
rfc3986-validator==0.1.1
rpds-py==0.24.0
Send2Trash==1.8.3
simplegeneric==0.8.1
six==1.17.0
sniffio==1.3.1
soupsieve==2.6
terminado==0.18.1
tinycss2==1.4.0
tomli==2.2.1
tornado==6.4.2
traitlets==5.14.3
types-python-dateutil==2.9.0.20241206
typing_extensions==4.13.0
tzdata==2025.2
uri-template==1.3.0
urllib3==2.3.0
webcolors==24.11.1
webencodings==0.5.1
websocket-client==1.8.0
widgetsnbextension==3.6.10
zipp==3.21.0
| name: sparkmagic
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- anyio==4.9.0
- argon2-cffi==23.1.0
- argon2-cffi-bindings==21.2.0
- arrow==1.3.0
- async-lru==2.0.5
- attrs==25.3.0
- babel==2.17.0
- beautifulsoup4==4.13.3
- bleach==6.2.0
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- comm==0.2.2
- decorator==5.2.1
- defusedxml==0.7.1
- exceptiongroup==1.2.2
- fastjsonschema==2.21.1
- fqdn==1.5.1
- h11==0.14.0
- httpcore==1.0.7
- httpx==0.28.1
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- ipykernel==4.1.1
- ipython==4.0.0
- ipython-genutils==0.2.0
- ipywidgets==7.8.5
- isoduration==20.11.0
- jinja2==3.1.6
- json5==0.10.0
- jsonpointer==3.0.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- jupyter-client==8.6.3
- jupyter-core==5.7.2
- jupyter-events==0.12.0
- jupyter-lsp==2.2.5
- jupyter-server==2.15.0
- jupyter-server-terminals==0.5.3
- jupyterlab==4.1.5
- jupyterlab-pygments==0.3.0
- jupyterlab-server==2.27.3
- jupyterlab-widgets==1.1.11
- markupsafe==3.0.2
- mistune==3.1.3
- mock==5.2.0
- narwhals==1.32.0
- nbclient==0.10.2
- nbconvert==7.16.6
- nbformat==5.10.4
- nose==1.3.7
- notebook==7.1.3
- notebook-shim==0.2.4
- numpy==2.0.2
- overrides==7.7.0
- packaging==24.2
- pandas==2.2.3
- pandocfilters==1.5.1
- pexpect==4.9.0
- pickleshare==0.7.5
- platformdirs==4.3.7
- plotly==6.0.1
- pluggy==1.5.0
- prometheus-client==0.21.1
- ptyprocess==0.7.0
- pycparser==2.22
- pygments==2.19.1
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- python-json-logger==3.3.0
- pytz==2025.2
- pyyaml==6.0.2
- pyzmq==26.3.0
- referencing==0.36.2
- requests==2.32.3
- rfc3339-validator==0.1.4
- rfc3986-validator==0.1.1
- rpds-py==0.24.0
- send2trash==1.8.3
- simplegeneric==0.8.1
- six==1.17.0
- sniffio==1.3.1
- soupsieve==2.6
- terminado==0.18.1
- tinycss2==1.4.0
- tomli==2.2.1
- tornado==6.4.2
- traitlets==5.14.3
- types-python-dateutil==2.9.0.20241206
- typing-extensions==4.13.0
- tzdata==2025.2
- uri-template==1.3.0
- urllib3==2.3.0
- webcolors==24.11.1
- webencodings==0.5.1
- websocket-client==1.8.0
- widgetsnbextension==3.6.10
- zipp==3.21.0
prefix: /opt/conda/envs/sparkmagic
| [
"tests/datawidgetstests/test_plotlygraphs.py::test_pie_graph_get_values_labels",
"tests/datawidgetstests/test_plotlygraphs.py::test_data_graph_render"
]
| [
"tests/datawidgetstests/test_plotlygraphs.py::test_graphbase_get_x_y_values"
]
| [
"tests/datawidgetstests/test_plotlygraphs.py::test_graph_base_display_methods",
"tests/datawidgetstests/test_plotlygraphs.py::test_pie_graph_display_methods",
"tests/datawidgetstests/test_plotlygraphs.py::test_data_graph_display_methods"
]
| []
| Modified BSD License | 378 | [
"remotespark/utils/configuration.py",
"remotespark/datawidgets/autovizwidget.py",
"remotespark/default_config.json",
"remotespark/datawidgets/plotlygraphs/datagraph.py",
"remotespark/datawidgets/plotlygraphs/piegraph.py",
"remotespark/utils/ipythondisplay.py"
]
| [
"remotespark/utils/configuration.py",
"remotespark/datawidgets/autovizwidget.py",
"remotespark/default_config.json",
"remotespark/datawidgets/plotlygraphs/datagraph.py",
"remotespark/datawidgets/plotlygraphs/piegraph.py",
"remotespark/utils/ipythondisplay.py"
]
|
|
jupyter-incubator__sparkmagic-121 | cb87d63ab3268c1c5bd63c9fc1f1e971d5a1fe31 | 2016-01-14 01:30:02 | cb87d63ab3268c1c5bd63c9fc1f1e971d5a1fe31 | diff --git a/remotespark/livyclientlib/livyclient.py b/remotespark/livyclientlib/livyclient.py
index 5559856..eab5eed 100644
--- a/remotespark/livyclientlib/livyclient.py
+++ b/remotespark/livyclientlib/livyclient.py
@@ -10,16 +10,15 @@ class LivyClient(object):
def __init__(self, session):
self.logger = Log("LivyClient")
-
- execute_timeout_seconds = conf.execute_timeout_seconds()
-
self._session = session
- self._session.create_sql_context()
- self._execute_timeout_seconds = execute_timeout_seconds
+ self._execute_timeout_seconds = conf.execute_timeout_seconds()
def __str__(self):
return str(self._session)
+ def start(self):
+ self._session.create_sql_context()
+
def serialize(self):
return self._session.get_state().to_dict()
diff --git a/remotespark/livyclientlib/sparkcontroller.py b/remotespark/livyclientlib/sparkcontroller.py
index 1254f48..fcc6ae6 100644
--- a/remotespark/livyclientlib/sparkcontroller.py
+++ b/remotespark/livyclientlib/sparkcontroller.py
@@ -72,6 +72,7 @@ class SparkController(object):
session.start()
livy_client = self.client_factory.build_client(session)
self.client_manager.add_client(name, livy_client)
+ livy_client.start()
def get_client_keys(self):
return self.client_manager.get_sessions_list()
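The patch above moves the expensive `create_sql_context()` call out of `LivyClient.__init__` into a separate `start()` method that `SparkController` invokes only after the client has been registered. A minimal sketch of this deferred-start pattern, using hypothetical stand-in classes rather than the real sparkmagic ones:

```python
# Minimal sketch of the deferred-start pattern introduced by the patch.
# Session and Client are hypothetical stand-ins, not the real classes.

class Session:
    def __init__(self):
        self.sql_context_created = False

    def create_sql_context(self):
        # In sparkmagic this is a slow network round-trip to Livy.
        self.sql_context_created = True


class Client:
    def __init__(self, session):
        # Construction is now cheap: no network calls happen here.
        self._session = session

    def start(self):
        # The expensive work runs only after the client is registered,
        # so a shutdown can still find and delete the session.
        self._session.create_sql_context()


session = Session()
client = Client(session)   # safe: nothing has been started yet
# ... register the client with the client manager here ...
client.start()             # only now is the SQL context created
assert session.sql_context_created
```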
 | Shutting down the PySpark kernel doesn't guarantee that the session has been deleted
Repro Steps:
- Open a PySpark kernel
- Run any operation, for example 1+1
- Shut the kernel down before getting the answer (while the SQL and Hive contexts are being created)
- SSH into the cluster; you'll find the session still exists
For now, you have to delete the session manually after SSHing into the cluster. | jupyter-incubator/sparkmagic | diff --git a/tests/test_livyclient.py b/tests/test_livyclient.py
index 57870fa..1a2931a 100644
--- a/tests/test_livyclient.py
+++ b/tests/test_livyclient.py
@@ -6,16 +6,23 @@ from remotespark.utils.utils import get_connection_string
from remotespark.utils.constants import Constants
-def test_create_sql_context_automatically():
+def test_doesnt_create_sql_context_automatically():
mock_spark_session = MagicMock()
LivyClient(mock_spark_session)
+ assert not mock_spark_session.create_sql_context.called
+
+def test_start_creates_sql_context():
+ mock_spark_session = MagicMock()
+ client = LivyClient(mock_spark_session)
+ client.start()
mock_spark_session.create_sql_context.assert_called_with()
def test_execute_code():
mock_spark_session = MagicMock()
client = LivyClient(mock_spark_session)
+ client.start()
command = "command"
client.execute(command)
@@ -28,6 +35,7 @@ def test_execute_code():
def test_execute_sql():
mock_spark_session = MagicMock()
client = LivyClient(mock_spark_session)
+ client.start()
command = "command"
client.execute_sql(command)
@@ -40,6 +48,7 @@ def test_execute_sql():
def test_execute_hive():
mock_spark_session = MagicMock()
client = LivyClient(mock_spark_session)
+ client.start()
command = "command"
client.execute_hive(command)
@@ -63,6 +72,7 @@ def test_serialize():
session.get_state.return_value = LivySessionState(session_id, connection_string, kind, sql_created)
client = LivyClient(session)
+ client.start()
serialized = client.serialize()
@@ -77,6 +87,7 @@ def test_serialize():
def test_close_session():
mock_spark_session = MagicMock()
client = LivyClient(mock_spark_session)
+ client.start()
client.close_session()
@@ -89,6 +100,7 @@ def test_kind():
language_mock = PropertyMock(return_value=kind)
type(mock_spark_session).kind = language_mock
client = LivyClient(mock_spark_session)
+ client.start()
l = client.kind
@@ -101,6 +113,7 @@ def test_session_id():
session_id_mock = PropertyMock(return_value=session_id)
type(mock_spark_session).id = session_id_mock
client = LivyClient(mock_spark_session)
+ client.start()
i = client.session_id
diff --git a/tests/test_sparkcontroller.py b/tests/test_sparkcontroller.py
index e522404..4aa9a12 100644
--- a/tests/test_sparkcontroller.py
+++ b/tests/test_sparkcontroller.py
@@ -41,7 +41,7 @@ def test_add_session():
name = "name"
properties = {"kind": "spark"}
connection_string = "url=http://location:port;username=name;password=word"
- client = "client"
+ client = MagicMock()
session = MagicMock()
client_factory.create_session = MagicMock(return_value=session)
client_factory.build_client = MagicMock(return_value=client)
@@ -51,6 +51,7 @@ def test_add_session():
client_factory.create_session.assert_called_once_with(connection_string, properties, "-1", False)
client_factory.build_client.assert_called_once_with(session)
client_manager.add_client.assert_called_once_with(name, client)
+ client.start.assert_called_once_with()
session.start.assert_called_once_with()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | anyio==4.9.0
argon2-cffi==23.1.0
argon2-cffi-bindings==21.2.0
arrow==1.3.0
async-lru==2.0.5
attrs==25.3.0
babel==2.17.0
beautifulsoup4==4.13.3
bleach==6.2.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
comm==0.2.2
decorator==5.2.1
defusedxml==0.7.1
exceptiongroup==1.2.2
fastjsonschema==2.21.1
fqdn==1.5.1
h11==0.14.0
httpcore==1.0.7
httpx==0.28.1
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
ipykernel==4.1.1
ipython==4.0.0
ipython-genutils==0.2.0
ipywidgets==7.8.5
isoduration==20.11.0
Jinja2==3.1.6
json5==0.10.0
jsonpointer==3.0.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
jupyter-events==0.12.0
jupyter-lsp==2.2.5
jupyter_client==8.6.3
jupyter_core==5.7.2
jupyter_server==2.15.0
jupyter_server_terminals==0.5.3
jupyterlab==4.1.5
jupyterlab_pygments==0.3.0
jupyterlab_server==2.27.3
jupyterlab_widgets==1.1.11
MarkupSafe==3.0.2
mistune==3.1.3
mock==5.2.0
narwhals==1.32.0
nbclient==0.10.2
nbconvert==7.16.6
nbformat==5.10.4
nose==1.3.7
notebook==7.1.3
notebook_shim==0.2.4
numpy==2.0.2
overrides==7.7.0
packaging==24.2
pandas==2.2.3
pandocfilters==1.5.1
pexpect==4.9.0
pickleshare==0.7.5
platformdirs==4.3.7
plotly==6.0.1
pluggy==1.5.0
prometheus_client==0.21.1
ptyprocess==0.7.0
pycparser==2.22
Pygments==2.19.1
pytest==8.3.5
python-dateutil==2.9.0.post0
python-json-logger==3.3.0
pytz==2025.2
PyYAML==6.0.2
pyzmq==26.3.0
referencing==0.36.2
-e git+https://github.com/jupyter-incubator/sparkmagic.git@cb87d63ab3268c1c5bd63c9fc1f1e971d5a1fe31#egg=remotespark
requests==2.32.3
rfc3339-validator==0.1.4
rfc3986-validator==0.1.1
rpds-py==0.24.0
Send2Trash==1.8.3
simplegeneric==0.8.1
six==1.17.0
sniffio==1.3.1
soupsieve==2.6
terminado==0.18.1
tinycss2==1.4.0
tomli==2.2.1
tornado==6.4.2
traitlets==5.14.3
types-python-dateutil==2.9.0.20241206
typing_extensions==4.13.0
tzdata==2025.2
uri-template==1.3.0
urllib3==2.3.0
webcolors==24.11.1
webencodings==0.5.1
websocket-client==1.8.0
widgetsnbextension==3.6.10
zipp==3.21.0
| name: sparkmagic
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- anyio==4.9.0
- argon2-cffi==23.1.0
- argon2-cffi-bindings==21.2.0
- arrow==1.3.0
- async-lru==2.0.5
- attrs==25.3.0
- babel==2.17.0
- beautifulsoup4==4.13.3
- bleach==6.2.0
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- comm==0.2.2
- decorator==5.2.1
- defusedxml==0.7.1
- exceptiongroup==1.2.2
- fastjsonschema==2.21.1
- fqdn==1.5.1
- h11==0.14.0
- httpcore==1.0.7
- httpx==0.28.1
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- ipykernel==4.1.1
- ipython==4.0.0
- ipython-genutils==0.2.0
- ipywidgets==7.8.5
- isoduration==20.11.0
- jinja2==3.1.6
- json5==0.10.0
- jsonpointer==3.0.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- jupyter-client==8.6.3
- jupyter-core==5.7.2
- jupyter-events==0.12.0
- jupyter-lsp==2.2.5
- jupyter-server==2.15.0
- jupyter-server-terminals==0.5.3
- jupyterlab==4.1.5
- jupyterlab-pygments==0.3.0
- jupyterlab-server==2.27.3
- jupyterlab-widgets==1.1.11
- markupsafe==3.0.2
- mistune==3.1.3
- mock==5.2.0
- narwhals==1.32.0
- nbclient==0.10.2
- nbconvert==7.16.6
- nbformat==5.10.4
- nose==1.3.7
- notebook==7.1.3
- notebook-shim==0.2.4
- numpy==2.0.2
- overrides==7.7.0
- packaging==24.2
- pandas==2.2.3
- pandocfilters==1.5.1
- pexpect==4.9.0
- pickleshare==0.7.5
- platformdirs==4.3.7
- plotly==6.0.1
- pluggy==1.5.0
- prometheus-client==0.21.1
- ptyprocess==0.7.0
- pycparser==2.22
- pygments==2.19.1
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- python-json-logger==3.3.0
- pytz==2025.2
- pyyaml==6.0.2
- pyzmq==26.3.0
- referencing==0.36.2
- requests==2.32.3
- rfc3339-validator==0.1.4
- rfc3986-validator==0.1.1
- rpds-py==0.24.0
- send2trash==1.8.3
- simplegeneric==0.8.1
- six==1.17.0
- sniffio==1.3.1
- soupsieve==2.6
- terminado==0.18.1
- tinycss2==1.4.0
- tomli==2.2.1
- tornado==6.4.2
- traitlets==5.14.3
- types-python-dateutil==2.9.0.20241206
- typing-extensions==4.13.0
- tzdata==2025.2
- uri-template==1.3.0
- urllib3==2.3.0
- webcolors==24.11.1
- webencodings==0.5.1
- websocket-client==1.8.0
- widgetsnbextension==3.6.10
- zipp==3.21.0
prefix: /opt/conda/envs/sparkmagic
| [
"tests/test_livyclient.py::test_doesnt_create_sql_context_automatically",
"tests/test_livyclient.py::test_start_creates_sql_context",
"tests/test_livyclient.py::test_execute_code",
"tests/test_livyclient.py::test_execute_sql",
"tests/test_livyclient.py::test_execute_hive",
"tests/test_livyclient.py::test_serialize",
"tests/test_livyclient.py::test_close_session",
"tests/test_livyclient.py::test_kind",
"tests/test_livyclient.py::test_session_id"
]
| [
"tests/test_sparkcontroller.py::test_add_session",
"tests/test_sparkcontroller.py::test_add_session_skip",
"tests/test_sparkcontroller.py::test_delete_session",
"tests/test_sparkcontroller.py::test_cleanup",
"tests/test_sparkcontroller.py::test_run_cell",
"tests/test_sparkcontroller.py::test_get_client_keys",
"tests/test_sparkcontroller.py::test_get_all_sessions",
"tests/test_sparkcontroller.py::test_cleanup_endpoint",
"tests/test_sparkcontroller.py::test_delete_session_by_id_existent",
"tests/test_sparkcontroller.py::test_delete_session_by_id_non_existent"
]
| []
| []
| Modified BSD License | 379 | [
"remotespark/livyclientlib/livyclient.py",
"remotespark/livyclientlib/sparkcontroller.py"
]
| [
"remotespark/livyclientlib/livyclient.py",
"remotespark/livyclientlib/sparkcontroller.py"
]
|
|
joblib__joblib-297 | 4a9c63d7984bc5e630722b8bf72c0c720a6bd5c0 | 2016-01-14 10:30:19 | 40341615cc2600675ce7457d9128fb030f6f89fa | ogrisel: LGTM, can you please add a changelog entry and merge? | diff --git a/CHANGES.rst b/CHANGES.rst
index e08fc89..66531c1 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -4,6 +4,11 @@ Latest changes
Release 0.9.4
-------------
+Loïc Estève
+
+ FIX for raising non inheritable exceptions in a Parallel call. See
+ https://github.com/joblib/joblib/issues/269 for more details.
+
Alexandre Abadie
FIX joblib.hash error with mixed types sets and dicts containing mixed
diff --git a/joblib/my_exceptions.py b/joblib/my_exceptions.py
index 500fcd7..9d26cb5 100644
--- a/joblib/my_exceptions.py
+++ b/joblib/my_exceptions.py
@@ -64,14 +64,20 @@ def _mk_exception(exception, name=None):
this_exception = _exception_mapping[this_name]
else:
if exception is Exception:
- # We cannot create a subclass: we are already a trivial
- # subclass
+ # JoblibException is already a subclass of Exception. No
+ # need to use multiple inheritance
return JoblibException, this_name
- elif issubclass(exception, JoblibException):
- return JoblibException, JoblibException.__name__
- this_exception = type(
- this_name, (JoblibException, exception), {})
- _exception_mapping[this_name] = this_exception
+ try:
+ this_exception = type(
+ this_name, (JoblibException, exception), {})
+ _exception_mapping[this_name] = this_exception
+ except TypeError:
+ # This happens if "Cannot create a consistent method
+ # resolution order", e.g. because 'exception' is a
+ # subclass of JoblibException or 'exception' is not an
+ # acceptable base class
+ this_exception = JoblibException
+
return this_exception, this_name
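The fix above works because Python raises `TypeError` when `type()` is asked to subclass a class that cannot serve as a base (such as SWIG-generated types or `bool`). A minimal sketch of that guard, with a stand-in `JoblibException`:

```python
# Sketch of the try/except guard from the patch; JoblibException here is
# a stand-in for joblib.my_exceptions.JoblibException.

class JoblibException(Exception):
    pass


def mk_exception(exception):
    try:
        # Fails with TypeError for non-inheritable classes such as bool
        # or SWIG-generated types like nlopt.ForcedStop.
        return type('Joblib' + exception.__name__,
                    (JoblibException, exception), {})
    except TypeError:
        return JoblibException


print(mk_exception(KeyError))  # a fresh JoblibKeyError subclass
print(mk_exception(bool))      # falls back to JoblibException
```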
 | Error propagating exceptions on non-subclassable types
I was using joblib to run 8 optimizations using nlopt in parallel, and got this error:
```
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-138-0fe3676d9e18> in <module>()
31 for i in range(iterations):
32 pool = Parallel(n_jobs=max_jobs, verbose=5)
---> 33 out = pool(delayed(nlopt_optimization)(job_id) for job_id in range(max_jobs))
34 solutions.extend(out)
35
/home/federico/anaconda/lib/python2.7/site-packages/joblib/parallel.pyc in __call__(self, iterable)
658 # consumption.
659 self._iterating = False
--> 660 self.retrieve()
661 # Make sure that we get a last message telling us we are done
662 elapsed_time = time.time() - self._start_time
/home/federico/anaconda/lib/python2.7/site-packages/joblib/parallel.pyc in retrieve(self)
540 )
541 # Convert this to a JoblibException
--> 542 exception_type = _mk_exception(exception.etype)[0]
543 raise exception_type(report)
544 raise exception
/home/federico/anaconda/lib/python2.7/site-packages/joblib/my_exceptions.pyc in _mk_exception(exception, name)
67 this_exception = type(this_name, (exception, JoblibException),
68 dict(__repr__=JoblibException.__repr__,
---> 69 __str__=JoblibException.__str__),
70 )
71 _exception_mapping[this_name] = this_exception
TypeError: type 'nlopt.ForcedStop' is not an acceptable base type
```
My understanding is that this happens because a number of nlopt types are generated with SWIG and cannot be subclassed. Should joblib check that the type can indeed be inherited from before it tries to create a nice custom exception? | joblib/joblib | diff --git a/joblib/test/test_my_exceptions.py b/joblib/test/test_my_exceptions.py
index 7c396ca..b283434 100644
--- a/joblib/test/test_my_exceptions.py
+++ b/joblib/test/test_my_exceptions.py
@@ -42,6 +42,16 @@ def test_inheritance_special_cases():
assert_true(my_exceptions._mk_exception(exception)[0] is
my_exceptions.JoblibException)
+ # Non-inheritable exception classes should be mapped to
+ # JoblibException by _mk_exception. That can happen with classes
+ # generated with SWIG. See
+ # https://github.com/joblib/joblib/issues/269 for a concrete
+ # example.
+ non_inheritable_classes = [type(lambda: None), bool]
+ for exception in non_inheritable_classes:
+ assert_true(my_exceptions._mk_exception(exception)[0] is
+ my_exceptions.JoblibException)
+
def test__mk_exception():
# Check that _mk_exception works on a bunch of different exceptions
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 2
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"coverage",
"pytest"
],
"pre_install": null,
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
coverage==6.2
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/joblib/joblib.git@4a9c63d7984bc5e630722b8bf72c0c720a6bd5c0#egg=joblib
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
nose==1.3.7
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: joblib
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==6.2
- nose==1.3.7
prefix: /opt/conda/envs/joblib
| [
"joblib/test/test_my_exceptions.py::test_inheritance_special_cases"
]
| []
| [
"joblib/test/test_my_exceptions.py::test_inheritance",
"joblib/test/test_my_exceptions.py::test__mk_exception"
]
| []
| BSD 3-Clause "New" or "Revised" License | 380 | [
"CHANGES.rst",
"joblib/my_exceptions.py"
]
| [
"CHANGES.rst",
"joblib/my_exceptions.py"
]
|
OnroerendErfgoed__pyramid_urireferencer-11 | b9c5617a6f21cc081232826cee80fa7b2bf050e2 | 2016-01-14 15:13:21 | 067293d191dc9dd4f7c2554f71bf0c730786a872 | diff --git a/pyramid_urireferencer/models.py b/pyramid_urireferencer/models.py
index 8ef53f0..9bbd245 100644
--- a/pyramid_urireferencer/models.py
+++ b/pyramid_urireferencer/models.py
@@ -14,12 +14,13 @@ class RegistryResponse:
:param int count: How many references were found?
:param list applications: A list of application results.
'''
+
def __init__(self, query_uri, success, has_references, count, applications):
- self.query_uri = query_uri
- self.success = success
- self.has_references = has_references
- self.count = count
- self.applications = applications
+ self.query_uri = query_uri
+ self.success = success
+ self.has_references = has_references
+ self.count = count
+ self.applications = applications
@staticmethod
def load_from_json(data):
@@ -34,9 +35,19 @@ class RegistryResponse:
r.success = data['success']
r.has_references = data['has_references']
r.count = data['count']
- r.applications = [ApplicationResponse.load_from_json(a) for a in data['applications']] if data['applications'] is not None else None
+ r.applications = [ApplicationResponse.load_from_json(a) for a in data['applications']] if data[
+ 'applications'] is not None else None
return r
+ def to_json(self):
+ return {
+ "query_uri": self.query_uri,
+ "success": self.success,
+ "has_references": self.has_references,
+ "count": self.count,
+ "applications": [app.to_json() for app in self.applications]
+ }
+
class ApplicationResponse:
'''
@@ -52,14 +63,15 @@ class ApplicationResponse:
:param list items: A list of items that have a reference to the \
uri under survey. Limited to 5 items for performance reasons.
'''
+
def __init__(self, title, uri, service_url, success, has_references, count, items):
- self.title = title
- self.uri = uri
- self.service_url = service_url
- self.success = success
- self.has_references = has_references
- self.count = count
- self.items = items
+ self.title = title
+ self.uri = uri
+ self.service_url = service_url
+ self.success = success
+ self.has_references = has_references
+ self.count = count
+ self.items = items
@staticmethod
def load_from_json(data):
@@ -79,6 +91,17 @@ class ApplicationResponse:
r.items = [Item.load_from_json(a) for a in data['items']] if data['items'] is not None else None
return r
+ def to_json(self):
+ return {
+ "title": self.title,
+ "uri": self.uri,
+ "service_url": self.service_url,
+ "success": self.success,
+ "has_references": self.has_references,
+ "count": self.count,
+ "items": [item.to_json() for item in self.items]
+ }
+
class Item:
'''
@@ -87,6 +110,7 @@ class Item:
:param string title: Title of the item.
:param string uri: Uri of the item.
'''
+
def __init__(self, title, uri):
self.title = title
self.uri = uri
@@ -103,3 +127,9 @@ class Item:
i.uri = data['uri']
i.title = data['title']
return i
+
+ def to_json(self):
+ return {
+ "title": self.title,
+ "uri": self.uri
+ }
diff --git a/pyramid_urireferencer/protected_resources.py b/pyramid_urireferencer/protected_resources.py
index 2814254..db4556a 100644
--- a/pyramid_urireferencer/protected_resources.py
+++ b/pyramid_urireferencer/protected_resources.py
@@ -8,6 +8,7 @@ that might be used in external applications.
from pyramid.httpexceptions import (
HTTPInternalServerError,
HTTPConflict)
+from webob import Response
import pyramid_urireferencer
@@ -26,19 +27,41 @@ def protected_operation(fn):
:raises pyramid.httpexceptions.HTTPInternalServerError: Raised when we were
unable to check that the URI is no longer being used.
'''
+
def advice(parent_object, *args, **kw):
id = parent_object.request.matchdict['id']
referencer = pyramid_urireferencer.get_referencer(parent_object.request.registry)
uri = parent_object.uri_template.format(id)
registery_response = referencer.is_referenced(uri)
if registery_response.has_references:
- raise HTTPConflict(
- detail="Urireferencer: The uri {0} is still in use by other applications: {1}".
- format(uri, ', '.join([app_response.title for app_response in registery_response.applications
- if app_response.has_references])))
+ if parent_object.request.headers.get("Accept", None) == "application/json":
+ response = Response()
+ response.status_code = 409
+ response_json = {
+ "message": "The uri {0} is still in use by other applications. A total of {1} references have been found.".format(
+ uri, registery_response.count),
+ "errors": [],
+ "registry_response": registery_response.to_json()
+ }
+ for app_response in registery_response.applications:
+ if app_response.has_references:
+ error_string = "{0}: {1} references found, such as {2}"\
+ .format(app_response.uri,
+ app_response.count,
+ ', '.join([i.uri for i in app_response.items]))
+ response_json["errors"].append(error_string)
+ response.json_body = response_json
+ response.content_type = 'application/json'
+ return response
+ else:
+ raise HTTPConflict(
+ detail="Urireferencer: The uri {0} is still in use by other applications. A total of {1} references have been found in the following applications: {2}".
+ format(uri, registery_response.count,
+ ', '.join([app_response.title for app_response in registery_response.applications
+ if app_response.has_references])))
elif not registery_response.success:
raise HTTPInternalServerError(
- detail="Urireferencer: Something went wrong while retrieving references of the uri {0}".format(uri))
+ detail="Urireferencer: Something went wrong while retrieving references of the uri {0}".format(uri))
return fn(parent_object, *args, **kw)
- return advice
\ No newline at end of file
+ return advice
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 22621e2..10814ca 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -7,6 +7,7 @@ pytest-cov==2.1.0
webtest==2.0.18
httpretty==0.8.10
coveralls
+mock==1.3.0
#wheel
wheel==0.26.0
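A minimal sketch of the JSON 409 response the patch above builds with webob when the client sends `Accept: application/json`; the `registry_response` argument here is a hypothetical plain dict standing in for `RegistryResponse.to_json()`:

```python
# Sketch only; assumes webob is installed and registry_response is a
# plain dict shaped like RegistryResponse.to_json() in the patch.

from webob import Response


def conflict_response(uri, registry_response):
    response = Response()
    response.status_code = 409
    response.json_body = {
        "message": ("The uri {0} is still in use by other applications. "
                    "A total of {1} references have been found.").format(
                        uri, registry_response["count"]),
        "errors": [],  # filled per referencing application, as in the patch
        "registry_response": registry_response,
    }
    response.content_type = 'application/json'
    return response
```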
| Error message when a resource can't be deleted.
Currently, when the protected resources decorator finds that a resource is still in use, it raises a 409 Conflict with a plain text string as the body.
The current error message looks like:
*Urireferencer: The uri https://id.erfgoed.net/actoren/1 is still in use by other applications: https://inventaris.onroerenderfgoed.be, https://besluiten.onroerenderfgoed.be*
I would like to see this changed to:
*The uri https://id.erfgoed.net/actoren/1 is still in use by other applications. A total of 8 references have been found in the following applications: https://inventaris.onroerenderfgoed.be, https://besluiten.onroerenderfgoed.be*
I would also like a more customized error message when request.accept is 'application/json':
```json
{
"message": "The uri https://id.erfgoed.net/actoren/1 is still in use by other applications. A total of 8 references have been found.",
"errors": [
"https://inventaris.onroerenderfgoed.be: 6 references found, such as https://id.erfgoed.net/erfgoedobjecten/56, https://id.erfgoed.net/aanduidingsobjecten/889, https://id.erfgoed.net/aanduidingsobjecten/12487, https://id.erfgoed.net/gebeurtenissen/965, https://id.erfgoed.net/themas/123.",
"https://besluiten.onroerenderfgoed.be: 2 references found, such as https://id.erfgoed.net/besluiten/1896, https://id.erfgoed.net/besluiten/23."
],
"registry_response": <Include the response the registry passed so the client can create custom error messages>
}
``` | OnroerendErfgoed/pyramid_urireferencer | diff --git a/tests/test_protected_resources.py b/tests/test_protected_resources.py
index dc47a81..1989235 100644
--- a/tests/test_protected_resources.py
+++ b/tests/test_protected_resources.py
@@ -2,23 +2,34 @@
import unittest
from pyramid import testing
from pyramid_urireferencer.protected_resources import protected_operation
-from pyramid_urireferencer.models import RegistryResponse
+from pyramid_urireferencer.models import RegistryResponse, Item, ApplicationResponse
from pyramid.httpexceptions import HTTPConflict, HTTPInternalServerError
+
try:
from unittest.mock import Mock, patch
except ImportError:
from mock import Mock, patch # pragma: no cover
+
def get_app(nr):
- class Object(object):
- pass
- a = Object()
- a.title = 'App {0}'.format(nr)
- a.has_references = True if nr == 1 else False
+ items = []
+ if nr == 1:
+ items.append(Item(uri="https://dev-besluiten.onroerenderfgoed.be/besluiten/152", title="Mijn besluit"))
+ items.append(Item(uri="https://dev-besluiten.onroerenderfgoed.be/besluiten/154",
+ title="Vaststelling van de inventaris van het Bouwkundig Erfgoed op 28 november 2014"))
+ a = ApplicationResponse(
+ title='App {0}'.format(nr),
+ uri="https://dev-app-{0}.onroerenderfgoed.be/".format(nr),
+ service_url="https://dev-app-{0}.onroerenderfgoed.be/references".format(nr),
+ success=True,
+ has_references=True if nr == 1 else False,
+ count=2 if nr == 1 else 0,
+ items=items
+ )
return a
-class DummyParent(object):
+class DummyParent(object):
def __init__(self):
self.request = testing.DummyRequest()
config = testing.setUp(request=self.request)
@@ -37,7 +48,6 @@ class DummyParent(object):
class ProtectedTests(unittest.TestCase):
-
def setUp(self):
pass
@@ -52,7 +62,8 @@ class ProtectedTests(unittest.TestCase):
@patch('pyramid_urireferencer.protected_resources.pyramid_urireferencer.Referencer.is_referenced')
def test_protected_operation_409(self, is_referenced_mock):
dummy = DummyParent()
- is_referenced_mock.return_value = RegistryResponse('https://id.erfgoed.net/resources/1', True, True, 10, [get_app(1), get_app(2)])
+ is_referenced_mock.return_value = RegistryResponse('https://id.erfgoed.net/resources/1', True, True, 10,
+ [get_app(1), get_app(2)])
self.assertRaises(HTTPConflict, dummy.protected_dummy)
is_referenced_call = is_referenced_mock.mock_calls[0]
self.assertEqual('https://id.erfgoed.net/resources/1', is_referenced_call[1][0])
@@ -60,16 +71,32 @@ class ProtectedTests(unittest.TestCase):
@patch('pyramid_urireferencer.protected_resources.pyramid_urireferencer.Referencer.is_referenced')
def test_protected_operation_409_2(self, is_referenced_mock):
dummy = DummyParent()
- is_referenced_mock.return_value = RegistryResponse('https://id.erfgoed.net/resources/1', False, True, 10, [get_app(1), get_app(2)])
+ is_referenced_mock.return_value = RegistryResponse('https://id.erfgoed.net/resources/1', False, True, 10,
+ [get_app(1), get_app(2)])
self.assertRaises(HTTPConflict, dummy.protected_dummy)
is_referenced_call = is_referenced_mock.mock_calls[0]
self.assertEqual('https://id.erfgoed.net/resources/1', is_referenced_call[1][0])
+ @patch('pyramid_urireferencer.protected_resources.pyramid_urireferencer.Referencer.is_referenced')
+ def test_protected_operation_409_json(self, is_referenced_mock):
+ dummy = DummyParent()
+ dummy.request.headers = {"Accept": "application/json"}
+ is_referenced_mock.return_value = RegistryResponse('https://id.erfgoed.net/resources/1', False, True, 2,
+ [get_app(1), get_app(2)])
+ res = dummy.protected_dummy()
+ self.assertEqual(409, res.status_code)
+ self.assertEqual(res.json_body["message"],
+ "The uri https://id.erfgoed.net/resources/1 is still in use by other applications. A total of 2 references have been found.")
+ self.assertEqual("application/json", res.content_type)
+
+ is_referenced_call = is_referenced_mock.mock_calls[0]
+ self.assertEqual('https://id.erfgoed.net/resources/1', is_referenced_call[1][0])
+
@patch('pyramid_urireferencer.protected_resources.pyramid_urireferencer.Referencer.is_referenced')
def test_protected_operation_500(self, is_referenced_mock):
dummy = DummyParent()
- is_referenced_mock.return_value = RegistryResponse('https://id.erfgoed.net/resources/1', False, None, None, None)
+ is_referenced_mock.return_value = RegistryResponse('https://id.erfgoed.net/resources/1', False, None, None,
+ None)
self.assertRaises(HTTPInternalServerError, dummy.protected_dummy)
is_referenced_call = is_referenced_mock.mock_calls[0]
self.assertEqual('https://id.erfgoed.net/resources/1', is_referenced_call[1][0])
-
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 3
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"webtest",
"httpretty",
"coveralls",
"wheel"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | beautifulsoup4==4.13.3
coverage==7.8.0
coveralls==4.0.1
docopt==0.6.2
exceptiongroup==1.2.2
httpretty==1.1.4
iniconfig==2.1.0
packaging==24.2
PasteDeploy==3.1.0
pluggy==1.5.0
pyramid==1.5.7
-e git+https://github.com/OnroerendErfgoed/pyramid_urireferencer.git@b9c5617a6f21cc081232826cee80fa7b2bf050e2#egg=pyramid_urireferencer
pytest==8.3.5
pytest-cov==6.0.0
repoze.lru==0.7
requests==2.7.0
soupsieve==2.6
tomli==2.2.1
translationstring==1.4
typing_extensions==4.13.0
venusian==3.1.1
waitress==3.0.2
WebOb==1.8.9
WebTest==3.0.4
zope.deprecation==5.1
zope.interface==7.2
| name: pyramid_urireferencer
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- beautifulsoup4==4.13.3
- coverage==7.8.0
- coveralls==4.0.1
- docopt==0.6.2
- exceptiongroup==1.2.2
- httpretty==1.1.4
- iniconfig==2.1.0
- packaging==24.2
- pastedeploy==3.1.0
- pluggy==1.5.0
- pyramid==1.5.7
- pytest==8.3.5
- pytest-cov==6.0.0
- repoze-lru==0.7
- requests==2.7.0
- soupsieve==2.6
- tomli==2.2.1
- translationstring==1.4
- typing-extensions==4.13.0
- venusian==3.1.1
- waitress==3.0.2
- webob==1.8.9
- webtest==3.0.4
- zope-deprecation==5.1
- zope-interface==7.2
prefix: /opt/conda/envs/pyramid_urireferencer
| [
"tests/test_protected_resources.py::ProtectedTests::test_protected_operation_409_json"
]
| []
| [
"tests/test_protected_resources.py::ProtectedTests::test_protected_operation",
"tests/test_protected_resources.py::ProtectedTests::test_protected_operation_409",
"tests/test_protected_resources.py::ProtectedTests::test_protected_operation_409_2",
"tests/test_protected_resources.py::ProtectedTests::test_protected_operation_500"
]
| []
| MIT License | 381 | [
"pyramid_urireferencer/protected_resources.py",
"requirements-dev.txt",
"pyramid_urireferencer/models.py"
]
| [
"pyramid_urireferencer/protected_resources.py",
"requirements-dev.txt",
"pyramid_urireferencer/models.py"
]
|
|
jupyter-incubator__sparkmagic-125 | ac43b2838efaae766a7071a79699b9b192899dd2 | 2016-01-16 02:37:09 | ac43b2838efaae766a7071a79699b9b192899dd2 | diff --git a/remotespark/livyclientlib/livyclient.py b/remotespark/livyclientlib/livyclient.py
index eab5eed..6d56323 100644
--- a/remotespark/livyclientlib/livyclient.py
+++ b/remotespark/livyclientlib/livyclient.py
@@ -22,6 +22,12 @@ class LivyClient(object):
def serialize(self):
return self._session.get_state().to_dict()
+ def get_logs(self):
+ try:
+ return True, self._session.logs
+ except ValueError as err:
+ return False, "{}".format(err)
+
def execute(self, commands):
self._session.wait_for_idle(self._execute_timeout_seconds)
return self._session.execute(commands)
diff --git a/remotespark/livyclientlib/livysession.py b/remotespark/livyclientlib/livysession.py
index 9993ad6..73ddcb3 100644
--- a/remotespark/livyclientlib/livysession.py
+++ b/remotespark/livyclientlib/livysession.py
@@ -112,16 +112,10 @@ class LivySession(object):
def kind(self):
return self._state.kind
- def refresh_status(self):
- (status, logs) = self._get_latest_status_and_logs()
-
- if status in Constants.possible_session_status:
- self._status = status
- self._logs = logs
- else:
- raise ValueError("Status '{}' not supported by session.".format(status))
-
- return self._status
+ @property
+ def logs(self):
+ self._refresh_logs()
+ return self._logs
@property
def http_client(self):
@@ -158,14 +152,14 @@ class LivySession(object):
Parameters:
seconds_to_wait : number of seconds to wait before giving up.
"""
- self.refresh_status()
+ self._refresh_status()
current_status = self._status
if current_status == Constants.idle_session_status:
return
if current_status in Constants.final_status:
error = "Session {} unexpectedly reached final status {}. See logs:\n{}"\
- .format(self.id, current_status, "\n".join(self._logs))
+ .format(self.id, current_status, "\n".join(self.logs))
self.logger.error(error)
raise LivyUnexpectedStatusError(error)
@@ -185,18 +179,31 @@ class LivySession(object):
def _statements_url(self):
return "/sessions/{}/statements".format(self.id)
- def _get_latest_status_and_logs(self):
- """Get current session state. Network call."""
- r = self._http_client.get("/sessions", [200])
- sessions = r.json()["sessions"]
- filtered_sessions = [s for s in sessions if s["id"] == int(self.id)]
+ def _refresh_status(self):
+ status = self._get_latest_status()
+
+ if status in Constants.possible_session_status:
+ self._status = status
+ else:
+ raise ValueError("Status '{}' not supported by session.".format(status))
+
+ return self._status
+
+ def _refresh_logs(self):
+ self._logs = self._get_latest_logs()
+
+ def _get_latest_status(self):
+ r = self._http_client.get("/sessions/{}".format(self.id), [200])
+ session = r.json()
- if len(filtered_sessions) != 1:
- raise ValueError("Expected one session of id {} and got {} sessions."
- .format(self.id, len(filtered_sessions)))
-
- session = filtered_sessions[0]
- return session['state'], session['log']
+ return session['state']
+
+ def _get_latest_logs(self):
+ r = self._http_client.get("/sessions/{}/log?from=0".format(self.id), [200])
+ log_array = r.json()['log']
+ logs = "\n".join(log_array)
+
+ return logs
def _get_statement_output(self, statement_id):
statement_running = True
diff --git a/remotespark/livyclientlib/sparkcontroller.py b/remotespark/livyclientlib/sparkcontroller.py
index 75b4854..df0924a 100644
--- a/remotespark/livyclientlib/sparkcontroller.py
+++ b/remotespark/livyclientlib/sparkcontroller.py
@@ -21,6 +21,10 @@ class SparkController(object):
else:
self.client_manager = ClientManager()
+ def get_logs(self, client_name=None):
+ client_to_use = self.get_client_by_name_or_default(client_name)
+ return client_to_use.get_logs()
+
def run_cell(self, cell, client_name=None):
client_to_use = self.get_client_by_name_or_default(client_name)
return client_to_use.execute(cell)
@@ -40,7 +44,7 @@ class SparkController(object):
session_list = [self.client_factory.create_session(self.ipython_display, connection_string, {"kind": s["kind"]}, s["id"])
for s in sessions]
for s in session_list:
- s.refresh_status()
+ s._refresh_status()
return session_list
def get_all_sessions_endpoint_info(self, connection_string):
diff --git a/remotespark/remotesparkmagics.py b/remotespark/remotesparkmagics.py
index 1a9687b..c763f16 100644
--- a/remotespark/remotesparkmagics.py
+++ b/remotespark/remotesparkmagics.py
@@ -39,7 +39,7 @@ class RemoteSparkMagics(Magics):
self.logger.debug("Will serialize to {}.".format(path_to_serialize))
- self.spark_controller = SparkController(serialize_path=path_to_serialize)
+ self.spark_controller = SparkController(self.ipython_display, serialize_path=path_to_serialize)
else:
self.logger.debug("Serialization NOT enabled.")
except KeyError:
@@ -69,6 +69,7 @@ class RemoteSparkMagics(Magics):
When the SQL context is used, the result will be a Pandas dataframe of a sample of the results.
If invoked with no subcommand, the cell will be executed against the specified session.
+
Subcommands
-----------
info
@@ -89,11 +90,14 @@ class RemoteSparkMagics(Magics):
e.g. `%%spark config {"driverMemory":"1000M", "executorCores":4}`
run
Run Spark code against a session.
- e.g. `%%spark -e testsession` will execute the cell code against the testsession previously created
- e.g. `%%spark -e testsession -c sql` will execute the SQL code against the testsession previously created
- e.g. `%%spark -e testsession -c sql -o my_var` will execute the SQL code against the testsession
+ e.g. `%%spark -s testsession` will execute the cell code against the testsession previously created
+ e.g. `%%spark -s testsession -c sql` will execute the SQL code against the testsession previously created
+ e.g. `%%spark -s testsession -c sql -o my_var` will execute the SQL code against the testsession
previously created and store the pandas dataframe created in the my_var variable in the
Python environment.
+ logs
+ Returns the logs for a given session.
+ e.g. `%%spark logs -s testsession` will return the logs for the testsession previously created
delete
Delete a Livy session. Argument is the name of the session to be deleted.
e.g. `%%spark delete defaultlivy`
@@ -107,81 +111,94 @@ class RemoteSparkMagics(Magics):
subcommand = args.command[0].lower()
- # info
- if subcommand == "info":
- if len(args.command) == 2:
- connection_string = args.command[1]
- info_sessions = self.spark_controller.get_all_sessions_endpoint_info(connection_string)
- self._print_endpoint_info(info_sessions)
- elif len(args.command) == 1:
- self._print_local_info()
- else:
- raise ValueError("Subcommand 'info' requires no value or a connection string to show all sessions. "
- "{}".format(usage))
- # config
- elif subcommand == "config":
- # Would normally do " ".join(args.command[1:]) but parse_argstring removes quotes...
- rest_of_line = user_input[7:]
- conf.override(conf.session_configs.__name__, json.loads(rest_of_line))
- # add
- elif subcommand == "add":
- if len(args.command) != 4 and len(args.command) != 5:
- raise ValueError("Subcommand 'add' requires three or four arguments. {}".format(usage))
-
- name = args.command[1].lower()
- language = args.command[2].lower()
- connection_string = args.command[3]
-
- if len(args.command) == 5:
- skip = args.command[4].lower() == "skip"
- else:
- skip = False
-
- properties = copy.deepcopy(conf.session_configs())
- properties["kind"] = self._get_livy_kind(language)
+ try:
+ # info
+ if subcommand == "info":
+ if len(args.command) == 2:
+ connection_string = args.command[1]
+ info_sessions = self.spark_controller.get_all_sessions_endpoint_info(connection_string)
+ self._print_endpoint_info(info_sessions)
+ elif len(args.command) == 1:
+ self._print_local_info()
+ else:
+ raise ValueError("Subcommand 'info' requires no value or a connection string to show all sessions.\n"
+ "{}".format(usage))
+ # config
+ elif subcommand == "config":
+ # Would normally do " ".join(args.command[1:]) but parse_argstring removes quotes...
+ rest_of_line = user_input[7:]
+ conf.override(conf.session_configs.__name__, json.loads(rest_of_line))
+ # add
+ elif subcommand == "add":
+ if len(args.command) != 4 and len(args.command) != 5:
+ raise ValueError("Subcommand 'add' requires three or four arguments.\n{}".format(usage))
- self.spark_controller.add_session(name, connection_string, skip, properties)
- # delete
- elif subcommand == "delete":
- if len(args.command) == 2:
name = args.command[1].lower()
- self.spark_controller.delete_session_by_name(name)
- elif len(args.command) == 3:
- connection_string = args.command[1]
- session_id = args.command[2]
- self.spark_controller.delete_session_by_id(connection_string, session_id)
- else:
- raise ValueError("Subcommand 'delete' requires a session name, or a connection string and id. {}"
- .format(usage))
- # cleanup
- elif subcommand == "cleanup":
- if len(args.command) == 2:
- connection_string = args.command[1]
- self.spark_controller.cleanup_endpoint(connection_string)
- elif len(args.command) == 1:
- self.spark_controller.cleanup()
- else:
- raise ValueError("Subcommand 'cleanup' requires no value or a connection string to clean up sessions. "
- "{}".format(usage))
- # run
- elif len(subcommand) == 0:
- if args.context == Constants.context_name_spark:
- (success, out) = self.spark_controller.run_cell(cell, args.session)
- if success:
- self.ipython_display.write(out)
+ language = args.command[2].lower()
+ connection_string = args.command[3]
+
+ if len(args.command) == 5:
+ skip = args.command[4].lower() == "skip"
+ else:
+ skip = False
+
+ properties = copy.deepcopy(conf.session_configs())
+ properties["kind"] = self._get_livy_kind(language)
+
+ self.spark_controller.add_session(name, connection_string, skip, properties)
+ # delete
+ elif subcommand == "delete":
+ if len(args.command) == 2:
+ name = args.command[1].lower()
+ self.spark_controller.delete_session_by_name(name)
+ elif len(args.command) == 3:
+ connection_string = args.command[1]
+ session_id = args.command[2]
+ self.spark_controller.delete_session_by_id(connection_string, session_id)
+ else:
+ raise ValueError("Subcommand 'delete' requires a session name or a connection string and id.\n{}"
+ .format(usage))
+ # cleanup
+ elif subcommand == "cleanup":
+ if len(args.command) == 2:
+ connection_string = args.command[1]
+ self.spark_controller.cleanup_endpoint(connection_string)
+ elif len(args.command) == 1:
+ self.spark_controller.cleanup()
else:
- self.ipython_display.send_error(out)
- elif args.context == Constants.context_name_sql:
- return self._execute_against_context_that_returns_df(self.spark_controller.run_cell_sql, cell,
- args.session, args.output)
- elif args.context == Constants.context_name_hive:
- return self._execute_against_context_that_returns_df(self.spark_controller.run_cell_hive, cell,
- args.session, args.output)
+ raise ValueError("Subcommand 'cleanup' requires no further values or a connection string to clean up "
+ "sessions.\n{}".format(usage))
+ # logs
+ elif subcommand == "logs":
+ if len(args.command) == 1:
+ (success, out) = self.spark_controller.get_logs(args.session)
+ if success:
+ self.ipython_display.write(out)
+ else:
+ self.ipython_display.send_error(out)
+ else:
+ raise ValueError("Subcommand 'logs' requires no further values.\n{}".format(usage))
+ # run
+ elif len(subcommand) == 0:
+ if args.context == Constants.context_name_spark:
+ (success, out) = self.spark_controller.run_cell(cell, args.session)
+ if success:
+ self.ipython_display.write(out)
+ else:
+ self.ipython_display.send_error(out)
+ elif args.context == Constants.context_name_sql:
+ return self._execute_against_context_that_returns_df(self.spark_controller.run_cell_sql, cell,
+ args.session, args.output)
+ elif args.context == Constants.context_name_hive:
+ return self._execute_against_context_that_returns_df(self.spark_controller.run_cell_hive, cell,
+ args.session, args.output)
+ else:
+ raise ValueError("Context '{}' not found".format(args.context))
+ # error
else:
- raise ValueError("Context '{}' not found".format(args.context))
- # error
- else:
- raise ValueError("Subcommand '{}' not found. {}".format(subcommand, usage))
+ raise ValueError("Subcommand '{}' not found. {}".format(subcommand, usage))
+ except ValueError as err:
+ self.ipython_display.send_error("{}".format(err))
def _execute_against_context_that_returns_df(self, method, cell, session, output_var):
try:
diff --git a/remotespark/sparkkernelbase.py b/remotespark/sparkkernelbase.py
index 626b74c..ab3dd5e 100644
--- a/remotespark/sparkkernelbase.py
+++ b/remotespark/sparkkernelbase.py
@@ -17,6 +17,7 @@ class SparkKernelBase(IPythonKernel):
info_command = "info"
delete_command = "delete"
clean_up_command = "cleanup"
+ logs_command = "logs"
force_flag = "f"
@@ -78,10 +79,10 @@ class SparkKernelBase(IPythonKernel):
if self._session_started:
if self.force_flag not in flags:
self._show_user_error("A session has already been started. In order to modify the Spark configura"
- "tion, please provide the '-f' flag at the beginning of the config magic:\n"
- "\te.g. `%config -f {}`\n\nNote that this will kill the current session and"
- " will create a new one with the configuration provided. All previously run "
- "commands in the session will be lost.")
+ "tion, please provide the '-f' flag at the beginning of the config magic:\n"
+ "\te.g. `%config -f {}`\n\nNote that this will kill the current session and"
+ " will create a new one with the configuration provided. All previously run "
+ "commands in the session will be lost.")
code_to_run = ""
else:
restart_session = True
@@ -118,6 +119,12 @@ class SparkKernelBase(IPythonKernel):
code_to_run = "%spark cleanup {}".format(self.connection_string)
return self._run_without_session(code_to_run, silent, store_history, user_expressions, allow_stdin)
+ elif subcommand == self.logs_command:
+ if self._session_started:
+ code_to_run = "%spark logs"
+ else:
+ code_to_run = "print('No logs yet.')"
+ return self._execute_cell(code_to_run, silent, store_history, user_expressions, allow_stdin)
else:
self._show_user_error("Magic '{}' not supported.".format(subcommand))
return self._run_without_session("", silent, store_history, user_expressions, allow_stdin)
@@ -145,7 +152,6 @@ ip.display_formatter.ipython_display_formatter.for_type_by_name('pandas.core.fra
def _start_session(self):
if not self._session_started:
self._session_started = True
- self._ipython_display.writeln('Starting Livy Session')
add_session_code = "%spark add {} {} {} skip".format(
self.client_name, self.session_language, self.connection_string)
@@ -266,4 +272,4 @@ ip.display_formatter.ipython_display_formatter.for_type_by_name('pandas.core.fra
error = conf.fatal_error_suggestion().format(self._fatal_error)
self._logger.error(error)
self._ipython_display.send_error(error)
- raise ValueError(self._fatal_error)
\ No newline at end of file
+ raise ValueError(self._fatal_error)
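A minimal sketch of the Livy REST call the patch above wires up in `_get_latest_logs`; the base URL is a hypothetical example, and the real code goes through sparkmagic's own HTTP client rather than `requests`:

```python
# Sketch only: fetch and format a Livy session's logs.

import requests


def get_session_logs(base_url, session_id):
    r = requests.get("{0}/sessions/{1}/log?from=0".format(base_url, session_id))
    r.raise_for_status()
    # Livy returns the log as a JSON array of lines; join them so the
    # %logs magic can print one nicely formatted block.
    return "\n".join(r.json()["log"])


# Example, assuming a Livy server at this address:
# print(get_session_logs("http://localhost:8998", 0))
```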
 | Provide %logs for sessions and in the wrapper kernels
This should query `/sessions/ID/log?from=0` and return the output nicely formatted. | jupyter-incubator/sparkmagic | diff --git a/tests/test_livyclient.py b/tests/test_livyclient.py
index 1a2931a..b0d89c2 100644
--- a/tests/test_livyclient.py
+++ b/tests/test_livyclient.py
@@ -118,3 +118,27 @@ def test_session_id():
i = client.session_id
assert i == session_id
+
+
+def test_get_logs_returns_session_logs():
+ logs = "hi"
+ mock_spark_session = MagicMock()
+ mock_spark_session.logs = logs
+ client = LivyClient(mock_spark_session)
+
+ res, logs_r = client.get_logs()
+
+ assert res
+ assert logs_r == logs
+
+
+def test_get_logs_returns_false_with_value_error():
+ err = "err"
+ mock_spark_session = MagicMock()
+ type(mock_spark_session).logs = PropertyMock(side_effect=ValueError(err))
+ client = LivyClient(mock_spark_session)
+
+ res, logs_r = client.get_logs()
+
+ assert not res
+ assert logs_r == err
diff --git a/tests/test_livysession.py b/tests/test_livysession.py
index 72d3f58..3e576b3 100644
--- a/tests/test_livysession.py
+++ b/tests/test_livysession.py
@@ -31,14 +31,14 @@ class TestLivySession:
self.pi_result = "Pi is roughly 3.14336"
self.session_create_json = '{"id":0,"state":"starting","kind":"spark","log":[]}'
- self.ready_sessions_json = '{"from":0,"total":1,"sessions":[{"id":0,"state":"idle","kind":"spark","log":[""]}]}'
- self.error_sessions_json = '{"from":0,"total":1,"sessions":[{"id":0,"state":"error","kind":"spark","log":' \
- '[""]}]}'
- self.busy_sessions_json = '{"from":0,"total":1,"sessions":[{"id":0,"state":"busy","kind":"spark","log":[""]}]}'
+ self.ready_sessions_json = '{"id":0,"state":"idle","kind":"spark","log":[""]}'
+ self.error_sessions_json = '{"id":0,"state":"error","kind":"spark","log":[""]}'
+ self.busy_sessions_json = '{"id":0,"state":"busy","kind":"spark","log":[""]}'
self.post_statement_json = '{"id":0,"state":"running","output":null}'
self.running_statement_json = '{"total_statements":1,"statements":[{"id":0,"state":"running","output":null}]}'
self.ready_statement_json = '{"total_statements":1,"statements":[{"id":0,"state":"available","output":{"statu' \
's":"ok","execution_count":0,"data":{"text/plain":"Pi is roughly 3.14336"}}}]}'
+ self.log_json = '{"id":6,"from":0,"total":212,"log":["hi","hi"]}'
self.get_responses = []
self.post_responses = []
@@ -217,7 +217,7 @@ class TestLivySession:
http_client.post.assert_called_with(
"/sessions", [201], properties)
- def test_status_gets_latest(self):
+ def test_status_gets_latest_status(self):
http_client = MagicMock()
http_client.post.return_value = DummyResponse(201, self.session_create_json)
http_client.get.return_value = DummyResponse(200, self.ready_sessions_json)
@@ -229,11 +229,28 @@ class TestLivySession:
conf.load()
session.start()
- session.refresh_status()
+ session._refresh_status()
state = session._status
assert_equals("idle", state)
- http_client.get.assert_called_with("/sessions", [200])
+ http_client.get.assert_called_with("/sessions/0", [200])
+
+ def test_logs_gets_latest_logs(self):
+ http_client = MagicMock()
+ http_client.post.return_value = DummyResponse(201, self.session_create_json)
+ http_client.get.return_value = DummyResponse(200, self.log_json)
+ conf.override_all({
+ "status_sleep_seconds": 0.01,
+ "statement_sleep_seconds": 0.01
+ })
+ session = self._create_session(http_client=http_client)
+ conf.load()
+ session.start()
+
+ logs = session.logs
+
+ assert_equals("hi\nhi", logs)
+ http_client.get.assert_called_with("/sessions/0/log?from=0", [200])
def test_wait_for_idle_returns_when_in_state(self):
http_client = MagicMock()
@@ -253,7 +270,7 @@ class TestLivySession:
session.wait_for_idle(30)
- http_client.get.assert_called_with("/sessions", [200])
+ http_client.get.assert_called_with("/sessions/0", [200])
assert_equals(2, http_client.get.call_count)
@raises(LivyUnexpectedStatusError)
@@ -262,7 +279,8 @@ class TestLivySession:
http_client.post.return_value = DummyResponse(201, self.session_create_json)
self.get_responses = [DummyResponse(200, self.busy_sessions_json),
DummyResponse(200, self.busy_sessions_json),
- DummyResponse(200, self.error_sessions_json)]
+ DummyResponse(200, self.error_sessions_json),
+ DummyResponse(200, self.log_json)]
http_client.get.side_effect = self._next_response_get
conf.override_all({
diff --git a/tests/test_remotesparkmagics.py b/tests/test_remotesparkmagics.py
index e87e52f..ac9b96c 100644
--- a/tests/test_remotesparkmagics.py
+++ b/tests/test_remotesparkmagics.py
@@ -150,13 +150,15 @@ def test_cleanup_endpoint_command_parses():
mock_method.assert_called_once_with("conn_str")
-@raises(ValueError)
@with_setup(_setup, _teardown)
-def test_bad_command_throws_exception():
+def test_bad_command_writes_error():
line = "bad_command"
+ usage = "Please look at usage of %spark by executing `%spark?`."
magic.spark(line)
+ ipython_display.send_error.assert_called_once_with("Subcommand '{}' not found. {}".format(line, usage))
+
@with_setup(_setup, _teardown)
def test_run_cell_command_parses():
@@ -302,3 +304,33 @@ def test_run_sql_command_stores_variable_in_user_ns():
def test_get_livy_kind_covers_all_langs():
for lang in Constants.lang_supported:
RemoteSparkMagics._get_livy_kind(lang)
+
+
+@with_setup(_setup, _teardown)
+def test_logs_subcommand():
+ get_logs_method = MagicMock()
+ result_value = ""
+ get_logs_method.return_value = (True, result_value)
+ spark_controller.get_logs = get_logs_method
+
+ command = "logs -s"
+ name = "sessions_name"
+ line = " ".join([command, name])
+ cell = "cell code"
+
+ # Could get results
+ result = magic.spark(line, cell)
+
+ get_logs_method.assert_called_once_with(name)
+ assert result is None
+ ipython_display.write.assert_called_once_with(result_value)
+
+ # Could not get results
+ get_logs_method.reset_mock()
+ get_logs_method.return_value = (False, result_value)
+
+ result = magic.spark(line, cell)
+
+ get_logs_method.assert_called_once_with(name)
+ assert result is None
+ ipython_display.send_error.assert_called_once_with(result_value)
diff --git a/tests/test_sparkcontroller.py b/tests/test_sparkcontroller.py
index c1c97b8..3a8a6ae 100644
--- a/tests/test_sparkcontroller.py
+++ b/tests/test_sparkcontroller.py
@@ -179,3 +179,13 @@ def test_delete_session_by_id_non_existent():
assert len(create_session_method.mock_calls) == 0
assert len(session.delete.mock_calls) == 0
+
+
+@with_setup(_setup, _teardown)
+def test_get_logs():
+ chosen_client = MagicMock()
+ controller.get_client_by_name_or_default = MagicMock(return_value=chosen_client)
+
+ controller.get_logs()
+
+ chosen_client.get_logs.assert_called_with()
diff --git a/tests/test_sparkkernelbase.py b/tests/test_sparkkernelbase.py
index 02628f3..6bef40b 100644
--- a/tests/test_sparkkernelbase.py
+++ b/tests/test_sparkkernelbase.py
@@ -388,3 +388,21 @@ def test_register_auto_viz():
assert call("from remotespark.datawidgets.utils import display_dataframe\nip = get_ipython()\nip.display_formatter"
".ipython_display_formatter.for_type_by_name('pandas.core.frame', 'DataFrame', display_dataframe)",
True, False, None, False) in execute_cell_mock.mock_calls
+
+
+@with_setup(_setup(), _teardown())
+def test_logs_magic():
+ kernel._session_started = True
+
+ kernel.do_execute("%logs", False)
+
+ assert call("%spark logs", False, True, None, False) in execute_cell_mock.mock_calls
+
+
+@with_setup(_setup(), _teardown())
+def test_logs_magic_prints_without_session():
+ kernel._session_started = False
+
+ kernel.do_execute("%logs", False)
+
+ assert call("print('No logs yet.')", False, True, None, False) in execute_cell_mock.mock_calls
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 5
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -r requirements.txt -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | anyio==4.9.0
argon2-cffi==23.1.0
argon2-cffi-bindings==21.2.0
arrow==1.3.0
async-lru==2.0.5
attrs==25.3.0
babel==2.17.0
beautifulsoup4==4.13.3
bleach==6.2.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
comm==0.2.2
decorator==5.2.1
defusedxml==0.7.1
exceptiongroup==1.2.2
fastjsonschema==2.21.1
fqdn==1.5.1
h11==0.14.0
httpcore==1.0.7
httpx==0.28.1
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
ipykernel==4.2.2
ipython==4.0.2
ipython-genutils==0.2.0
ipywidgets==7.8.5
isoduration==20.11.0
Jinja2==3.1.6
json5==0.10.0
jsonpointer==3.0.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
jupyter-events==0.12.0
jupyter-lsp==2.2.5
jupyter_client==8.6.3
jupyter_core==5.7.2
jupyter_server==2.15.0
jupyter_server_terminals==0.5.3
jupyterlab==4.1.5
jupyterlab_pygments==0.3.0
jupyterlab_server==2.27.3
jupyterlab_widgets==1.1.11
MarkupSafe==3.0.2
mistune==3.1.3
mock==5.2.0
nbclient==0.10.2
nbconvert==7.16.6
nbformat==5.10.4
nose==1.3.7
notebook==7.1.3
notebook_shim==0.2.4
numpy==2.0.2
overrides==7.7.0
packaging==24.2
pandas==2.2.3
pandocfilters==1.5.1
pexpect==4.9.0
pickleshare==0.7.5
platformdirs==4.3.7
plotly==1.9.4
pluggy==1.5.0
prometheus_client==0.21.1
ptyprocess==0.7.0
pycparser==2.22
Pygments==2.19.1
pytest==8.3.5
python-dateutil==2.9.0.post0
python-json-logger==3.3.0
pytz==2025.2
PyYAML==6.0.2
pyzmq==26.3.0
referencing==0.36.2
-e git+https://github.com/jupyter-incubator/sparkmagic.git@ac43b2838efaae766a7071a79699b9b192899dd2#egg=remotespark
requests==2.32.3
rfc3339-validator==0.1.4
rfc3986-validator==0.1.1
rpds-py==0.24.0
Send2Trash==1.8.3
simplegeneric==0.8.1
six==1.17.0
sniffio==1.3.1
soupsieve==2.6
terminado==0.18.1
tinycss2==1.4.0
tomli==2.2.1
tornado==6.4.2
traitlets==5.14.3
types-python-dateutil==2.9.0.20241206
typing_extensions==4.13.0
tzdata==2025.2
uri-template==1.3.0
urllib3==2.3.0
webcolors==24.11.1
webencodings==0.5.1
websocket-client==1.8.0
widgetsnbextension==3.6.10
zipp==3.21.0
| name: sparkmagic
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- anyio==4.9.0
- argon2-cffi==23.1.0
- argon2-cffi-bindings==21.2.0
- arrow==1.3.0
- async-lru==2.0.5
- attrs==25.3.0
- babel==2.17.0
- beautifulsoup4==4.13.3
- bleach==6.2.0
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- comm==0.2.2
- decorator==5.2.1
- defusedxml==0.7.1
- exceptiongroup==1.2.2
- fastjsonschema==2.21.1
- fqdn==1.5.1
- h11==0.14.0
- httpcore==1.0.7
- httpx==0.28.1
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- ipykernel==4.2.2
- ipython==4.0.2
- ipython-genutils==0.2.0
- ipywidgets==7.8.5
- isoduration==20.11.0
- jinja2==3.1.6
- json5==0.10.0
- jsonpointer==3.0.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- jupyter-client==8.6.3
- jupyter-core==5.7.2
- jupyter-events==0.12.0
- jupyter-lsp==2.2.5
- jupyter-server==2.15.0
- jupyter-server-terminals==0.5.3
- jupyterlab==4.1.5
- jupyterlab-pygments==0.3.0
- jupyterlab-server==2.27.3
- jupyterlab-widgets==1.1.11
- markupsafe==3.0.2
- mistune==3.1.3
- mock==5.2.0
- nbclient==0.10.2
- nbconvert==7.16.6
- nbformat==5.10.4
- nose==1.3.7
- notebook==7.1.3
- notebook-shim==0.2.4
- numpy==2.0.2
- overrides==7.7.0
- packaging==24.2
- pandas==2.2.3
- pandocfilters==1.5.1
- pexpect==4.9.0
- pickleshare==0.7.5
- platformdirs==4.3.7
- plotly==1.9.4
- pluggy==1.5.0
- prometheus-client==0.21.1
- ptyprocess==0.7.0
- pycparser==2.22
- pygments==2.19.1
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- python-json-logger==3.3.0
- pytz==2025.2
- pyyaml==6.0.2
- pyzmq==26.3.0
- referencing==0.36.2
- requests==2.32.3
- rfc3339-validator==0.1.4
- rfc3986-validator==0.1.1
- rpds-py==0.24.0
- send2trash==1.8.3
- simplegeneric==0.8.1
- six==1.17.0
- sniffio==1.3.1
- soupsieve==2.6
- terminado==0.18.1
- tinycss2==1.4.0
- tomli==2.2.1
- tornado==6.4.2
- traitlets==5.14.3
- types-python-dateutil==2.9.0.20241206
- typing-extensions==4.13.0
- tzdata==2025.2
- uri-template==1.3.0
- urllib3==2.3.0
- webcolors==24.11.1
- webencodings==0.5.1
- websocket-client==1.8.0
- widgetsnbextension==3.6.10
- zipp==3.21.0
prefix: /opt/conda/envs/sparkmagic
| [
"tests/test_livyclient.py::test_get_logs_returns_session_logs",
"tests/test_livyclient.py::test_get_logs_returns_false_with_value_error"
]
| [
"tests/test_remotesparkmagics.py::test_info_command_parses",
"tests/test_remotesparkmagics.py::test_info_endpoint_command_parses",
"tests/test_remotesparkmagics.py::test_add_sessions_command_parses",
"tests/test_remotesparkmagics.py::test_add_sessions_command_extra_properties",
"tests/test_remotesparkmagics.py::test_delete_sessions_command_parses",
"tests/test_remotesparkmagics.py::test_cleanup_command_parses",
"tests/test_remotesparkmagics.py::test_cleanup_endpoint_command_parses",
"tests/test_remotesparkmagics.py::test_bad_command_writes_error",
"tests/test_remotesparkmagics.py::test_run_cell_command_parses",
"tests/test_remotesparkmagics.py::test_run_cell_command_writes_to_err",
"tests/test_remotesparkmagics.py::test_run_sql_command_parses",
"tests/test_remotesparkmagics.py::test_run_hive_command_parses",
"tests/test_remotesparkmagics.py::test_run_sql_command_returns_none_when_exception",
"tests/test_remotesparkmagics.py::test_run_hive_command_returns_none_when_exception",
"tests/test_remotesparkmagics.py::test_run_sql_command_stores_variable_in_user_ns",
"tests/test_remotesparkmagics.py::test_logs_subcommand",
"tests/test_sparkcontroller.py::test_add_session",
"tests/test_sparkcontroller.py::test_add_session_skip",
"tests/test_sparkcontroller.py::test_delete_session",
"tests/test_sparkcontroller.py::test_cleanup",
"tests/test_sparkcontroller.py::test_run_cell",
"tests/test_sparkcontroller.py::test_get_client_keys",
"tests/test_sparkcontroller.py::test_get_all_sessions",
"tests/test_sparkcontroller.py::test_cleanup_endpoint",
"tests/test_sparkcontroller.py::test_delete_session_by_id_existent",
"tests/test_sparkcontroller.py::test_delete_session_by_id_non_existent",
"tests/test_sparkcontroller.py::test_get_logs",
"tests/test_sparkkernelbase.py::test_set_config",
"tests/test_sparkkernelbase.py::test_do_execute_initializes_magics_if_not_run",
"tests/test_sparkkernelbase.py::test_magic_not_supported",
"tests/test_sparkkernelbase.py::test_info",
"tests/test_sparkkernelbase.py::test_delete_force",
"tests/test_sparkkernelbase.py::test_delete_not_force",
"tests/test_sparkkernelbase.py::test_cleanup_force",
"tests/test_sparkkernelbase.py::test_cleanup_not_force",
"tests/test_sparkkernelbase.py::test_call_spark",
"tests/test_sparkkernelbase.py::test_execute_throws_if_fatal_error_happened",
"tests/test_sparkkernelbase.py::test_execute_throws_if_fatal_error_happens_for_execution",
"tests/test_sparkkernelbase.py::test_call_spark_sql_new_line",
"tests/test_sparkkernelbase.py::test_call_spark_hive_new_line",
"tests/test_sparkkernelbase.py::test_register_auto_viz",
"tests/test_sparkkernelbase.py::test_logs_magic",
"tests/test_sparkkernelbase.py::test_logs_magic_prints_without_session"
]
| [
"tests/test_livyclient.py::test_doesnt_create_sql_context_automatically",
"tests/test_livyclient.py::test_start_creates_sql_context",
"tests/test_livyclient.py::test_execute_code",
"tests/test_livyclient.py::test_execute_sql",
"tests/test_livyclient.py::test_execute_hive",
"tests/test_livyclient.py::test_serialize",
"tests/test_livyclient.py::test_close_session",
"tests/test_livyclient.py::test_kind",
"tests/test_livyclient.py::test_session_id",
"tests/test_remotesparkmagics.py::test_get_livy_kind_covers_all_langs",
"tests/test_sparkkernelbase.py::test_get_config",
"tests/test_sparkkernelbase.py::test_get_config_not_set",
"tests/test_sparkkernelbase.py::test_initialize_magics",
"tests/test_sparkkernelbase.py::test_start_session",
"tests/test_sparkkernelbase.py::test_delete_session",
"tests/test_sparkkernelbase.py::test_shutdown_cleans_up"
]
| []
| Modified BSD License | 382 | [
"remotespark/remotesparkmagics.py",
"remotespark/livyclientlib/livyclient.py",
"remotespark/sparkkernelbase.py",
"remotespark/livyclientlib/livysession.py",
"remotespark/livyclientlib/sparkcontroller.py"
]
| [
"remotespark/remotesparkmagics.py",
"remotespark/livyclientlib/livyclient.py",
"remotespark/sparkkernelbase.py",
"remotespark/livyclientlib/livysession.py",
"remotespark/livyclientlib/sparkcontroller.py"
]
|
|
joshvillbrandt__wireless-9 | 03fe4987e50aae45132ad3f4c2f6cb3ff2263adc | 2016-01-17 14:02:20 | 7d62e873cb2c69185494a2ba037a9e1cc3a74e6f | diff --git a/wireless/Wireless.py b/wireless/Wireless.py
index d9bb653..389dee9 100644
--- a/wireless/Wireless.py
+++ b/wireless/Wireless.py
@@ -8,7 +8,8 @@ from time import sleep
def cmd(cmd):
return subprocess.Popen(
cmd, shell=True,
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read()
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT
+ ).stdout.read().decode()
# abstracts away wireless connection
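The `.decode()` added above matters because on Python 3, `Popen(...).stdout.read()` returns `bytes`, so a later membership test against a `str` raises `TypeError` (as in the traceback below). A minimal standalone illustration, not the library's code:

```python
import subprocess

# On Python 3 this is bytes, e.g. b'not found\n'
raw = subprocess.Popen(
    'echo "not found"', shell=True,
    stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read()

# 'not found' in raw                # TypeError on Python 3 (str vs. bytes)
print('not found' in raw.decode())  # True once decoded, as in the fix above
```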
| Python3 support
Traceback (most recent call last):
File "/home/xayon/.virtualenvs/smoothie/lib/python3.4/site-packages/rq/worker.py", line 568, in perform_job
rv = job.perform()
File "/home/xayon/.virtualenvs/smoothie/lib/python3.4/site-packages/rq/job.py", line 495, in perform
self._result = self.func(*self.args, **self.kwargs)
File "./smoothie/plugins/interfaces.py", line 28, in interfaces
return str(Interfaces())
File "./smoothie/plugins/__init__.py", line 37, in __init__
self.run()
File "./smoothie/plugins/__init__.py", line 78, in run
self.callback()
File "./smoothie/plugins/interfaces.py", line 20, in callback
ifaces = [a for a in Wireless().interfaces()
File "/home/xayon/.virtualenvs/smoothie/lib/python3.4/site-packages/wireless/Wireless.py", line 21, in __init__
self._driver_name = self._detectDriver()
File "/home/xayon/.virtualenvs/smoothie/lib/python3.4/site-packages/wireless/Wireless.py", line 47, in _detectDriver
if len(response) > 0 and 'not found' not in response:
TypeError: 'str' does not support the buffer interface
| joshvillbrandt/wireless | diff --git a/tests/TestWireless.py b/tests/TestWireless.py
index 0ca7ec0..941d8d3 100644
--- a/tests/TestWireless.py
+++ b/tests/TestWireless.py
@@ -2,6 +2,7 @@
import unittest
from wireless import Wireless
+from wireless.Wireless import cmd
class TestWireless(unittest.TestCase):
@@ -12,3 +13,26 @@ class TestWireless(unittest.TestCase):
def test_import(self):
# if this module loads, then the import worked...
self.assertTrue(hasattr(Wireless, 'connect'))
+
+
+class TestCMD(unittest.TestCase):
+ """
+ Tests against cmd function.
+ """
+ def setUp(self):
+ self.com = cmd('echo "test_ok"')
+ self.empty_com = cmd('echo -n')
+
+ def test_cmdcomparission(self):
+ """
+ Check if we can test against the output of
+ the current test command
+ """
+ self.assertTrue('test_ok' in self.com)
+
+ def test_cmdlen(self):
+ """
+ Check if the output is > 0 chars.
+ """
+ self.assertFalse(len(self.empty_com) > 0)
+ self.assertTrue(len(self.com) > 0)
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "",
"pip_packages": [
"nose",
"flake8",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y pandoc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
flake8==7.2.0
iniconfig==2.1.0
mccabe==0.7.0
nose==1.3.7
packaging==24.2
pluggy==1.5.0
pycodestyle==2.13.0
pyflakes==3.3.1
pytest==8.3.5
tomli==2.2.1
-e git+https://github.com/joshvillbrandt/wireless.git@03fe4987e50aae45132ad3f4c2f6cb3ff2263adc#egg=wireless
| name: wireless
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- flake8==7.2.0
- iniconfig==2.1.0
- mccabe==0.7.0
- nose==1.3.7
- packaging==24.2
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/wireless
| [
"tests/TestWireless.py::TestCMD::test_cmdcomparission"
]
| []
| [
"tests/TestWireless.py::TestWireless::test_import",
"tests/TestWireless.py::TestCMD::test_cmdlen"
]
| []
| Apache License 2.0 | 383 | [
"wireless/Wireless.py"
]
| [
"wireless/Wireless.py"
]
|
|
jupyter-incubator__sparkmagic-128 | c3fd225a599345175240d8e96673102aacf3f624 | 2016-01-19 04:02:31 | c3fd225a599345175240d8e96673102aacf3f624 | diff --git a/remotespark/wrapperkernel/codetransformers.py b/remotespark/wrapperkernel/codetransformers.py
index 41136ad..1cec56a 100644
--- a/remotespark/wrapperkernel/codetransformers.py
+++ b/remotespark/wrapperkernel/codetransformers.py
@@ -157,3 +157,14 @@ class LogsTransformer(UserCodeTransformerBase):
code_to_run = "print('No logs yet.')"
return code_to_run, error_to_show, begin_action, end_action, deletes_session
+
+
+class PythonTransformer(UserCodeTransformerBase):
+ def get_code_to_execute(self, session_started, connection_string, force, output_var, command):
+ error_to_show = None
+ code_to_run = command
+ begin_action = Constants.do_nothing_action
+ end_action = Constants.do_nothing_action
+ deletes_session = False
+
+ return code_to_run, error_to_show, begin_action, end_action, deletes_session
diff --git a/remotespark/wrapperkernel/sparkkernelbase.py b/remotespark/wrapperkernel/sparkkernelbase.py
index 60afcd6..583ed66 100644
--- a/remotespark/wrapperkernel/sparkkernelbase.py
+++ b/remotespark/wrapperkernel/sparkkernelbase.py
@@ -125,6 +125,8 @@ class SparkKernelBase(IPythonKernel):
return CleanUpTransformer(subcommand)
elif subcommand == UserCommandParser.logs_command:
return LogsTransformer(subcommand)
+ elif subcommand == UserCommandParser.local_command:
+ return PythonTransformer(subcommand)
else:
return NotSupportedTransformer(subcommand)
diff --git a/remotespark/wrapperkernel/usercommandparser.py b/remotespark/wrapperkernel/usercommandparser.py
index 378fb5a..538f8ff 100644
--- a/remotespark/wrapperkernel/usercommandparser.py
+++ b/remotespark/wrapperkernel/usercommandparser.py
@@ -12,6 +12,7 @@ class UserCommandParser(object):
delete_command = "delete"
clean_up_command = "cleanup"
logs_command = "logs"
+ local_command = "local"
def __init__(self):
"""Code can have a magic or no magic specified (specified with %word sign). If no magic is specified, %run will
| Expose %python for wrapper kernels
So that user can execute custom python code, maybe in conjunction with -o for %sql results. | jupyter-incubator/sparkmagic | diff --git a/tests/test_codetransformers.py b/tests/test_codetransformers.py
index c03390e..698a763 100644
--- a/tests/test_codetransformers.py
+++ b/tests/test_codetransformers.py
@@ -227,3 +227,17 @@ def test_logs_transformer_no_session():
assert_equals(begin_action, Constants.do_nothing_action)
assert_equals(end_action, Constants.do_nothing_action)
assert_equals(deletes_session, False)
+
+
+@with_setup(_setup, _teardown)
+def test_python_transformer():
+ transformer = PythonTransformer("command")
+
+ code_to_run, error_to_show, begin_action, end_action, deletes_session = \
+ transformer.get_code_to_execute(False, conn, False, None, code)
+
+ assert_equals(code, code_to_run)
+ assert error_to_show is None
+ assert_equals(begin_action, Constants.do_nothing_action)
+ assert_equals(end_action, Constants.do_nothing_action)
+ assert_equals(deletes_session, False)
diff --git a/tests/test_sparkkernelbase.py b/tests/test_sparkkernelbase.py
index b5e2e8f..4ec7916 100644
--- a/tests/test_sparkkernelbase.py
+++ b/tests/test_sparkkernelbase.py
@@ -129,6 +129,7 @@ def test_returns_right_transformer():
assert type(kernel._get_code_transformer(UserCommandParser.clean_up_command)) is CleanUpTransformer
assert type(kernel._get_code_transformer(UserCommandParser.logs_command)) is LogsTransformer
assert type(kernel._get_code_transformer("whatever")) is NotSupportedTransformer
+ assert type(kernel._get_code_transformer(UserCommandParser.local_command)) is PythonTransformer
@with_setup(_setup, _teardown)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 3
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"mkdir ~/.sparkmagic",
"cp remotespark/default_config.json ~/.sparkmagic/config.json"
],
"python": "3.7",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | anyio==3.7.1
argon2-cffi==23.1.0
argon2-cffi-bindings==21.2.0
attrs==24.2.0
beautifulsoup4==4.13.3
bleach==6.0.0
certifi @ file:///croot/certifi_1671487769961/work/certifi
cffi==1.15.1
charset-normalizer==3.4.1
comm==0.1.4
decorator==5.1.1
defusedxml==0.7.1
entrypoints==0.4
exceptiongroup==1.2.2
fastjsonschema==2.21.1
idna==3.10
importlib-metadata==6.7.0
importlib-resources==5.12.0
iniconfig==2.0.0
ipykernel==4.2.2
ipython==4.0.2
ipython-genutils==0.2.0
ipywidgets==7.8.5
Jinja2==3.1.6
jsonschema==4.17.3
jupyter-server==1.24.0
jupyter_client==7.4.9
jupyter_core==4.12.0
jupyterlab-pygments==0.2.2
jupyterlab_widgets==1.1.11
MarkupSafe==2.1.5
mistune==3.0.2
mock==5.2.0
nbclassic==1.2.0
nbclient==0.7.4
nbconvert==7.6.0
nbformat==5.8.0
nest-asyncio==1.6.0
nose==1.3.7
notebook==6.5.7
notebook_shim==0.2.4
numpy==1.21.6
packaging==24.0
pandas==1.3.5
pandocfilters==1.5.1
pexpect==4.9.0
pickleshare==0.7.5
pkgutil_resolve_name==1.3.10
plotly==1.9.4
pluggy==1.2.0
prometheus-client==0.17.1
ptyprocess==0.7.0
pycparser==2.21
Pygments==2.17.2
pyrsistent==0.19.3
pytest==7.4.4
python-dateutil==2.9.0.post0
pytz==2025.2
pyzmq==26.2.1
-e git+https://github.com/jupyter-incubator/sparkmagic.git@c3fd225a599345175240d8e96673102aacf3f624#egg=remotespark
requests==2.31.0
Send2Trash==1.8.3
simplegeneric==0.8.1
six==1.17.0
sniffio==1.3.1
soupsieve==2.4.1
terminado==0.17.1
tinycss2==1.2.1
tomli==2.0.1
tornado==6.2
traitlets==5.9.0
typing_extensions==4.7.1
urllib3==2.0.7
webencodings==0.5.1
websocket-client==1.6.1
widgetsnbextension==3.6.10
zipp==3.15.0
| name: sparkmagic
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- anyio==3.7.1
- argon2-cffi==23.1.0
- argon2-cffi-bindings==21.2.0
- attrs==24.2.0
- beautifulsoup4==4.13.3
- bleach==6.0.0
- cffi==1.15.1
- charset-normalizer==3.4.1
- comm==0.1.4
- decorator==5.1.1
- defusedxml==0.7.1
- entrypoints==0.4
- exceptiongroup==1.2.2
- fastjsonschema==2.21.1
- idna==3.10
- importlib-metadata==6.7.0
- importlib-resources==5.12.0
- iniconfig==2.0.0
- ipykernel==4.2.2
- ipython==4.0.2
- ipython-genutils==0.2.0
- ipywidgets==7.8.5
- jinja2==3.1.6
- jsonschema==4.17.3
- jupyter-client==7.4.9
- jupyter-core==4.12.0
- jupyter-server==1.24.0
- jupyterlab-pygments==0.2.2
- jupyterlab-widgets==1.1.11
- markupsafe==2.1.5
- mistune==3.0.2
- mock==5.2.0
- nbclassic==1.2.0
- nbclient==0.7.4
- nbconvert==7.6.0
- nbformat==5.8.0
- nest-asyncio==1.6.0
- nose==1.3.7
- notebook==6.5.7
- notebook-shim==0.2.4
- numpy==1.21.6
- packaging==24.0
- pandas==1.3.5
- pandocfilters==1.5.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pkgutil-resolve-name==1.3.10
- plotly==1.9.4
- pluggy==1.2.0
- prometheus-client==0.17.1
- ptyprocess==0.7.0
- pycparser==2.21
- pygments==2.17.2
- pyrsistent==0.19.3
- pytest==7.4.4
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyzmq==26.2.1
- requests==2.31.0
- send2trash==1.8.3
- simplegeneric==0.8.1
- six==1.17.0
- sniffio==1.3.1
- soupsieve==2.4.1
- terminado==0.17.1
- tinycss2==1.2.1
- tomli==2.0.1
- tornado==6.2
- traitlets==5.9.0
- typing-extensions==4.7.1
- urllib3==2.0.7
- webencodings==0.5.1
- websocket-client==1.6.1
- widgetsnbextension==3.6.10
- zipp==3.15.0
prefix: /opt/conda/envs/sparkmagic
| [
"tests/test_codetransformers.py::test_python_transformer",
"tests/test_sparkkernelbase.py::test_returns_right_transformer"
]
| []
| [
"tests/test_codetransformers.py::test_not_supported_transformer",
"tests/test_codetransformers.py::test_config_transformer_no_session",
"tests/test_codetransformers.py::test_config_transformer_session_no_flag",
"tests/test_codetransformers.py::test_config_transformer_session_flag",
"tests/test_codetransformers.py::test_spark_transformer",
"tests/test_codetransformers.py::test_sql_transformer",
"tests/test_codetransformers.py::test_sql_transformer_output_var",
"tests/test_codetransformers.py::test_hive_transformer",
"tests/test_codetransformers.py::test_info_transformer",
"tests/test_codetransformers.py::test_delete_transformer_no_force",
"tests/test_codetransformers.py::test_delete_transformer_force",
"tests/test_codetransformers.py::test_cleanup_transformer_no_force",
"tests/test_codetransformers.py::test_cleanup_transformer_force",
"tests/test_codetransformers.py::test_logs_transformer_session",
"tests/test_codetransformers.py::test_logs_transformer_no_session",
"tests/test_sparkkernelbase.py::test_get_config",
"tests/test_sparkkernelbase.py::test_get_config_not_set",
"tests/test_sparkkernelbase.py::test_get_config_not_set_empty_strings",
"tests/test_sparkkernelbase.py::test_initialize_magics",
"tests/test_sparkkernelbase.py::test_start_session",
"tests/test_sparkkernelbase.py::test_delete_session",
"tests/test_sparkkernelbase.py::test_instructions_from_parser_are_passed_to_transformer",
"tests/test_sparkkernelbase.py::test_instructions_from_transformer_are_executed_code",
"tests/test_sparkkernelbase.py::test_instructions_from_transformer_are_executed_error",
"tests/test_sparkkernelbase.py::test_instructions_from_transformer_are_executed_deletes_session",
"tests/test_sparkkernelbase.py::test_instructions_from_transformer_are_executed_begin_start",
"tests/test_sparkkernelbase.py::test_instructions_from_transformer_are_executed_begin_delete",
"tests/test_sparkkernelbase.py::test_instructions_from_transformer_are_executed_end_start",
"tests/test_sparkkernelbase.py::test_instructions_from_transformer_are_executed_end_delete",
"tests/test_sparkkernelbase.py::test_instructions_from_transformer_are_executed_begin_start_end_delete",
"tests/test_sparkkernelbase.py::test_execute_throws_if_fatal_error_happened",
"tests/test_sparkkernelbase.py::test_execute_throws_if_fatal_error_happens_for_execution",
"tests/test_sparkkernelbase.py::test_shutdown_cleans_up",
"tests/test_sparkkernelbase.py::test_register_auto_viz"
]
| []
| Modified BSD License | 389 | [
"remotespark/wrapperkernel/codetransformers.py",
"remotespark/wrapperkernel/usercommandparser.py",
"remotespark/wrapperkernel/sparkkernelbase.py"
]
| [
"remotespark/wrapperkernel/codetransformers.py",
"remotespark/wrapperkernel/usercommandparser.py",
"remotespark/wrapperkernel/sparkkernelbase.py"
]
|
|
wong2__pick-3 | 38bab1f33ff03936906435d5458765493e4c2c1c | 2016-01-20 08:40:03 | 38bab1f33ff03936906435d5458765493e4c2c1c | diff --git a/example/scroll.py b/example/scroll.py
new file mode 100644
index 0000000..6f34b22
--- /dev/null
+++ b/example/scroll.py
@@ -0,0 +1,10 @@
+#-*-coding:utf-8-*-
+
+from __future__ import print_function
+
+from pick import pick
+
+title = 'Select:'
+options = ['foo.bar%s.baz' % x for x in range(1, 71)]
+option, index = pick(options, title)
+print(option, index)
diff --git a/pick/__init__.py b/pick/__init__.py
index 1f8ccfc..f004b84 100644
--- a/pick/__init__.py
+++ b/pick/__init__.py
@@ -49,14 +49,13 @@ class Picker(object):
"""
return self.options[self.index], self.index
- def draw(self):
- """draw the curses ui on the screen"""
- self.screen.clear()
-
- x, y = 1, 1
+ def get_title_lines(self):
if self.title:
- self.screen.addstr(y, x, self.title)
- y += 2
+ return self.title.split('\n') + ['']
+ return []
+
+ def get_option_lines(self):
+ lines = []
for index, option in enumerate(self.options):
if index == self.index:
@@ -64,13 +63,53 @@ class Picker(object):
else:
prefix = len(self.indicator) * ' '
line = '{0} {1}'.format(prefix, option)
+ lines.append(line)
+
+ return lines
+
+ def get_lines(self):
+ title_lines = self.get_title_lines()
+ option_lines = self.get_option_lines()
+ lines = title_lines + option_lines
+ current_line = self.index + len(title_lines) + 1
+ return lines, current_line
+
+ def draw(self):
+ """draw the curses ui on the screen, handle scroll if needed"""
+ self.screen.clear()
+
+ x, y = 1, 1 # start point
+ max_y, max_x = self.screen.getmaxyx()
+ max_rows = max_y - y # the max rows we can draw
+
+ lines, current_line = self.get_lines()
+
+ # calculate how many lines we should scroll, relative to the top
+ scroll_top = getattr(self, 'scroll_top', 0)
+ if current_line <= scroll_top:
+ scroll_top = 0
+ elif current_line - scroll_top > max_rows:
+ scroll_top = current_line - max_rows
+ self.scroll_top = scroll_top
+
+ lines_to_draw = lines[scroll_top:scroll_top+max_rows]
+
+ for line in lines_to_draw:
self.screen.addstr(y, x, line)
y += 1
self.screen.refresh()
- def start(self):
- return curses.wrapper(self.run_loop)
+ def run_loop(self):
+ while True:
+ self.draw()
+ c = self.screen.getch()
+ if c in KEYS_UP:
+ self.move_up()
+ elif c in KEYS_DOWN:
+ self.move_down()
+ elif c in KEYS_ENTER:
+ return self.get_selected()
def config_curses(self):
# use the default colors of the terminal
@@ -78,21 +117,13 @@ class Picker(object):
# hide the cursor
curses.curs_set(0)
- def run_loop(self, screen):
- self.config_curses()
+ def _start(self, screen):
self.screen = screen
- self.draw()
+ self.config_curses()
+ return self.run_loop()
- while True:
- c = self.screen.getch()
- if c in KEYS_UP:
- self.move_up()
- self.draw()
- elif c in KEYS_DOWN:
- self.move_down()
- self.draw()
- elif c in KEYS_ENTER:
- return self.get_selected()
+ def start(self):
+ return curses.wrapper(self._start)
def pick(options, title=None, indicator='*', default_index=0):
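A worked example of the `scroll_top` arithmetic introduced above, detached from curses so it runs anywhere; `max_rows` plays the role of `max_y - y`, and the function body mirrors the patch:

```python
def scroll_window(lines, current_line, max_rows, scroll_top=0):
    # Scroll down just far enough to keep the current line on screen.
    if current_line <= scroll_top:
        scroll_top = 0
    elif current_line - scroll_top > max_rows:
        scroll_top = current_line - max_rows
    return lines[scroll_top:scroll_top + max_rows], scroll_top

lines = ['option%d' % i for i in range(1, 71)]
visible, top = scroll_window(lines, current_line=40, max_rows=10)
print(top, visible[0], visible[-1])  # 30 option31 option40
```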
| Long lists issue
I have an issue with `pick` when passing a long list; here is an example you can try:
```python
from pick import pick
title = 'Select: '
options = ['foo.bar1.baz', 'foo.bar2.baz', 'foo.bar3.baz', 'foo.bar4.baz', 'foo.bar5.baz', 'foo.bar6.baz', 'foo.bar7.baz','foo.bar8.baz', 'foo.bar9.baz', 'foo.bar10.baz','foo.bar11.baz', 'foo.bar12.baz', 'foo.bar13.baz', 'foo.bar14.baz', 'foo.bar15.baz', 'foo.bar16.baz', 'foo.bar17.baz','foo.bar18.baz', 'foo.bar19.baz', 'foo.bar20.baz','foo.bar21.baz', 'foo.bar22.baz', 'foo.bar23.baz', 'foo.bar24.baz', 'foo.bar25.baz', 'foo.bar26.baz', 'foo.bar27.baz','foo.bar28.baz', 'foo.bar29.baz', 'foo.bar30.baz','foo.bar31.baz', 'foo.bar32.baz', 'foo.bar33.baz', 'foo.bar34.baz', 'foo.bar35.baz', 'foo.bar36.baz', 'foo.bar37.baz','foo.bar38.baz', 'foo.bar39.baz', 'foo.bar40.baz','foo.bar41.baz', 'foo.bar42.baz', 'foo.bar43.baz', 'foo.bar44.baz', 'foo.bar45.baz', 'foo.bar46.baz', 'foo.bar47.baz','foo.bar48.baz', 'foo.bar49.baz', 'foo.bar50.baz','foo.bar51.baz', 'foo.bar52.baz', 'foo.bar53.baz', 'foo.bar54.baz', 'foo.bar55.baz', 'foo.bar56.baz', 'foo.bar57.baz','foo.bar58.baz', 'foo.bar59.baz', 'foo.bar60.baz']
option, index = pick(options, title)
```
the result will be:
```
Traceback (most recent call last):
File "pick_test.py", line 5, in <module>
option, index = pick(options, title)
File "/usr/lib/python2.7/site-packages/pick/__init__.py", line 109, in pick
return picker.start()
File "/usr/lib/python2.7/site-packages/pick/__init__.py", line 73, in start
return curses.wrapper(self.run_loop)
File "/usr/lib64/python2.7/curses/wrapper.py", line 43, in wrapper
return func(stdscr, *args, **kwds)
File "/usr/lib/python2.7/site-packages/pick/__init__.py", line 84, in run_loop
self.draw()
File "/usr/lib/python2.7/site-packages/pick/__init__.py", line 67, in draw
self.screen.addstr(y, x, line)
_curses.error: addstr() returned ERR
``` | wong2/pick | diff --git a/tests/test_pick.py b/tests/test_pick.py
index f5b7dd4..21eecc4 100644
--- a/tests/test_pick.py
+++ b/tests/test_pick.py
@@ -1,12 +1,12 @@
#-*-coding:utf-8-*-
import unittest
-from pick import pick, Picker
+from pick import Picker
class TestPick(unittest.TestCase):
- def test_pick(self):
+ def test_move_up_down(self):
title = 'Please choose an option: '
options = ['option1', 'option2', 'option3']
picker = Picker(options, title)
@@ -16,6 +16,26 @@ class TestPick(unittest.TestCase):
picker.move_down()
assert picker.get_selected() == ('option2', 1)
+ def test_default_index(self):
+ title = 'Please choose an option: '
+ options = ['option1', 'option2', 'option3']
+ picker = Picker(options, title, default_index=1)
+ assert picker.get_selected() == ('option2', 1)
+
+ def test_get_lines(self):
+ title = 'Please choose an option: '
+ options = ['option1', 'option2', 'option3']
+ picker = Picker(options, title, indicator='*')
+ lines, current_line = picker.get_lines()
+ assert lines == [title, '', '* option1', ' option2', ' option3']
+ assert current_line == 3
+
+ def test_no_title(self):
+ options = ['option1', 'option2', 'option3']
+ picker = Picker(options)
+ lines, current_line = picker.get_lines()
+ assert current_line == 1
+
if __name__ == '__main__':
unittest.main()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_added_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
nose==1.3.7
packaging @ file:///croot/packaging_1734472117206/work
-e git+https://github.com/wong2/pick.git@38bab1f33ff03936906435d5458765493e4c2c1c#egg=pick
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: pick
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- nose==1.3.7
prefix: /opt/conda/envs/pick
| [
"tests/test_pick.py::TestPick::test_get_lines",
"tests/test_pick.py::TestPick::test_no_title"
]
| []
| [
"tests/test_pick.py::TestPick::test_default_index",
"tests/test_pick.py::TestPick::test_move_up_down"
]
| []
| MIT License | 390 | [
"example/scroll.py",
"pick/__init__.py"
]
| [
"example/scroll.py",
"pick/__init__.py"
]
|
|
phobson__paramnormal-24 | 9e0395b03e042d0ad4f0d6be12c768103233dc27 | 2016-01-21 15:07:30 | 9e0395b03e042d0ad4f0d6be12c768103233dc27 | diff --git a/docs/tutorial/fitting.ipynb b/docs/tutorial/fitting.ipynb
index b5434fa..364bccb 100644
--- a/docs/tutorial/fitting.ipynb
+++ b/docs/tutorial/fitting.ipynb
@@ -11,6 +11,17 @@
"Again, we'll demonstrate with a lognormal distribution and compare parameter estimatation with scipy."
]
},
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "%matplotlib inline"
+ ]
+ },
{
"cell_type": "code",
"execution_count": null,
@@ -28,9 +39,7 @@
"clean_bkgd = {'axes.facecolor':'none', 'figure.facecolor':'none'}\n",
"seaborn.set(style='ticks', rc=clean_bkgd)\n",
"\n",
- "import paramnormal\n",
- "\n",
- "%matplotlib inline"
+ "import paramnormal"
]
},
{
diff --git a/docs/tutorial/overview.ipynb b/docs/tutorial/overview.ipynb
index 26e7506..d3f0fc6 100644
--- a/docs/tutorial/overview.ipynb
+++ b/docs/tutorial/overview.ipynb
@@ -11,6 +11,17 @@
"The main problem that `paramnormal` is trying to solve is that sometimes, creating a probability distribution using these parameters (and others) in `scipy.stats` can be confusing. Also the parameters in `numpy.random` can be inconsistently named (admittedly, just a minor inconvenience). "
]
},
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "%matplotlib inline"
+ ]
+ },
{
"cell_type": "code",
"execution_count": null,
@@ -147,11 +158,7 @@
},
"outputs": [],
"source": [
- "distributions = filter(\n",
- " lambda d: not d.startswith('__') and d not in ['BaseDist_Mixin', 'stats', 'namedtuple', 'numpy', 'utils'], \n",
- " dir(paramnormal.paramnormal)\n",
- ")\n",
- "for d in distributions:\n",
+ "for d in paramnormal.paramnormal.__all__:\n",
" print(d)"
]
},
diff --git a/paramnormal/paramnormal.py b/paramnormal/paramnormal.py
index 6faae1a..7228ea0 100644
--- a/paramnormal/paramnormal.py
+++ b/paramnormal/paramnormal.py
@@ -439,7 +439,7 @@ class beta(BaseDist_Mixin):
>>> # silly fake data
>>> numpy.random.seed(0)
- >>> pn.beta(alpha=2, beta=5).rvs(size=37)
+ >>> data = pn.beta(alpha=2, beta=5).rvs(size=37)
>>> # pretend `data` is unknown and we want to fit a dist. to it
>>> pn.beta.fit(data)
params(alpha=1.6784891179355, beta=4.2459121691279, loc=0, scale=1)
@@ -526,7 +526,7 @@ class gamma(BaseDist_Mixin):
>>> # silly fake data
>>> numpy.random.seed(0)
- >>> pn.gamma(k=2, θ=5).rvs(size=37)
+ >>> data = pn.gamma(k=2, θ=5).rvs(size=37)
>>> # pretend `data` is unknown and we want to fit a dist. to it
>>> pn.gamma.fit(data)
params(k=1.3379069223213478, loc=0, theta=7.5830062081633587)
@@ -607,7 +607,7 @@ class chi_squared(BaseDist_Mixin):
>>> # silly fake data
>>> numpy.random.seed(0)
- >>> pn.chi_squared(k=2).rvs(size=37)
+ >>> data = pn.chi_squared(k=2).rvs(size=37)
>>> # pretend `data` is unknown and we want to fit a dist. to it
>>> pn.chi_squared.fit(data)
params(k=2.2668945312500028, loc=0, scale=1)
@@ -652,8 +652,8 @@ class pareto(BaseDist_Mixin):
Use scipy's maximum likelihood estimation methods to estimate
the parameters of the data's distribution. By default, `loc`
and `scale` are fixed at 0 and 1, respectively. Thus, only
- `alpha` and `beta` are estimated unless `loc` or `scale` are
- explicitly set to `None`.
+ `alpha` is estimated unless `loc` or `scale` are explicitly
+ set to `None`.
from_params(params)
Create a new distribution instances from the namedtuple result
of the :meth:`~fit` method.
@@ -668,7 +668,7 @@ class pareto(BaseDist_Mixin):
respectively.
.. note ::
- When fitting a beta distribution to a dataset, this will
+ When fitting a pareto distribution to a dataset, this will
be fixed at its default value unless you explicitly set
it to other values. Set to `None` if you wish that it be
estimated entirely from scratch.
@@ -688,7 +688,7 @@ class pareto(BaseDist_Mixin):
>>> # silly fake data
>>> numpy.random.seed(0)
- >>> pn.pareto(alpha=2).rvs(size=37)
+ >>> data = pn.pareto(alpha=2).rvs(size=37)
>>> # pretend `data` is unknown and we want to fit a dist. to it
>>> pn.pareto.fit(data)
params(alpha=1.7850585937500019, loc=0, scale=1)
@@ -699,7 +699,7 @@ class pareto(BaseDist_Mixin):
References
----------
- http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.chi2.html
+ http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pareto.html
https://en.wikipedia.org/wiki/pareto_distribution
See Also
@@ -721,3 +721,95 @@ class pareto(BaseDist_Mixin):
else:
key = 'b'
return {key: alpha, loc_key: loc, scale_key: scale}
+
+
+class exponential(BaseDist_Mixin):
+ """
+ Create and fit data to an exponential distribution.
+
+ Methods
+ -------
+ fit(data, **guesses)
+ Use scipy's maximum likelihood estimation methods to estimate
+ the parameters of the data's distribution. By default, `loc`
+ is fixed at 0. Thus, only `lambda_` is estimated unless `loc` is
+ explicitly set to `None`.
+ from_params(params)
+ Create a new distribution instances from the namedtuple result
+ of the :meth:`~fit` method.
+
+ Parameters
+ ----------
+ lambda_ : float
+ The shape parameter of the distribution.
+ loc : float, optional
+ Location parameter of the distribution. This defaults to, and
+ should probably be left at, 0.
+
+ .. note ::
+ When fitting an exponential distribution to a dataset, this
+ will be fixed at its default value unless you explicitly set
+ it to other values. Set to `None` if you wish that it be
+ estimated entirely from scratch.
+
+ Examples
+ --------
+ >>> import numpy
+ >>> import paramnormal as pn
+ >>> numpy.random.seed(0)
+ >>> pn.exponential(lambda_=2).rvs(size=3)
+ array([ 0.39793725, 0.62796538, 0.46161157])
+
+ >>> # you can also use greek letters
+ >>> numpy.random.seed(0)
+ >>> pn.exponential(λ=2).rvs(size=3)
+ array([ 0.39793725, 0.62796538, 0.46161157])
+
+ >>> # silly fake data
+ >>> numpy.random.seed(0)
+ >>> data = pn.exponential(λ=2).rvs(size=37)
+ >>> # pretend `data` is unknown and we want to fit a dist. to it
+ >>> pn.exponential.fit(data)
+ params(lambda_=1.7849050026146085, loc=0)
+
+ >>> # include `loc` in the estimate
+ >>> pn.exponential.fit(data, loc=None)
+ params(lambda_=1.8154701618164411, loc=0.0094842718426853996)
+
+ References
+ ----------
+ http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.expon.html
+ https://en.wikipedia.org/wiki/exponential_distribution
+
+ See Also
+ --------
+ scipy.stats.expon
+ numpy.random.exponential
+
+ """
+ dist = stats.expon
+ param_template = namedtuple('params', ['lambda_', 'loc'])
+
+ @staticmethod
+ @utils.greco_deco
+ def _process_args(lambda_=None, loc=0, fit=False):
+ loc_key, scale_key = utils._get_loc_scale_keys(fit=fit)
+ return {loc_key: loc, scale_key: lambda_**-1 if lambda_ is not None else lambda_}
+
+ @classmethod
+ def fit(cls, data, **guesses):
+ params = cls._fit(data, **guesses)
+ return cls.param_template(loc=params[0], lambda_=params[1]**-1)
+
+
+__all__ = [
+ 'normal',
+ 'lognormal',
+ 'weibull',
+ 'alpha',
+ 'beta',
+ 'gamma',
+ 'chi_squared',
+ 'pareto',
+ 'exponential',
+]
\ No newline at end of file
diff --git a/paramnormal/utils.py b/paramnormal/utils.py
index fd3df0e..da50755 100644
--- a/paramnormal/utils.py
+++ b/paramnormal/utils.py
@@ -8,7 +8,8 @@ SYMBOLS = {
'α': 'alpha',
'β': 'beta',
'γ': 'gamma',
- 'θ': 'theta'
+ 'λ': 'lambda_',
+ 'θ': 'theta',
}
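The key translation in the new class is `scale = lambda_ ** -1`, matching scipy's `expon` parameterization (and `λ` is now accepted as a Greek alias via the `SYMBOLS` table above). A quick check of the mapping using scipy directly; exact random output may vary by scipy version:

```python
import numpy
from scipy import stats

# pn.exponential(lambda_=2) should be equivalent to expon(loc=0, scale=1/2)
numpy.random.seed(0)
dist = stats.expon(loc=0, scale=1 / 2.0)
print(dist.rvs(size=3))  # should match the doctest values above
print(dist.mean())       # 0.5, i.e. 1 / lambda_
```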
Add exponential distribution
Seems pretty critical. | phobson/paramnormal | diff --git a/paramnormal/tests/test_paramnormal.py b/paramnormal/tests/test_paramnormal.py
index 8fde0c3..73de100 100644
--- a/paramnormal/tests/test_paramnormal.py
+++ b/paramnormal/tests/test_paramnormal.py
@@ -342,3 +342,36 @@ class Test_pareto(CheckDist_Mixin):
(params.loc, 0),
(params.scale, 1),
)
+
+
+class Test_exponential(CheckDist_Mixin):
+ def setup(self):
+ self.dist = paramnormal.exponential
+ self.cargs = []
+ self.ckwds = dict(lambda_=2)
+
+ self.np_rand_fxn = numpy.random.exponential
+ self.npargs = [0.5]
+ self.npkwds = dict()
+
+ def test_process_args(self):
+ nt.assert_dict_equal(
+ self.dist._process_args(lambda_=2.0),
+ dict(loc=0, scale=0.5)
+ )
+
+ nt.assert_dict_equal(
+ self.dist._process_args(lambda_=2.0, fit=True),
+ dict(floc=0, fscale=0.5)
+ )
+
+ @seed
+ def test_fit(self):
+ data = numpy.random.exponential(0.5, size=37)
+ params = self.dist.fit(data)
+ check_params(
+ (params.lambda_, 1.7849050026146085),
+ (params.loc, 0),
+ )
+
+
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 0
},
"num_modified_files": 4
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [],
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
importlib-metadata==4.8.3
iniconfig==1.1.1
nose==1.3.7
numpy==1.19.5
packaging==21.3
-e git+https://github.com/phobson/paramnormal.git@9e0395b03e042d0ad4f0d6be12c768103233dc27#egg=paramnormal
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
scipy==1.5.4
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: paramnormal
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- scipy==1.5.4
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/paramnormal
| [
"paramnormal/tests/test_paramnormal.py::Test_exponential::test_random_0010",
"paramnormal/tests/test_paramnormal.py::Test_exponential::test_random_0037",
"paramnormal/tests/test_paramnormal.py::Test_exponential::test_random_0100",
"paramnormal/tests/test_paramnormal.py::Test_exponential::test_random_3737",
"paramnormal/tests/test_paramnormal.py::Test_exponential::test_from_params",
"paramnormal/tests/test_paramnormal.py::Test_exponential::test_process_args"
]
| [
"paramnormal/tests/test_paramnormal.py::Test_exponential::test_fit"
]
| [
"paramnormal/tests/test_paramnormal.py::Test_normal::test_random_0010",
"paramnormal/tests/test_paramnormal.py::Test_normal::test_random_0037",
"paramnormal/tests/test_paramnormal.py::Test_normal::test_random_0100",
"paramnormal/tests/test_paramnormal.py::Test_normal::test_random_3737",
"paramnormal/tests/test_paramnormal.py::Test_normal::test_from_params",
"paramnormal/tests/test_paramnormal.py::Test_normal::test_processargs",
"paramnormal/tests/test_paramnormal.py::Test_normal::test_fit",
"paramnormal/tests/test_paramnormal.py::Test_lognormal::test_random_0010",
"paramnormal/tests/test_paramnormal.py::Test_lognormal::test_random_0037",
"paramnormal/tests/test_paramnormal.py::Test_lognormal::test_random_0100",
"paramnormal/tests/test_paramnormal.py::Test_lognormal::test_random_3737",
"paramnormal/tests/test_paramnormal.py::Test_lognormal::test_from_params",
"paramnormal/tests/test_paramnormal.py::Test_lognormal::test_process_args",
"paramnormal/tests/test_paramnormal.py::Test_lognormal::test_process_args_no_offset",
"paramnormal/tests/test_paramnormal.py::Test_lognormal::test_fit",
"paramnormal/tests/test_paramnormal.py::Test_weibull::test_random_0010",
"paramnormal/tests/test_paramnormal.py::Test_weibull::test_random_0037",
"paramnormal/tests/test_paramnormal.py::Test_weibull::test_random_0100",
"paramnormal/tests/test_paramnormal.py::Test_weibull::test_random_3737",
"paramnormal/tests/test_paramnormal.py::Test_weibull::test_from_params",
"paramnormal/tests/test_paramnormal.py::Test_weibull::test_process_args",
"paramnormal/tests/test_paramnormal.py::Test_weibull::test_fit",
"paramnormal/tests/test_paramnormal.py::Test_alpha::test_random_0010",
"paramnormal/tests/test_paramnormal.py::Test_alpha::test_random_0037",
"paramnormal/tests/test_paramnormal.py::Test_alpha::test_random_0100",
"paramnormal/tests/test_paramnormal.py::Test_alpha::test_random_3737",
"paramnormal/tests/test_paramnormal.py::Test_alpha::test_from_params",
"paramnormal/tests/test_paramnormal.py::Test_alpha::test_process_args",
"paramnormal/tests/test_paramnormal.py::Test_alpha::test_fit",
"paramnormal/tests/test_paramnormal.py::Test_beta::test_random_0010",
"paramnormal/tests/test_paramnormal.py::Test_beta::test_random_0037",
"paramnormal/tests/test_paramnormal.py::Test_beta::test_random_0100",
"paramnormal/tests/test_paramnormal.py::Test_beta::test_random_3737",
"paramnormal/tests/test_paramnormal.py::Test_beta::test_from_params",
"paramnormal/tests/test_paramnormal.py::Test_beta::test_process_args",
"paramnormal/tests/test_paramnormal.py::Test_beta::test_fit",
"paramnormal/tests/test_paramnormal.py::Test_gamma::test_random_0010",
"paramnormal/tests/test_paramnormal.py::Test_gamma::test_random_0037",
"paramnormal/tests/test_paramnormal.py::Test_gamma::test_random_0100",
"paramnormal/tests/test_paramnormal.py::Test_gamma::test_random_3737",
"paramnormal/tests/test_paramnormal.py::Test_gamma::test_from_params",
"paramnormal/tests/test_paramnormal.py::Test_gamma::test_process_args",
"paramnormal/tests/test_paramnormal.py::Test_gamma::test_fit",
"paramnormal/tests/test_paramnormal.py::Test_chi_squared::test_random_0010",
"paramnormal/tests/test_paramnormal.py::Test_chi_squared::test_random_0037",
"paramnormal/tests/test_paramnormal.py::Test_chi_squared::test_random_0100",
"paramnormal/tests/test_paramnormal.py::Test_chi_squared::test_random_3737",
"paramnormal/tests/test_paramnormal.py::Test_chi_squared::test_from_params",
"paramnormal/tests/test_paramnormal.py::Test_chi_squared::test_process_args",
"paramnormal/tests/test_paramnormal.py::Test_chi_squared::test_fit",
"paramnormal/tests/test_paramnormal.py::Test_pareto::test_random_0010",
"paramnormal/tests/test_paramnormal.py::Test_pareto::test_random_0037",
"paramnormal/tests/test_paramnormal.py::Test_pareto::test_random_0100",
"paramnormal/tests/test_paramnormal.py::Test_pareto::test_random_3737",
"paramnormal/tests/test_paramnormal.py::Test_pareto::test_from_params",
"paramnormal/tests/test_paramnormal.py::Test_pareto::test_process_args",
"paramnormal/tests/test_paramnormal.py::Test_pareto::test_fit"
]
| []
| MIT License | 391 | [
"paramnormal/paramnormal.py",
"docs/tutorial/fitting.ipynb",
"paramnormal/utils.py",
"docs/tutorial/overview.ipynb"
]
| [
"paramnormal/paramnormal.py",
"docs/tutorial/fitting.ipynb",
"paramnormal/utils.py",
"docs/tutorial/overview.ipynb"
]
|
|
cdent__wsgi-intercept-35 | 3048a0921675822f5f27567d63ecca094077dc25 | 2016-01-21 19:21:32 | 3048a0921675822f5f27567d63ecca094077dc25 | diff --git a/wsgi_intercept/__init__.py b/wsgi_intercept/__init__.py
index 487cd76..b659d75 100644
--- a/wsgi_intercept/__init__.py
+++ b/wsgi_intercept/__init__.py
@@ -50,6 +50,11 @@ Note especially that ``app_create_fn`` is a *function object* returning a WSGI
application; ``script_name`` becomes ``SCRIPT_NAME`` in the WSGI app's
environment, if set.
+Note also that if ``http_proxy`` or ``https_proxy`` is set in the environment
+this can cause difficulties with some of the intercepted libraries. If
+requests or urllib is being used, these will raise an exception if one of
+those variables is set.
+
Install
=======
diff --git a/wsgi_intercept/requests_intercept.py b/wsgi_intercept/requests_intercept.py
index cdd304a..586b752 100644
--- a/wsgi_intercept/requests_intercept.py
+++ b/wsgi_intercept/requests_intercept.py
@@ -1,6 +1,7 @@
"""Intercept HTTP connections that use `requests <http://docs.python-requests.org/en/latest/>`_.
"""
+import os
import sys
from . import WSGI_HTTPConnection, WSGI_HTTPSConnection, wsgi_fake_socket
@@ -32,6 +33,9 @@ class HTTPS_WSGIInterceptor(WSGI_HTTPSConnection, HTTPSConnection):
def install():
+ if 'http_proxy' in os.environ or 'https_proxy' in os.environ:
+ raise RuntimeError(
+ 'http_proxy or https_proxy set in environment, please unset')
HTTPConnectionPool.ConnectionCls = HTTP_WSGIInterceptor
HTTPSConnectionPool.ConnectionCls = HTTPS_WSGIInterceptor
diff --git a/wsgi_intercept/urllib_intercept.py b/wsgi_intercept/urllib_intercept.py
index 3eca406..31d8f46 100644
--- a/wsgi_intercept/urllib_intercept.py
+++ b/wsgi_intercept/urllib_intercept.py
@@ -1,5 +1,8 @@
"""Intercept HTTP connections that use urllib.request (Py3) aka urllib2 (Python 2).
"""
+
+import os
+
try:
import urllib.request as url_lib
except ImportError:
@@ -27,6 +30,9 @@ class WSGI_HTTPSHandler(url_lib.HTTPSHandler):
def install_opener():
+ if 'http_proxy' in os.environ or 'https_proxy' in os.environ:
+ raise RuntimeError(
+ 'http_proxy or https_proxy set in environment, please unset')
handlers = [WSGI_HTTPHandler()]
if WSGI_HTTPSHandler is not None:
handlers.append(WSGI_HTTPSHandler())
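The patch above adds the same environment check to both the requests and urllib installers, since a proxy variable makes the client hand proxy-specific host/port kwargs to the fake connection class (see the traceback below). A minimal standalone sketch of the guard; the function name here is mine, not the library's:

```python
import os

def check_proxy_env():
    """Refuse to install an intercept while a proxy variable is set."""
    if 'http_proxy' in os.environ or 'https_proxy' in os.environ:
        raise RuntimeError(
            'http_proxy or https_proxy set in environment, please unset')

os.environ['http_proxy'] = 'some_proxy.com:1234'
try:
    check_proxy_env()
except RuntimeError as exc:
    print(exc)  # http_proxy or https_proxy set in environment, please unset
finally:
    del os.environ['http_proxy']
```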
| kwarg error when $http_proxy is set in environ
```bash
$ echo $http_proxy
some_proxy.com:1234
```
and when I run the [requests example](http://wsgi-intercept.readthedocs.org/en/latest/requests.html), I get this error:
```
Traceback (most recent call last):
File "test.py", line 20, in <module>
resp = requests.get(url)
File "/usr/local/lib/python2.7/site-packages/requests/api.py", line 60, in get
return request('get', url, **kwargs)
File "/usr/local/lib/python2.7/site-packages/requests/api.py", line 49, in request
return session.request(method=method, url=url, **kwargs)
File "/usr/local/lib/python2.7/site-packages/requests/sessions.py", line 457, in request
resp = self.send(prep, **send_kwargs)
File "/usr/local/lib/python2.7/site-packages/requests/sessions.py", line 569, in send
r = adapter.send(request, **kwargs)
File "/usr/local/lib/python2.7/site-packages/requests/adapters.py", line 362, in send
timeout=timeout
File "/usr/local/lib/python2.7/site-packages/requests/packages/urllib3/connectionpool.py", line 511, in urlopen
conn = self._get_conn(timeout=pool_timeout)
File "/usr/local/lib/python2.7/site-packages/requests/packages/urllib3/connectionpool.py", line 231, in _get_conn
return conn or self._new_conn()
File "/usr/local/lib/python2.7/site-packages/requests/packages/urllib3/connectionpool.py", line 192, in _new_conn
strict=self.strict, **self.conn_kw)
File "/usr/local/lib/python2.7/site-packages/wsgi_intercept/requests_intercept.py", line 22, in __init__
WSGI_HTTPConnection.__init__(self, *args, **kwargs)
TypeError: __init__() got an unexpected keyword argument 'socket_options'
```
and the kwargs passed to wsgi_intercept/requests_intercept.py:HTTP_WSGIInterceptor.__init__ are:
```
{'strict': False, 'host': 'some_proxy.com', 'socket_options': [], 'timeout': <object object at 0x1028490e0>, 'port': 1234}
``` | cdent/wsgi-intercept | diff --git a/test/install.py b/test/install.py
index 225e109..65262cd 100644
--- a/test/install.py
+++ b/test/install.py
@@ -1,9 +1,10 @@
+import os
import wsgi_intercept
class BaseInstalledApp(object):
def __init__(self, app, host, port=80, script_name='',
- install=None, uninstall=None):
+ install=None, uninstall=None, proxy=None):
self.app = app
self.host = host
self.port = port
@@ -12,6 +13,7 @@ class BaseInstalledApp(object):
self._uninstall = uninstall or (lambda: None)
self._hits = 0
self._internals = {}
+ self._proxy = proxy
def __call__(self, environ, start_response):
self._hits += 1
@@ -32,10 +34,14 @@ class BaseInstalledApp(object):
wsgi_intercept.remove_wsgi_intercept(self.host, self.port)
def install(self):
+ if self._proxy:
+ os.environ['http_proxy'] = self._proxy
self._install()
self.install_wsgi_intercept()
def uninstall(self):
+ if self._proxy:
+ del os.environ['http_proxy']
self.uninstall_wsgi_intercept()
self._uninstall()
@@ -56,9 +62,9 @@ def installer_class(module=None, install=None, uninstall=None):
uninstall = uninstall or getattr(module, 'uninstall', None)
class InstalledApp(BaseInstalledApp):
- def __init__(self, app, host, port=80, script_name=''):
+ def __init__(self, app, host, port=80, script_name='', proxy=None):
BaseInstalledApp.__init__(
self, app=app, host=host, port=port, script_name=script_name,
- install=install, uninstall=uninstall)
+ install=install, uninstall=uninstall, proxy=proxy)
return InstalledApp
diff --git a/test/test_http_client.py b/test/test_http_client.py
index 3ca09b4..0622f6e 100644
--- a/test/test_http_client.py
+++ b/test/test_http_client.py
@@ -42,6 +42,18 @@ def test_other():
assert app.success()
+def test_proxy_handling():
+ """Proxy variable no impact."""
+ with InstalledApp(wsgi_app.simple_app, host=HOST, port=80,
+ proxy='some.host:1234') as app:
+ http_client = http_lib.HTTPConnection(HOST)
+ http_client.request('GET', '/')
+ content = http_client.getresponse().read()
+ http_client.close()
+ assert content == b'WSGI intercept successful!\n'
+ assert app.success()
+
+
def test_app_error():
with InstalledApp(wsgi_app.raises_app, host=HOST, port=80):
http_client = http_lib.HTTPConnection(HOST)
diff --git a/test/test_httplib2.py b/test/test_httplib2.py
index 9fa91d1..9a67e28 100644
--- a/test/test_httplib2.py
+++ b/test/test_httplib2.py
@@ -47,6 +47,17 @@ def test_bogus_domain():
'httplib2_intercept.HTTP_WSGIInterceptorWithTimeout("_nonexistant_domain_").connect()')
+def test_proxy_handling():
+ """Proxy has no impact."""
+ with InstalledApp(wsgi_app.simple_app, host=HOST, port=80,
+ proxy='some_proxy.com:1234') as app:
+ http = httplib2.Http()
+ resp, content = http.request(
+ 'http://some_hopefully_nonexistant_domain:80/')
+ assert content == b'WSGI intercept successful!\n'
+ assert app.success()
+
+
def test_https():
with InstalledApp(wsgi_app.simple_app, host=HOST, port=443) as app:
http = httplib2.Http()
diff --git a/test/test_requests.py b/test/test_requests.py
index 304d178..8005f93 100644
--- a/test/test_requests.py
+++ b/test/test_requests.py
@@ -1,3 +1,4 @@
+import os
import py.test
from wsgi_intercept import requests_intercept, WSGIAppError
from test import wsgi_app
@@ -40,6 +41,18 @@ def test_bogus_domain():
'requests.get("http://_nonexistant_domain_")')
+def test_proxy_handling():
+ with py.test.raises(RuntimeError) as exc:
+ with InstalledApp(wsgi_app.simple_app, host=HOST, port=80,
+ proxy='some_proxy.com:1234'):
+ requests.get('http://some_hopefully_nonexistant_domain:80/')
+ assert 'http_proxy or https_proxy set in environment' in str(exc.value)
+ # We need to do this by hand because the exception was raised
+ # during the entry of the context manager, so the exit handler
+ # wasn't reached.
+ del os.environ['http_proxy']
+
+
def test_https():
with InstalledApp(wsgi_app.simple_app, host=HOST, port=443) as app:
resp = requests.get('https://some_hopefully_nonexistant_domain:443/')
diff --git a/test/test_urllib.py b/test/test_urllib.py
index 82daff7..83d793f 100644
--- a/test/test_urllib.py
+++ b/test/test_urllib.py
@@ -1,3 +1,4 @@
+import os
import py.test
from wsgi_intercept import urllib_intercept, WSGIAppError
from test import wsgi_app
@@ -32,6 +33,19 @@ def test_http_other_port():
assert environ['wsgi.url_scheme'] == 'http'
+def test_proxy_handling():
+ """Like requests, urllib gets confused about proxy early on."""
+ with py.test.raises(RuntimeError) as exc:
+ with InstalledApp(wsgi_app.simple_app, host=HOST, port=80,
+ proxy='some.host:1234'):
+ url_lib.urlopen('http://some_hopefully_nonexistant_domain:80/')
+ assert 'http_proxy or https_proxy set in environment' in str(exc.value)
+ # We need to do this by hand because the exception was raised
+ # during the entry of the context manager, so the exit handler
+ # wasn't reached.
+ del os.environ['http_proxy']
+
+
def test_https():
with InstalledApp(wsgi_app.simple_app, host=HOST, port=443) as app:
url_lib.urlopen('https://some_hopefully_nonexistant_domain:443/')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 3
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[testing]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "",
"pip_packages": [
"pytest>=2.4",
"httplib2",
"requests>=2.0.1",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
httplib2==0.22.0
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
requests==2.27.1
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
-e git+https://github.com/cdent/wsgi-intercept.git@3048a0921675822f5f27567d63ecca094077dc25#egg=wsgi_intercept
zipp==3.6.0
| name: wsgi-intercept
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- httplib2==0.22.0
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- requests==2.27.1
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/wsgi-intercept
| [
"test/test_requests.py::test_proxy_handling",
"test/test_urllib.py::test_proxy_handling"
]
| [
"test/test_httplib2.py::test_bogus_domain",
"test/test_httplib2.py::test_https",
"test/test_httplib2.py::test_https_default_port",
"test/test_requests.py::test_bogus_domain",
"test/test_requests.py::test_https",
"test/test_requests.py::test_https_default_port",
"test/test_requests.py::test_https_not_intercepted"
]
| [
"test/test_http_client.py::test_http",
"test/test_http_client.py::test_https",
"test/test_http_client.py::test_other",
"test/test_http_client.py::test_proxy_handling",
"test/test_http_client.py::test_app_error",
"test/test_http_client.py::test_http_not_intercepted",
"test/test_http_client.py::test_https_not_intercepted",
"test/test_httplib2.py::test_http",
"test/test_httplib2.py::test_http_default_port",
"test/test_httplib2.py::test_http_other_port",
"test/test_httplib2.py::test_proxy_handling",
"test/test_httplib2.py::test_app_error",
"test/test_requests.py::test_http",
"test/test_requests.py::test_http_default_port",
"test/test_requests.py::test_http_other_port",
"test/test_requests.py::test_app_error",
"test/test_requests.py::test_http_not_intercepted",
"test/test_urllib.py::test_http",
"test/test_urllib.py::test_http_default_port",
"test/test_urllib.py::test_http_other_port",
"test/test_urllib.py::test_https",
"test/test_urllib.py::test_https_default_port",
"test/test_urllib.py::test_app_error",
"test/test_urllib.py::test_http_not_intercepted",
"test/test_urllib.py::test_https_not_intercepted"
]
| []
| null | 392 | [
"wsgi_intercept/requests_intercept.py",
"wsgi_intercept/__init__.py",
"wsgi_intercept/urllib_intercept.py"
]
| [
"wsgi_intercept/requests_intercept.py",
"wsgi_intercept/__init__.py",
"wsgi_intercept/urllib_intercept.py"
]
|
|
joke2k__faker-325 | 326e22d5752e0a28baee59c57ed0f49935de9059 | 2016-01-22 19:57:09 | 883576c2d718ad7f604415e02a898f1f917d5b86 | diff --git a/faker/providers/lorem/__init__.py b/faker/providers/lorem/__init__.py
index 5f07712d..dea4dd36 100644
--- a/faker/providers/lorem/__init__.py
+++ b/faker/providers/lorem/__init__.py
@@ -8,7 +8,8 @@ class Provider(BaseProvider):
@classmethod
def word(cls):
"""
- :example 'Lorem'
+ Generate a random word
+ :example 'lorem'
"""
return cls.random_element(cls.word_list)
diff --git a/faker/providers/python/__init__.py b/faker/providers/python/__init__.py
index 0b452478..4e5477b8 100644
--- a/faker/providers/python/__init__.py
+++ b/faker/providers/python/__init__.py
@@ -25,7 +25,7 @@ class Provider(BaseProvider):
@classmethod
def pystr(cls, max_chars=20):
- return Lorem.text(max_chars)
+ return "".join(cls.random_letter() for i in range(max_chars))
@classmethod
def pyfloat(cls, left_digits=None, right_digits=None, positive=False):
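A standalone sketch of what the new `pystr` body does, assuming `random_letter` draws from ASCII letters (an assumption; the provider's exact alphabet is not shown in this hunk):
```python
import random
import string

def pystr(max_chars=20):
    # Join exactly max_chars random letters: no punctuation, no whitespace.
    # string.ascii_letters stands in for BaseProvider.random_letter here.
    return "".join(random.choice(string.ascii_letters) for _ in range(max_chars))

assert len(pystr()) == 20
assert len(pystr(max_chars=255)) == 255
assert pystr(max_chars=0) == ""    # range(0) is empty, so join returns ""
assert pystr(max_chars=-10) == ""  # a negative range is empty as well
```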
| Add ability to generate lorem characters without punctuation
Sometimes I want to generate a string of characters of a specific length without any punctuation or capitalization, but the lorem provider currently does not allow for this. | joke2k/faker | diff --git a/faker/tests/__init__.py b/faker/tests/__init__.py
index 8c20e3d5..802bee4c 100644
--- a/faker/tests/__init__.py
+++ b/faker/tests/__init__.py
@@ -499,6 +499,20 @@ class FactoryTestCase(unittest.TestCase):
sentence = provider.sentence(0)
self.assertEqual(sentence, '')
+ def test_random_pystr_characters(self):
+ from faker.providers.python import Provider
+ provider = Provider(None)
+
+ characters = provider.pystr()
+ self.assertEqual(len(characters), 20)
+ characters = provider.pystr(max_chars=255)
+ self.assertEqual(len(characters), 255)
+ characters = provider.pystr(max_chars=0)
+ self.assertEqual(characters, '')
+ characters = provider.pystr(max_chars=-10)
+ self.assertEqual(characters, '')
+
+
def test_us_ssn_valid(self):
from faker.providers.ssn.en_US import Provider
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 2
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
-e git+https://github.com/joke2k/faker.git@326e22d5752e0a28baee59c57ed0f49935de9059#egg=fake_factory
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
python-dateutil==2.9.0.post0
six==1.17.0
tomli==2.2.1
| name: faker
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- six==1.17.0
- tomli==2.2.1
prefix: /opt/conda/envs/faker
| [
"faker/tests/__init__.py::FactoryTestCase::test_random_pystr_characters"
]
| []
| [
"faker/tests/__init__.py::ShimsTestCase::test_counter",
"faker/tests/__init__.py::UtilsTestCase::test_add_dicts",
"faker/tests/__init__.py::UtilsTestCase::test_choice_distribution",
"faker/tests/__init__.py::UtilsTestCase::test_find_available_locales",
"faker/tests/__init__.py::UtilsTestCase::test_find_available_providers",
"faker/tests/__init__.py::FactoryTestCase::test_add_provider_gives_priority_to_newly_added_provider",
"faker/tests/__init__.py::FactoryTestCase::test_command",
"faker/tests/__init__.py::FactoryTestCase::test_command_custom_provider",
"faker/tests/__init__.py::FactoryTestCase::test_date_time_between_dates",
"faker/tests/__init__.py::FactoryTestCase::test_date_time_between_dates_with_tzinfo",
"faker/tests/__init__.py::FactoryTestCase::test_date_time_this_period",
"faker/tests/__init__.py::FactoryTestCase::test_date_time_this_period_with_tzinfo",
"faker/tests/__init__.py::FactoryTestCase::test_datetime_safe",
"faker/tests/__init__.py::FactoryTestCase::test_datetimes_with_and_without_tzinfo",
"faker/tests/__init__.py::FactoryTestCase::test_documentor",
"faker/tests/__init__.py::FactoryTestCase::test_email",
"faker/tests/__init__.py::FactoryTestCase::test_format_calls_formatter_on_provider",
"faker/tests/__init__.py::FactoryTestCase::test_format_transfers_arguments_to_formatter",
"faker/tests/__init__.py::FactoryTestCase::test_get_formatter_returns_callable",
"faker/tests/__init__.py::FactoryTestCase::test_get_formatter_returns_correct_formatter",
"faker/tests/__init__.py::FactoryTestCase::test_get_formatter_throws_exception_on_incorrect_formatter",
"faker/tests/__init__.py::FactoryTestCase::test_magic_call_calls_format",
"faker/tests/__init__.py::FactoryTestCase::test_magic_call_calls_format_with_arguments",
"faker/tests/__init__.py::FactoryTestCase::test_no_words_paragraph",
"faker/tests/__init__.py::FactoryTestCase::test_no_words_sentence",
"faker/tests/__init__.py::FactoryTestCase::test_parse_returns_same_string_when_it_contains_no_curly_braces",
"faker/tests/__init__.py::FactoryTestCase::test_parse_returns_string_with_tokens_replaced_by_formatters",
"faker/tests/__init__.py::FactoryTestCase::test_password",
"faker/tests/__init__.py::FactoryTestCase::test_prefix_suffix_always_string",
"faker/tests/__init__.py::FactoryTestCase::test_random_element",
"faker/tests/__init__.py::FactoryTestCase::test_slugify",
"faker/tests/__init__.py::FactoryTestCase::test_timezone_conversion",
"faker/tests/__init__.py::FactoryTestCase::test_us_ssn_valid",
"faker/tests/__init__.py::GeneratorTestCase::test_get_random",
"faker/tests/__init__.py::GeneratorTestCase::test_random_seed_doesnt_seed_system_random"
]
| []
| MIT License | 393 | [
"faker/providers/python/__init__.py",
"faker/providers/lorem/__init__.py"
]
| [
"faker/providers/python/__init__.py",
"faker/providers/lorem/__init__.py"
]
|
|
PyCQA__mccabe-38 | c9bb16e5c66997153e0347ab5a1ee39e7d2a2e76 | 2016-01-24 03:31:04 | c9bb16e5c66997153e0347ab5a1ee39e7d2a2e76 | diff --git a/mccabe.py b/mccabe.py
index f5ef5d9..72b8bdd 100644
--- a/mccabe.py
+++ b/mccabe.py
@@ -133,6 +133,8 @@ class PathGraphingAstVisitor(ASTVisitor):
self.graphs["%s%s" % (self.classname, node.name)] = self.graph
self.reset()
+ visitAsyncFunctionDef = visitFunctionDef
+
def visitClassDef(self, node):
old_classname = self.classname
self.classname += node.name + "."
@@ -158,13 +160,13 @@ class PathGraphingAstVisitor(ASTVisitor):
visitAssert = visitAssign = visitAugAssign = visitDelete = visitPrint = \
visitRaise = visitYield = visitImport = visitCall = visitSubscript = \
visitPass = visitContinue = visitBreak = visitGlobal = visitReturn = \
- visitSimpleStatement
+ visitAwait = visitSimpleStatement
def visitLoop(self, node):
name = "Loop %d" % node.lineno
self._subgraph(node, name)
- visitFor = visitWhile = visitLoop
+ visitAsyncFor = visitFor = visitWhile = visitLoop
def visitIf(self, node):
name = "If %d" % node.lineno
@@ -216,6 +218,8 @@ class PathGraphingAstVisitor(ASTVisitor):
self.appendPathNode(name)
self.dispatch_list(node.body)
+ visitAsyncWith = visitWith
+
class McCabeChecker(object):
"""McCabe cyclomatic complexity checker."""
diff --git a/setup.cfg b/setup.cfg
index 5e40900..519ba68 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,2 +1,5 @@
[wheel]
universal = 1
+
+[aliases]
+test = pytest
diff --git a/setup.py b/setup.py
index fb8b970..bf8d7d7 100644
--- a/setup.py
+++ b/setup.py
@@ -33,7 +33,8 @@ setup(
license='Expat license',
py_modules=['mccabe'],
zip_safe=False,
- test_suite='test_mccabe',
+ setup_requires=['pytest-runner'],
+ tests_require=['pytest'],
entry_points={
'flake8.extension': [
'C90 = mccabe:McCabeChecker',
diff --git a/tox.ini b/tox.ini
index a472aa4..6e7e690 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,6 +1,6 @@
[tox]
envlist =
- py26,py27,py33,py34,flake8
+ py26,py27,py33,py34,py35,flake8
[testenv]
deps =
| Not working with Python 3.5 "async def ..."
It looks like mccabe is ignoring Python 3.5 coroutines defined like this:
```python
async def foobar(a, b, c):
whatever(a, b, c)
```
I tried it via flake8 version:
2.5.1 (pep8: 1.7.0, pyflakes: 1.0.0, mccabe: 0.3.1) CPython 3.5.0+ on Linux | PyCQA/mccabe | diff --git a/test_mccabe.py b/test_mccabe.py
index 07d8d78..44fb565 100644
--- a/test_mccabe.py
+++ b/test_mccabe.py
@@ -5,6 +5,8 @@ try:
except ImportError:
from io import StringIO
+import pytest
+
import mccabe
from mccabe import get_code_complexity
@@ -84,6 +86,19 @@ else:
print(4)
"""
+async_keywords = """\
+async def foobar(a, b, c):
+ await whatever(a, b, c)
+ if await b:
+ pass
+
+ async with c:
+ pass
+
+ async for x in a:
+ pass
+"""
+
def get_complexity_number(snippet, strio, max=0):
"""Get the complexity number from the printed string."""
@@ -164,6 +179,13 @@ class McCabeTestCase(unittest.TestCase):
def test_try_else(self):
self.assert_complexity(try_else, 4)
+ @pytest.mark.skipif(sys.version_info < (3, 5),
+ reason="Async keywords are only valid on Python 3.5+")
+ def test_async_keywords(self):
+ """Validate that we properly process async keyword usage."""
+ complexity = get_complexity_number(async_keywords, self.strio)
+ self.assertEqual(complexity, 3)
+
class RegressionTests(unittest.TestCase):
def setUp(self):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 4
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"flake8"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
flake8==7.2.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/PyCQA/mccabe.git@c9bb16e5c66997153e0347ab5a1ee39e7d2a2e76#egg=mccabe
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pycodestyle==2.13.0
pyflakes==3.3.1
pytest @ file:///croot/pytest_1738938843180/work
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: mccabe
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- flake8==7.2.0
- pycodestyle==2.13.0
- pyflakes==3.3.1
prefix: /opt/conda/envs/mccabe
| [
"test_mccabe.py::McCabeTestCase::test_async_keywords"
]
| []
| [
"test_mccabe.py::McCabeTestCase::test_for_else_snippet",
"test_mccabe.py::McCabeTestCase::test_for_loop_snippet",
"test_mccabe.py::McCabeTestCase::test_if_elif_else_dead_path_snippet",
"test_mccabe.py::McCabeTestCase::test_nested_functions_snippet",
"test_mccabe.py::McCabeTestCase::test_print_message",
"test_mccabe.py::McCabeTestCase::test_recursive_snippet",
"test_mccabe.py::McCabeTestCase::test_sequential_snippet",
"test_mccabe.py::McCabeTestCase::test_sequential_unencapsulated_snippet",
"test_mccabe.py::McCabeTestCase::test_trivial",
"test_mccabe.py::McCabeTestCase::test_try_else",
"test_mccabe.py::RegressionTests::test_max_complexity_is_always_an_int"
]
| []
| Expat License | 395 | [
"setup.py",
"tox.ini",
"setup.cfg",
"mccabe.py"
]
| [
"setup.py",
"tox.ini",
"setup.cfg",
"mccabe.py"
]
|
|
guykisel__inline-plz-20 | f0fb68fa031c40920731a6f31526f6b455768f37 | 2016-01-25 06:19:06 | f0fb68fa031c40920731a6f31526f6b455768f37 | diff --git a/.travis.yml b/.travis.yml
index 74e2d43..9f6e2d7 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -21,8 +21,7 @@ install:
# command to run tests, e.g. python setup.py test
script:
- tox
- - prospector --zero-exit > lint.txt
- - inline-plz --parser=prospector --filename=lint.txt
+ - inline-plz
# After you create the Github repo and add it to Travis, run the
# travis_pypi_setup.py script to finish PyPI deployment setup
diff --git a/inlineplz/env/local.py b/inlineplz/env/local.py
index 2a60f74..31f9c9c 100644
--- a/inlineplz/env/local.py
+++ b/inlineplz/env/local.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
-import subprocess
+import os
from inlineplz.env.base import EnvBase
import inlineplz.util.git as git
@@ -9,5 +9,5 @@ import inlineplz.util.git as git
class Local(EnvBase):
def __init__(self):
- if subprocess.check_call(['git status']):
+ if os.path.exists('.git'):
self.commit = git.current_sha()
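A sketch of why the replaced call never worked: `['git status']` is a single argv entry, so there is no executable by that name, and even with correct splitting `check_call` returns 0 on success, inverting the truthiness test. Checking for a `.git` directory sidesteps both problems.
```python
import os
import subprocess

# subprocess.check_call(['git status']) tries to exec a program literally
# named "git status" and raises an OSError; even split correctly,
# check_call returns 0 on success, making `if check_call(...)` backwards.
try:
    subprocess.check_call(['git status'])
except OSError as exc:
    print("old check fails outright:", exc)

print("in a git repo:", os.path.exists('.git'))  # the patched check
```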
diff --git a/inlineplz/linters/__init__.py b/inlineplz/linters/__init__.py
new file mode 100644
index 0000000..e123811
--- /dev/null
+++ b/inlineplz/linters/__init__.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+
+"""Linter configurations."""
+
+from __future__ import absolute_import
+
+import os
+import subprocess
+
+from inlineplz import parsers
+
+LINTERS = {
+ 'prospector': {
+ 'run': ['prospector', '--zero-exit'],
+ 'dotfile': '.prospector.yaml',
+ 'parser': parsers.ProspectorParser
+ }
+}
+
+
+def lint():
+ messages = []
+ for config in LINTERS.values():
+ if config.get('dotfile') in os.listdir(os.getcwd()):
+ output = subprocess.check_output(config.get('run')).decode('utf-8')
+ messages.extend(config.get('parser')().parse(output))
+ return messages
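The dict doubles as a plugin registry: `lint()` only runs entries whose dotfile exists in the working directory. A hypothetical second entry would follow the same shape; the `flake8` command and its `--exit-zero` flag are real, but `Flake8Parser` is an illustrative name that does not exist in this patch.
```python
# Hypothetical registration sketch -- Flake8Parser is a placeholder name.
LINTERS['flake8'] = {
    'run': ['flake8', '--exit-zero'],  # argv handed to subprocess.check_output
    'dotfile': '.flake8',              # presence in cwd opts the project in
    'parser': parsers.Flake8Parser,    # would turn output into messages
}
```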
diff --git a/inlineplz/main.py b/inlineplz/main.py
index 073f14d..d9819c3 100644
--- a/inlineplz/main.py
+++ b/inlineplz/main.py
@@ -8,6 +8,7 @@ import argparse
from inlineplz import interfaces
from inlineplz import parsers
from inlineplz import env
+from inlineplz import linters
def main():
@@ -17,8 +18,6 @@ def main():
parser.add_argument('--repo', type=str)
parser.add_argument('--repo-slug', type=str)
parser.add_argument('--token', type=str)
- parser.add_argument('--filename', type=str, required=True)
- parser.add_argument('--parser', type=str, required=True, choices=parsers.PARSERS)
parser.add_argument('--interface', type=str, choices=interfaces.INTERFACES)
parser.add_argument('--url', type=str)
parser.add_argument('--dryrun', action='store_true')
@@ -34,8 +33,6 @@ def inline(args):
Parse input file with the specified parser and post messages based on lint output
:param args: Contains the following
- filename: Linter output
- parser: Use a different parser based on the lint tool
interface: How are we going to post comments?
owner: Username of repo owner
repo: Repository name
@@ -53,9 +50,8 @@ def inline(args):
owner = args.owner
repo = args.repo
- with open(args.filename) as inputfile:
- my_parser = parsers.PARSERS[args.parser]()
- messages = my_parser.parse(inputfile.read())
+ messages = linters.lint()
+
# TODO: implement dryrun as an interface instead of a special case here
if args.dryrun:
for msg in messages:
diff --git a/inlineplz/message.py b/inlineplz/message.py
index 2191043..61011a4 100644
--- a/inlineplz/message.py
+++ b/inlineplz/message.py
@@ -18,4 +18,8 @@ Message:
@property
def content(self):
- return '```\n' + '\n'.join(self.comments) + '\n```'
+ if not self.comments:
+ return ''
+ if len(self.comments) > 1:
+ return '```\n' + '\n'.join(self.comments) + '\n```'
+ return '`{0}`'.format(self.comments[0].strip())
| Detect when running in CI and run/configure automatically
If CI env vars (Jenkins/Travis/etc.) are present, automatically look up the current console log, find linter command-line invocations, and parse them. | guykisel/inline-plz | diff --git a/tests/parsers/test_prospector.py b/tests/parsers/test_prospector.py
index 2913435..f673b81 100644
--- a/tests/parsers/test_prospector.py
+++ b/tests/parsers/test_prospector.py
@@ -17,10 +17,14 @@ prospector_path = os.path.join(
def test_prospector():
with open(prospector_path) as inputfile:
messages = prospector.ProspectorParser().parse(inputfile.read())
- assert messages[0].content == '```\npylint: syntax-error / invalid syntax\n```'
+ assert messages[0].content == '`pylint: syntax-error / invalid syntax`'
assert messages[0].line_number == 34
assert messages[0].path == 'docs/conf.py'
- assert messages[1].content == '```\npylint: unused-import / Unused Message imported from message\n```'
+ assert messages[1].content == '`pylint: unused-import / Unused Message imported from message`'
assert messages[1].line_number == 4
assert messages[1].path == 'inline-plz/parsers/base.py'
+ assert messages[9].content == ('```\npylint: misplaced-comparison-constant / Comparison '
+ 'should be __name__ == \'__main__\' (col 3)\npylint: '
+ 'pretend this is a real message\n```')
+ assert messages[9].line_number == 113
assert len(messages) == 11
diff --git a/tests/testdata/parsers/prospector.txt b/tests/testdata/parsers/prospector.txt
index 7402079..7c9ed99 100644
--- a/tests/testdata/parsers/prospector.txt
+++ b/tests/testdata/parsers/prospector.txt
@@ -28,6 +28,7 @@ travis_pypi_setup.py
pylint: bare-except / No exception type(s) specified
Line: 113
pylint: misplaced-comparison-constant / Comparison should be __name__ == '__main__' (col 3)
+ pylint: pretend this is a real message
Line: 114
pylint: wrong-import-position / Import "import argparse" should be placed at the top of the module (col 4)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 4
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"numpy>=1.16.0",
"pandas>=1.0.0",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
cryptography==44.0.2
exceptiongroup==1.2.2
github3.py==4.0.1
idna==3.10
iniconfig==2.1.0
-e git+https://github.com/guykisel/inline-plz.git@f0fb68fa031c40920731a6f31526f6b455768f37#egg=inlineplz
numpy==2.0.2
packaging==24.2
pandas==2.2.3
pluggy==1.5.0
pycparser==2.22
PyJWT==2.10.1
pytest==8.3.5
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.32.3
six==1.17.0
tomli==2.2.1
tzdata==2025.2
unidiff==0.7.5
uritemplate==4.1.1
urllib3==2.3.0
| name: inline-plz
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- cryptography==44.0.2
- exceptiongroup==1.2.2
- github3-py==4.0.1
- idna==3.10
- iniconfig==2.1.0
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- pluggy==1.5.0
- pycparser==2.22
- pyjwt==2.10.1
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- pytz==2025.2
- requests==2.32.3
- six==1.17.0
- tomli==2.2.1
- tzdata==2025.2
- unidiff==0.7.5
- uritemplate==4.1.1
- urllib3==2.3.0
prefix: /opt/conda/envs/inline-plz
| [
"tests/parsers/test_prospector.py::test_prospector"
]
| []
| []
| []
| ISC License | 396 | [
"inlineplz/linters/__init__.py",
".travis.yml",
"inlineplz/env/local.py",
"inlineplz/main.py",
"inlineplz/message.py"
]
| [
"inlineplz/linters/__init__.py",
".travis.yml",
"inlineplz/env/local.py",
"inlineplz/main.py",
"inlineplz/message.py"
]
|
|
scrapy__scrapy-1735 | 7d24df37380cd5a5b7394cd2534e240bd2eff0ca | 2016-01-27 15:11:13 | 6aa85aee2a274393307ac3e777180fcbdbdc9848 | diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
deleted file mode 100644
index 95b4a7e3c..000000000
--- a/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,50 +0,0 @@
-# Contributor Code of Conduct
-
-As contributors and maintainers of this project, and in the interest of
-fostering an open and welcoming community, we pledge to respect all people who
-contribute through reporting issues, posting feature requests, updating
-documentation, submitting pull requests or patches, and other activities.
-
-We are committed to making participation in this project a harassment-free
-experience for everyone, regardless of level of experience, gender, gender
-identity and expression, sexual orientation, disability, personal appearance,
-body size, race, ethnicity, age, religion, or nationality.
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery
-* Personal attacks
-* Trolling or insulting/derogatory comments
-* Public or private harassment
-* Publishing other's private information, such as physical or electronic
- addresses, without explicit permission
-* Other unethical or unprofessional conduct
-
-Project maintainers have the right and responsibility to remove, edit, or
-reject comments, commits, code, wiki edits, issues, and other contributions
-that are not aligned to this Code of Conduct, or to ban temporarily or
-permanently any contributor for other behaviors that they deem inappropriate,
-threatening, offensive, or harmful.
-
-By adopting this Code of Conduct, project maintainers commit themselves to
-fairly and consistently applying these principles to every aspect of managing
-this project. Project maintainers who do not follow or enforce the Code of
-Conduct may be permanently removed from the project team.
-
-This Code of Conduct applies both within project spaces and in public spaces
-when an individual is representing the project or its community.
-
-Instances of abusive, harassing, or otherwise unacceptable behavior may be
-reported by contacting a project maintainer at [email protected]. All
-complaints will be reviewed and investigated and will result in a response that
-is deemed necessary and appropriate to the circumstances. Maintainers are
-obligated to maintain confidentiality with regard to the reporter of an
-incident.
-
-
-This Code of Conduct is adapted from the [Contributor Covenant][homepage],
-version 1.3.0, available at
-[http://contributor-covenant.org/version/1/3/0/][version]
-
-[homepage]: http://contributor-covenant.org
-[version]: http://contributor-covenant.org/version/1/3/0/
diff --git a/README.rst b/README.rst
index 3e050bb1e..6cbed75ee 100644
--- a/README.rst
+++ b/README.rst
@@ -73,12 +73,6 @@ See http://scrapy.org/community/
Contributing
============
-Please note that this project is released with a Contributor Code of Conduct
-(see https://github.com/scrapy/scrapy/blob/master/CODE_OF_CONDUCT.md).
-
-By participating in this project you agree to abide by its terms.
-Please report unacceptable behavior to [email protected].
-
See http://doc.scrapy.org/en/master/contributing.html
Companies using Scrapy
diff --git a/docs/topics/request-response.rst b/docs/topics/request-response.rst
index 82e674cee..ea64d1599 100644
--- a/docs/topics/request-response.rst
+++ b/docs/topics/request-response.rst
@@ -445,10 +445,10 @@ Response objects
.. attribute:: Response.body
- The body of this Response. Keep in mind that Response.body
- is always a bytes object. If you want the unicode version use
- :attr:`TextResponse.text` (only available in :class:`TextResponse`
- and subclasses).
+ A str containing the body of this Response. Keep in mind that Response.body
+ is always a str. If you want the unicode version use
+ :meth:`TextResponse.body_as_unicode` (only available in
+ :class:`TextResponse` and subclasses).
This attribute is read-only. To change the body of a Response use
:meth:`replace`.
@@ -542,21 +542,6 @@ TextResponse objects
:class:`TextResponse` objects support the following attributes in addition
to the standard :class:`Response` ones:
- .. attribute:: TextResponse.text
-
- Response body, as unicode.
-
- The same as ``response.body.decode(response.encoding)``, but the
- result is cached after the first call, so you can access
- ``response.text`` multiple times without extra overhead.
-
- .. note::
-
- ``unicode(response.body)`` is not a correct way to convert response
- body to unicode: you would be using the system default encoding
- (typically `ascii`) instead of the response encoding.
-
-
.. attribute:: TextResponse.encoding
A string with the encoding of this response. The encoding is resolved by
@@ -583,6 +568,20 @@ TextResponse objects
:class:`TextResponse` objects support the following methods in addition to
the standard :class:`Response` ones:
+ .. method:: TextResponse.body_as_unicode()
+
+ Returns the body of the response as unicode. This is equivalent to::
+
+ response.body.decode(response.encoding)
+
+ But **not** equivalent to::
+
+ unicode(response.body)
+
+ Since, in the latter case, you would be using the system default encoding
+ (typically `ascii`) to convert the body to unicode, instead of the response
+ encoding.
+
.. method:: TextResponse.xpath(query)
A shortcut to ``TextResponse.selector.xpath(query)``::
@@ -595,11 +594,6 @@ TextResponse objects
response.css('p')
- .. method:: TextResponse.body_as_unicode()
-
- The same as :attr:`text`, but available as a method. This method is
- kept for backwards compatibility; please prefer ``response.text``.
-
HtmlResponse objects
--------------------
diff --git a/scrapy/downloadermiddlewares/ajaxcrawl.py b/scrapy/downloadermiddlewares/ajaxcrawl.py
index da373eca2..6b543b823 100644
--- a/scrapy/downloadermiddlewares/ajaxcrawl.py
+++ b/scrapy/downloadermiddlewares/ajaxcrawl.py
@@ -63,7 +63,7 @@ class AjaxCrawlMiddleware(object):
Return True if a page without hash fragment could be "AJAX crawlable"
according to https://developers.google.com/webmasters/ajax-crawling/docs/getting-started.
"""
- body = response.text[:self.lookup_bytes]
+ body = response.body_as_unicode()[:self.lookup_bytes]
return _has_ajaxcrawlable_meta(body)
diff --git a/scrapy/downloadermiddlewares/robotstxt.py b/scrapy/downloadermiddlewares/robotstxt.py
index d4a33dc36..6fdba90cc 100644
--- a/scrapy/downloadermiddlewares/robotstxt.py
+++ b/scrapy/downloadermiddlewares/robotstxt.py
@@ -83,8 +83,8 @@ class RobotsTxtMiddleware(object):
def _parse_robots(self, response, netloc):
rp = robotparser.RobotFileParser(response.url)
body = ''
- if hasattr(response, 'text'):
- body = response.text
+ if hasattr(response, 'body_as_unicode'):
+ body = response.body_as_unicode()
else: # last effort try
try:
body = response.body.decode('utf-8')
@@ -101,4 +101,6 @@ class RobotsTxtMiddleware(object):
rp_dfd.callback(rp)
def _robots_error(self, failure, netloc):
- self._parsers.pop(netloc).callback(None)
+ rp_dfd = self._parsers[netloc]
+ self._parsers[netloc] = None
+ rp_dfd.callback(None)
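A minimal sketch of the change in behaviour, with Twisted's `Deferred` standing in for the real download: popping the key meant a synchronously failing robots.txt download left `_parsers` without the netloc entry, so the very next lookup raised the `KeyError` from the issue. Keeping the key and storing `None` records "no usable robots.txt" instead.
```python
from twisted.internet.defer import Deferred

parsers = {}
parsers['example.com'] = Deferred()

def robots_error(netloc):
    # Old: parsers.pop(netloc).callback(None) -- key gone, later KeyError.
    # New: keep the key, remember the failure as None.
    dfd = parsers[netloc]
    parsers[netloc] = None
    dfd.callback(None)

robots_error('example.com')
assert parsers['example.com'] is None  # subsequent requests see the marker
```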
diff --git a/scrapy/http/request/form.py b/scrapy/http/request/form.py
index 2862dc096..5501634d3 100644
--- a/scrapy/http/request/form.py
+++ b/scrapy/http/request/form.py
@@ -64,8 +64,8 @@ def _urlencode(seq, enc):
def _get_form(response, formname, formid, formnumber, formxpath):
"""Find the form element """
- root = create_root_node(response.text, lxml.html.HTMLParser,
- base_url=get_base_url(response))
+ text = response.body_as_unicode()
+ root = create_root_node(text, lxml.html.HTMLParser, base_url=get_base_url(response))
forms = root.xpath('//form')
if not forms:
raise ValueError("No <form> element found in %s" % response)
diff --git a/scrapy/http/response/text.py b/scrapy/http/response/text.py
index 9c667ab7e..1c416bf82 100644
--- a/scrapy/http/response/text.py
+++ b/scrapy/http/response/text.py
@@ -67,11 +67,6 @@ class TextResponse(Response):
self._cached_ubody = html_to_unicode(charset, self.body)[1]
return self._cached_ubody
- @property
- def text(self):
- """ Body as unicode """
- return self.body_as_unicode()
-
def urljoin(self, url):
"""Join this Response's url with a possible relative url to form an
absolute interpretation of the latter."""
diff --git a/scrapy/selector/unified.py b/scrapy/selector/unified.py
index 15f3d26df..5d77f7624 100644
--- a/scrapy/selector/unified.py
+++ b/scrapy/selector/unified.py
@@ -60,7 +60,7 @@ class Selector(_ParselSelector, object_ref):
response = _response_from_text(text, st)
if response is not None:
- text = response.text
+ text = response.body_as_unicode()
kwargs.setdefault('base_url', response.url)
self.response = response
diff --git a/scrapy/utils/iterators.py b/scrapy/utils/iterators.py
index 73857b410..b0688791e 100644
--- a/scrapy/utils/iterators.py
+++ b/scrapy/utils/iterators.py
@@ -137,7 +137,7 @@ def _body_or_str(obj, unicode=True):
if not unicode:
return obj.body
elif isinstance(obj, TextResponse):
- return obj.text
+ return obj.body_as_unicode()
else:
return obj.body.decode('utf-8')
elif isinstance(obj, six.text_type):
diff --git a/scrapy/utils/response.py b/scrapy/utils/response.py
index 73db2641e..c4ad52f14 100644
--- a/scrapy/utils/response.py
+++ b/scrapy/utils/response.py
@@ -25,7 +25,7 @@ _baseurl_cache = weakref.WeakKeyDictionary()
def get_base_url(response):
"""Return the base url of the given response, joined with the response url"""
if response not in _baseurl_cache:
- text = response.text[0:4096]
+ text = response.body_as_unicode()[0:4096]
_baseurl_cache[response] = html.get_base_url(text, response.url,
response.encoding)
return _baseurl_cache[response]
@@ -37,7 +37,7 @@ _metaref_cache = weakref.WeakKeyDictionary()
def get_meta_refresh(response):
"""Parse the http-equiv refrsh parameter from the given response"""
if response not in _metaref_cache:
- text = response.text[0:4096]
+ text = response.body_as_unicode()[0:4096]
text = _noscript_re.sub(u'', text)
text = _script_re.sub(u'', text)
_metaref_cache[response] = html.get_meta_refresh(text, response.url,
| KeyError in robotstxt middleware
I'm getting these errors in robots.txt middleware:
```
2016-01-27 16:18:21 [scrapy.core.scraper] ERROR: Error downloading <GET http://yellowpages.co.th>
Traceback (most recent call last):
File "/Users/kmike/envs/scraping/lib/python2.7/site-packages/twisted/internet/defer.py", line 150, in maybeDeferred
result = f(*args, **kw)
File "/Users/kmike/svn/scrapy/scrapy/downloadermiddlewares/robotstxt.py", line 65, in robot_parser
if isinstance(self._parsers[netloc], Deferred):
KeyError: 'yellowpages.co.th'
```
It looks like https://github.com/scrapy/scrapy/pull/1473 caused it (I can't reproduce this issue in Scrapy 1.0.4, but it is present in Scrapy master). It happens when a page fails to download and the HTTP cache is enabled. I haven't debugged it further. | scrapy/scrapy | diff --git a/tests/test_downloadermiddleware_robotstxt.py b/tests/test_downloadermiddleware_robotstxt.py
index 5f45dcb82..f2e94e171 100644
--- a/tests/test_downloadermiddleware_robotstxt.py
+++ b/tests/test_downloadermiddleware_robotstxt.py
@@ -123,6 +123,18 @@ class RobotsTxtMiddlewareTest(unittest.TestCase):
deferred.addCallback(lambda _: self.assertTrue(middleware._logerror.called))
return deferred
+ def test_robotstxt_immediate_error(self):
+ self.crawler.settings.set('ROBOTSTXT_OBEY', True)
+ err = error.DNSLookupError('Robotstxt address not found')
+ def immediate_failure(request, spider):
+ deferred = Deferred()
+ deferred.errback(failure.Failure(err))
+ return deferred
+ self.crawler.engine.download.side_effect = immediate_failure
+
+ middleware = RobotsTxtMiddleware(self.crawler)
+ return self.assertNotIgnored(Request('http://site.local'), middleware)
+
def test_ignore_robotstxt_request(self):
self.crawler.settings.set('ROBOTSTXT_OBEY', True)
def ignore_request(request, spider):
diff --git a/tests/test_engine.py b/tests/test_engine.py
index baf6ef1bf..9f2c02bff 100644
--- a/tests/test_engine.py
+++ b/tests/test_engine.py
@@ -55,11 +55,12 @@ class TestSpider(Spider):
def parse_item(self, response):
item = self.item_cls()
- m = self.name_re.search(response.text)
+ body = response.body_as_unicode()
+ m = self.name_re.search(body)
if m:
item['name'] = m.group(1)
item['url'] = response.url
- m = self.price_re.search(response.text)
+ m = self.price_re.search(body)
if m:
item['price'] = m.group(1)
return item
diff --git a/tests/test_http_response.py b/tests/test_http_response.py
index c7f36687a..710a5b29d 100644
--- a/tests/test_http_response.py
+++ b/tests/test_http_response.py
@@ -107,11 +107,9 @@ class BaseResponseTest(unittest.TestCase):
body_bytes = body
assert isinstance(response.body, bytes)
- assert isinstance(response.text, six.text_type)
self._assert_response_encoding(response, encoding)
self.assertEqual(response.body, body_bytes)
self.assertEqual(response.body_as_unicode(), body_unicode)
- self.assertEqual(response.text, body_unicode)
def _assert_response_encoding(self, response, encoding):
self.assertEqual(response.encoding, resolve_encoding(encoding))
@@ -173,10 +171,6 @@ class TextResponseTest(BaseResponseTest):
self.assertTrue(isinstance(r1.body_as_unicode(), six.text_type))
self.assertEqual(r1.body_as_unicode(), unicode_string)
- # check response.text
- self.assertTrue(isinstance(r1.text, six.text_type))
- self.assertEqual(r1.text, unicode_string)
-
def test_encoding(self):
r1 = self.response_class("http://www.example.com", headers={"Content-type": ["text/html; charset=utf-8"]}, body=b"\xc2\xa3")
r2 = self.response_class("http://www.example.com", encoding='utf-8', body=u"\xa3")
@@ -225,12 +219,12 @@ class TextResponseTest(BaseResponseTest):
headers={"Content-type": ["text/html; charset=utf-8"]},
body=b"\xef\xbb\xbfWORD\xe3\xab")
self.assertEqual(r6.encoding, 'utf-8')
- self.assertEqual(r6.text, u'WORD\ufffd\ufffd')
+ self.assertEqual(r6.body_as_unicode(), u'WORD\ufffd\ufffd')
def test_bom_is_removed_from_body(self):
# Inferring encoding from body also cache decoded body as sideeffect,
# this test tries to ensure that calling response.encoding and
- # response.text in indistint order doesn't affect final
+ # response.body_as_unicode() in indistint order doesn't affect final
# values for encoding and decoded body.
url = 'http://example.com'
body = b"\xef\xbb\xbfWORD"
@@ -239,9 +233,9 @@ class TextResponseTest(BaseResponseTest):
# Test response without content-type and BOM encoding
response = self.response_class(url, body=body)
self.assertEqual(response.encoding, 'utf-8')
- self.assertEqual(response.text, u'WORD')
+ self.assertEqual(response.body_as_unicode(), u'WORD')
response = self.response_class(url, body=body)
- self.assertEqual(response.text, u'WORD')
+ self.assertEqual(response.body_as_unicode(), u'WORD')
self.assertEqual(response.encoding, 'utf-8')
# Body caching sideeffect isn't triggered when encoding is declared in
@@ -249,9 +243,9 @@ class TextResponseTest(BaseResponseTest):
# body
response = self.response_class(url, headers=headers, body=body)
self.assertEqual(response.encoding, 'utf-8')
- self.assertEqual(response.text, u'WORD')
+ self.assertEqual(response.body_as_unicode(), u'WORD')
response = self.response_class(url, headers=headers, body=body)
- self.assertEqual(response.text, u'WORD')
+ self.assertEqual(response.body_as_unicode(), u'WORD')
self.assertEqual(response.encoding, 'utf-8')
def test_replace_wrong_encoding(self):
@@ -259,18 +253,18 @@ class TextResponseTest(BaseResponseTest):
r = self.response_class("http://www.example.com", encoding='utf-8', body=b'PREFIX\xe3\xabSUFFIX')
# XXX: Policy for replacing invalid chars may suffer minor variations
# but it should always contain the unicode replacement char (u'\ufffd')
- assert u'\ufffd' in r.text, repr(r.text)
- assert u'PREFIX' in r.text, repr(r.text)
- assert u'SUFFIX' in r.text, repr(r.text)
+ assert u'\ufffd' in r.body_as_unicode(), repr(r.body_as_unicode())
+ assert u'PREFIX' in r.body_as_unicode(), repr(r.body_as_unicode())
+ assert u'SUFFIX' in r.body_as_unicode(), repr(r.body_as_unicode())
# Do not destroy html tags due to encoding bugs
r = self.response_class("http://example.com", encoding='utf-8', \
body=b'\xf0<span>value</span>')
- assert u'<span>value</span>' in r.text, repr(r.text)
+ assert u'<span>value</span>' in r.body_as_unicode(), repr(r.body_as_unicode())
# FIXME: This test should pass once we stop using BeautifulSoup's UnicodeDammit in TextResponse
- #r = self.response_class("http://www.example.com", body=b'PREFIX\xe3\xabSUFFIX')
- #assert u'\ufffd' in r.text, repr(r.text)
+ #r = self.response_class("http://www.example.com", body='PREFIX\xe3\xabSUFFIX')
+ #assert u'\ufffd' in r.body_as_unicode(), repr(r.body_as_unicode())
def test_selector(self):
body = b"<html><head><title>Some page</title><body></body></html>"
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_removed_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 9
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
Automat==24.8.1
cffi==1.17.1
constantly==23.10.4
cryptography==44.0.2
cssselect==1.3.0
exceptiongroup==1.2.2
hyperlink==21.0.0
idna==3.10
incremental==24.7.2
iniconfig==2.1.0
jmespath==1.0.1
lxml==5.3.1
packaging==24.2
parsel==1.10.0
pluggy==1.5.0
pyasn1==0.6.1
pyasn1_modules==0.4.2
pycparser==2.22
PyDispatcher==2.0.7
pyOpenSSL==25.0.0
pytest==8.3.5
queuelib==1.7.0
-e git+https://github.com/scrapy/scrapy.git@7d24df37380cd5a5b7394cd2534e240bd2eff0ca#egg=Scrapy
service-identity==24.2.0
six==1.17.0
tomli==2.2.1
Twisted==24.11.0
typing_extensions==4.13.0
w3lib==2.3.1
zope.interface==7.2
| name: scrapy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- automat==24.8.1
- cffi==1.17.1
- constantly==23.10.4
- cryptography==44.0.2
- cssselect==1.3.0
- exceptiongroup==1.2.2
- hyperlink==21.0.0
- idna==3.10
- incremental==24.7.2
- iniconfig==2.1.0
- jmespath==1.0.1
- lxml==5.3.1
- packaging==24.2
- parsel==1.10.0
- pluggy==1.5.0
- pyasn1==0.6.1
- pyasn1-modules==0.4.2
- pycparser==2.22
- pydispatcher==2.0.7
- pyopenssl==25.0.0
- pytest==8.3.5
- queuelib==1.7.0
- service-identity==24.2.0
- six==1.17.0
- tomli==2.2.1
- twisted==24.11.0
- typing-extensions==4.13.0
- w3lib==2.3.1
- zope-interface==7.2
prefix: /opt/conda/envs/scrapy
| [
"tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt_immediate_error"
]
| [
"tests/test_engine.py::EngineTest::test_crawler",
"tests/test_http_response.py::TextResponseTest::test_invalid_utf8_encoded_body_with_valid_utf8_BOM",
"tests/test_http_response.py::TextResponseTest::test_selector",
"tests/test_http_response.py::TextResponseTest::test_selector_shortcuts",
"tests/test_http_response.py::HtmlResponseTest::test_invalid_utf8_encoded_body_with_valid_utf8_BOM",
"tests/test_http_response.py::HtmlResponseTest::test_selector",
"tests/test_http_response.py::HtmlResponseTest::test_selector_shortcuts",
"tests/test_http_response.py::XmlResponseTest::test_invalid_utf8_encoded_body_with_valid_utf8_BOM",
"tests/test_http_response.py::XmlResponseTest::test_selector",
"tests/test_http_response.py::XmlResponseTest::test_selector_shortcuts"
]
| [
"tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_ignore_robotstxt_request",
"tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt",
"tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt_empty_response",
"tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt_error",
"tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt_garbage",
"tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt_meta",
"tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt_ready_parser",
"tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt_settings",
"tests/test_engine.py::EngineTest::test_close_downloader",
"tests/test_engine.py::EngineTest::test_close_engine_spiders_downloader",
"tests/test_engine.py::EngineTest::test_close_spiders_downloader",
"tests/test_http_response.py::BaseResponseTest::test_copy",
"tests/test_http_response.py::BaseResponseTest::test_copy_inherited_classes",
"tests/test_http_response.py::BaseResponseTest::test_copy_meta",
"tests/test_http_response.py::BaseResponseTest::test_immutable_attributes",
"tests/test_http_response.py::BaseResponseTest::test_init",
"tests/test_http_response.py::BaseResponseTest::test_replace",
"tests/test_http_response.py::BaseResponseTest::test_urljoin",
"tests/test_http_response.py::TextResponseTest::test_bom_is_removed_from_body",
"tests/test_http_response.py::TextResponseTest::test_copy",
"tests/test_http_response.py::TextResponseTest::test_copy_inherited_classes",
"tests/test_http_response.py::TextResponseTest::test_copy_meta",
"tests/test_http_response.py::TextResponseTest::test_declared_encoding_invalid",
"tests/test_http_response.py::TextResponseTest::test_encoding",
"tests/test_http_response.py::TextResponseTest::test_immutable_attributes",
"tests/test_http_response.py::TextResponseTest::test_init",
"tests/test_http_response.py::TextResponseTest::test_replace",
"tests/test_http_response.py::TextResponseTest::test_replace_wrong_encoding",
"tests/test_http_response.py::TextResponseTest::test_unicode_body",
"tests/test_http_response.py::TextResponseTest::test_unicode_url",
"tests/test_http_response.py::TextResponseTest::test_urljoin",
"tests/test_http_response.py::TextResponseTest::test_urljoin_with_base_url",
"tests/test_http_response.py::TextResponseTest::test_utf16",
"tests/test_http_response.py::HtmlResponseTest::test_bom_is_removed_from_body",
"tests/test_http_response.py::HtmlResponseTest::test_copy",
"tests/test_http_response.py::HtmlResponseTest::test_copy_inherited_classes",
"tests/test_http_response.py::HtmlResponseTest::test_copy_meta",
"tests/test_http_response.py::HtmlResponseTest::test_declared_encoding_invalid",
"tests/test_http_response.py::HtmlResponseTest::test_encoding",
"tests/test_http_response.py::HtmlResponseTest::test_html5_meta_charset",
"tests/test_http_response.py::HtmlResponseTest::test_html_encoding",
"tests/test_http_response.py::HtmlResponseTest::test_immutable_attributes",
"tests/test_http_response.py::HtmlResponseTest::test_init",
"tests/test_http_response.py::HtmlResponseTest::test_replace",
"tests/test_http_response.py::HtmlResponseTest::test_replace_wrong_encoding",
"tests/test_http_response.py::HtmlResponseTest::test_unicode_body",
"tests/test_http_response.py::HtmlResponseTest::test_unicode_url",
"tests/test_http_response.py::HtmlResponseTest::test_urljoin",
"tests/test_http_response.py::HtmlResponseTest::test_urljoin_with_base_url",
"tests/test_http_response.py::HtmlResponseTest::test_utf16",
"tests/test_http_response.py::XmlResponseTest::test_bom_is_removed_from_body",
"tests/test_http_response.py::XmlResponseTest::test_copy",
"tests/test_http_response.py::XmlResponseTest::test_copy_inherited_classes",
"tests/test_http_response.py::XmlResponseTest::test_copy_meta",
"tests/test_http_response.py::XmlResponseTest::test_declared_encoding_invalid",
"tests/test_http_response.py::XmlResponseTest::test_encoding",
"tests/test_http_response.py::XmlResponseTest::test_immutable_attributes",
"tests/test_http_response.py::XmlResponseTest::test_init",
"tests/test_http_response.py::XmlResponseTest::test_replace",
"tests/test_http_response.py::XmlResponseTest::test_replace_encoding",
"tests/test_http_response.py::XmlResponseTest::test_replace_wrong_encoding",
"tests/test_http_response.py::XmlResponseTest::test_unicode_body",
"tests/test_http_response.py::XmlResponseTest::test_unicode_url",
"tests/test_http_response.py::XmlResponseTest::test_urljoin",
"tests/test_http_response.py::XmlResponseTest::test_urljoin_with_base_url",
"tests/test_http_response.py::XmlResponseTest::test_utf16",
"tests/test_http_response.py::XmlResponseTest::test_xml_encoding"
]
| []
| BSD 3-Clause "New" or "Revised" License | 399 | [
"README.rst",
"scrapy/http/response/text.py",
"scrapy/downloadermiddlewares/ajaxcrawl.py",
"scrapy/utils/response.py",
"CODE_OF_CONDUCT.md",
"scrapy/utils/iterators.py",
"docs/topics/request-response.rst",
"scrapy/selector/unified.py",
"scrapy/http/request/form.py",
"scrapy/downloadermiddlewares/robotstxt.py"
]
| [
"README.rst",
"scrapy/http/response/text.py",
"scrapy/downloadermiddlewares/ajaxcrawl.py",
"scrapy/utils/response.py",
"CODE_OF_CONDUCT.md",
"scrapy/utils/iterators.py",
"docs/topics/request-response.rst",
"scrapy/selector/unified.py",
"scrapy/http/request/form.py",
"scrapy/downloadermiddlewares/robotstxt.py"
]
|
|
abh1nav__gnippy-16 | f9fc42bf37cb9f415df18fd7f72a238d1cbd69e0 | 2016-01-28 04:30:48 | f9fc42bf37cb9f415df18fd7f72a238d1cbd69e0 | diff --git a/gnippy/rules.py b/gnippy/rules.py
index 0309b5d..f5f1d04 100644
--- a/gnippy/rules.py
+++ b/gnippy/rules.py
@@ -2,6 +2,11 @@
import json
+try:
+ from urllib.parse import urlparse
+except:
+ from urlparse import urlparse
+
import requests
from six import string_types
@@ -82,6 +87,17 @@ def _post(conf, built_rules):
error_text = "HTTP Response Code: %s, Text: '%s'" % (str(r.status_code), r.text)
raise RuleAddFailedException(error_text)
+def _generate_delete_url(conf):
+ """
+ Generate the Rules URL for a DELETE request.
+ """
+ generated_url = _generate_rules_url(conf['url'])
+ parsed_url = urlparse(generated_url)
+ query = parsed_url.query
+ if query != '':
+ return generated_url.replace(query, query + "&_method=delete")
+ else:
+ return generated_url + "?_method=delete"
def _delete(conf, built_rules):
"""
@@ -99,7 +115,7 @@ def _delete(conf, built_rules):
built_rules: A single or list of built rules.
"""
_check_rules_list(built_rules)
- rules_url = _generate_rules_url(conf['url']) + "?_method=delete"
+ rules_url = _generate_delete_url(conf)
delete_data = json.dumps(_generate_post_object(built_rules))
r = requests.post(rules_url, auth=conf['auth'], data=delete_data)
if not r.status_code in range(200,300):
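A standalone sketch of the query-aware join `_generate_delete_url` performs; the URL values are the ones from the issue, and `append_delete_method` is just an illustrative name:
```python
try:
    from urllib.parse import urlparse   # Python 3
except ImportError:
    from urlparse import urlparse       # Python 2

def append_delete_method(rules_url):
    # Same decision as the patch: '&' when a query string already exists,
    # '?' when the URL has none.
    query = urlparse(rules_url).query
    if query:
        return rules_url.replace(query, query + "&_method=delete")
    return rules_url + "?_method=delete"

base = ("https://api.gnip.com:443/accounts/XXX/publishers/twitter/"
        "streams/track/prod/rules.json")
print(append_delete_method(base))                # ...rules.json?_method=delete
print(append_delete_method(base + "?client=2"))
# ...rules.json?client=2&_method=delete  (not ?client=2?_method=delete)
```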
diff --git a/setup.py b/setup.py
index 42ddcbc..d25cd66 100644
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,7 @@
import os
import sys
-version = "0.5.0"
+version = "0.5.1"
try:
from setuptools import setup
| Rules URL is incorrectly generated if endpoint URL has parameters
This does not work if `conf['url']` has params.
```
rules_url = _generate_rules_url(conf['url']) + "?_method=delete"
```
Example generated URL:
```
https://api.gnip.com:443/accounts/XXX/publishers/twitter/streams/track/prod/rules.json?client=2?_method=delete
``` | abh1nav/gnippy | diff --git a/gnippy/test/test_rules.py b/gnippy/test/test_rules.py
index 4310376..a79a857 100644
--- a/gnippy/test/test_rules.py
+++ b/gnippy/test/test_rules.py
@@ -258,6 +258,20 @@ class RulesTestCase(unittest.TestCase):
r = rules.get_rules(config_file_path=test_utils.test_config_path)
self.assertEqual(1, len(r))
+ def test_generate_delete_url_normal_case(self):
+ """ Check if the Delete URL is generated correctly. """
+ conf = { 'url': 'https://stream.gnip.com:443/accounts/XXX/publishers/twitter/streams/track/prod.json' }
+ url = rules._generate_delete_url(conf)
+ self.assertEqual('https://api.gnip.com:443/accounts/XXX/publishers/twitter/streams/track/prod/rules.json?_method=delete',
+ url)
+
+ def test_generate_delete_url_with_query(self):
+ """ Account for https://github.com/abh1nav/gnippy/issues/15 """
+ conf = { 'url': 'https://stream.gnip.com:443/accounts/XXX/publishers/twitter/streams/track/prod.json?client=2' }
+ url = rules._generate_delete_url(conf)
+ self.assertEqual('https://api.gnip.com:443/accounts/XXX/publishers/twitter/streams/track/prod/rules.json?client=2&_method=delete',
+ url)
+
@mock.patch('requests.post', good_delete)
def test_delete_rules_single(self):
""" Delete one rule. """
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 2
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cachetools==5.5.2
chardet==5.2.0
colorama==0.4.6
distlib==0.3.9
exceptiongroup==1.2.2
filelock==3.18.0
-e git+https://github.com/abh1nav/gnippy.git@f9fc42bf37cb9f415df18fd7f72a238d1cbd69e0#egg=gnippy
iniconfig==2.1.0
mock==1.0.1
nose==1.3.0
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
pyproject-api==1.9.0
pytest==8.3.5
requests==2.8.1
six==1.10.0
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
virtualenv==20.29.3
| name: gnippy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cachetools==5.5.2
- chardet==5.2.0
- colorama==0.4.6
- distlib==0.3.9
- exceptiongroup==1.2.2
- filelock==3.18.0
- iniconfig==2.1.0
- mock==1.0.1
- nose==1.3.0
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- pyproject-api==1.9.0
- pytest==8.3.5
- requests==2.8.1
- six==1.10.0
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- virtualenv==20.29.3
prefix: /opt/conda/envs/gnippy
| [
"gnippy/test/test_rules.py::RulesTestCase::test_generate_delete_url_normal_case",
"gnippy/test/test_rules.py::RulesTestCase::test_generate_delete_url_with_query"
]
| []
| [
"gnippy/test/test_rules.py::RulesTestCase::test_add_many_rules_no_creds",
"gnippy/test/test_rules.py::RulesTestCase::test_add_many_rules_not_ok",
"gnippy/test/test_rules.py::RulesTestCase::test_add_many_rules_ok",
"gnippy/test/test_rules.py::RulesTestCase::test_add_one_rule_no_creds",
"gnippy/test/test_rules.py::RulesTestCase::test_add_one_rule_not_ok",
"gnippy/test/test_rules.py::RulesTestCase::test_add_one_rule_ok",
"gnippy/test/test_rules.py::RulesTestCase::test_build_post_object",
"gnippy/test/test_rules.py::RulesTestCase::test_build_rule_bad_args",
"gnippy/test/test_rules.py::RulesTestCase::test_build_rule_with_tag",
"gnippy/test/test_rules.py::RulesTestCase::test_build_rule_without_tag",
"gnippy/test/test_rules.py::RulesTestCase::test_build_rules_url",
"gnippy/test/test_rules.py::RulesTestCase::test_build_rules_url_bad",
"gnippy/test/test_rules.py::RulesTestCase::test_check_many_rules_ok",
"gnippy/test/test_rules.py::RulesTestCase::test_check_one_rule_extra_stuff_in_rule",
"gnippy/test/test_rules.py::RulesTestCase::test_check_one_rule_ok",
"gnippy/test/test_rules.py::RulesTestCase::test_check_one_rule_typo_tag",
"gnippy/test/test_rules.py::RulesTestCase::test_check_one_rule_typo_values",
"gnippy/test/test_rules.py::RulesTestCase::test_check_rule_tag_none",
"gnippy/test/test_rules.py::RulesTestCase::test_delete_rules_multiple",
"gnippy/test/test_rules.py::RulesTestCase::test_delete_rules_single",
"gnippy/test/test_rules.py::RulesTestCase::test_get_rules_bad_json",
"gnippy/test/test_rules.py::RulesTestCase::test_get_rules_bad_status_code",
"gnippy/test/test_rules.py::RulesTestCase::test_get_rules_no_rules_field_json",
"gnippy/test/test_rules.py::RulesTestCase::test_get_rules_requests_get_exception",
"gnippy/test/test_rules.py::RulesTestCase::test_get_rules_success_no_rules",
"gnippy/test/test_rules.py::RulesTestCase::test_get_rules_success_one_rule"
]
| []
| Apache License 2.0 | 400 | [
"setup.py",
"gnippy/rules.py"
]
| [
"setup.py",
"gnippy/rules.py"
]
|
|
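A standalone sketch of the query-aware delete-URL logic from the gnippy patch above. The real helper builds the rules URL via gnippy's `_generate_rules_url(conf['url'])`; here the rules URL is passed in directly so the snippet runs on its own, and the Python 3 `urllib.parse` import is assumed (the library itself targets Python 2's `urlparse` module).

```python
from urllib.parse import urlparse

def generate_delete_url(rules_url):
    # Append _method=delete without ever emitting a second '?'.
    query = urlparse(rules_url).query
    if query != '':
        return rules_url.replace(query, query + "&_method=delete")
    return rules_url + "?_method=delete"

print(generate_delete_url("https://api.gnip.com:443/rules.json"))
# https://api.gnip.com:443/rules.json?_method=delete
print(generate_delete_url("https://api.gnip.com:443/rules.json?client=2"))
# https://api.gnip.com:443/rules.json?client=2&_method=delete
```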
docker__docker-py-911 | 446e6d08dd569194a27bb354a184b7d94ecf5e48 | 2016-01-29 00:27:19 | 4c34be5d4ab8a5a017950712e9c96b56d78d1c58 | diff --git a/docker/client.py b/docker/client.py
index fb186cc7..7d1f7c46 100644
--- a/docker/client.py
+++ b/docker/client.py
@@ -45,17 +45,17 @@ class Client(
timeout=constants.DEFAULT_TIMEOUT_SECONDS, tls=False):
super(Client, self).__init__()
- if tls and (not base_url or not base_url.startswith('https://')):
+ if tls and not base_url:
raise errors.TLSParameterError(
- 'If using TLS, the base_url argument must begin with '
- '"https://".')
+ 'If using TLS, the base_url argument must be provided.'
+ )
self.base_url = base_url
self.timeout = timeout
self._auth_configs = auth.load_config()
- base_url = utils.parse_host(base_url, sys.platform)
+ base_url = utils.parse_host(base_url, sys.platform, tls=bool(tls))
if base_url.startswith('http+unix://'):
self._custom_adapter = unixconn.UnixAdapter(base_url, timeout)
self.mount('http+docker://', self._custom_adapter)
diff --git a/docker/utils/utils.py b/docker/utils/utils.py
index 1ce1867c..dc46f1ef 100644
--- a/docker/utils/utils.py
+++ b/docker/utils/utils.py
@@ -345,7 +345,7 @@ def parse_repository_tag(repo_name):
# fd:// protocol unsupported (for obvious reasons)
# Added support for http and https
# Protocol translation: tcp -> http, unix -> http+unix
-def parse_host(addr, platform=None):
+def parse_host(addr, platform=None, tls=False):
proto = "http+unix"
host = DEFAULT_HTTP_HOST
port = None
@@ -381,7 +381,7 @@ def parse_host(addr, platform=None):
raise errors.DockerException(
"Invalid bind address protocol: {0}".format(addr)
)
- proto = "http"
+ proto = "https" if tls else "http"
if proto != "http+unix" and ":" in addr:
host_parts = addr.split(':')
| Problem when using the DOCKER_HOST variable in combination with docker-compose and https://
Hi,
I'm trying to use docker & docker-compose with the DOCKER_HOST env-variable to control a remote docker-host.
At first I configured the variables on the docker client machine as follows:
export DOCKER_CERT_PATH=/vagrant/docker
export DOCKER_TLS_VERIFY=1
export DOCKER_HOST=my-docker-vm.cloudapp.azure.com:2376
This led to an error in docker-compose, as discussed in docker/compose#2634
Once I added the `https://` protocol prefix to the hostname, the connection problem with docker-compose went away...
export DOCKER_HOST=https://my-docker-vm.cloudapp.azure.com:2376
But with this format of the DOCKER_HOST variable, the docker CLI now complains about the format...
docker ps
Invalid bind address format: https://my-docker-vm.cloudapp.azure.com:2376
I think this error is triggered here: https://github.com/docker/docker-py/blob/62d9964cc1881f6f3cd021594cd40fd80a8fc855/docker/utils/utils.py#L388
The code does not expect to find two colons (`:`) in the host value, but with the `https://` prefix it does.
PS: the two error messages in `utils.py` are also ambiguous, since at L368 and at L388 both errors emit the same message, but for different reasons ;)
Thanks & Regards,
Wolfgang | docker/docker-py | diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py
index 83d2a98d..63ea10e7 100644
--- a/tests/unit/utils_test.py
+++ b/tests/unit/utils_test.py
@@ -360,6 +360,11 @@ class ParseHostTest(base.BaseTestCase):
assert parse_host(val, 'win32') == tcp_port
+ def test_parse_host_tls(self):
+ host_value = 'myhost.docker.net:3348'
+ expected_result = 'https://myhost.docker.net:3348'
+ self.assertEqual(parse_host(host_value, None, True), expected_result)
+
class ParseRepositoryTagTest(base.BaseTestCase):
sha = 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 2
} | 1.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/docker/docker-py.git@446e6d08dd569194a27bb354a184b7d94ecf5e48#egg=docker_py
exceptiongroup==1.2.2
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
requests==2.5.3
six==1.17.0
tomli==2.2.1
websocket_client==0.32.0
| name: docker-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- requests==2.5.3
- six==1.17.0
- tomli==2.2.1
- websocket-client==0.32.0
prefix: /opt/conda/envs/docker-py
| [
"tests/unit/utils_test.py::ParseHostTest::test_parse_host_tls"
]
| []
| [
"tests/unit/utils_test.py::HostConfigTest::test_create_endpoint_config_with_aliases",
"tests/unit/utils_test.py::HostConfigTest::test_create_host_config_invalid_cpu_cfs_types",
"tests/unit/utils_test.py::HostConfigTest::test_create_host_config_no_options",
"tests/unit/utils_test.py::HostConfigTest::test_create_host_config_no_options_newer_api_version",
"tests/unit/utils_test.py::HostConfigTest::test_create_host_config_with_cpu_period",
"tests/unit/utils_test.py::HostConfigTest::test_create_host_config_with_cpu_quota",
"tests/unit/utils_test.py::HostConfigTest::test_create_host_config_with_oom_kill_disable",
"tests/unit/utils_test.py::UlimitTest::test_create_host_config_dict_ulimit",
"tests/unit/utils_test.py::UlimitTest::test_create_host_config_dict_ulimit_capitals",
"tests/unit/utils_test.py::UlimitTest::test_create_host_config_obj_ulimit",
"tests/unit/utils_test.py::UlimitTest::test_ulimit_invalid_type",
"tests/unit/utils_test.py::LogConfigTest::test_create_host_config_dict_logconfig",
"tests/unit/utils_test.py::LogConfigTest::test_create_host_config_obj_logconfig",
"tests/unit/utils_test.py::LogConfigTest::test_logconfig_invalid_config_type",
"tests/unit/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_empty",
"tests/unit/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_no_cert_path",
"tests/unit/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_tls",
"tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_compact",
"tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_complete",
"tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_empty",
"tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_list",
"tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_no_mode",
"tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_unicode_bytes_input",
"tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_unicode_unicode_input",
"tests/unit/utils_test.py::ParseEnvFileTest::test_parse_env_file_commented_line",
"tests/unit/utils_test.py::ParseEnvFileTest::test_parse_env_file_invalid_line",
"tests/unit/utils_test.py::ParseEnvFileTest::test_parse_env_file_proper",
"tests/unit/utils_test.py::ParseHostTest::test_parse_host",
"tests/unit/utils_test.py::ParseHostTest::test_parse_host_empty_value",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_image_no_tag",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_image_sha",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_image_tag",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_user_image_no_tag",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_user_image_tag",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_private_reg_image_no_tag",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_private_reg_image_sha",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_private_reg_image_tag",
"tests/unit/utils_test.py::ParseDeviceTest::test_dict",
"tests/unit/utils_test.py::ParseDeviceTest::test_full_string_definition",
"tests/unit/utils_test.py::ParseDeviceTest::test_hybrid_list",
"tests/unit/utils_test.py::ParseDeviceTest::test_partial_string_definition",
"tests/unit/utils_test.py::ParseDeviceTest::test_permissionless_string_definition",
"tests/unit/utils_test.py::ParseBytesTest::test_parse_bytes_float",
"tests/unit/utils_test.py::ParseBytesTest::test_parse_bytes_invalid",
"tests/unit/utils_test.py::ParseBytesTest::test_parse_bytes_maxint",
"tests/unit/utils_test.py::ParseBytesTest::test_parse_bytes_valid",
"tests/unit/utils_test.py::UtilsTest::test_convert_filters",
"tests/unit/utils_test.py::UtilsTest::test_create_ipam_config",
"tests/unit/utils_test.py::UtilsTest::test_decode_json_header",
"tests/unit/utils_test.py::SplitCommandTest::test_split_command_with_unicode",
"tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_matching_internal_port_ranges",
"tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_matching_internal_ports",
"tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_nonmatching_internal_port_ranges",
"tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_nonmatching_internal_ports",
"tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_one_port",
"tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_port_range",
"tests/unit/utils_test.py::PortsTest::test_host_only_with_colon",
"tests/unit/utils_test.py::PortsTest::test_non_matching_length_port_ranges",
"tests/unit/utils_test.py::PortsTest::test_port_and_range_invalid",
"tests/unit/utils_test.py::PortsTest::test_port_only_with_colon",
"tests/unit/utils_test.py::PortsTest::test_split_port_invalid",
"tests/unit/utils_test.py::PortsTest::test_split_port_no_host_port",
"tests/unit/utils_test.py::PortsTest::test_split_port_range_no_host_port",
"tests/unit/utils_test.py::PortsTest::test_split_port_range_with_host_ip_no_port",
"tests/unit/utils_test.py::PortsTest::test_split_port_range_with_host_port",
"tests/unit/utils_test.py::PortsTest::test_split_port_range_with_protocol",
"tests/unit/utils_test.py::PortsTest::test_split_port_with_host_ip",
"tests/unit/utils_test.py::PortsTest::test_split_port_with_host_ip_no_port",
"tests/unit/utils_test.py::PortsTest::test_split_port_with_host_port",
"tests/unit/utils_test.py::PortsTest::test_split_port_with_protocol",
"tests/unit/utils_test.py::ExcludePathsTest::test_directory",
"tests/unit/utils_test.py::ExcludePathsTest::test_directory_with_single_exception",
"tests/unit/utils_test.py::ExcludePathsTest::test_directory_with_subdir_exception",
"tests/unit/utils_test.py::ExcludePathsTest::test_directory_with_trailing_slash",
"tests/unit/utils_test.py::ExcludePathsTest::test_directory_with_wildcard_exception",
"tests/unit/utils_test.py::ExcludePathsTest::test_exclude_custom_dockerfile",
"tests/unit/utils_test.py::ExcludePathsTest::test_exclude_dockerfile_dockerignore",
"tests/unit/utils_test.py::ExcludePathsTest::test_no_dupes",
"tests/unit/utils_test.py::ExcludePathsTest::test_no_excludes",
"tests/unit/utils_test.py::ExcludePathsTest::test_question_mark",
"tests/unit/utils_test.py::ExcludePathsTest::test_single_filename",
"tests/unit/utils_test.py::ExcludePathsTest::test_single_filename_trailing_slash",
"tests/unit/utils_test.py::ExcludePathsTest::test_single_subdir_single_filename",
"tests/unit/utils_test.py::ExcludePathsTest::test_single_subdir_wildcard_filename",
"tests/unit/utils_test.py::ExcludePathsTest::test_subdirectory",
"tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_exclude",
"tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_filename_end",
"tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_filename_start",
"tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_subdir_single_filename",
"tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_subdir_wildcard_filename",
"tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_with_exception",
"tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_with_wildcard_exception",
"tests/unit/utils_test.py::TarTest::test_tar_with_directory_symlinks",
"tests/unit/utils_test.py::TarTest::test_tar_with_empty_directory",
"tests/unit/utils_test.py::TarTest::test_tar_with_excludes",
"tests/unit/utils_test.py::TarTest::test_tar_with_file_symlinks"
]
| []
| Apache License 2.0 | 401 | [
"docker/utils/utils.py",
"docker/client.py"
]
| [
"docker/utils/utils.py",
"docker/client.py"
]
|
|
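To make the docker-py change concrete, here is a reduced sketch of the scheme selection that the patch adds to `parse_host()`. The real function also handles unix sockets, `fd://`, platform defaults, and port validation; this version reproduces only the tcp/plain-host branch to show the new `tls` switch.

```python
def parse_host(addr, tls=False):
    # Strip an explicit tcp:// scheme, then pick the HTTP scheme
    # based on whether TLS was requested.
    if addr.startswith('tcp://'):
        addr = addr[len('tcp://'):]
    proto = 'https' if tls else 'http'
    return '{0}://{1}'.format(proto, addr)

assert parse_host('myhost.docker.net:3348', tls=True) == 'https://myhost.docker.net:3348'
assert parse_host('tcp://myhost.docker.net:3348') == 'http://myhost.docker.net:3348'
```

With this in place, a host taken from `DOCKER_HOST` no longer needs an `https://` prefix for TLS connections, which resolves the conflict with the docker CLI described in the issue.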
scrapy__scrapy-1746 | a35aec71e96b0c0288c370afa425e8e700dca8b3 | 2016-01-29 18:00:12 | 6aa85aee2a274393307ac3e777180fcbdbdc9848 | diff --git a/scrapy/commands/settings.py b/scrapy/commands/settings.py
index 0e73f4f58..bce4e6086 100644
--- a/scrapy/commands/settings.py
+++ b/scrapy/commands/settings.py
@@ -1,5 +1,8 @@
from __future__ import print_function
+import json
+
from scrapy.commands import ScrapyCommand
+from scrapy.settings import BaseSettings
class Command(ScrapyCommand):
@@ -28,7 +31,11 @@ class Command(ScrapyCommand):
def run(self, args, opts):
settings = self.crawler_process.settings
if opts.get:
- print(settings.get(opts.get))
+ s = settings.get(opts.get)
+ if isinstance(s, BaseSettings):
+ print(json.dumps(s.copy_to_dict()))
+ else:
+ print(s)
elif opts.getbool:
print(settings.getbool(opts.getbool))
elif opts.getint:
diff --git a/scrapy/settings/__init__.py b/scrapy/settings/__init__.py
index 342d2585e..7b7808959 100644
--- a/scrapy/settings/__init__.py
+++ b/scrapy/settings/__init__.py
@@ -4,6 +4,7 @@ import copy
import warnings
from collections import MutableMapping
from importlib import import_module
+from pprint import pformat
from scrapy.utils.deprecate import create_deprecated_class
from scrapy.exceptions import ScrapyDeprecationWarning
@@ -368,11 +369,31 @@ class BaseSettings(MutableMapping):
def __len__(self):
return len(self.attributes)
- def __str__(self):
- return str(self.attributes)
+ def _to_dict(self):
+ return {k: (v._to_dict() if isinstance(v, BaseSettings) else v)
+ for k, v in six.iteritems(self)}
+
+ def copy_to_dict(self):
+ """
+ Make a copy of current settings and convert to a dict.
- def __repr__(self):
- return "<%s %s>" % (self.__class__.__name__, self.attributes)
+ This method returns a new dict populated with the same values
+ and their priorities as the current settings.
+
+ Modifications to the returned dict won't be reflected on the original
+ settings.
+
+ This method can be useful for example for printing settings
+ in Scrapy shell.
+ """
+ settings = self.copy()
+ return settings._to_dict()
+
+ def _repr_pretty_(self, p, cycle):
+ if cycle:
+ p.text(repr(self))
+ else:
+ p.text(pformat(self.copy_to_dict()))
@property
def overrides(self):
| BaseSettings.__repr__ is too verbose for scrapy shell
The Settings object gained a `__repr__` in https://github.com/scrapy/scrapy/pull/1149, but its output is unreadably verbose in `scrapy shell`:
[s] Available Scrapy objects:
[s] crawler <scrapy.crawler.Crawler object at 0x10c042fd0>
[s] item {}
[s] request <GET http://yellowpages.co.th>
[s] response <302 http://yellowpages.co.th>
[s] settings {'CLOSESPIDER_ITEMCOUNT': <SettingsAttribute value=0 priority=0>, 'RETRY_HTTP_CODES': <SettingsAttribute value=[500, 502, 503, 504, 408] priority=0>, 'HTTPCACHE_DIR': <SettingsAttribute value='httpcache-2' priority=20>, 'MEMUSAGE_REPORT': <SettingsAttribute value=False priority=0>, 'DOWNLOAD_HANDLERS_BASE': <SettingsAttribute value=<BaseSettings {'s3': <SettingsAttribute value='scrapy.core.downloader.handlers.s3.S3DownloadHandler' priority=0>, 'ftp': <SettingsAttribute value='scrapy.core.downloader.handlers.ftp.FTPDownloadHandler' priority=0>, 'http': <SettingsAttribute value='scrapy.core.downloader.handlers.http.HTTPDownloadHandler' priority=0>, 'https': <SettingsAttribute value='scrapy.core.downloader.handlers.http.HTTPDownloadHandler' priority=0>, 'file': <SettingsAttribute value='scrapy.core.downloader.handlers.file.FileDownloadHandler' priority=0>}> priority=0>, 'RETRY_PRIORITY_ADJUST': <SettingsAttribute value=-1 priority=0>, 'MAIL_FROM': <SettingsAttribute value='scrapy@localhost' priority=0>, 'HTTPCACHE_EXPIRATION_SECS': <SettingsAttribute value=86400 priority=20>, 'SPIDER_LOADER_CLASS': <SettingsAttribute value='scrapy.spiderloader.SpiderLoader' priority=0>, 'COMPRESSION_ENABLED': <SettingsAttribute value=True priority=0>, 'DOWNLOAD_TIMEOUT': <SettingsAttribute value=180 priority=0>, 'MAIL_PASS': <SettingsAttribute value=None priority=0>, 'MEMUSAGE_LIMIT_MB': <SettingsAttribute value=0 priority=0>, 'EXTENSIONS': <SettingsAttribute value=<BaseSettings {}> priority=0>, 'DEPTH_PRIORITY': <SettingsAttribute value=0 priority=0>, 'TELNETCONSOLE_HOST': <SettingsAttribute value='127.0.0.1' priority=0>, 'MEMDEBUG_NOTIFY': <SettingsAttribute value=[] priority=0>, 'HTTPPROXY_AUTH_ENCODING': <SettingsAttribute value='latin-1' priority=0>, 'DOWNLOAD_WARNSIZE': <SettingsAttribute value=33554432 priority=0>, 'SPIDER_MODULES': <SettingsAttribute value=['acrawler.spiders'] priority=20>, 'RETRY_TIMES': <SettingsAttribute value=2 priority=0>, 'TELNETCONSOLE_PORT': <SettingsAttribute value=[6023, 6073] priority=0>, 'TELNETCONSOLE_ENABLED': <SettingsAttribute value=False priority=20>, 'DOWNLOADER_MIDDLEWARES': <SettingsAttribute value=<BaseSettings {}> priority=0>, 'HTTPCACHE_DBM_MODULE': <SettingsAttribute value='anydbm' priority=0>, 'ROBOTSTXT_OBEY': <SettingsAttribute value=True priority=20>, 'DEPTH_LIMIT': <SettingsAttribute value=0 priority=0>, 'REACTOR_THREADPOOL_MAXSIZE': <SettingsAttribute value=10 priority=0>, 'FEED_EXPORT_FIELDS': <SettingsAttribute value=None priority=0>, 'CLOSESPIDER_PAGECOUNT': <SettingsAttribute value=0 priority=0>, 'LOG_SHORT_NAMES': <SettingsAttribute value=False priority=0>, 'AUTOTHROTTLE_MAX_DELAY': <SettingsAttribute value=60 priority=20>, 'URLLENGTH_LIMIT': <SettingsAttribute value=2083 priority=0>, 'FEED_EXPORTERS': <SettingsAttribute value=<BaseSettings {}> priority=0>, 'LOG_ENCODING': <SettingsAttribute value='utf-8' priority=0>, 'FEED_EXPORTERS_BASE': <SettingsAttribute value=<BaseSettings {'xml': <SettingsAttribute value='scrapy.exporters.XmlItemExporter' priority=0>, 'jsonlines': <SettingsAttribute value='scrapy.exporters.JsonLinesItemExporter' priority=0>, 'jl': <SettingsAttribute value='scrapy.exporters.JsonLinesItemExporter' priority=0>, 'json': <SettingsAttribute value='scrapy.exporters.JsonItemExporter' priority=0>, 'csv': <SettingsAttribute value='scrapy.exporters.CsvItemExporter' priority=0>, 'pickle': <SettingsAttribute value='scrapy.exporters.PickleItemExporter' priority=0>, 'marshal': <SettingsAttribute 
value='scrapy.exporters.MarshalItemExporter' priority=0>}> priority=0>, 'FEED_FORMAT': <SettingsAttribute value='jsonlines' priority=0>, 'DOWNLOAD_DELAY': <SettingsAttribute value=0 priority=0>, 'HTTPCACHE_GZIP': <SettingsAttribute value=False priority=0>, 'DOWNLOADER_MIDDLEWARES_BASE': <SettingsAttribute value=<BaseSettings {'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': <SettingsAttribute value=400 priority=0>, 'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware': <SettingsAttribute value=550 priority=0>, 'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': <SettingsAttribute value=590 priority=0>, 'scrapy.downloadermiddlewares.chunked.ChunkedTransferMiddleware': <SettingsAttribute value=830 priority=0>, 'scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware': <SettingsAttribute value=100 priority=0>, 'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware': <SettingsAttribute value=350 priority=0>, 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': <SettingsAttribute value=600 priority=0>, 'scrapy.downloadermiddlewares.ajaxcrawl.AjaxCrawlMiddleware': <SettingsAttribute value=560 priority=0>, 'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': <SettingsAttribute value=750 priority=0>, 'scrapy.downloadermiddlewares.httpcache.HttpCacheMiddleware': <SettingsAttribute value=900 priority=0>, 'scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware': <SettingsAttribute value=300 priority=0>, 'scrapy.downloadermiddlewares.retry.RetryMiddleware': <SettingsAttribute value=500 priority=0>, 'scrapy.downloadermiddlewares.stats.DownloaderStats': <SettingsAttribute value=850 priority=0>, 'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': <SettingsAttribute value=700 priority=0>, 'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware': <SettingsAttribute value=580 priority=0>}> priority=0>, 'DNSCACHE_ENABLED': <SettingsAttribute value=True priority=0>, 'CONCURRENT_REQUESTS_PER_IP': <SettingsAttribute value=0 priority=0>, 'EDITOR': <SettingsAttribute value='nano' priority=0>, 'MAIL_HOST': <SettingsAttribute value='localhost' priority=0>, 'CONCURRENT_REQUESTS': <SettingsAttribute value=100 priority=20>, 'AUTOTHROTTLE_START_DELAY': <SettingsAttribute value=1 priority=20>, 'CLOSESPIDER_ERRORCOUNT': <SettingsAttribute value=0 priority=0>, 'STATS_CLASS': <SettingsAttribute value='scrapy.statscollectors.MemoryStatsCollector' priority=0>, 'FEED_STORAGES_BASE': <SettingsAttribute value=<BaseSettings {'': <SettingsAttribute value='scrapy.extensions.feedexport.FileFeedStorage' priority=0>, 's3': <SettingsAttribute value='scrapy.extensions.feedexport.S3FeedStorage' priority=0>, 'ftp': <SettingsAttribute value='scrapy.extensions.feedexport.FTPFeedStorage' priority=0>, 'file': <SettingsAttribute value='scrapy.extensions.feedexport.FileFeedStorage' priority=0>, 'stdout': <SettingsAttribute value='scrapy.extensions.feedexport.StdoutFeedStorage' priority=0>}> priority=0>, 'REDIRECT_ENABLED': <SettingsAttribute value=True priority=0>, 'AUTOTHROTTLE_ENABLED': <SettingsAttribute value=True priority=20>, 'COMMANDS_MODULE': <SettingsAttribute value='' priority=0>, 'AUTOTHROTTLE_DEBUG': <SettingsAttribute value=False priority=0>, 'NEWSPIDER_MODULE': <SettingsAttribute value='acrawler.spiders' priority=20>, 'LOG_UNSERIALIZABLE_REQUESTS': <SettingsAttribute value=False priority=0>, 'DOWNLOAD_MAXSIZE': <SettingsAttribute value=1073741824 priority=0>, 'MAIL_PORT': <SettingsAttribute value=25 priority=0>, 'REFERER_ENABLED': 
<SettingsAttribute value=True priority=0>, 'HTTPCACHE_POLICY': <SettingsAttribute value='scrapy.extensions.httpcache.DummyPolicy' priority=0>, 'STATS_DUMP': <SettingsAttribute value=True priority=0>, 'MEMUSAGE_NOTIFY_MAIL': <SettingsAttribute value=[] priority=0>, 'DOWNLOAD_HANDLERS': <SettingsAttribute value=<BaseSettings {}> priority=0>, 'LOG_DATEFORMAT': <SettingsAttribute value='%Y-%m-%d %H:%M:%S' priority=0>, 'LOG_LEVEL': <SettingsAttribute value='DEBUG' priority=0>, 'DOWNLOADER_HTTPCLIENTFACTORY': <SettingsAttribute value='scrapy.core.downloader.webclient.ScrapyHTTPClientFactory' priority=0>, 'REDIRECT_MAX_TIMES': <SettingsAttribute value=20 priority=0>, 'REDIRECT_PRIORITY_ADJUST': <SettingsAttribute value=2 priority=0>, 'DUPEFILTER_CLASS': <SettingsAttribute value='scrapy.dupefilters.BaseDupeFilter' priority=10>, 'RETRY_ENABLED': <SettingsAttribute value=True priority=0>, 'SPIDER_CONTRACTS': <SettingsAttribute value=<BaseSettings {}> priority=0>, 'HTTPCACHE_ENABLED': <SettingsAttribute value=True priority=20>, 'LOG_ENABLED': <SettingsAttribute value=True priority=0>, 'MAIL_USER': <SettingsAttribute value=None priority=0>, 'HTTPCACHE_ALWAYS_STORE': <SettingsAttribute value=False priority=0>, 'LOGSTATS_INTERVAL': <SettingsAttribute value=0 priority=10>, 'DEFAULT_ITEM_CLASS': <SettingsAttribute value='scrapy.item.Item' priority=0>, 'DNS_TIMEOUT': <SettingsAttribute value=60 priority=0>, 'DEPTH_STATS': <SettingsAttribute value=True priority=0>, 'DOWNLOADER_CLIENTCONTEXTFACTORY': <SettingsAttribute value='scrapy.core.downloader.contextfactory.ScrapyClientContextFactory' priority=0>, 'MEMUSAGE_CHECK_INTERVAL_SECONDS': <SettingsAttribute value=60.0 priority=0>, 'EXTENSIONS_BASE': <SettingsAttribute value=<BaseSettings {'scrapy.extensions.corestats.CoreStats': <SettingsAttribute value=0 priority=0>, 'scrapy.extensions.feedexport.FeedExporter': <SettingsAttribute value=0 priority=0>, 'scrapy.extensions.memdebug.MemoryDebugger': <SettingsAttribute value=0 priority=0>, 'scrapy.extensions.memusage.MemoryUsage': <SettingsAttribute value=0 priority=0>, 'scrapy.extensions.logstats.LogStats': <SettingsAttribute value=0 priority=0>, 'scrapy.extensions.telnet.TelnetConsole': <SettingsAttribute value=0 priority=0>, 'scrapy.extensions.closespider.CloseSpider': <SettingsAttribute value=0 priority=0>, 'scrapy.extensions.spiderstate.SpiderState': <SettingsAttribute value=0 priority=0>, 'scrapy.extensions.throttle.AutoThrottle': <SettingsAttribute value=0 priority=0>}> priority=0>, 'FEED_STORAGES': <SettingsAttribute value=<BaseSettings {}> priority=0>, 'BOT_NAME': <SettingsAttribute value='acrawler' priority=20>, 'SPIDER_CONTRACTS_BASE': <SettingsAttribute value=<BaseSettings {'scrapy.contracts.default.ScrapesContract': <SettingsAttribute value=3 priority=0>, 'scrapy.contracts.default.UrlContract': <SettingsAttribute value=1 priority=0>, 'scrapy.contracts.default.ReturnsContract': <SettingsAttribute value=2 priority=0>}> priority=0>, 'METAREFRESH_MAXDELAY': <SettingsAttribute value=100 priority=0>, 'CONCURRENT_REQUESTS_PER_DOMAIN': <SettingsAttribute value=8 priority=0>, 'HTTPCACHE_IGNORE_HTTP_CODES': <SettingsAttribute value=[] priority=0>, 'KEEP_ALIVE': <SettingsAttribute value=True priority=10>, 'ITEM_PROCESSOR': <SettingsAttribute value='scrapy.pipelines.ItemPipelineManager' priority=0>, 'MEMUSAGE_WARNING_MB': <SettingsAttribute value=0 priority=0>, 'FEED_STORE_EMPTY': <SettingsAttribute value=False priority=0>, 'COOKIES_DEBUG': <SettingsAttribute value=False priority=0>, 'FEED_URI': 
<SettingsAttribute value=None priority=0>, 'SPIDER_MIDDLEWARES': <SettingsAttribute value=<BaseSettings {}> priority=0>, 'DOWNLOADER': <SettingsAttribute value='scrapy.core.downloader.Downloader' priority=0>, 'AUTOTHROTTLE_TARGET_CONCURRENCY': <SettingsAttribute value=1.0 priority=20>, 'USER_AGENT': <SettingsAttribute value='acrawler' priority=20>, 'AJAXCRAWL_ENABLED': <SettingsAttribute value=False priority=0>, 'COOKIES_ENABLED': <SettingsAttribute value=False priority=20>, 'DNSCACHE_SIZE': <SettingsAttribute value=10000 priority=0>, 'LOG_FORMAT': <SettingsAttribute value='%(asctime)s [%(name)s] %(levelname)s: %(message)s' priority=0>, 'ITEM_PIPELINES': <SettingsAttribute value=<BaseSettings {}> priority=0>, 'LOG_FORMATTER': <SettingsAttribute value='scrapy.logformatter.LogFormatter' priority=0>, 'HTTPCACHE_IGNORE_RESPONSE_CACHE_CONTROLS': <SettingsAttribute value=[] priority=0>, 'METAREFRESH_ENABLED': <SettingsAttribute value=True priority=0>, 'HTTPCACHE_IGNORE_MISSING': <SettingsAttribute value=False priority=0>, 'HTTPCACHE_IGNORE_SCHEMES': <SettingsAttribute value=['file'] priority=0>, 'SCHEDULER_MEMORY_QUEUE': <SettingsAttribute value='scrapy.squeues.LifoMemoryQueue' priority=0>, 'SCHEDULER_DISK_QUEUE': <SettingsAttribute value='scrapy.squeues.PickleLifoDiskQueue' priority=0>, 'RANDOMIZE_DOWNLOAD_DELAY': <SettingsAttribute value=True priority=0>, 'SETTINGS_MODULE': <SettingsAttribute value='acrawler.settings' priority=20>, 'TEMPLATES_DIR': <SettingsAttribute value='/Users/kmike/svn/scrapy/scrapy/templates' priority=0>, 'LOG_STDOUT': <SettingsAttribute value=False priority=0>, 'CONCURRENT_ITEMS': <SettingsAttribute value=100 priority=0>, 'DOWNLOADER_STATS': <SettingsAttribute value=True priority=0>, 'LOG_FILE': <SettingsAttribute value=None priority=0>, 'HTTPCACHE_STORAGE': <SettingsAttribute value='scrapy.extensions.httpcache.FilesystemCacheStorage' priority=20>, 'MEMDEBUG_ENABLED': <SettingsAttribute value=False priority=0>, 'FEED_URI_PARAMS': <SettingsAttribute value=None priority=0>, 'DEFAULT_REQUEST_HEADERS': <SettingsAttribute value=<BaseSettings {'Accept-Language': <SettingsAttribute value='en' priority=0>, 'Accept': <SettingsAttribute value='text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8' priority=0>}> priority=0>, 'CLOSESPIDER_TIMEOUT': <SettingsAttribute value=0 priority=0>, 'SCHEDULER': <SettingsAttribute value='scrapy.core.scheduler.Scheduler' priority=0>, 'SPIDER_MIDDLEWARES_BASE': <SettingsAttribute value=<BaseSettings {'scrapy.spidermiddlewares.referer.RefererMiddleware': <SettingsAttribute value=700 priority=0>, 'scrapy.spidermiddlewares.httperror.HttpErrorMiddleware': <SettingsAttribute value=50 priority=0>, 'scrapy.spidermiddlewares.depth.DepthMiddleware': <SettingsAttribute value=900 priority=0>, 'scrapy.spidermiddlewares.urllength.UrlLengthMiddleware': <SettingsAttribute value=800 priority=0>, 'scrapy.spidermiddlewares.offsite.OffsiteMiddleware': <SettingsAttribute value=500 priority=0>}> priority=0>, 'ITEM_PIPELINES_BASE': <SettingsAttribute value=<BaseSettings {}> priority=0>, 'STATSMAILER_RCPTS': <SettingsAttribute value=[] priority=0>, 'MEMUSAGE_ENABLED': <SettingsAttribute value=False priority=0>}
[s] spider <DefaultSpider 'default' at 0x10ebdcf90>
[s] Useful shortcuts:
[s] shelp() Shell help (print this help)
[s] fetch(req_or_url) Fetch request (or URL) and update local objects
[s] view(response) View response in a browser | scrapy/scrapy | diff --git a/tests/test_cmdline/__init__.py b/tests/test_cmdline/__init__.py
index c2de4fbc8..7733e7180 100644
--- a/tests/test_cmdline/__init__.py
+++ b/tests/test_cmdline/__init__.py
@@ -68,4 +68,4 @@ class CmdlineTest(unittest.TestCase):
settingsstr = settingsstr.replace(char, '"')
settingsdict = json.loads(settingsstr)
six.assertCountEqual(self, settingsdict.keys(), EXTENSIONS.keys())
- self.assertIn('value=200', settingsdict[EXT_PATH])
+ self.assertEquals(200, settingsdict[EXT_PATH])
diff --git a/tests/test_settings/__init__.py b/tests/test_settings/__init__.py
index 44b9b6df3..4acf22cba 100644
--- a/tests/test_settings/__init__.py
+++ b/tests/test_settings/__init__.py
@@ -302,6 +302,21 @@ class BaseSettingsTest(unittest.TestCase):
self.assertListEqual(copy.get('TEST_LIST_OF_LISTS')[0],
['first_one', 'first_two'])
+ def test_copy_to_dict(self):
+ s = BaseSettings({'TEST_STRING': 'a string',
+ 'TEST_LIST': [1, 2],
+ 'TEST_BOOLEAN': False,
+ 'TEST_BASE': BaseSettings({1: 1, 2: 2}, 'project'),
+ 'TEST': BaseSettings({1: 10, 3: 30}, 'default'),
+ 'HASNOBASE': BaseSettings({3: 3000}, 'default')})
+ self.assertDictEqual(s.copy_to_dict(),
+ {'HASNOBASE': {3: 3000},
+ 'TEST': {1: 10, 3: 30},
+ 'TEST_BASE': {1: 1, 2: 2},
+ 'TEST_BOOLEAN': False,
+ 'TEST_LIST': [1, 2],
+ 'TEST_STRING': 'a string'})
+
def test_freeze(self):
self.settings.freeze()
with self.assertRaises(TypeError) as cm:
@@ -343,14 +358,6 @@ class BaseSettingsTest(unittest.TestCase):
self.assertEqual(self.settings.defaults.get('BAR'), 'foo')
self.assertIn('BAR', self.settings.defaults)
- def test_repr(self):
- settings = BaseSettings()
- self.assertEqual(repr(settings), "<BaseSettings {}>")
- attr = SettingsAttribute('testval', 15)
- settings['testkey'] = attr
- self.assertEqual(repr(settings),
- "<BaseSettings {'testkey': %s}>" % repr(attr))
-
class SettingsTest(unittest.TestCase):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 2
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
Automat==24.8.1
cffi==1.17.1
constantly==23.10.4
coverage==7.8.0
cryptography==44.0.2
cssselect==1.3.0
exceptiongroup==1.2.2
execnet==2.1.1
hyperlink==21.0.0
idna==3.10
incremental==24.7.2
iniconfig==2.1.0
jmespath==1.0.1
lxml==5.3.1
packaging==24.2
parsel==1.10.0
pluggy==1.5.0
pyasn1==0.6.1
pyasn1_modules==0.4.2
pycparser==2.22
PyDispatcher==2.0.7
pyOpenSSL==25.0.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
queuelib==1.7.0
-e git+https://github.com/scrapy/scrapy.git@a35aec71e96b0c0288c370afa425e8e700dca8b3#egg=Scrapy
service-identity==24.2.0
six==1.17.0
tomli==2.2.1
Twisted==24.11.0
typing_extensions==4.13.0
w3lib==2.3.1
zope.interface==7.2
| name: scrapy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- automat==24.8.1
- cffi==1.17.1
- constantly==23.10.4
- coverage==7.8.0
- cryptography==44.0.2
- cssselect==1.3.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- hyperlink==21.0.0
- idna==3.10
- incremental==24.7.2
- iniconfig==2.1.0
- jmespath==1.0.1
- lxml==5.3.1
- packaging==24.2
- parsel==1.10.0
- pluggy==1.5.0
- pyasn1==0.6.1
- pyasn1-modules==0.4.2
- pycparser==2.22
- pydispatcher==2.0.7
- pyopenssl==25.0.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- queuelib==1.7.0
- service-identity==24.2.0
- six==1.17.0
- tomli==2.2.1
- twisted==24.11.0
- typing-extensions==4.13.0
- w3lib==2.3.1
- zope-interface==7.2
prefix: /opt/conda/envs/scrapy
| [
"tests/test_settings/__init__.py::BaseSettingsTest::test_copy_to_dict"
]
| [
"tests/test_cmdline/__init__.py::CmdlineTest::test_default_settings",
"tests/test_cmdline/__init__.py::CmdlineTest::test_override_dict_settings",
"tests/test_cmdline/__init__.py::CmdlineTest::test_override_settings_using_envvar",
"tests/test_cmdline/__init__.py::CmdlineTest::test_override_settings_using_set_arg",
"tests/test_cmdline/__init__.py::CmdlineTest::test_profiling"
]
| [
"tests/test_settings/__init__.py::SettingsGlobalFuncsTest::test_get_settings_priority",
"tests/test_settings/__init__.py::SettingsAttributeTest::test_overwrite_basesettings",
"tests/test_settings/__init__.py::SettingsAttributeTest::test_repr",
"tests/test_settings/__init__.py::SettingsAttributeTest::test_set_equal_priority",
"tests/test_settings/__init__.py::SettingsAttributeTest::test_set_greater_priority",
"tests/test_settings/__init__.py::SettingsAttributeTest::test_set_less_priority",
"tests/test_settings/__init__.py::BaseSettingsTest::test_copy",
"tests/test_settings/__init__.py::BaseSettingsTest::test_delete",
"tests/test_settings/__init__.py::BaseSettingsTest::test_deprecated_attribute_defaults",
"tests/test_settings/__init__.py::BaseSettingsTest::test_deprecated_attribute_overrides",
"tests/test_settings/__init__.py::BaseSettingsTest::test_freeze",
"tests/test_settings/__init__.py::BaseSettingsTest::test_frozencopy",
"tests/test_settings/__init__.py::BaseSettingsTest::test_get",
"tests/test_settings/__init__.py::BaseSettingsTest::test_getpriority",
"tests/test_settings/__init__.py::BaseSettingsTest::test_getwithbase",
"tests/test_settings/__init__.py::BaseSettingsTest::test_maxpriority",
"tests/test_settings/__init__.py::BaseSettingsTest::test_set_calls_settings_attributes_methods_on_update",
"tests/test_settings/__init__.py::BaseSettingsTest::test_set_instance_identity_on_update",
"tests/test_settings/__init__.py::BaseSettingsTest::test_set_new_attribute",
"tests/test_settings/__init__.py::BaseSettingsTest::test_set_settingsattribute",
"tests/test_settings/__init__.py::BaseSettingsTest::test_setdict_alias",
"tests/test_settings/__init__.py::BaseSettingsTest::test_setitem",
"tests/test_settings/__init__.py::BaseSettingsTest::test_setmodule_alias",
"tests/test_settings/__init__.py::BaseSettingsTest::test_setmodule_by_path",
"tests/test_settings/__init__.py::BaseSettingsTest::test_setmodule_only_load_uppercase_vars",
"tests/test_settings/__init__.py::BaseSettingsTest::test_update",
"tests/test_settings/__init__.py::BaseSettingsTest::test_update_jsonstring",
"tests/test_settings/__init__.py::SettingsTest::test_autopromote_dicts",
"tests/test_settings/__init__.py::SettingsTest::test_getdict_autodegrade_basesettings",
"tests/test_settings/__init__.py::SettingsTest::test_initial_defaults",
"tests/test_settings/__init__.py::SettingsTest::test_initial_values",
"tests/test_settings/__init__.py::CrawlerSettingsTest::test_deprecated_crawlersettings"
]
| []
| BSD 3-Clause "New" or "Revised" License | 402 | [
"scrapy/commands/settings.py",
"scrapy/settings/__init__.py"
]
| [
"scrapy/commands/settings.py",
"scrapy/settings/__init__.py"
]
|
|
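As an illustration of what the scrapy patch's `copy_to_dict()` achieves for shell output, the recursive unwrapping is shown in isolation below. A plain `dict` subclass stands in for `BaseSettings` here, and the extension path is made up for the example.

```python
class FakeSettings(dict):
    # Stand-in for BaseSettings: recursively convert nested settings
    # containers into plain dicts that pprint/json can render compactly.
    def _to_dict(self):
        return {k: (v._to_dict() if isinstance(v, FakeSettings) else v)
                for k, v in self.items()}

s = FakeSettings({'BOT_NAME': 'acrawler',
                  'EXTENSIONS': FakeSettings({'myproject.ext.SomeExt': 200})})
print(s._to_dict())
# {'BOT_NAME': 'acrawler', 'EXTENSIONS': {'myproject.ext.SomeExt': 200}}
```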
networkx__networkx-1963 | ec6dfae2aaebbbbf0a4620002ab795efa6430c25 | 2016-01-29 18:00:19 | ec6dfae2aaebbbbf0a4620002ab795efa6430c25 | diff --git a/networkx/algorithms/core.py b/networkx/algorithms/core.py
index 2091bb97f..c98c7d77c 100644
--- a/networkx/algorithms/core.py
+++ b/networkx/algorithms/core.py
@@ -4,30 +4,41 @@
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
+#
+# Authors: Dan Schult ([email protected])
+# Jason Grout ([email protected])
+# Aric Hagberg ([email protected])
"""
Find the k-cores of a graph.
The k-core is found by recursively pruning nodes with degrees less than k.
-See the following reference for details:
+See the following references for details:
An O(m) Algorithm for Cores Decomposition of Networks
Vladimir Batagelj and Matjaz Zaversnik, 2003.
http://arxiv.org/abs/cs.DS/0310049
-"""
-
-__author__ = "\n".join(['Dan Schult ([email protected])',
- 'Jason Grout ([email protected])',
- 'Aric Hagberg ([email protected])'])
+Generalized Cores
+Vladimir Batagelj and Matjaz Zaversnik, 2002.
+http://arxiv.org/pdf/cs/0202039
-__all__ = ['core_number','k_core','k_shell','k_crust','k_corona','find_cores']
+For directed graphs a more general notion is that of D-cores which
+looks at (k, l) restrictions on (in, out) degree. The (k, k) D-core
+is the k-core.
+D-cores: Measuring Collaboration of Directed Graphs Based on Degeneracy
+Christos Giatsidis, Dimitrios M. Thilikos, Michalis Vazirgiannis, ICDM 2011.
+http://www.graphdegeneracy.org/dcores_ICDM_2011.pdf
+"""
import networkx as nx
-from networkx import all_neighbors
from networkx.exception import NetworkXError
from networkx.utils import not_implemented_for
+__all__ = ['core_number', 'find_cores', 'k_core',
+ 'k_shell', 'k_crust', 'k_corona']
+
+
@not_implemented_for('multigraph')
def core_number(G):
"""Return the core number for each vertex.
@@ -50,7 +61,8 @@ def core_number(G):
Raises
------
NetworkXError
- The k-core is not defined for graphs with self loops or parallel edges.
+ The k-core is not implemented for graphs with self loops
+ or parallel edges.
Notes
-----
@@ -66,9 +78,9 @@ def core_number(G):
http://arxiv.org/abs/cs.DS/0310049
"""
if G.number_of_selfloops() > 0:
- raise NetworkXError(
- 'Input graph has self loops; the core number is not defined.'
- ' Consider using G.remove_edges_from(G.selfloop_edges()).')
+ msg = ('Input graph has self loops which is not permitted; '
+ 'Consider using G.remove_edges_from(G.selfloop_edges()).')
+ raise NetworkXError(msg)
degrees = dict(G.degree())
# Sort nodes by degree.
nodes = sorted(degrees, key=degrees.get)
@@ -81,7 +93,7 @@ def core_number(G):
node_pos = {v: pos for pos, v in enumerate(nodes)}
# The initial guess for the core number of a node is its degree.
core = degrees
- nbrs = {v: set(all_neighbors(G, v)) for v in G}
+ nbrs = {v: list(nx.all_neighbors(G, v)) for v in G}
for v in nodes:
for u in nbrs[v]:
if core[u] > core[v]:
@@ -99,34 +111,34 @@ def core_number(G):
find_cores = core_number
-def _core_helper(G, func, k=None, core=None):
- """Returns the subgraph induced by all nodes for which ``func``
- returns ``True``.
-
- ``G`` is a NetworkX graph.
-
- ``func`` is a function that takes three inputs: a node of ``G``, the
- maximum core value, and the core number of the graph. The function
- must return a Boolean value.
+def _core_subgraph(G, k_filter, k=None, core=None):
+ """Returns the subgraph induced by nodes passing filter ``k_filter``.
- ``k`` is the order of the core. If not specified, the maximum over
- all core values will be returned.
-
- ``core`` is a dictionary mapping node to core numbers for that
- node. If you have already computed it, you should provide it
- here. If not specified, the core numbers will be computed from the
- graph.
+ Parameters
+ ----------
+ G : NetworkX graph
+ The graph or directed graph to process
+ k_filter : filter function
+ This function filters the nodes chosen. It takes three inputs:
+ A node of G, the filter's cutoff, and the core dict of the graph.
+ The function should return a Boolean value.
+ k : int, optional
+ The order of the core. If not specified use the max core number.
+ This value is used as the cutoff for the filter.
+ core : dict, optional
+ Precomputed core numbers keyed by node for the graph ``G``.
+ If not specified, the core numbers will be computed from ``G``.
"""
if core is None:
core = core_number(G)
if k is None:
k = max(core.values())
- nodes = [v for v in core if func(v, k, core)]
+ nodes = (v for v in core if k_filter(v, k, core))
return G.subgraph(nodes).copy()
-def k_core(G,k=None,core_number=None):
+def k_core(G, k=None, core_number=None):
"""Return the k-core of G.
A k-core is a maximal subgraph that contains nodes of degree k or more.
@@ -171,21 +183,23 @@ def k_core(G,k=None,core_number=None):
Vladimir Batagelj and Matjaz Zaversnik, 2003.
http://arxiv.org/abs/cs.DS/0310049
"""
- func = lambda v, k, core_number: core_number[v] >= k
- return _core_helper(G, func, k, core_number)
+ def k_filter(v, k, c):
+ return c[v] >= k
+ return _core_subgraph(G, k_filter, k, core_number)
-def k_shell(G,k=None,core_number=None):
+def k_shell(G, k=None, core_number=None):
"""Return the k-shell of G.
- The k-shell is the subgraph of nodes in the k-core but not in the (k+1)-core.
+ The k-shell is the subgraph induced by nodes with core number k.
+ That is, nodes in the k-core that are not in the (k+1)-core.
Parameters
----------
G : NetworkX graph
A graph or directed graph.
k : int, optional
- The order of the shell. If not specified return the main shell.
+ The order of the shell. If not specified return the outer shell.
core_number : dictionary, optional
Precomputed core numbers for the graph G.
@@ -198,7 +212,8 @@ def k_shell(G,k=None,core_number=None):
Raises
------
NetworkXError
- The k-shell is not defined for graphs with self loops or parallel edges.
+ The k-shell is not implemented for graphs with self loops
+ or parallel edges.
Notes
-----
@@ -225,11 +240,12 @@ def k_shell(G,k=None,core_number=None):
and Eran Shir, PNAS July 3, 2007 vol. 104 no. 27 11150-11154
http://www.pnas.org/content/104/27/11150.full
"""
- func = lambda v, k, core_number: core_number[v] == k
- return _core_helper(G, func, k, core_number)
+ def k_filter(v, k, c):
+ return c[v] == k
+ return _core_subgraph(G, k_filter, k, core_number)
-def k_crust(G,k=None,core_number=None):
+def k_crust(G, k=None, core_number=None):
"""Return the k-crust of G.
The k-crust is the graph G with the k-core removed.
@@ -251,7 +267,8 @@ def k_crust(G,k=None,core_number=None):
Raises
------
NetworkXError
- The k-crust is not defined for graphs with self loops or parallel edges.
+ The k-crust is not implemented for graphs with self loops
+ or parallel edges.
Notes
-----
@@ -276,16 +293,14 @@ def k_crust(G,k=None,core_number=None):
and Eran Shir, PNAS July 3, 2007 vol. 104 no. 27 11150-11154
http://www.pnas.org/content/104/27/11150.full
"""
- func = lambda v, k, core_number: core_number[v] <= k
- # HACK These two checks are done in _core_helper, but this function
- # requires k to be one less than the maximum core value instead of
- # just the maximum. Therefore we duplicate the checks here. A better
- # solution should exist...
+ # Default for k is one less than in _core_subgraph, so just inline.
+ # Filter is c[v] <= k
if core_number is None:
- core_number = nx.core_number(G)
+ core_number = find_cores(G)
if k is None:
k = max(core_number.values()) - 1
- return _core_helper(G, func, k, core_number)
+ nodes = (v for v in core_number if core_number[v] <= k)
+ return G.subgraph(nodes).copy()
def k_corona(G, k, core_number=None):
@@ -335,5 +350,6 @@ def k_corona(G, k, core_number=None):
Phys. Rev. E 73, 056101 (2006)
http://link.aps.org/doi/10.1103/PhysRevE.73.056101
"""
- func = lambda v, k, c: c[v] == k and sum(1 for w in G[v] if c[w] >= k) == k
- return _core_helper(G, func, k, core_number)
+ def func(v, k, c):
+ return c[v] == k and k == sum(1 for w in G[v] if c[w] >= k)
+ return _core_subgraph(G, func, k, core_number)
| k-core algorithm produces incorrect output for DiGraph
As per the title, calling `nx.k_core(G, k=x)` does not return the x-core of a graph if `G` is a `DiGraph`.
See attached file.
[6954_2011.txt](https://github.com/networkx/networkx/files/105086/6954_2011.txt)
To reproduce, run:
```python
import networkx as nx
G = nx.DiGraph()
with open("6954_2011.txt", 'r') as f:
for line in f:
fields = line.strip().split('\t')
G.add_edge(fields[0], fields[1])
core = nx.k_core(G, k = 12)
core.number_of_nodes() # Outputs "24"; expected output: "12"
```
There are only 12 nodes with (in+out) degree at least 12 once you recursively remove all those that don't qualify. These are:
IND
AUT
CHE
BEL
USA
ESP
CHN
FRA
NLD
GBR
ITA
DEU
However,
```python
core.nodes()
```
says `['BEL', 'SWE', 'DEU', 'GBR', 'KOR', 'USA', 'SGP', 'MYS', 'POL', 'NLD', 'HKG', 'FRA', 'CHE', 'ESP', 'CHN', 'AUT', 'THA', 'JPN', 'TUR', 'ITA', 'IND', 'RUS', 'NOR', 'CZE']`
The method seems to work correctly for `nx.Graph`, or at least I've yet to find a counterexample. | networkx/networkx | diff --git a/networkx/algorithms/tests/test_core.py b/networkx/algorithms/tests/test_core.py
index 48399aeed..7119159c8 100644
--- a/networkx/algorithms/tests/test_core.py
+++ b/networkx/algorithms/tests/test_core.py
@@ -2,8 +2,8 @@
from nose.tools import *
import networkx as nx
-class TestCore:
+class TestCore:
def setUp(self):
# G is the example graph in Figure 1 from Batagelj and
# Zaversnik's paper titled An O(m) Algorithm for Cores
@@ -12,103 +12,114 @@ class TestCore:
# shown, the 3-core is given by nodes 1-8, the 2-core by nodes
# 9-16, the 1-core by nodes 17-20 and node 21 is in the
# 0-core.
- t1=nx.convert_node_labels_to_integers(nx.tetrahedral_graph(),1)
- t2=nx.convert_node_labels_to_integers(t1,5)
- G=nx.union(t1,t2)
- G.add_edges_from( [(3,7), (2,11), (11,5), (11,12), (5,12), (12,19),
- (12,18), (3,9), (7,9), (7,10), (9,10), (9,20),
- (17,13), (13,14), (14,15), (15,16), (16,13)])
+ t1 = nx.convert_node_labels_to_integers(nx.tetrahedral_graph(), 1)
+ t2 = nx.convert_node_labels_to_integers(t1, 5)
+ G = nx.union(t1, t2)
+ G.add_edges_from([(3, 7), (2, 11), (11, 5), (11, 12), (5, 12),
+ (12, 19), (12, 18), (3, 9), (7, 9), (7, 10),
+ (9, 10), (9, 20), (17, 13), (13, 14), (14, 15),
+ (15, 16), (16, 13)])
G.add_node(21)
- self.G=G
+ self.G = G
# Create the graph H resulting from the degree sequence
- # [0,1,2,2,2,2,3] when using the Havel-Hakimi algorithm.
+ # [0, 1, 2, 2, 2, 2, 3] when using the Havel-Hakimi algorithm.
- degseq=[0,1,2,2,2,2,3]
+ degseq = [0, 1, 2, 2, 2, 2, 3]
H = nx.havel_hakimi_graph(degseq)
- mapping = {6:0, 0:1, 4:3, 5:6, 3:4, 1:2, 2:5 }
+ mapping = {6: 0, 0: 1, 4: 3, 5: 6, 3: 4, 1: 2, 2: 5}
self.H = nx.relabel_nodes(H, mapping)
def test_trivial(self):
"""Empty graph"""
G = nx.Graph()
- assert_equal(nx.find_cores(G),{})
+ assert_equal(nx.find_cores(G), {})
def test_find_cores(self):
- cores=nx.find_cores(self.G)
- nodes_by_core=[]
- for val in [0,1,2,3]:
- nodes_by_core.append( sorted([k for k in cores if cores[k]==val]))
- assert_equal(nodes_by_core[0],[21])
- assert_equal(nodes_by_core[1],[17, 18, 19, 20])
- assert_equal(nodes_by_core[2],[9, 10, 11, 12, 13, 14, 15, 16])
+ core = nx.find_cores(self.G)
+ nodes_by_core = [sorted([n for n in core if core[n] == val])
+ for val in range(4)]
+ assert_equal(nodes_by_core[0], [21])
+ assert_equal(nodes_by_core[1], [17, 18, 19, 20])
+ assert_equal(nodes_by_core[2], [9, 10, 11, 12, 13, 14, 15, 16])
assert_equal(nodes_by_core[3], [1, 2, 3, 4, 5, 6, 7, 8])
def test_core_number(self):
# smoke test real name
- cores=nx.core_number(self.G)
+ cores = nx.core_number(self.G)
def test_find_cores2(self):
- cores=nx.find_cores(self.H)
- nodes_by_core=[]
- for val in [0,1,2]:
- nodes_by_core.append( sorted([k for k in cores if cores[k]==val]))
- assert_equal(nodes_by_core[0],[0])
- assert_equal(nodes_by_core[1],[1, 3])
- assert_equal(nodes_by_core[2],[2, 4, 5, 6])
+ core = nx.find_cores(self.H)
+ nodes_by_core = [sorted([n for n in core if core[n] == val])
+ for val in range(3)]
+ assert_equal(nodes_by_core[0], [0])
+ assert_equal(nodes_by_core[1], [1, 3])
+ assert_equal(nodes_by_core[2], [2, 4, 5, 6])
+
+ def test_directed_find_cores(Self):
+ '''core number had a bug for directed graphs found in issue #1959'''
+ # small example where too timid edge removal can make cn[2] = 3
+ G = nx.DiGraph()
+ edges = [(1, 2), (2, 1), (2, 3), (2, 4), (3, 4), (4, 3)]
+ G.add_edges_from(edges)
+ assert_equal(nx.core_number(G), {1: 2, 2: 2, 3: 2, 4: 2})
+ # small example where too aggressive edge removal can make cn[2] = 2
+ more_edges = [(1, 5), (3, 5), (4, 5), (3, 6), (4, 6), (5, 6)]
+ G.add_edges_from(more_edges)
+ assert_equal(nx.core_number(G), {1: 3, 2: 3, 3: 3, 4: 3, 5: 3, 6: 3})
def test_main_core(self):
- main_core_subgraph=nx.k_core(self.H)
- assert_equal(sorted(main_core_subgraph.nodes()),[2,4,5,6])
+ main_core_subgraph = nx.k_core(self.H)
+ assert_equal(sorted(main_core_subgraph.nodes()), [2, 4, 5, 6])
def test_k_core(self):
# k=0
- k_core_subgraph=nx.k_core(self.H,k=0)
- assert_equal(sorted(k_core_subgraph.nodes()),sorted(self.H.nodes()))
+ k_core_subgraph = nx.k_core(self.H, k=0)
+ assert_equal(sorted(k_core_subgraph.nodes()), sorted(self.H.nodes()))
# k=1
- k_core_subgraph=nx.k_core(self.H,k=1)
- assert_equal(sorted(k_core_subgraph.nodes()),[1,2,3,4,5,6])
- # k=2
- k_core_subgraph=nx.k_core(self.H,k=2)
- assert_equal(sorted(k_core_subgraph.nodes()),[2,4,5,6])
+ k_core_subgraph = nx.k_core(self.H, k=1)
+ assert_equal(sorted(k_core_subgraph.nodes()), [1, 2, 3, 4, 5, 6])
+ # k = 2
+ k_core_subgraph = nx.k_core(self.H, k=2)
+ assert_equal(sorted(k_core_subgraph.nodes()), [2, 4, 5, 6])
def test_main_crust(self):
- main_crust_subgraph=nx.k_crust(self.H)
- assert_equal(sorted(main_crust_subgraph.nodes()),[0,1,3])
+ main_crust_subgraph = nx.k_crust(self.H)
+ assert_equal(sorted(main_crust_subgraph.nodes()), [0, 1, 3])
def test_k_crust(self):
- # k=0
- k_crust_subgraph=nx.k_crust(self.H,k=2)
- assert_equal(sorted(k_crust_subgraph.nodes()),sorted(self.H.nodes()))
+ # k = 0
+ k_crust_subgraph = nx.k_crust(self.H, k=2)
+ assert_equal(sorted(k_crust_subgraph.nodes()), sorted(self.H.nodes()))
# k=1
- k_crust_subgraph=nx.k_crust(self.H,k=1)
- assert_equal(sorted(k_crust_subgraph.nodes()),[0,1,3])
+ k_crust_subgraph = nx.k_crust(self.H, k=1)
+ assert_equal(sorted(k_crust_subgraph.nodes()), [0, 1, 3])
# k=2
- k_crust_subgraph=nx.k_crust(self.H,k=0)
- assert_equal(sorted(k_crust_subgraph.nodes()),[0])
+ k_crust_subgraph = nx.k_crust(self.H, k=0)
+ assert_equal(sorted(k_crust_subgraph.nodes()), [0])
def test_main_shell(self):
- main_shell_subgraph=nx.k_shell(self.H)
- assert_equal(sorted(main_shell_subgraph.nodes()),[2,4,5,6])
+ main_shell_subgraph = nx.k_shell(self.H)
+ assert_equal(sorted(main_shell_subgraph.nodes()), [2, 4, 5, 6])
def test_k_shell(self):
# k=0
- k_shell_subgraph=nx.k_shell(self.H,k=2)
- assert_equal(sorted(k_shell_subgraph.nodes()),[2,4,5,6])
+ k_shell_subgraph = nx.k_shell(self.H, k=2)
+ assert_equal(sorted(k_shell_subgraph.nodes()), [2, 4, 5, 6])
# k=1
- k_shell_subgraph=nx.k_shell(self.H,k=1)
- assert_equal(sorted(k_shell_subgraph.nodes()),[1,3])
+ k_shell_subgraph = nx.k_shell(self.H, k=1)
+ assert_equal(sorted(k_shell_subgraph.nodes()), [1, 3])
# k=2
- k_shell_subgraph=nx.k_shell(self.H,k=0)
- assert_equal(sorted(k_shell_subgraph.nodes()),[0])
+ k_shell_subgraph = nx.k_shell(self.H, k=0)
+ assert_equal(sorted(k_shell_subgraph.nodes()), [0])
def test_k_corona(self):
# k=0
- k_corona_subgraph=nx.k_corona(self.H,k=2)
- assert_equal(sorted(k_corona_subgraph.nodes()),[2,4,5,6])
+ k_corona_subgraph = nx.k_corona(self.H, k=2)
+ assert_equal(sorted(k_corona_subgraph.nodes()), [2, 4, 5, 6])
# k=1
- k_corona_subgraph=nx.k_corona(self.H,k=1)
- assert_equal(sorted(k_corona_subgraph.nodes()),[1])
+ k_corona_subgraph = nx.k_corona(self.H, k=1)
+ assert_equal(sorted(k_corona_subgraph.nodes()), [1])
# k=2
- k_corona_subgraph=nx.k_corona(self.H,k=0)
- assert_equal(sorted(k_corona_subgraph.nodes()),[0])
+ k_corona_subgraph = nx.k_corona(self.H, k=0)
+ assert_equal(sorted(k_corona_subgraph.nodes()), [0])
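The directed-graph scenarios above are small enough to verify by hand. A minimal sketch of what `test_directed_find_cores` asserts (assuming a networkx build that includes this fix; for directed graphs, `core_number` counts total degree, i.e. in-degree plus out-degree):

import networkx as nx

# Too-timid edge removal would report cn[2] = 3 here; the correct
# core number for every node in this 4-node digraph is 2.
G = nx.DiGraph([(1, 2), (2, 1), (2, 3), (2, 4), (3, 4), (4, 3)])
assert nx.core_number(G) == {1: 2, 2: 2, 3: 2, 4: 2}

# Adding nodes 5 and 6 lifts every node into the 3-core; too-aggressive
# edge removal would report cn[2] = 2 here instead of 3.
G.add_edges_from([(1, 5), (3, 5), (4, 5), (3, 6), (4, 6), (5, 6)])
assert nx.core_number(G) == {n: 3 for n in G}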
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 1.112 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y libgdal-dev graphviz"
],
"python": "3.6",
"reqs_path": [
"requirements/default.txt",
"requirements/test.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
decorator==5.1.1
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/networkx/networkx.git@ec6dfae2aaebbbbf0a4620002ab795efa6430c25#egg=networkx
nose==1.3.7
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: networkx
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- decorator==5.1.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/networkx
| [
"networkx/algorithms/tests/test_core.py::TestCore::test_directed_find_cores"
]
| [
"networkx/algorithms/tests/test_core.py::TestCore::test_find_cores",
"networkx/algorithms/tests/test_core.py::TestCore::test_core_number",
"networkx/algorithms/tests/test_core.py::TestCore::test_find_cores2",
"networkx/algorithms/tests/test_core.py::TestCore::test_main_core",
"networkx/algorithms/tests/test_core.py::TestCore::test_k_core",
"networkx/algorithms/tests/test_core.py::TestCore::test_main_crust",
"networkx/algorithms/tests/test_core.py::TestCore::test_k_crust",
"networkx/algorithms/tests/test_core.py::TestCore::test_main_shell",
"networkx/algorithms/tests/test_core.py::TestCore::test_k_shell",
"networkx/algorithms/tests/test_core.py::TestCore::test_k_corona"
]
| [
"networkx/algorithms/tests/test_core.py::TestCore::test_trivial"
]
| []
| BSD 3-Clause | 403 | [
"networkx/algorithms/core.py"
]
| [
"networkx/algorithms/core.py"
]
|
|
falconry__falcon-698 | 4f5d704c6f2ffa168846641afb8acad1101ee394 | 2016-01-29 23:52:19 | b78ffaac7c412d3b3d6cd3c70dd05024d79d2cce | jmvrbanac: lgtm :+1: | diff --git a/doc/api/cookies.rst b/doc/api/cookies.rst
index 84d8669..69c89d8 100644
--- a/doc/api/cookies.rst
+++ b/doc/api/cookies.rst
@@ -84,7 +84,7 @@ You can also instruct the client to remove a cookie with the
# Clear the bad cookie
resp.unset_cookie('bad_cookie')
-.. _cookie-secure-atribute:
+.. _cookie-secure-attribute:
The Secure Attribute
~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/api/index.rst b/doc/api/index.rst
index b0f4b20..a389b91 100644
--- a/doc/api/index.rst
+++ b/doc/api/index.rst
@@ -14,3 +14,4 @@ Classes and Functions
hooks
routing
util
+ testing
diff --git a/doc/api/util.rst b/doc/api/util.rst
index 46ab3b6..fa11b32 100644
--- a/doc/api/util.rst
+++ b/doc/api/util.rst
@@ -9,21 +9,6 @@ URI Functions
.. automodule:: falcon.util.uri
:members:
-Testing
--------
-
-.. autoclass:: falcon.testing.TestBase
- :members:
-
-.. autoclass:: falcon.testing.TestResource
- :members:
-
-.. autoclass:: falcon.testing.StartResponseMock
- :members:
-
-.. automodule:: falcon.testing
- :members: rand_string, create_environ
-
Miscellaneous
-------------
diff --git a/falcon/__init__.py b/falcon/__init__.py
index c0214b8..f6256b2 100644
--- a/falcon/__init__.py
+++ b/falcon/__init__.py
@@ -24,7 +24,7 @@ HTTP_METHODS = (
'TRACE',
)
-DEFAULT_MEDIA_TYPE = 'application/json; charset=utf-8'
+DEFAULT_MEDIA_TYPE = 'application/json; charset=UTF-8'
# Hoist classes and functions into the falcon namespace
diff --git a/falcon/api.py b/falcon/api.py
index d05ec71..d10f452 100644
--- a/falcon/api.py
+++ b/falcon/api.py
@@ -230,15 +230,14 @@ class API(object):
if length is not None:
resp._headers['content-length'] = str(length)
- # Set content type if needed
- use_content_type = (body or
- req.method == 'HEAD' or
- resp.status == status.HTTP_416)
-
- if use_content_type:
- media_type = self._media_type
- else:
+ # NOTE(kgriffs): Based on wsgiref.validate's interpretation of
+ # RFC 2616, as commented in that module's source code. The
+ # presence of the Content-Length header is not similarly
+ # enforced.
+ if resp.status in (status.HTTP_204, status.HTTP_304):
media_type = None
+ else:
+ media_type = self._media_type
headers = resp._wsgi_headers(media_type)
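The effect of this change can be sketched with the testing helpers added later in this PR (illustrative, not part of the patch): a route-less app 404s, and the response now carries the default media type, which is exactly the header wsgiref.validate checks for, while 204 and 304 responses remain bare:

import wsgiref.validate

import falcon
from falcon.testing import StartResponseMock, create_environ

app = wsgiref.validate.validator(falcon.API())  # no routes, so GETs 404

srmock = StartResponseMock()
iterable = app(create_environ(path='/doesntexist'), srmock)
b''.join(iterable)  # drain the body, per PEP-3333
iterable.close()    # the validator insists the iterable be closed

assert srmock.status == falcon.HTTP_404
assert 'content-type' in srmock.headers_dict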
diff --git a/falcon/api_helpers.py b/falcon/api_helpers.py
index 7fe1093..bf578b8 100644
--- a/falcon/api_helpers.py
+++ b/falcon/api_helpers.py
@@ -123,7 +123,7 @@ def default_serialize_error(req, resp, exception):
representation = exception.to_xml()
resp.body = representation
- resp.content_type = preferred
+ resp.content_type = preferred + '; charset=UTF-8'
def wrap_old_error_serializer(old_fn):
diff --git a/falcon/response.py b/falcon/response.py
index 6ff027d..dd9948e 100644
--- a/falcon/response.py
+++ b/falcon/response.py
@@ -581,7 +581,7 @@ class Response(object):
# it isn't needed.
items = headers.items()
else:
- items = list(headers.items()) # pragma: no cover
+ items = list(headers.items())
if self._cookies is not None:
# PERF(tbug):
diff --git a/falcon/util/structures.py b/falcon/util/structures.py
index 82995e3..8d5347c 100644
--- a/falcon/util/structures.py
+++ b/falcon/util/structures.py
@@ -1,13 +1,13 @@
# Copied from the Requests library by Kenneth Reitz et al.
-
+#
# Copyright 2013 Kenneth Reitz
-
+#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
-
+#
# http://www.apache.org/licenses/LICENSE-2.0
-
+#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
diff --git a/falcon/util/uri.py b/falcon/util/uri.py
index 2359672..2f68ec9 100644
--- a/falcon/util/uri.py
+++ b/falcon/util/uri.py
@@ -129,7 +129,6 @@ Returns:
"""
-# NOTE(kgriffs): This is actually covered, but not in py33; hence the pragma
if six.PY2:
# This map construction is based on urllib
@@ -193,8 +192,6 @@ if six.PY2:
return decoded_uri
-# NOTE(kgriffs): This is actually covered, but not in py2x; hence the pragma
-
else:
# This map construction is based on urllib
diff --git a/tox.ini b/tox.ini
index fbdeaa5..c7e30c0 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,8 +1,12 @@
[tox]
-# NOTE(kgriffs): The py26, py27, and py34 evns are required when
-# checking combined coverage. After running all three envs, execute
-# "tools/combine_coverage.sh" to create a combined coverage report
-# that can be viewed by opening ".coverage_html/index.html".
+# NOTE(kgriffs): The py26, py27, and py34 envs are required when
+# checking combined coverage. To check coverage:
+#
+# $ tox -e py26,py27,py34 && tools/combine_coverage.sh
+#
+# You can then drill down into coverage details by opening the HTML
+# report at ".coverage_html/index.html".
+
envlist = py26,
py27,
py34,
| Missing Content-Type on 404 Response
Greetings,
I've started [testing my webapps with wsgiref.validate](http://blog.dscpl.com.au/2015/05/returning-string-as-iterable-from-wsgi.html), and discovered that Falcon's 404 responses (and possibly others) don't conform to the WSGI spec: the `Content-Type` header is required but not present.
This script demonstrates the problem:
import falcon
from webtest import TestApp
import wsgiref.validate
# the app -- note that it has no routes so all GETs will return 404
app = falcon.API()
# wrap in validator middleware
app = wsgiref.validate.validator(app)
# wrap in test middleware
app = TestApp(app)
# provoke a 404
resp = app.get('/doesntexist', status=[404])
Output is:
Traceback (most recent call last):
<...snip...>
File "/usr/local/lib/python2.7/dist-packages/falcon/api.py", line 247, in __call__
start_response(resp.status, headers)
File "/usr/lib/python2.7/wsgiref/validate.py", line 167, in start_response_wrapper
check_content_type(status, headers)
File "/usr/lib/python2.7/wsgiref/validate.py", line 419, in check_content_type
assert_(0, "No Content-Type header found in headers (%s)" % headers)
File "/usr/lib/python2.7/wsgiref/validate.py", line 129, in assert_
raise AssertionError(*args)
AssertionError: No Content-Type header found in headers ([('content-length', '0')])
FYI, as a sanity check, I ran the same test using Bottle in place of Falcon, and it conforms (sends Content-Type with the 404 response).
I'm assuming that we want to be compliant with the WSGI spec, in which case this is a bug. Happy to submit a PR if you point me toward the recommended place to patch. (Set Content-Type in `falcon/responders.py:path_not_found`?)
| falconry/falcon | diff --git a/doc/api/testing.rst b/doc/api/testing.rst
new file mode 100644
index 0000000..5f4d4b5
--- /dev/null
+++ b/doc/api/testing.rst
@@ -0,0 +1,28 @@
+.. _testing:
+
+Testing
+=======
+
+.. autoclass:: falcon.testing.TestCase
+ :members:
+
+.. autoclass:: falcon.testing.Result
+ :members:
+
+.. autoclass:: falcon.testing.SimpleTestResource
+ :members:
+
+.. autoclass:: falcon.testing.StartResponseMock
+ :members:
+
+.. automodule:: falcon.testing
+ :members: capture_responder_args, rand_string, create_environ
+
+Deprecated
+----------
+
+.. autoclass:: falcon.testing.TestBase
+ :members:
+
+.. autoclass:: falcon.testing.TestResource
+ :members:
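Taken together, the classes documented above support tests along these lines (a hypothetical sketch; the class name and route are illustrative):

from falcon import testing

class QueryTest(testing.TestCase):
    def setUp(self):
        super(QueryTest, self).setUp()
        self.resource = testing.SimpleTestResource(body='hello')
        self.api.add_route('/', self.resource)

    def test_param_is_captured(self):
        result = self.simulate_get('/', query_string='limit=3')

        # The capture_responder_args hook stashed the Request object
        self.assertEqual(
            self.resource.captured_req.get_param('limit'), '3')
        self.assertEqual(result.text, 'hello')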
diff --git a/falcon/testing/__init__.py b/falcon/testing/__init__.py
index 8990a87..966abe0 100644
--- a/falcon/testing/__init__.py
+++ b/falcon/testing/__init__.py
@@ -13,7 +13,9 @@
# limitations under the License.
# Hoist classes and functions into the falcon.testing namespace
+from falcon.testing.base import TestBase # NOQA
from falcon.testing.helpers import * # NOQA
+from falcon.testing.resource import capture_responder_args # NOQA
+from falcon.testing.resource import SimpleTestResource, TestResource # NOQA
from falcon.testing.srmock import StartResponseMock # NOQA
-from falcon.testing.resource import TestResource # NOQA
-from falcon.testing.base import TestBase # NOQA
+from falcon.testing.test_case import Result, TestCase # NOQA
diff --git a/falcon/testing/base.py b/falcon/testing/base.py
index 31aeecc..5fd92b3 100644
--- a/falcon/testing/base.py
+++ b/falcon/testing/base.py
@@ -26,14 +26,22 @@ from falcon.testing.helpers import create_environ
class TestBase(unittest.TestCase):
- """Extends ``testtools.TestCase`` to support WSGI integration testing.
+ """Extends :py:mod:`unittest` to support WSGI functional testing.
- ``TestBase`` provides a base class that provides some extra plumbing to
- help simulate WSGI calls without having to actually host your API
- in a server.
+ Warning:
+ This class has been deprecated and will be removed in a future
+ release. Please use :py:class:`~.TestCase`
+ instead.
Note:
- If ``testtools`` is not available, ``unittest`` is used instead.
+ If available, uses :py:mod:`testtools` in lieu of
+ :py:mod:`unittest`.
+
+ This base class provides some extra plumbing for unittest-style
+ test cases, to help simulate WSGI calls without having to spin up
+ an actual web server. Simply inherit from this class in your test
+ case classes instead of :py:class:`unittest.TestCase` or
+ :py:class:`testtools.TestCase`.
Attributes:
api (falcon.API): An API instance to target when simulating
@@ -46,6 +54,7 @@ class TestBase(unittest.TestCase):
test_route (str): A simple, generated path that a test
can use to add a route to the API.
"""
+
api_class = falcon.API
srmock_class = StartResponseMock
diff --git a/falcon/testing/helpers.py b/falcon/testing/helpers.py
index e6f8ca2..38d340e 100644
--- a/falcon/testing/helpers.py
+++ b/falcon/testing/helpers.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import cgi
import random
import io
import sys
@@ -28,6 +29,33 @@ DEFAULT_HOST = 'falconframework.org'
httpnow = http_now
+# get_encoding_from_headers() is Copyright 2016 Kenneth Reitz, and is
+# used here under the terms of the Apache License, Version 2.0.
+def get_encoding_from_headers(headers):
+ """Returns encoding from given HTTP Header Dict.
+
+ Args:
+ headers(dict): Dictionary from which to extract encoding. Header
+ names must either be lowercase or the dict must support
+ case-insensitive lookups.
+ """
+
+ content_type = headers.get('content-type')
+
+ if not content_type:
+ return None
+
+ content_type, params = cgi.parse_header(content_type)
+
+ if 'charset' in params:
+ return params['charset'].strip("'\"")
+
+ if 'text' in content_type:
+ return 'ISO-8859-1'
+
+ return None
+
+
def rand_string(min, max):
"""Returns a randomly-generated string, of a random length.
@@ -40,7 +68,7 @@ def rand_string(min, max):
int_gen = random.randint
string_length = int_gen(min, max)
return ''.join([chr(int_gen(ord(' '), ord('~')))
- for i in range(string_length)])
+ for __ in range(string_length)])
def create_environ(path='/', query_string='', protocol='HTTP/1.1',
@@ -50,26 +78,28 @@ def create_environ(path='/', query_string='', protocol='HTTP/1.1',
"""Creates a mock PEP-3333 environ ``dict`` for simulating WSGI requests.
- Args:
- path (str, optional): The path for the request (default '/')
- query_string (str, optional): The query string to simulate, without a
+ Keyword Args:
+ path (str): The path for the request (default '/')
+ query_string (str): The query string to simulate, without a
leading '?' (default '')
- protocol (str, optional): The HTTP protocol to simulate
+ protocol (str): The HTTP protocol to simulate
(default 'HTTP/1.1'). If set to 'HTTP/1.0', the Host header
will not be added to the environment.
scheme (str): URL scheme, either 'http' or 'https' (default 'http')
host(str): Hostname for the request (default 'falconframework.org')
- port (str or int, optional): The TCP port to simulate. Defaults to
+ port (str): The TCP port to simulate. Defaults to
the standard port used by the given scheme (i.e., 80 for 'http'
and 443 for 'https').
- headers (dict or list, optional): Headers as a ``dict`` or an
- iterable collection of (*key*, *value*) ``tuple``'s
+ headers (dict): Headers as a ``dict`` or an iterable yielding
+ (*key*, *value*) ``tuple``'s
app (str): Value for the ``SCRIPT_NAME`` environ variable, described in
PEP-333: 'The initial portion of the request URL's "path" that
corresponds to the application object, so that the application
knows its virtual "location". This may be an empty string, if the
application corresponds to the "root" of the server.' (default '')
- body (str or unicode): The body of the request (default '')
+ body (str): The body of the request (default ''). Accepts both byte
+ strings and Unicode strings. Unicode strings are encoded as UTF-8
+ in the request.
method (str): The HTTP method to use (default 'GET')
wsgierrors (io): The stream to use as *wsgierrors*
(default ``sys.stderr``)
@@ -108,6 +138,7 @@ def create_environ(path='/', query_string='', protocol='HTTP/1.1',
'SERVER_NAME': host,
'SERVER_PORT': port,
+ 'wsgi.version': (1, 0),
'wsgi.url_scheme': scheme,
'wsgi.input': body,
'wsgi.errors': wsgierrors or sys.stderr,
@@ -135,7 +166,7 @@ def create_environ(path='/', query_string='', protocol='HTTP/1.1',
body.seek(0)
if content_length != 0:
- env['CONTENT_LENGTH'] = content_length
+ env['CONTENT_LENGTH'] = str(content_length)
if headers is not None:
_add_headers_to_environ(env, headers)
diff --git a/falcon/testing/resource.py b/falcon/testing/resource.py
index 0e11f32..1f80ed9 100644
--- a/falcon/testing/resource.py
+++ b/falcon/testing/resource.py
@@ -12,12 +12,103 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from falcon import HTTP_200
+from json import dumps as json_dumps
+
+import falcon
from .helpers import rand_string
-class TestResource:
- """Mock resource for integration testing.
+def capture_responder_args(req, resp, resource, params):
+ """Before hook for capturing responder arguments.
+
+ Adds the following attributes to the hooked responder's resource
+ class:
+
+ * captured_req
+ * captured_resp
+ * captured_kwargs
+ """
+
+ resource.captured_req = req
+ resource.captured_resp = resp
+ resource.captured_kwargs = params
+
+
+def set_resp_defaults(req, resp, resource, params):
+ """Before hook for setting default response properties."""
+
+ if resource._default_status is not None:
+ resp.status = resource._default_status
+
+ if resource._default_body is not None:
+ resp.body = resource._default_body
+
+ if resource._default_headers is not None:
+ resp.set_headers(resource._default_headers)
+
+
+class SimpleTestResource(object):
+ """Mock resource for functional testing of framework components.
+
+ This class implements a simple test resource that can be extended
+ as needed to test middleware, hooks, and the Falcon framework
+ itself.
+
+ Only the ``on_get()`` responder is implemented; when adding
+ additional responders in child classes, they can be decorated
+ with the :py:meth:`falcon.testing.capture_responder_args` hook in
+ order to capture the *req*, *resp*, and *params* arguments that
+ are passed to the responder. Responders may also be decorated with
+ the :py:meth:`falcon.testing.set_resp_defaults` hook in order to
+ set *resp* properties to default *status*, *body*, and *header*
+ values.
+
+ Keyword Arguments:
+ status (str): Default status string to use in responses
+ body (str): Default body string to use in responses
+ json (dict): Default JSON document to use in responses. Will
+ be serialized to a string and encoded as UTF-8. Either
+ *json* or *body* may be specified, but not both.
+ headers (dict): Default set of additional headers to include in
+ responses
+
+ Attributes:
+ captured_req (falcon.Request): The last Request object passed
+ into any one of the responder methods.
+ captured_resp (falcon.Response): The last Response object passed
+ into any one of the responder methods.
+ captured_kwargs (dict): The last dictionary of kwargs, beyond
+ ``req`` and ``resp``, that were passed into any one of the
+ responder methods.
+ """
+
+ def __init__(self, status=None, body=None, json=None, headers=None):
+ self._default_status = status
+ self._default_headers = headers
+
+ if json is not None:
+ if body is not None:
+ msg = 'Either json or body may be specified, but not both'
+ raise ValueError(msg)
+
+ self._default_body = json_dumps(json, ensure_ascii=False)
+
+ else:
+ self._default_body = body
+
+ @falcon.before(capture_responder_args)
+ @falcon.before(set_resp_defaults)
+ def on_get(self, req, resp, **kwargs):
+ pass
+
+
+class TestResource(object):
+ """Mock resource for functional testing.
+
+ Warning:
+ This class is deprecated and will be removed in a future
+ release. Please use :py:class:`~.SimpleTestResource`
+ instead.
This class implements the `on_get` responder, captures
request data, and sets response body and headers.
@@ -38,14 +129,12 @@ class TestResource:
responder, if any.
called (bool): ``True`` if `on_get` was ever called; ``False``
otherwise.
-
-
"""
sample_status = "200 OK"
sample_body = rand_string(0, 128 * 1024)
resp_headers = {
- 'Content-Type': 'text/plain; charset=utf-8',
+ 'Content-Type': 'text/plain; charset=UTF-8',
'ETag': '10d4555ebeb53b30adf724ca198b32a2',
'X-Hello': 'OH HAI'
}
@@ -73,6 +162,6 @@ class TestResource:
self.req, self.resp, self.kwargs = req, resp, kwargs
self.called = True
- resp.status = HTTP_200
+ resp.status = falcon.HTTP_200
resp.body = self.sample_body
resp.set_headers(self.resp_headers)
diff --git a/falcon/testing/srmock.py b/falcon/testing/srmock.py
index 21e7544..916c662 100644
--- a/falcon/testing/srmock.py
+++ b/falcon/testing/srmock.py
@@ -15,7 +15,7 @@
from falcon import util
-class StartResponseMock:
+class StartResponseMock(object):
"""Mock object representing a WSGI `start_response` callable.
Attributes:
diff --git a/falcon/testing/test_case.py b/falcon/testing/test_case.py
new file mode 100644
index 0000000..e809b8b
--- /dev/null
+++ b/falcon/testing/test_case.py
@@ -0,0 +1,332 @@
+# Copyright 2013 by Rackspace Hosting, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import wsgiref.validate
+
+try:
+ import testtools as unittest
+except ImportError: # pragma: nocover
+ import unittest
+
+import falcon
+import falcon.request
+from falcon.util import CaseInsensitiveDict
+from falcon.testing.srmock import StartResponseMock
+from falcon.testing.helpers import create_environ, get_encoding_from_headers
+
+
+class Result(object):
+ """Encapsulates the result of a simulated WSGI request.
+
+ Args:
+ iterable (iterable): An iterable that yields zero or more
+ bytestrings, per PEP-3333
+ status (str): An HTTP status string, including status code and
+ reason string
+ headers (list): A list of (header_name, header_value) tuples,
+ per PEP-3333
+
+ Attributes:
+ status (str): HTTP status string given in the response
+ status_code (int): The code portion of the HTTP status string
+ headers (CaseInsensitiveDict): A case-insensitive dictionary
+ containing all the headers in the response
+ encoding (str): Text encoding of the response body, or ``None``
+ if the encoding can not be determined.
+        data (bytes): Raw response body, or an empty byte string
+            (``b''``) if the response body was empty.
+ text (str): Decoded response body of type ``unicode``
+ under Python 2.6 and 2.7, and of type ``str`` otherwise.
+ Raises an error if the response encoding can not be
+ determined.
+ json (dict): Deserialized JSON body. Raises an error if the
+ response is not JSON.
+ """
+
+ def __init__(self, iterable, status, headers):
+ self._text = None
+
+ self._data = b''.join(iterable)
+ if hasattr(iterable, 'close'):
+ iterable.close()
+
+ self._status = status
+ self._status_code = int(status[:3])
+ self._headers = CaseInsensitiveDict(headers)
+
+ self._encoding = get_encoding_from_headers(self._headers)
+
+ @property
+ def status(self):
+ return self._status
+
+ @property
+ def status_code(self):
+ return self._status_code
+
+ @property
+ def headers(self):
+ return self._headers
+
+ @property
+ def encoding(self):
+ return self._encoding
+
+ @property
+ def data(self):
+ return self._data
+
+ @property
+ def text(self):
+ if self._text is None:
+ if not self.data:
+ self._text = u''
+ else:
+ if self.encoding is None:
+ msg = 'Response did not specify a content encoding'
+ raise RuntimeError(msg)
+
+ self._text = self.data.decode(self.encoding)
+
+ return self._text
+
+ @property
+ def json(self):
+ return json.loads(self.text)
+
+
+class TestCase(unittest.TestCase):
+ """Extends :py:mod:`unittest` to support WSGI functional testing.
+
+ Note:
+ If available, uses :py:mod:`testtools` in lieu of
+ :py:mod:`unittest`.
+
+ This base class provides some extra plumbing for unittest-style
+ test cases, to help simulate WSGI calls without having to spin up
+ an actual web server. Simply inherit from this class in your test
+ case classes instead of :py:class:`unittest.TestCase` or
+ :py:class:`testtools.TestCase`.
+
+ Attributes:
+ api_class (class): An API class to use when instantiating
+ the ``api`` instance (default: :py:class:`falcon.API`)
+ api (object): An API instance to target when simulating
+ requests (default: ``self.api_class()``)
+ """
+
+ api_class = None
+
+ def setUp(self):
+ super(TestCase, self).setUp()
+
+ if self.api_class is None:
+ self.api = falcon.API()
+ else:
+ self.api = self.api_class() # pylint: disable=not-callable
+
+ # Reset to simulate "restarting" the WSGI container
+ falcon.request._maybe_wrap_wsgi_stream = True
+
+ # NOTE(warsaw): Pythons earlier than 2.7 do not have a
+ # self.assertIn() method, so use this compatibility function
+ # instead.
+ if not hasattr(unittest.TestCase, 'assertIn'): # pragma: nocover
+ def assertIn(self, a, b):
+ self.assertTrue(a in b)
+
+ def simulate_get(self, path='/', **kwargs):
+ """Simulates a GET request to a WSGI application.
+
+ Equivalent to ``simulate_request('GET', ...)``
+
+ Args:
+ path (str): The URL path to request (default: '/')
+
+ Keyword Args:
+ query_string (str): A raw query string to include in the
+ request (default: ``None``)
+ headers (dict): Additional headers to include in the request
+ (default: ``None``)
+ """
+ return self.simulate_request('GET', path, **kwargs)
+
+ def simulate_head(self, path='/', **kwargs):
+ """Simulates a HEAD request to a WSGI application.
+
+ Equivalent to ``simulate_request('HEAD', ...)``
+
+ Args:
+ path (str): The URL path to request (default: '/')
+
+ Keyword Args:
+ query_string (str): A raw query string to include in the
+ request (default: ``None``)
+ headers (dict): Additional headers to include in the request
+ (default: ``None``)
+ """
+ return self.simulate_request('HEAD', path, **kwargs)
+
+ def simulate_post(self, path='/', **kwargs):
+ """Simulates a POST request to a WSGI application.
+
+ Equivalent to ``simulate_request('POST', ...)``
+
+ Args:
+ path (str): The URL path to request (default: '/')
+
+ Keyword Args:
+ query_string (str): A raw query string to include in the
+ request (default: ``None``)
+ headers (dict): Additional headers to include in the request
+ (default: ``None``)
+ body (str): A string to send as the body of the request.
+ Accepts both byte strings and Unicode strings
+ (default: ``None``). If a Unicode string is provided,
+ it will be encoded as UTF-8 in the request.
+ """
+ return self.simulate_request('POST', path, **kwargs)
+
+ def simulate_put(self, path='/', **kwargs):
+ """Simulates a PUT request to a WSGI application.
+
+ Equivalent to ``simulate_request('PUT', ...)``
+
+ Args:
+ path (str): The URL path to request (default: '/')
+
+ Keyword Args:
+ query_string (str): A raw query string to include in the
+ request (default: ``None``)
+ headers (dict): Additional headers to include in the request
+ (default: ``None``)
+ body (str): A string to send as the body of the request.
+ Accepts both byte strings and Unicode strings
+ (default: ``None``). If a Unicode string is provided,
+ it will be encoded as UTF-8 in the request.
+ """
+ return self.simulate_request('PUT', path, **kwargs)
+
+ def simulate_options(self, path='/', **kwargs):
+ """Simulates an OPTIONS request to a WSGI application.
+
+ Equivalent to ``simulate_request('OPTIONS', ...)``
+
+ Args:
+ path (str): The URL path to request (default: '/')
+
+ Keyword Args:
+ query_string (str): A raw query string to include in the
+ request (default: ``None``)
+ headers (dict): Additional headers to include in the request
+ (default: ``None``)
+ """
+ return self.simulate_request('OPTIONS', path, **kwargs)
+
+ def simulate_patch(self, path='/', **kwargs):
+ """Simulates a PATCH request to a WSGI application.
+
+ Equivalent to ``simulate_request('PATCH', ...)``
+
+ Args:
+ path (str): The URL path to request (default: '/')
+
+ Keyword Args:
+ query_string (str): A raw query string to include in the
+ request (default: ``None``)
+ headers (dict): Additional headers to include in the request
+ (default: ``None``)
+ body (str): A string to send as the body of the request.
+ Accepts both byte strings and Unicode strings
+ (default: ``None``). If a Unicode string is provided,
+ it will be encoded as UTF-8 in the request.
+ """
+ return self.simulate_request('PATCH', path, **kwargs)
+
+ def simulate_delete(self, path='/', **kwargs):
+ """Simulates a DELETE request to a WSGI application.
+
+ Equivalent to ``simulate_request('DELETE', ...)``
+
+ Args:
+ path (str): The URL path to request (default: '/')
+
+ Keyword Args:
+ query_string (str): A raw query string to include in the
+ request (default: ``None``)
+ headers (dict): Additional headers to include in the request
+ (default: ``None``)
+ """
+ return self.simulate_request('DELETE', path, **kwargs)
+
+ def simulate_request(self, method='GET', path='/', query_string=None,
+ headers=None, body=None, file_wrapper=None):
+ """Simulates a request to a WSGI application.
+
+ Performs a WSGI request directly against ``self.api``.
+
+ Keyword Args:
+ method (str): The HTTP method to use in the request
+ (default: 'GET')
+ path (str): The URL path to request (default: '/')
+ query_string (str): A raw query string to include in the
+ request (default: ``None``)
+ headers (dict): Additional headers to include in the request
+ (default: ``None``)
+ body (str): A string to send as the body of the request.
+ Accepts both byte strings and Unicode strings
+ (default: ``None``). If a Unicode string is provided,
+ it will be encoded as UTF-8 in the request.
+ file_wrapper (callable): Callable that returns an iterable,
+ to be used as the value for *wsgi.file_wrapper* in the
+ environ (default: ``None``).
+
+ Returns:
+ :py:class:`~.Result`: The result of the request
+ """
+
+ if not path.startswith('/'):
+ raise ValueError("path must start with '/'")
+
+ if query_string and query_string.startswith('?'):
+ raise ValueError("query_string should not start with '?'")
+
+ if '?' in path:
+ # NOTE(kgriffs): We could allow this, but then we'd need
+ # to define semantics regarding whether the path takes
+ # precedence over the query_string. Also, it would make
+ # tests less consistent, since there would be "more than
+ # one...way to do it."
+ raise ValueError(
+ 'path may not contain a query string. Please use the '
+ 'query_string parameter instead.'
+ )
+
+ env = create_environ(
+ method=method,
+ path=path,
+ query_string=(query_string or ''),
+ headers=headers,
+ body=body,
+ file_wrapper=file_wrapper,
+ )
+
+ srmock = StartResponseMock()
+ validator = wsgiref.validate.validator(self.api)
+ iterable = validator(env, srmock)
+
+ result = Result(iterable, srmock.status, srmock.headers)
+
+ return result
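To round out the picture, a companion sketch (again hypothetical) of the Result conveniences; because the default media type now carries an explicit charset, get_encoding_from_headers() can resolve the encoding and both text and json decode cleanly:

from falcon import testing

class ResultExample(testing.TestCase):
    def setUp(self):
        super(ResultExample, self).setUp()
        self.api.add_route(
            '/things', testing.SimpleTestResource(json={'color': 'green'}))

    def test_result_decoding(self):
        result = self.simulate_get('/things')

        self.assertEqual(result.status_code, 200)
        self.assertEqual(result.encoding, 'UTF-8')  # from the charset param
        self.assertEqual(result.json, {'color': 'green'})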
diff --git a/tests/test_after_hooks.py b/tests/test_after_hooks.py
index b9b1307..c4b4ada 100644
--- a/tests/test_after_hooks.py
+++ b/tests/test_after_hooks.py
@@ -2,7 +2,12 @@ import functools
import json
import falcon
-import falcon.testing as testing
+from falcon import testing
+
+
+# --------------------------------------------------------------------
+# Hooks
+# --------------------------------------------------------------------
def validate_output(req, resp):
@@ -71,6 +76,11 @@ def cuteness_in_the_head(req, resp):
resp.set_header('X-Cuteness', 'cute')
+# --------------------------------------------------------------------
+# Resources
+# --------------------------------------------------------------------
+
+
class WrappedRespondersResource(object):
@falcon.after(serialize_body)
@@ -163,15 +173,18 @@ class FaultyResource(object):
raise falcon.HTTPError(falcon.HTTP_743, 'Query failed')
-class TestHooks(testing.TestBase):
+# --------------------------------------------------------------------
+# Tests
+# --------------------------------------------------------------------
- def simulate_request(self, *args, **kwargs):
- return super(TestHooks, self).simulate_request(
- *args, decode='utf-8', **kwargs)
- def before(self):
+class TestHooks(testing.TestCase):
+
+ def setUp(self):
+ super(TestHooks, self).setUp()
+
self.resource = WrappedRespondersResource()
- self.api.add_route(self.test_route, self.resource)
+ self.api.add_route('/', self.resource)
self.wrapped_resource = WrappedClassResource()
self.api.add_route('/wrapped', self.wrapped_resource)
@@ -179,157 +192,145 @@ class TestHooks(testing.TestBase):
self.wrapped_resource_aware = ClassResourceWithAwareHooks()
self.api.add_route('/wrapped_aware', self.wrapped_resource_aware)
- def test_global_hook(self):
- self.assertRaises(TypeError, falcon.API, None, {})
- self.assertRaises(TypeError, falcon.API, None, 0)
+ def test_output_validator(self):
+ result = self.simulate_get()
+ self.assertEqual(result.status_code, 723)
+ self.assertEqual(result.text, '{\n "title": "Tricky"\n}')
- self.api = falcon.API(after=fluffiness)
- zoo_resource = ZooResource()
+ def test_serializer(self):
+ result = self.simulate_put()
+ self.assertEqual('{"animal": "falcon"}', result.text)
- self.api.add_route(self.test_route, zoo_resource)
+ def test_hook_as_callable_class(self):
+ result = self.simulate_post()
+ self.assertEqual('smart', result.text)
- result = self.simulate_request(self.test_route)
- self.assertEqual(u'fluffy', result)
+ def test_wrapped_resource(self):
+ result = self.simulate_get('/wrapped')
+ self.assertEqual(result.status_code, 200)
+        self.assertEqual(result.text, 'fluffy and cute')
- def test_global_hook_is_resource_aware(self):
- self.assertRaises(TypeError, falcon.API, None, {})
- self.assertRaises(TypeError, falcon.API, None, 0)
+ result = self.simulate_head('/wrapped')
+ self.assertEqual(result.status_code, 200)
- self.api = falcon.API(after=resource_aware_fluffiness)
- zoo_resource = ZooResource()
+ result = self.simulate_post('/wrapped')
+ self.assertEqual(result.status_code, 405)
- self.api.add_route(self.test_route, zoo_resource)
+ result = self.simulate_patch('/wrapped')
+ self.assertEqual(result.status_code, 405)
- result = self.simulate_request(self.test_route)
- self.assertEqual(u'fluffy', result)
+ # Decorator should not affect the default on_options responder
+ result = self.simulate_options('/wrapped')
+ self.assertEqual(result.status_code, 204)
+ self.assertFalse(result.text)
+
+ def test_wrapped_resource_with_hooks_aware_of_resource(self):
+ expected = 'fluffy and cute'
+
+ result = self.simulate_get('/wrapped_aware')
+ self.assertEqual(result.status_code, 200)
+ self.assertEqual(expected, result.text)
+
+ for test in (self.simulate_head, self.simulate_put, self.simulate_post):
+ result = test('/wrapped_aware')
+ self.assertEqual(result.status_code, 200)
+ self.assertEqual(self.wrapped_resource_aware.resp.body, expected)
+
+ result = self.simulate_patch('/wrapped_aware')
+ self.assertEqual(result.status_code, 405)
+
+ # Decorator should not affect the default on_options responder
+ result = self.simulate_options('/wrapped_aware')
+ self.assertEqual(result.status_code, 204)
+ self.assertFalse(result.text)
+
+
+class TestGlobalHooks(testing.TestCase):
+
+ def test_invalid_type(self):
+ self.assertRaises(TypeError, falcon.API, after={})
+ self.assertRaises(TypeError, falcon.API, after=0)
+
+ def test_global_hook(self):
+ self.api = falcon.API(after=fluffiness)
+ self.api.add_route('/', ZooResource())
+
+ result = self.simulate_get()
+ self.assertEqual(result.text, 'fluffy')
+
+ def test_global_hook_is_resource_aware(self):
+ self.api = falcon.API(after=resource_aware_fluffiness)
+ self.api.add_route('/', ZooResource())
+
+ result = self.simulate_get()
+ self.assertEqual(result.text, 'fluffy')
def test_multiple_global_hook(self):
self.api = falcon.API(after=[fluffiness, cuteness, Smartness()])
- zoo_resource = ZooResource()
-
- self.api.add_route(self.test_route, zoo_resource)
+ self.api.add_route('/', ZooResource())
- result = self.simulate_request(self.test_route)
- self.assertEqual(u'fluffy and cute and smart', result)
+ result = self.simulate_get()
+ self.assertEqual(result.text, 'fluffy and cute and smart')
def test_global_hook_wrap_default_on_options(self):
self.api = falcon.API(after=fluffiness_in_the_head)
- zoo_resource = ZooResource()
+ self.api.add_route('/', ZooResource())
- self.api.add_route(self.test_route, zoo_resource)
+ result = self.simulate_options()
- self.simulate_request(self.test_route, method='OPTIONS')
-
- self.assertEqual(falcon.HTTP_204, self.srmock.status)
- self.assertEqual('fluffy', self.srmock.headers_dict['X-Fluffiness'])
+ self.assertEqual(result.status_code, 204)
+ self.assertEqual(result.headers['X-Fluffiness'], 'fluffy')
def test_global_hook_wrap_default_405(self):
self.api = falcon.API(after=fluffiness_in_the_head)
- zoo_resource = ZooResource()
-
- self.api.add_route(self.test_route, zoo_resource)
+ self.api.add_route('/', ZooResource())
- self.simulate_request(self.test_route, method='POST')
+ result = self.simulate_post()
- self.assertEqual(falcon.HTTP_405, self.srmock.status)
- self.assertEqual('fluffy', self.srmock.headers_dict['X-Fluffiness'])
+ self.assertEqual(result.status_code, 405)
+ self.assertEqual(result.headers['X-Fluffiness'], 'fluffy')
def test_multiple_global_hooks_wrap_default_on_options(self):
self.api = falcon.API(after=[fluffiness_in_the_head,
cuteness_in_the_head])
- zoo_resource = ZooResource()
- self.api.add_route(self.test_route, zoo_resource)
+ self.api.add_route('/', ZooResource())
- self.simulate_request(self.test_route, method='OPTIONS')
+ result = self.simulate_options()
- self.assertEqual(falcon.HTTP_204, self.srmock.status)
- self.assertEqual('fluffy', self.srmock.headers_dict['X-Fluffiness'])
- self.assertEqual('cute', self.srmock.headers_dict['X-Cuteness'])
+ self.assertEqual(result.status_code, 204)
+ self.assertEqual(result.headers['X-Fluffiness'], 'fluffy')
+ self.assertEqual(result.headers['X-Cuteness'], 'cute')
def test_multiple_global_hooks_wrap_default_405(self):
self.api = falcon.API(after=[fluffiness_in_the_head,
cuteness_in_the_head])
- zoo_resource = ZooResource()
- self.api.add_route(self.test_route, zoo_resource)
+ self.api.add_route('/', ZooResource())
- self.simulate_request(self.test_route, method='POST')
+ result = self.simulate_post()
- self.assertEqual(falcon.HTTP_405, self.srmock.status)
- self.assertEqual('fluffy', self.srmock.headers_dict['X-Fluffiness'])
- self.assertEqual('cute', self.srmock.headers_dict['X-Cuteness'])
+ self.assertEqual(result.status_code, 405)
+ self.assertEqual(result.headers['X-Fluffiness'], 'fluffy')
+ self.assertEqual(result.headers['X-Cuteness'], 'cute')
def test_global_after_hooks_run_after_exception(self):
self.api = falcon.API(after=[fluffiness,
resource_aware_cuteness,
Smartness()])
- self.api.add_route(self.test_route, FaultyResource())
-
- actual_body = self.simulate_request(self.test_route)
- self.assertEqual(falcon.HTTP_743, self.srmock.status)
- self.assertEqual(u'fluffy and cute and smart', actual_body)
+ self.api.add_route('/', FaultyResource())
- def test_output_validator(self):
- actual_body = self.simulate_request(self.test_route)
- self.assertEqual(falcon.HTTP_723, self.srmock.status)
- self.assertEqual(u'{\n "title": "Tricky"\n}', actual_body)
-
- def test_serializer(self):
- actual_body = self.simulate_request(self.test_route, method='PUT')
-
- self.assertEqual(u'{"animal": "falcon"}', actual_body)
-
- def test_hook_as_callable_class(self):
- actual_body = self.simulate_request(self.test_route, method='POST')
- self.assertEqual(u'smart', actual_body)
-
- def test_wrapped_resource(self):
- actual_body = self.simulate_request('/wrapped')
- self.assertEqual(falcon.HTTP_200, self.srmock.status)
- self.assertEqual(u'fluffy and cute', actual_body)
-
- self.simulate_request('/wrapped', method='HEAD')
- self.assertEqual(falcon.HTTP_200, self.srmock.status)
-
- self.simulate_request('/wrapped', method='POST')
- self.assertEqual(falcon.HTTP_405, self.srmock.status)
-
- self.simulate_request('/wrapped', method='PATCH')
- self.assertEqual(falcon.HTTP_405, self.srmock.status)
-
- # decorator does not affect the default on_options
- body = self.simulate_request('/wrapped', method='OPTIONS')
- self.assertEqual(falcon.HTTP_204, self.srmock.status)
- self.assertEqual(u'', body)
-
- def test_wrapped_resource_with_hooks_aware_of_resource(self):
- expected = u'fluffy and cute'
-
- actual_body = self.simulate_request('/wrapped_aware')
- self.assertEqual(falcon.HTTP_200, self.srmock.status)
- self.assertEqual(expected, actual_body)
-
- for method in ('HEAD', 'PUT', 'POST'):
- self.simulate_request('/wrapped_aware', method=method)
- self.assertEqual(falcon.HTTP_200, self.srmock.status)
- self.assertEqual(expected, self.wrapped_resource_aware.resp.body)
-
- self.simulate_request('/wrapped_aware', method='PATCH')
- self.assertEqual(falcon.HTTP_405, self.srmock.status)
-
- # decorator does not affect the default on_options
- body = self.simulate_request('/wrapped_aware', method='OPTIONS')
- self.assertEqual(falcon.HTTP_204, self.srmock.status)
- self.assertEqual(u'', body)
+ result = self.simulate_get()
+ self.assertEqual(result.status_code, 743)
+ self.assertEqual(result.text, 'fluffy and cute and smart')
def test_customized_options(self):
self.api = falcon.API(after=fluffiness)
-
self.api.add_route('/one', SingleResource())
- body = self.simulate_request('/one', method='OPTIONS')
- self.assertEqual(falcon.HTTP_501, self.srmock.status)
- self.assertEqual(u'fluffy', body)
- self.assertNotIn('allow', self.srmock.headers_dict)
+ result = self.simulate_options('/one')
+
+ self.assertEqual(result.status_code, 501)
+ self.assertEqual(result.text, 'fluffy')
+        self.assertNotIn('allow', result.headers)
diff --git a/tests/test_error_handlers.py b/tests/test_error_handlers.py
index c87500e..30d155a 100644
--- a/tests/test_error_handlers.py
+++ b/tests/test_error_handlers.py
@@ -1,7 +1,5 @@
-import json
-
import falcon
-import falcon.testing as testing
+from falcon import testing
def capture_error(ex, req, resp, params):
@@ -31,6 +29,7 @@ class CustomException(CustomBaseException):
class ErroredClassResource(object):
+
def on_get(self, req, resp):
raise Exception('Plain Exception')
@@ -41,44 +40,35 @@ class ErroredClassResource(object):
raise CustomException('CustomException')
-class TestErrorHandler(testing.TestBase):
+class TestErrorHandler(testing.TestCase):
+
+ def setUp(self):
+ super(TestErrorHandler, self).setUp()
+ self.api.add_route('/', ErroredClassResource())
def test_caught_error(self):
self.api.add_error_handler(Exception, capture_error)
- self.api.add_route(self.test_route, ErroredClassResource())
+ result = self.simulate_get()
+ self.assertEqual(result.text, 'error: Plain Exception')
- body = self.simulate_request(self.test_route)
- self.assertEqual([b'error: Plain Exception'], body)
-
- body = self.simulate_request(self.test_route, method='HEAD')
- self.assertEqual(falcon.HTTP_723, self.srmock.status)
- self.assertEqual([], body)
+ result = self.simulate_head()
+ self.assertEqual(result.status_code, 723)
+ self.assertFalse(result.data)
def test_uncaught_error(self):
self.api.add_error_handler(CustomException, capture_error)
-
- self.api.add_route(self.test_route, ErroredClassResource())
-
- self.assertRaises(Exception,
- self.simulate_request, self.test_route)
+ self.assertRaises(Exception, self.simulate_get)
def test_uncaught_error_else(self):
- self.api.add_route(self.test_route, ErroredClassResource())
-
- self.assertRaises(Exception,
- self.simulate_request, self.test_route)
+ self.assertRaises(Exception, self.simulate_get)
def test_converted_error(self):
self.api.add_error_handler(CustomException)
- self.api.add_route(self.test_route, ErroredClassResource())
-
- body = self.simulate_request(self.test_route, method='DELETE')
- self.assertEqual(falcon.HTTP_792, self.srmock.status)
-
- info = json.loads(body[0].decode())
- self.assertEqual('Internet crashed!', info['title'])
+ result = self.simulate_delete()
+ self.assertEqual(result.status_code, 792)
+ self.assertEqual(result.json[u'title'], u'Internet crashed!')
def test_handle_not_defined(self):
self.assertRaises(AttributeError,
@@ -87,17 +77,13 @@ class TestErrorHandler(testing.TestBase):
def test_subclass_error(self):
self.api.add_error_handler(CustomBaseException, capture_error)
- self.api.add_route(self.test_route, ErroredClassResource())
-
- body = self.simulate_request(self.test_route, method='DELETE')
- self.assertEqual(falcon.HTTP_723, self.srmock.status)
- self.assertEqual([b'error: CustomException'], body)
+ result = self.simulate_delete()
+ self.assertEqual(result.status_code, 723)
+ self.assertEqual(result.text, 'error: CustomException')
def test_error_order(self):
self.api.add_error_handler(Exception, capture_error)
self.api.add_error_handler(Exception, handle_error_first)
- self.api.add_route(self.test_route, ErroredClassResource())
-
- body = self.simulate_request(self.test_route)
- self.assertEqual([b'first error handler'], body)
+ result = self.simulate_get()
+ self.assertEqual(result.text, 'first error handler')
diff --git a/tests/test_headers.py b/tests/test_headers.py
index 7b2f82c..fda337c 100644
--- a/tests/test_headers.py
+++ b/tests/test_headers.py
@@ -1,25 +1,14 @@
from collections import defaultdict
from datetime import datetime
+import ddt
import six
-from testtools.matchers import Contains, Not
import falcon
-import falcon.testing as testing
+from falcon import testing
-class StatusTestResource:
- sample_body = testing.rand_string(0, 128 * 1024)
-
- def __init__(self, status):
- self.status = status
-
- def on_get(self, req, resp):
- resp.status = self.status
- resp.body = self.sample_body
-
-
-class XmlResource:
+class XmlResource(object):
def __init__(self, content_type):
self.content_type = content_type
@@ -27,16 +16,7 @@ class XmlResource:
resp.set_header('content-type', self.content_type)
-class DefaultContentTypeResource:
- def __init__(self, body=None):
- self.body = body
-
- def on_get(self, req, resp):
- if self.body is not None:
- resp.body = self.body
-
-
-class HeaderHelpersResource:
+class HeaderHelpersResource(object):
def __init__(self, last_modified=None):
if last_modified is not None:
@@ -106,7 +86,7 @@ class HeaderHelpersResource:
self.resp = resp
-class LocationHeaderUnicodeResource:
+class LocationHeaderUnicodeResource(object):
URL1 = u'/\u00e7runchy/bacon'
URL2 = u'ab\u00e7' if six.PY3 else 'ab\xc3\xa7'
@@ -120,18 +100,25 @@ class LocationHeaderUnicodeResource:
resp.content_location = self.URL1
-class UnicodeHeaderResource:
+class UnicodeHeaderResource(object):
def on_get(self, req, resp):
resp.set_headers([
(u'X-auTH-toKEN', 'toomanysecrets'),
('Content-TYpE', u'application/json'),
- (u'X-symBOl', u'\u0040'),
- (u'X-symb\u00F6l', u'\u00FF'),
+ (u'X-symBOl', u'@'),
+
+ # TODO(kgriffs): This will cause the wsgiref validator
+ # to raise an error. Falcon itself does not currently
+ # check for non-ASCII chars to save some CPU cycles. The
+ # app is responsible for doing the right thing, and
+ # validating its own output as needed.
+ #
+ # (u'X-symb\u00F6l', u'\u00FF'),
])
-class VaryHeaderResource:
+class VaryHeaderResource(object):
def __init__(self, vary):
self.vary = vary
@@ -141,7 +128,7 @@ class VaryHeaderResource:
resp.vary = self.vary
-class LinkHeaderResource:
+class LinkHeaderResource(object):
def __init__(self):
self._links = []
@@ -156,7 +143,7 @@ class LinkHeaderResource:
resp.add_link(*args, **kwargs)
-class AppendHeaderResource:
+class AppendHeaderResource(object):
def on_get(self, req, resp):
resp.append_header('X-Things', 'thing-1')
@@ -172,33 +159,35 @@ class AppendHeaderResource:
resp.append_header('X-Things', 'thing-1')
-class TestHeaders(testing.TestBase):
[email protected]
+class TestHeaders(testing.TestCase):
- def before(self):
- self.resource = testing.TestResource()
- self.api.add_route(self.test_route, self.resource)
+ def setUp(self):
+ super(TestHeaders, self).setUp()
- def test_content_length(self):
- self.simulate_request(self.test_route)
+ self.sample_body = testing.rand_string(0, 128 * 1024)
+ self.resource = testing.SimpleTestResource(body=self.sample_body)
+ self.api.add_route('/', self.resource)
- headers = self.srmock.headers
+ def test_content_length(self):
+ result = self.simulate_get()
- # Test Content-Length header set
- content_length = str(len(self.resource.sample_body))
- content_length_header = ('content-length', content_length)
- self.assertThat(headers, Contains(content_length_header))
+ content_length = str(len(self.sample_body))
+ self.assertEqual(result.headers['Content-Length'], content_length)
def test_default_value(self):
- self.simulate_request(self.test_route)
+ self.simulate_get()
- value = self.resource.req.get_header('X-Not-Found') or '876'
+ req = self.resource.captured_req
+ value = req.get_header('X-Not-Found') or '876'
self.assertEqual(value, '876')
def test_required_header(self):
- self.simulate_request(self.test_route)
+ self.simulate_get()
try:
- self.resource.req.get_header('X-Not-Found', required=True)
+ req = self.resource.captured_req
+ req.get_header('X-Not-Found', required=True)
self.fail('falcon.HTTPMissingHeader not raised')
except falcon.HTTPMissingHeader as ex:
self.assertIsInstance(ex, falcon.HTTPBadRequest)
@@ -206,45 +195,13 @@ class TestHeaders(testing.TestBase):
expected_desc = 'The X-Not-Found header is required.'
self.assertEqual(ex.description, expected_desc)
- def test_no_body_on_100(self):
- self.resource = StatusTestResource(falcon.HTTP_100)
- self.api.add_route('/1xx', self.resource)
-
- body = self.simulate_request('/1xx')
- self.assertThat(self.srmock.headers_dict,
- Not(Contains('Content-Length')))
+ @ddt.data(falcon.HTTP_204, falcon.HTTP_304)
+ def test_no_content_length(self, status):
+ self.api.add_route('/xxx', testing.SimpleTestResource(status=status))
- self.assertEqual(body, [])
-
- def test_no_body_on_101(self):
- self.resource = StatusTestResource(falcon.HTTP_101)
- self.api.add_route('/1xx', self.resource)
-
- body = self.simulate_request('/1xx')
- self.assertThat(self.srmock.headers_dict,
- Not(Contains('Content-Length')))
-
- self.assertEqual(body, [])
-
- def test_no_body_on_204(self):
- self.resource = StatusTestResource(falcon.HTTP_204)
- self.api.add_route('/204', self.resource)
-
- body = self.simulate_request('/204')
- self.assertThat(self.srmock.headers_dict,
- Not(Contains('Content-Length')))
-
- self.assertEqual(body, [])
-
- def test_no_body_on_304(self):
- self.resource = StatusTestResource(falcon.HTTP_304)
- self.api.add_route('/304', self.resource)
-
- body = self.simulate_request('/304')
- self.assertThat(self.srmock.headers_dict,
- Not(Contains('Content-Length')))
-
- self.assertEqual(body, [])
+ result = self.simulate_get('/xxx')
+ self.assertNotIn('Content-Length', result.headers)
+ self.assertFalse(result.data)
def test_content_header_missing(self):
environ = testing.create_environ()
@@ -252,248 +209,228 @@ class TestHeaders(testing.TestBase):
for header in ('Content-Type', 'Content-Length'):
self.assertIs(req.get_header(header), None)
- def test_passthrough_req_headers(self):
- req_headers = {
+ def test_passthrough_request_headers(self):
+ request_headers = {
'X-Auth-Token': 'Setec Astronomy',
'Content-Type': 'text/plain; charset=utf-8'
}
- self.simulate_request(self.test_route, headers=req_headers)
+ self.simulate_get(headers=request_headers)
- for name, expected_value in req_headers.items():
- actual_value = self.resource.req.get_header(name)
+ for name, expected_value in request_headers.items():
+ actual_value = self.resource.captured_req.get_header(name)
self.assertEqual(actual_value, expected_value)
- self.simulate_request(self.test_route,
- headers=self.resource.req.headers)
+ self.simulate_get(headers=self.resource.captured_req.headers)
# Compare the request HTTP headers with the original headers
- for name, expected_value in req_headers.items():
- actual_value = self.resource.req.get_header(name)
+ for name, expected_value in request_headers.items():
+ actual_value = self.resource.captured_req.get_header(name)
self.assertEqual(actual_value, expected_value)
- def test_get_raw_headers(self):
+ def test_headers_as_list(self):
headers = [
('Client-ID', '692ba466-74bb-11e3-bf3f-7567c531c7ca'),
('Accept', 'audio/*; q=0.2, audio/basic')
]
+ # Unit test
environ = testing.create_environ(headers=headers)
req = falcon.Request(environ)
for name, value in headers:
self.assertIn((name.upper(), value), req.headers.items())
- def test_passthrough_resp_headers(self):
- self.simulate_request(self.test_route)
+ # Functional test
+ self.api.add_route('/', testing.SimpleTestResource(headers=headers))
+ result = self.simulate_get()
- resp_headers = self.srmock.headers
-
- for name, value in self.resource.resp_headers.items():
- expected = (name.lower(), value)
- self.assertThat(resp_headers, Contains(expected))
+ for name, value in headers:
+ self.assertEqual(result.headers[name], value)
def test_default_media_type(self):
- self.resource = DefaultContentTypeResource('Hello world!')
- self.api.add_route(self.test_route, self.resource)
- self.simulate_request(self.test_route)
+ resource = testing.SimpleTestResource(body='Hello world!')
+ self._check_header(resource, 'Content-Type', falcon.DEFAULT_MEDIA_TYPE)
- content_type = falcon.DEFAULT_MEDIA_TYPE
- self.assertIn(('content-type', content_type), self.srmock.headers)
+ @ddt.data(
+ ('text/plain; charset=UTF-8', u'Hello Unicode! \U0001F638'),
- def test_custom_media_type(self):
- self.resource = DefaultContentTypeResource('Hello world!')
- self.api = falcon.API(media_type='application/atom+xml')
- self.api.add_route(self.test_route, self.resource)
- self.simulate_request(self.test_route)
+ # NOTE(kgriffs): This only works because the client defaults to
+ # ISO-8859-1 IFF the media type is 'text'.
+ ('text/plain', 'Hello ISO-8859-1!'),
+ )
+ @ddt.unpack
+ def test_override_default_media_type(self, content_type, body):
+ self.api = falcon.API(media_type=content_type)
+ self.api.add_route('/', testing.SimpleTestResource(body=body))
+ result = self.simulate_get()
- content_type = 'application/atom+xml'
- self.assertIn(('content-type', content_type), self.srmock.headers)
+ self.assertEqual(result.text, body)
+ self.assertEqual(result.headers['Content-Type'], content_type)
+
+ def test_override_default_media_type_missing_encoding(self):
+ body = b'{}'
+
+ self.api = falcon.API(media_type='application/json')
+ self.api.add_route('/', testing.SimpleTestResource(body=body))
+ result = self.simulate_get()
+
+ self.assertEqual(result.data, body)
+ self.assertRaises(RuntimeError, lambda: result.text)
+ self.assertRaises(RuntimeError, lambda: result.json)
def test_response_header_helpers_on_get(self):
last_modified = datetime(2013, 1, 1, 10, 30, 30)
- self.resource = HeaderHelpersResource(last_modified)
- self.api.add_route(self.test_route, self.resource)
- self.simulate_request(self.test_route)
+ resource = HeaderHelpersResource(last_modified)
+ self.api.add_route('/', resource)
+ result = self.simulate_get()
- resp = self.resource.resp
+ resp = resource.resp
content_type = 'x-falcon/peregrine'
- self.assertEqual(content_type, resp.content_type)
- self.assertIn(('content-type', content_type), self.srmock.headers)
+ self.assertEqual(resp.content_type, content_type)
+ self.assertEqual(result.headers['Content-Type'], content_type)
cache_control = ('public, private, no-cache, no-store, '
'must-revalidate, proxy-revalidate, max-age=3600, '
's-maxage=60, no-transform')
- self.assertEqual(cache_control, resp.cache_control)
- self.assertIn(('cache-control', cache_control), self.srmock.headers)
+ self.assertEqual(resp.cache_control, cache_control)
+ self.assertEqual(result.headers['Cache-Control'], cache_control)
etag = 'fa0d1a60ef6616bb28038515c8ea4cb2'
- self.assertEqual(etag, resp.etag)
- self.assertIn(('etag', etag), self.srmock.headers)
+ self.assertEqual(resp.etag, etag)
+ self.assertEqual(result.headers['Etag'], etag)
- last_modified_http_date = 'Tue, 01 Jan 2013 10:30:30 GMT'
- self.assertEqual(last_modified_http_date, resp.last_modified)
- self.assertIn(('last-modified', last_modified_http_date),
- self.srmock.headers)
+ lm_date = 'Tue, 01 Jan 2013 10:30:30 GMT'
+ self.assertEqual(resp.last_modified, lm_date)
+ self.assertEqual(result.headers['Last-Modified'], lm_date)
- self.assertEqual('3601', resp.retry_after)
- self.assertIn(('retry-after', '3601'), self.srmock.headers)
+ self.assertEqual(resp.retry_after, '3601')
+ self.assertEqual(result.headers['Retry-After'], '3601')
- self.assertEqual('/things/87', resp.location)
- self.assertIn(('location', '/things/87'), self.srmock.headers)
+ self.assertEqual(resp.location, '/things/87')
+ self.assertEqual(result.headers['Location'], '/things/87')
- self.assertEqual('/things/78', resp.content_location)
- self.assertIn(('content-location', '/things/78'), self.srmock.headers)
+ self.assertEqual(resp.content_location, '/things/78')
+ self.assertEqual(result.headers['Content-Location'], '/things/78')
- self.assertEqual('bytes 0-499/10240', resp.content_range)
- self.assertIn(('content-range', 'bytes 0-499/10240'),
- self.srmock.headers)
+ content_range = 'bytes 0-499/10240'
+ self.assertEqual(resp.content_range, content_range)
+ self.assertEqual(result.headers['Content-Range'], content_range)
- resp.content_range = (0, 499, 10 * 1024, 'bytes')
- self.assertEqual('bytes 0-499/10240', resp.content_range)
- self.assertIn(('content-range', 'bytes 0-499/10240'),
- self.srmock.headers)
+ resp.content_range = (1, 499, 10 * 1024, 'bytes')
+ self.assertEqual(resp.content_range, 'bytes 1-499/10240')
- req_headers = {
- 'Range': 'items=0-25',
- }
- self.simulate_request(self.test_route, headers=req_headers)
-
- resp.content_range = (0, 25, 100, 'items')
- self.assertEqual('items 0-25/100', resp.content_range)
- self.assertIn(('content-range', 'items 0-25/100'),
- self.srmock.headers)
+ req_headers = {'Range': 'items=0-25'}
+ result = self.simulate_get(headers=req_headers)
+ self.assertEqual(result.headers['Content-Range'], 'items 0-25/100')
# Check for duplicate headers
hist = defaultdict(lambda: 0)
- for name, value in self.srmock.headers:
+ for name, value in result.headers.items():
hist[name] += 1
self.assertEqual(1, hist[name])
def test_unicode_location_headers(self):
- self.api.add_route(self.test_route, LocationHeaderUnicodeResource())
- self.simulate_request(self.test_route)
-
- location = ('location', '/%C3%A7runchy/bacon')
- self.assertIn(location, self.srmock.headers)
+ self.api.add_route('/', LocationHeaderUnicodeResource())
- content_location = ('content-location', 'ab%C3%A7')
- self.assertIn(content_location, self.srmock.headers)
+ result = self.simulate_get()
+ self.assertEqual(result.headers['Location'], '/%C3%A7runchy/bacon')
+ self.assertEqual(result.headers['Content-Location'], 'ab%C3%A7')
# Test with the values swapped
- self.simulate_request(self.test_route, method='HEAD')
-
- location = ('location', 'ab%C3%A7')
- self.assertIn(location, self.srmock.headers)
-
- content_location = ('content-location', '/%C3%A7runchy/bacon')
- self.assertIn(content_location, self.srmock.headers)
+ result = self.simulate_head()
+ self.assertEqual(result.headers['Content-Location'],
+ '/%C3%A7runchy/bacon')
+ self.assertEqual(result.headers['Location'], 'ab%C3%A7')
def test_unicode_headers(self):
- self.api.add_route(self.test_route, UnicodeHeaderResource())
- self.simulate_request(self.test_route)
-
- expect = ('x-auth-token', 'toomanysecrets')
- self.assertIn(expect, self.srmock.headers)
+ self.api.add_route('/', UnicodeHeaderResource())
- expect = ('content-type', 'application/json')
- self.assertIn(expect, self.srmock.headers)
+ result = self.simulate_get('/')
- expect = ('x-symbol', '@')
- self.assertIn(expect, self.srmock.headers)
-
- expect = ('x-symb\xF6l', '\xFF')
- self.assertIn(expect, self.srmock.headers)
+ self.assertEqual(result.headers['Content-Type'], 'application/json')
+ self.assertEqual(result.headers['X-Auth-Token'], 'toomanysecrets')
+ self.assertEqual(result.headers['X-Symbol'], '@')
def test_response_set_and_get_header(self):
- self.resource = HeaderHelpersResource()
- self.api.add_route(self.test_route, self.resource)
+ resource = HeaderHelpersResource()
+ self.api.add_route('/', resource)
for method in ('HEAD', 'POST', 'PUT'):
- self.simulate_request(self.test_route, method=method)
+ result = self.simulate_request(method=method)
content_type = 'x-falcon/peregrine'
- self.assertIn(('content-type', content_type), self.srmock.headers)
- self.assertEquals(self.resource.resp.get_header('content-TyPe'), content_type)
- self.assertIn(('cache-control', 'no-store'), self.srmock.headers)
- self.assertIn(('x-auth-token', 'toomanysecrets'),
- self.srmock.headers)
+ self.assertEqual(result.headers['Content-Type'], content_type)
+ self.assertEqual(resource.resp.get_header('content-TyPe'),
+ content_type)
+
+ self.assertEqual(result.headers['Cache-Control'], 'no-store')
+ self.assertEqual(result.headers['X-Auth-Token'], 'toomanysecrets')
- self.assertEqual(None, self.resource.resp.location)
- self.assertEquals(self.resource.resp.get_header('not-real'), None)
+ self.assertEqual(resource.resp.location, None)
+ self.assertEqual(resource.resp.get_header('not-real'), None)
# Check for duplicate headers
- hist = defaultdict(lambda: 0)
- for name, value in self.srmock.headers:
+ hist = defaultdict(int)
+ for name, value in result.headers.items():
hist[name] += 1
- self.assertEqual(1, hist[name])
+ self.assertEqual(hist[name], 1)
def test_response_append_header(self):
- self.resource = AppendHeaderResource()
- self.api.add_route(self.test_route, self.resource)
+ self.api.add_route('/', AppendHeaderResource())
for method in ('HEAD', 'GET'):
- self.simulate_request(self.test_route, method=method)
- value = self.srmock.headers_dict['x-things']
- self.assertEqual('thing-1,thing-2,thing-3', value)
+ result = self.simulate_request(method=method)
+ value = result.headers['x-things']
+ self.assertEqual(value, 'thing-1,thing-2,thing-3')
- self.simulate_request(self.test_route, method='POST')
- value = self.srmock.headers_dict['x-things']
- self.assertEqual('thing-1', value)
+ result = self.simulate_request(method='POST')
+ self.assertEqual(result.headers['x-things'], 'thing-1')
def test_vary_star(self):
- self.resource = VaryHeaderResource(['*'])
- self.api.add_route(self.test_route, self.resource)
- self.simulate_request(self.test_route)
-
- self.assertIn(('vary', '*'), self.srmock.headers)
-
- def test_vary_header(self):
- self.resource = VaryHeaderResource(['accept-encoding'])
- self.api.add_route(self.test_route, self.resource)
- self.simulate_request(self.test_route)
-
- self.assertIn(('vary', 'accept-encoding'), self.srmock.headers)
-
- def test_vary_headers(self):
- self.resource = VaryHeaderResource(['accept-encoding', 'x-auth-token'])
- self.api.add_route(self.test_route, self.resource)
- self.simulate_request(self.test_route)
-
- vary = 'accept-encoding, x-auth-token'
- self.assertIn(('vary', vary), self.srmock.headers)
-
- def test_vary_headers_tuple(self):
- self.resource = VaryHeaderResource(('accept-encoding', 'x-auth-token'))
- self.api.add_route(self.test_route, self.resource)
- self.simulate_request(self.test_route)
-
- vary = 'accept-encoding, x-auth-token'
- self.assertIn(('vary', vary), self.srmock.headers)
-
- def test_no_content_type(self):
- self.resource = DefaultContentTypeResource()
- self.api.add_route(self.test_route, self.resource)
- self.simulate_request(self.test_route)
-
- self.assertNotIn('content-type', self.srmock.headers_dict)
+ self.api.add_route('/', VaryHeaderResource(['*']))
+ result = self.simulate_get()
+ self.assertEqual(result.headers['vary'], '*')
+
+ @ddt.data(
+ (['accept-encoding'], 'accept-encoding'),
+ (['accept-encoding', 'x-auth-token'], 'accept-encoding, x-auth-token'),
+ (('accept-encoding', 'x-auth-token'), 'accept-encoding, x-auth-token'),
+ )
+ @ddt.unpack
+ def test_vary_header(self, vary, expected_value):
+ resource = VaryHeaderResource(vary)
+ self._check_header(resource, 'Vary', expected_value)
+
+ def test_content_type_no_body(self):
+ self.api.add_route('/', testing.SimpleTestResource())
+ result = self.simulate_get()
+
+ # NOTE(kgriffs): Even when there is no body, Content-Type
+ # should still be included per wsgiref.validate
+ self.assertIn('Content-Type', result.headers)
+ self.assertEqual(result.headers['Content-Length'], '0')
+
+ @ddt.data(falcon.HTTP_204, falcon.HTTP_304)
+ def test_no_content_type(self, status):
+ self.api.add_route('/', testing.SimpleTestResource(status=status))
+
+ result = self.simulate_get()
+ self.assertNotIn('Content-Type', result.headers)
def test_custom_content_type(self):
content_type = 'application/xml; charset=utf-8'
- self.resource = XmlResource(content_type)
- self.api.add_route(self.test_route, self.resource)
-
- self.simulate_request(self.test_route)
- self.assertIn(('content-type', content_type), self.srmock.headers)
+ resource = XmlResource(content_type)
+ self._check_header(resource, 'Content-Type', content_type)
def test_add_link_single(self):
expected_value = '</things/2842>; rel=next'
- self.resource = LinkHeaderResource()
- self.resource.add_link('/things/2842', 'next')
+ resource = LinkHeaderResource()
+ resource.add_link('/things/2842', 'next')
- self._check_link_header(expected_value)
+ self._check_link_header(resource, expected_value)
def test_add_link_multiple(self):
expected_value = (
@@ -506,26 +443,26 @@ class TestHeaders(testing.TestBase):
uri = u'ab\u00e7' if six.PY3 else 'ab\xc3\xa7'
- self.resource = LinkHeaderResource()
- self.resource.add_link('/things/2842', 'next')
- self.resource.add_link(u'http://\u00e7runchy/bacon', 'contents')
- self.resource.add_link(uri, 'http://example.com/ext-type')
- self.resource.add_link(uri, u'http://example.com/\u00e7runchy')
- self.resource.add_link(uri, u'https://example.com/too-\u00e7runchy')
- self.resource.add_link('/alt-thing',
- u'alternate http://example.com/\u00e7runchy')
+ resource = LinkHeaderResource()
+ resource.add_link('/things/2842', 'next')
+ resource.add_link(u'http://\u00e7runchy/bacon', 'contents')
+ resource.add_link(uri, 'http://example.com/ext-type')
+ resource.add_link(uri, u'http://example.com/\u00e7runchy')
+ resource.add_link(uri, u'https://example.com/too-\u00e7runchy')
+ resource.add_link('/alt-thing',
+ u'alternate http://example.com/\u00e7runchy')
- self._check_link_header(expected_value)
+ self._check_link_header(resource, expected_value)
def test_add_link_with_title(self):
expected_value = ('</related/thing>; rel=item; '
'title="A related thing"')
- self.resource = LinkHeaderResource()
- self.resource.add_link('/related/thing', 'item',
- title='A related thing')
+ resource = LinkHeaderResource()
+ resource.add_link('/related/thing', 'item',
+ title='A related thing')
- self._check_link_header(expected_value)
+ self._check_link_header(resource, expected_value)
def test_add_link_with_title_star(self):
expected_value = ('</related/thing>; rel=item; '
@@ -533,54 +470,53 @@ class TestHeaders(testing.TestBase):
'</%C3%A7runchy/thing>; rel=item; '
"title*=UTF-8'en'A%20%C3%A7runchy%20thing")
- self.resource = LinkHeaderResource()
- self.resource.add_link('/related/thing', 'item',
- title_star=('', 'A related thing'))
+ resource = LinkHeaderResource()
+ resource.add_link('/related/thing', 'item',
+ title_star=('', 'A related thing'))
- self.resource.add_link(u'/\u00e7runchy/thing', 'item',
- title_star=('en', u'A \u00e7runchy thing'))
+ resource.add_link(u'/\u00e7runchy/thing', 'item',
+ title_star=('en', u'A \u00e7runchy thing'))
- self._check_link_header(expected_value)
+ self._check_link_header(resource, expected_value)
def test_add_link_with_anchor(self):
expected_value = ('</related/thing>; rel=item; '
'anchor="/some%20thing/or-other"')
- self.resource = LinkHeaderResource()
- self.resource.add_link('/related/thing', 'item',
- anchor='/some thing/or-other')
+ resource = LinkHeaderResource()
+ resource.add_link('/related/thing', 'item',
+ anchor='/some thing/or-other')
- self._check_link_header(expected_value)
+ self._check_link_header(resource, expected_value)
def test_add_link_with_hreflang(self):
expected_value = ('</related/thing>; rel=about; '
'hreflang=en')
- self.resource = LinkHeaderResource()
- self.resource.add_link('/related/thing', 'about',
- hreflang='en')
+ resource = LinkHeaderResource()
+ resource.add_link('/related/thing', 'about', hreflang='en')
- self._check_link_header(expected_value)
+ self._check_link_header(resource, expected_value)
def test_add_link_with_hreflang_multi(self):
expected_value = ('</related/thing>; rel=about; '
'hreflang=en-GB; hreflang=de')
- self.resource = LinkHeaderResource()
- self.resource.add_link('/related/thing', 'about',
- hreflang=('en-GB', 'de'))
+ resource = LinkHeaderResource()
+ resource.add_link('/related/thing', 'about',
+ hreflang=('en-GB', 'de'))
- self._check_link_header(expected_value)
+ self._check_link_header(resource, expected_value)
def test_add_link_with_type_hint(self):
expected_value = ('</related/thing>; rel=alternate; '
'type="video/mp4; codecs=avc1.640028"')
- self.resource = LinkHeaderResource()
- self.resource.add_link('/related/thing', 'alternate',
- type_hint='video/mp4; codecs=avc1.640028')
+ resource = LinkHeaderResource()
+ resource.add_link('/related/thing', 'alternate',
+ type_hint='video/mp4; codecs=avc1.640028')
- self._check_link_header(expected_value)
+ self._check_link_header(resource, expected_value)
def test_add_link_complex(self):
expected_value = ('</related/thing>; rel=alternate; '
@@ -589,21 +525,24 @@ class TestHeaders(testing.TestBase):
'type="application/json"; '
'hreflang=en-GB; hreflang=de')
- self.resource = LinkHeaderResource()
- self.resource.add_link('/related/thing', 'alternate',
- title='A related thing',
- hreflang=('en-GB', 'de'),
- type_hint='application/json',
- title_star=('en', u'A \u00e7runchy thing'))
+ resource = LinkHeaderResource()
+ resource.add_link('/related/thing', 'alternate',
+ title='A related thing',
+ hreflang=('en-GB', 'de'),
+ type_hint='application/json',
+ title_star=('en', u'A \u00e7runchy thing'))
- self._check_link_header(expected_value)
+ self._check_link_header(resource, expected_value)
# ----------------------------------------------------------------------
# Helpers
# ----------------------------------------------------------------------
- def _check_link_header(self, expected_value):
- self.api.add_route(self.test_route, self.resource)
+ def _check_link_header(self, resource, expected_value):
+ self._check_header(resource, 'Link', expected_value)
+
+ def _check_header(self, resource, header, expected_value):
+ self.api.add_route('/', resource)
- self.simulate_request(self.test_route)
- self.assertEqual(expected_value, self.srmock.headers_dict['link'])
+ result = self.simulate_get()
+ self.assertEqual(result.headers[header], expected_value)
diff --git a/tests/test_hello.py b/tests/test_hello.py
index daff9b9..f191916 100644
--- a/tests/test_hello.py
+++ b/tests/test_hello.py
@@ -1,4 +1,4 @@
-from testtools.matchers import Contains
+import ddt
import falcon
import io
@@ -74,156 +74,106 @@ class NoStatusResource(object):
pass
-class TestHelloWorld(testing.TestBase):
+@ddt.ddt
+class TestHelloWorld(testing.TestCase):
- def before(self):
- self.resource = HelloResource('body')
- self.api.add_route(self.test_route, self.resource)
-
- self.bytes_resource = HelloResource('body, bytes')
- self.api.add_route('/bytes', self.bytes_resource)
-
- self.data_resource = HelloResource('data')
- self.api.add_route('/data', self.data_resource)
-
- self.chunked_resource = HelloResource('stream')
- self.api.add_route('/chunked-stream', self.chunked_resource)
-
- self.stream_resource = HelloResource('stream, stream_len')
- self.api.add_route('/stream', self.stream_resource)
-
- self.filelike_resource = HelloResource('stream, stream_len, filelike')
- self.api.add_route('/filelike', self.filelike_resource)
-
- self.filelike_helper_resource = HelloResource(
- 'stream, stream_len, filelike, use_helper')
- self.api.add_route('/filelike-helper', self.filelike_helper_resource)
-
- self.no_status_resource = NoStatusResource()
- self.api.add_route('/nostatus', self.no_status_resource)
-
- self.root_resource = testing.TestResource()
- self.api.add_route('/', self.root_resource)
-
- def after(self):
- pass
+ def setUp(self):
+ super(TestHelloWorld, self).setUp()
def test_env_headers_list_of_tuples(self):
env = testing.create_environ(headers=[('User-Agent', 'Falcon-Test')])
self.assertEqual(env['HTTP_USER_AGENT'], 'Falcon-Test')
- def test_empty_route(self):
- self.simulate_request('')
- self.assertTrue(self.root_resource.called)
-
- def test_route_negative(self):
- bogus_route = self.test_route + 'x'
- self.simulate_request(bogus_route)
+ def test_root_route(self):
+ doc = {u"message": u"Hello world!"}
+ resource = testing.SimpleTestResource(json=doc)
+ self.api.add_route('/', resource)
- # Ensure the request was NOT routed to resource
- self.assertFalse(self.resource.called)
- self.assertEqual(self.srmock.status, falcon.HTTP_404)
+ result = self.simulate_get()
+ self.assertEqual(result.json, doc)
- def test_body(self):
- body = self.simulate_request(self.test_route)
- resp = self.resource.resp
+ def test_no_route(self):
+ result = self.simulate_get('/seenoevil')
+ self.assertEqual(result.status_code, 404)
- content_length = int(self.srmock.headers_dict['content-length'])
- self.assertEqual(content_length, len(self.resource.sample_utf8))
+ @ddt.data(
+ ('/body', HelloResource('body'), lambda r: r.body.encode('utf-8')),
+ ('/bytes', HelloResource('body, bytes'), lambda r: r.body),
+ ('/data', HelloResource('data'), lambda r: r.data),
+ )
+ @ddt.unpack
+ def test_body(self, path, resource, get_body):
+ self.api.add_route(path, resource)
- self.assertEqual(self.srmock.status, self.resource.sample_status)
- self.assertEqual(resp.status, self.resource.sample_status)
- self.assertEqual(resp.body.encode('utf-8'), self.resource.sample_utf8)
- self.assertEqual(body, [self.resource.sample_utf8])
+ result = self.simulate_get(path)
+ resp = resource.resp
- def test_body_bytes(self):
- body = self.simulate_request('/bytes')
- resp = self.bytes_resource.resp
+ content_length = int(result.headers['content-length'])
+ self.assertEqual(content_length, len(resource.sample_utf8))
- content_length = int(self.srmock.headers_dict['content-length'])
- self.assertEqual(content_length, len(self.resource.sample_utf8))
-
- self.assertEqual(self.srmock.status, self.resource.sample_status)
- self.assertEqual(resp.status, self.resource.sample_status)
- self.assertEqual(resp.body, self.resource.sample_utf8)
- self.assertEqual(body, [self.resource.sample_utf8])
-
- def test_data(self):
- body = self.simulate_request('/data')
- resp = self.data_resource.resp
-
- content_length = int(self.srmock.headers_dict['content-length'])
- self.assertEqual(content_length, len(self.resource.sample_utf8))
-
- self.assertEqual(self.srmock.status, self.resource.sample_status)
- self.assertEqual(resp.status, self.resource.sample_status)
- self.assertEqual(resp.data, self.resource.sample_utf8)
- self.assertEqual(body, [self.resource.sample_utf8])
+ self.assertEqual(result.status, resource.sample_status)
+ self.assertEqual(resp.status, resource.sample_status)
+ self.assertEqual(get_body(resp), resource.sample_utf8)
+ self.assertEqual(result.data, resource.sample_utf8)
def test_no_body_on_head(self):
- body = self.simulate_request(self.test_route, method='HEAD')
- self.assertEqual(body, [])
- self.assertEqual(self.srmock.status, falcon.HTTP_200)
+ self.api.add_route('/body', HelloResource('body'))
+ result = self.simulate_head('/body')
- def test_stream_chunked(self):
- src = self.simulate_request('/chunked-stream')
+ self.assertFalse(result.data)
+ self.assertEqual(result.status_code, 200)
- dest = io.BytesIO()
- for chunk in src:
- dest.write(chunk)
+ def test_stream_chunked(self):
+ resource = HelloResource('stream')
+ self.api.add_route('/chunked-stream', resource)
- self.assertEqual(dest.getvalue(), self.chunked_resource.sample_utf8)
+ result = self.simulate_get('/chunked-stream')
- for header in self.srmock.headers:
- self.assertNotEqual(header[0].lower(), 'content-length')
+ self.assertEqual(result.data, resource.sample_utf8)
+ self.assertNotIn('content-length', result.headers)
def test_stream_known_len(self):
- src = self.simulate_request('/stream')
- self.assertTrue(self.stream_resource.called)
+ resource = HelloResource('stream, stream_len')
+ self.api.add_route('/stream', resource)
- dest = io.BytesIO()
- for chunk in src:
- dest.write(chunk)
+ result = self.simulate_get('/stream')
+ self.assertTrue(resource.called)
- expected_len = self.stream_resource.resp.stream_len
- content_length = ('content-length', str(expected_len))
- self.assertThat(self.srmock.headers, Contains(content_length))
- self.assertEqual(dest.tell(), expected_len)
-
- self.assertEqual(dest.getvalue(),
- self.chunked_resource.sample_utf8)
+ expected_len = resource.resp.stream_len
+ actual_len = int(result.headers['content-length'])
+ self.assertEqual(actual_len, expected_len)
+ self.assertEqual(len(result.data), expected_len)
+ self.assertEqual(result.data, resource.sample_utf8)
def test_filelike(self):
- for file_wrapper in (None, FileWrapper):
- url = '/filelike'
-
- src = self.simulate_request(url, file_wrapper=file_wrapper)
- self.assertTrue(self.filelike_resource.called)
+ resource = HelloResource('stream, stream_len, filelike')
+ self.api.add_route('/filelike', resource)
- dest = io.BytesIO()
- for chunk in src:
- dest.write(chunk)
+ for file_wrapper in (None, FileWrapper):
+ result = self.simulate_get('/filelike', file_wrapper=file_wrapper)
+ self.assertTrue(resource.called)
- expected_len = self.filelike_resource.resp.stream_len
- content_length = ('content-length', str(expected_len))
- self.assertThat(self.srmock.headers, Contains(content_length))
- self.assertEqual(dest.tell(), expected_len)
+ expected_len = resource.resp.stream_len
+ actual_len = int(result.headers['content-length'])
+ self.assertEqual(actual_len, expected_len)
+ self.assertEqual(len(result.data), expected_len)
def test_filelike_using_helper(self):
- src = self.simulate_request('/filelike-helper')
- self.assertTrue(self.filelike_helper_resource.called)
+ resource = HelloResource('stream, stream_len, filelike, use_helper')
+ self.api.add_route('/filelike-helper', resource)
- dest = io.BytesIO()
- for chunk in src:
- dest.write(chunk)
+ result = self.simulate_get('/filelike-helper')
+ self.assertTrue(resource.called)
- expected_len = self.filelike_helper_resource.resp.stream_len
- content_length = ('content-length', str(expected_len))
- self.assertThat(self.srmock.headers, Contains(content_length))
- self.assertEqual(dest.tell(), expected_len)
+ expected_len = resource.resp.stream_len
+ actual_len = int(result.headers['content-length'])
+ self.assertEqual(actual_len, expected_len)
+ self.assertEqual(len(result.data), expected_len)
def test_status_not_set(self):
- body = self.simulate_request('/nostatus')
+ self.api.add_route('/nostatus', NoStatusResource())
+
+ result = self.simulate_get('/nostatus')
- self.assertEqual(body, [])
- self.assertEqual(self.srmock.status, falcon.HTTP_200)
+ self.assertFalse(result.data)
+ self.assertEqual(result.status_code, 200)
diff --git a/tests/test_utils.py b/tests/test_utils.py
index b082c38..9ba7e0c 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -3,6 +3,7 @@
from datetime import datetime
import functools
import io
+import json
import random
import sys
@@ -10,7 +11,7 @@ import testtools
import six
import falcon
-import falcon.testing
+from falcon import testing
from falcon import util
from falcon.util import uri
@@ -297,25 +298,25 @@ class TestFalconUtils(testtools.TestCase):
('falcon.example.com', 42))
-class TestFalconTesting(falcon.testing.TestBase):
+class TestFalconTesting(testing.TestBase):
"""Catch some uncommon branches not covered elsewhere."""
def test_path_escape_chars_in_create_environ(self):
- env = falcon.testing.create_environ('/hello%20world%21')
+ env = testing.create_environ('/hello%20world%21')
self.assertEqual(env['PATH_INFO'], '/hello world!')
def test_unicode_path_in_create_environ(self):
if six.PY3:
self.skip('Test does not apply to Py3K')
- env = falcon.testing.create_environ(u'/fancy/unícode')
+ env = testing.create_environ(u'/fancy/unícode')
self.assertEqual(env['PATH_INFO'], '/fancy/un\xc3\xadcode')
- env = falcon.testing.create_environ(u'/simple')
+ env = testing.create_environ(u'/simple')
self.assertEqual(env['PATH_INFO'], '/simple')
def test_none_header_value_in_create_environ(self):
- env = falcon.testing.create_environ('/', headers={'X-Foo': None})
+ env = testing.create_environ('/', headers={'X-Foo': None})
self.assertEqual(env['HTTP_X_FOO'], '')
def test_decode_empty_result(self):
@@ -323,4 +324,69 @@ class TestFalconTesting(falcon.testing.TestBase):
self.assertEqual(body, '')
def test_httpnow_alias_for_backwards_compat(self):
- self.assertIs(falcon.testing.httpnow, util.http_now)
+ self.assertIs(testing.httpnow, util.http_now)
+
+
+class TestFalconTestCase(testing.TestCase):
+ """Verify some branches not covered elsewhere."""
+
+ def test_status(self):
+ resource = testing.SimpleTestResource(status=falcon.HTTP_702)
+ self.api.add_route('/', resource)
+
+ result = self.simulate_get()
+ self.assertEqual(result.status, falcon.HTTP_702)
+
+ def test_wsgi_iterable_not_closeable(self):
+ result = testing.Result([], falcon.HTTP_200, [])
+ self.assertFalse(result.data)
+
+ def test_path_must_start_with_slash(self):
+ self.assertRaises(ValueError, self.simulate_get, 'foo')
+
+ def test_cached_text_in_result(self):
+ self.api.add_route('/', testing.SimpleTestResource(body='test'))
+
+ result = self.simulate_get()
+ self.assertEqual(result.text, result.text)
+
+ def test_simple_resource_body_json_xor(self):
+ self.assertRaises(
+ ValueError,
+ testing.SimpleTestResource,
+ body='',
+ json={},
+ )
+
+ def test_query_string(self):
+ class SomeResource(object):
+ def on_get(self, req, resp):
+ doc = {}
+
+ doc['oid'] = req.get_param_as_int('oid')
+ doc['detailed'] = req.get_param_as_bool('detailed')
+
+ resp.body = json.dumps(doc)
+
+ self.api.add_route('/', SomeResource())
+
+ result = self.simulate_get(query_string='oid=42&detailed=no')
+ self.assertEqual(result.json['oid'], 42)
+ self.assertFalse(result.json['detailed'])
+
+ def test_query_string_no_question(self):
+ self.assertRaises(ValueError, self.simulate_get, query_string='?x=1')
+
+ def test_query_string_in_path(self):
+ self.assertRaises(ValueError, self.simulate_get, path='/thing?x=1')
+
+
+class FancyAPI(falcon.API):
+ pass
+
+
+class FancyTestCase(testing.TestCase):
+ api_class = FancyAPI
+
+ def test_something(self):
+ self.assertTrue(isinstance(self.api, FancyAPI))

meta: {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 10
}

version: 0.3

install_config: {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"coverage",
"ddt",
"pyyaml",
"requests",
"testtools",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"tools/test-requires"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
ddt==1.7.2
exceptiongroup==1.2.2
-e git+https://github.com/falconry/falcon.git@4f5d704c6f2ffa168846641afb8acad1101ee394#egg=falcon
idna==3.10
iniconfig==2.1.0
nose==1.3.7
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
python-mimeparse==2.0.0
PyYAML==6.0.2
requests==2.32.3
six==1.17.0
testtools==2.7.2
tomli==2.2.1
urllib3==2.3.0

environment:
name: falcon
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- ddt==1.7.2
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- nose==1.3.7
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- python-mimeparse==2.0.0
- pyyaml==6.0.2
- requests==2.32.3
- six==1.17.0
- testtools==2.7.2
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/falcon

FAIL_TO_PASS: [
"tests/test_after_hooks.py::TestHooks::test_output_validator",
"tests/test_after_hooks.py::TestHooks::test_wrapped_resource",
"tests/test_after_hooks.py::TestHooks::test_wrapped_resource_with_hooks_aware_of_resource",
"tests/test_after_hooks.py::TestGlobalHooks::test_global_after_hooks_run_after_exception",
"tests/test_after_hooks.py::TestGlobalHooks::test_global_hook_wrap_default_405",
"tests/test_after_hooks.py::TestGlobalHooks::test_multiple_global_hooks_wrap_default_405",
"tests/test_error_handlers.py::TestErrorHandler::test_converted_error",
"tests/test_headers.py::TestHeaders::test_content_type_no_body",
"tests/test_headers.py::TestHeaders::test_headers_as_list",
"tests/test_headers.py::TestHeaders::test_response_append_header",
"tests/test_headers.py::TestHeaders::test_unicode_location_headers",
"tests/test_hello.py::TestHelloWorld::test_no_route",
"tests/test_hello.py::TestHelloWorld::test_status_not_set",
"tests/test_utils.py::TestFalconTestCase::test_status"
]

FAIL_TO_FAIL: [
"tests/test_utils.py::TestFalconUtils::test_deprecated_decorator"
]

PASS_TO_PASS: [
"tests/test_after_hooks.py::TestHooks::test_hook_as_callable_class",
"tests/test_after_hooks.py::TestHooks::test_serializer",
"tests/test_after_hooks.py::TestGlobalHooks::test_customized_options",
"tests/test_after_hooks.py::TestGlobalHooks::test_global_hook",
"tests/test_after_hooks.py::TestGlobalHooks::test_global_hook_is_resource_aware",
"tests/test_after_hooks.py::TestGlobalHooks::test_global_hook_wrap_default_on_options",
"tests/test_after_hooks.py::TestGlobalHooks::test_invalid_type",
"tests/test_after_hooks.py::TestGlobalHooks::test_multiple_global_hook",
"tests/test_after_hooks.py::TestGlobalHooks::test_multiple_global_hooks_wrap_default_on_options",
"tests/test_error_handlers.py::TestErrorHandler::test_caught_error",
"tests/test_error_handlers.py::TestErrorHandler::test_error_order",
"tests/test_error_handlers.py::TestErrorHandler::test_handle_not_defined",
"tests/test_error_handlers.py::TestErrorHandler::test_subclass_error",
"tests/test_error_handlers.py::TestErrorHandler::test_uncaught_error",
"tests/test_error_handlers.py::TestErrorHandler::test_uncaught_error_else",
"tests/test_headers.py::TestHeaders::test_add_link_complex",
"tests/test_headers.py::TestHeaders::test_add_link_multiple",
"tests/test_headers.py::TestHeaders::test_add_link_single",
"tests/test_headers.py::TestHeaders::test_add_link_with_anchor",
"tests/test_headers.py::TestHeaders::test_add_link_with_hreflang",
"tests/test_headers.py::TestHeaders::test_add_link_with_hreflang_multi",
"tests/test_headers.py::TestHeaders::test_add_link_with_title",
"tests/test_headers.py::TestHeaders::test_add_link_with_title_star",
"tests/test_headers.py::TestHeaders::test_add_link_with_type_hint",
"tests/test_headers.py::TestHeaders::test_content_header_missing",
"tests/test_headers.py::TestHeaders::test_content_length",
"tests/test_headers.py::TestHeaders::test_custom_content_type",
"tests/test_headers.py::TestHeaders::test_default_media_type",
"tests/test_headers.py::TestHeaders::test_default_value",
"tests/test_headers.py::TestHeaders::test_no_content_length_1_204_No_Content",
"tests/test_headers.py::TestHeaders::test_no_content_length_2_304_Not_Modified",
"tests/test_headers.py::TestHeaders::test_no_content_type_1_204_No_Content",
"tests/test_headers.py::TestHeaders::test_no_content_type_2_304_Not_Modified",
"tests/test_headers.py::TestHeaders::test_override_default_media_type_1___text_plain__charset_UTF_8____Hello_Unicode_____",
"tests/test_headers.py::TestHeaders::test_override_default_media_type_2___text_plain____Hello_ISO_8859_1___",
"tests/test_headers.py::TestHeaders::test_override_default_media_type_missing_encoding",
"tests/test_headers.py::TestHeaders::test_passthrough_request_headers",
"tests/test_headers.py::TestHeaders::test_required_header",
"tests/test_headers.py::TestHeaders::test_response_header_helpers_on_get",
"tests/test_headers.py::TestHeaders::test_response_set_and_get_header",
"tests/test_headers.py::TestHeaders::test_unicode_headers",
"tests/test_headers.py::TestHeaders::test_vary_header_1____accept_encoding_____accept_encoding__",
"tests/test_headers.py::TestHeaders::test_vary_header_2____accept_encoding____x_auth_token_____accept_encoding__x_auth_token__",
"tests/test_headers.py::TestHeaders::test_vary_header_3____accept_encoding____x_auth_token_____accept_encoding__x_auth_token__",
"tests/test_headers.py::TestHeaders::test_vary_star",
"tests/test_hello.py::TestHelloWorld::test_body_1",
"tests/test_hello.py::TestHelloWorld::test_body_2",
"tests/test_hello.py::TestHelloWorld::test_body_3",
"tests/test_hello.py::TestHelloWorld::test_env_headers_list_of_tuples",
"tests/test_hello.py::TestHelloWorld::test_filelike",
"tests/test_hello.py::TestHelloWorld::test_filelike_using_helper",
"tests/test_hello.py::TestHelloWorld::test_no_body_on_head",
"tests/test_hello.py::TestHelloWorld::test_root_route",
"tests/test_hello.py::TestHelloWorld::test_stream_chunked",
"tests/test_hello.py::TestHelloWorld::test_stream_known_len",
"tests/test_utils.py::TestFalconUtils::test_dt_to_http",
"tests/test_utils.py::TestFalconUtils::test_http_date_to_dt",
"tests/test_utils.py::TestFalconUtils::test_http_now",
"tests/test_utils.py::TestFalconUtils::test_pack_query_params_none",
"tests/test_utils.py::TestFalconUtils::test_pack_query_params_one",
"tests/test_utils.py::TestFalconUtils::test_pack_query_params_several",
"tests/test_utils.py::TestFalconUtils::test_parse_host",
"tests/test_utils.py::TestFalconUtils::test_parse_query_string",
"tests/test_utils.py::TestFalconUtils::test_prop_uri_decode_models_stdlib_unquote_plus",
"tests/test_utils.py::TestFalconUtils::test_prop_uri_encode_models_stdlib_quote",
"tests/test_utils.py::TestFalconUtils::test_prop_uri_encode_value_models_stdlib_quote_safe_tilde",
"tests/test_utils.py::TestFalconUtils::test_uri_decode",
"tests/test_utils.py::TestFalconUtils::test_uri_encode",
"tests/test_utils.py::TestFalconUtils::test_uri_encode_value",
"tests/test_utils.py::TestFalconTesting::test_decode_empty_result",
"tests/test_utils.py::TestFalconTesting::test_httpnow_alias_for_backwards_compat",
"tests/test_utils.py::TestFalconTesting::test_none_header_value_in_create_environ",
"tests/test_utils.py::TestFalconTesting::test_path_escape_chars_in_create_environ",
"tests/test_utils.py::TestFalconTestCase::test_cached_text_in_result",
"tests/test_utils.py::TestFalconTestCase::test_path_must_start_with_slash",
"tests/test_utils.py::TestFalconTestCase::test_query_string",
"tests/test_utils.py::TestFalconTestCase::test_query_string_in_path",
"tests/test_utils.py::TestFalconTestCase::test_query_string_no_question",
"tests/test_utils.py::TestFalconTestCase::test_simple_resource_body_json_xor",
"tests/test_utils.py::TestFalconTestCase::test_wsgi_iterable_not_closeable",
"tests/test_utils.py::FancyTestCase::test_something"
]

PASS_TO_FAIL: []

license_name: Apache License 2.0

__index_level_0__: 404

before_filepaths: [
"falcon/__init__.py",
"doc/api/util.rst",
"falcon/response.py",
"falcon/api.py",
"doc/api/cookies.rst",
"tox.ini",
"falcon/util/uri.py",
"doc/api/index.rst",
"falcon/util/structures.py",
"falcon/api_helpers.py"
]

after_filepaths: [
"falcon/__init__.py",
"doc/api/util.rst",
"falcon/response.py",
"falcon/api.py",
"doc/api/cookies.rst",
"tox.ini",
"falcon/util/uri.py",
"doc/api/index.rst",
"falcon/util/structures.py",
"falcon/api_helpers.py"
]